diff --git a/.codecov.yml b/.codecov.yml index dceffa66be6f2..6700ed0f05e74 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -7,6 +7,7 @@ ignore: - "pkg/apis/client/.*" - "pkg/client/.*" - "vendor/.*" +- "test/.*" coverage: status: # we've found this not to be useful diff --git a/.dockerignore b/.dockerignore index 4bb2469b09e25..e778fce267438 100644 --- a/.dockerignore +++ b/.dockerignore @@ -11,3 +11,19 @@ cmd/**/debug debug.test coverage.out ui/node_modules/ +test-results/ +test/ +manifests/ +hack/ +docs/ +examples/ +.github/ +!test/container +!test/e2e/testdata +!test/fixture +!test/remote +!hack/installers +!hack/gpg-wrapper.sh +!hack/git-verify-wrapper.sh +!hack/tool-versions.sh +!hack/install.sh \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index f5344da2b86b7..41a1b4ae95ec7 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -6,7 +6,7 @@ labels: 'bug' assignees: '' --- -If you are trying to resolve an environment-specific issue or have a one-off question about the edge case that does not require a feature then please consider asking a question in argocd slack [channel](https://argoproj.github.io/community/join-slack). + Checklist: @@ -16,19 +16,19 @@ Checklist: **Describe the bug** -A clear and concise description of what the bug is. + **To Reproduce** -A list of the steps required to reproduce the issue. Best of all, give us the URL to a repository that exhibits this issue. + **Expected behavior** -A clear and concise description of what you expected to happen. + **Screenshots** -If applicable, add screenshots to help explain your problem. 
+ **Version** diff --git a/.github/ISSUE_TEMPLATE/release.md b/.github/ISSUE_TEMPLATE/release.md new file mode 100644 index 0000000000000..dd24ed32aee77 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/release.md @@ -0,0 +1,32 @@ +--- +name: Argo CD Release +about: Used by our Release Champion to track progress of a minor release +title: 'Argo CD Release vX.X' +labels: 'release' +assignees: '' +--- + +Target RC1 date: ___. __, ____ +Target GA date: ___. __, ____ + + - [ ] Create new section in the [Release Planning doc](https://docs.google.com/document/d/1trJIomcgXcfvLw0aYnERrFWfPjQOfYMDJOCh1S8nMBc/edit?usp=sharing) + - [ ] Schedule a Release Planning meeting roughly two weeks before the scheduled Release freeze date by adding it to the community calendar (or delegate this task to someone with write access to the community calendar) + - [ ] Include Zoom link in the invite + - [ ] Post in #argo-cd and #argo-contributors one week before the meeting + - [ ] Post again one hour before the meeting + - [ ] At the meeting, remove issues/PRs from the project's column for that release which have not been “claimed” by at least one Approver (add it to the next column if Approver requests that) + - [ ] 1wk before feature freeze post in #argo-contributors that PRs must be merged by DD-MM-YYYY to be included in the release - ask approvers to drop items from milestone they can’t merge + - [ ] At least two days before RC1 date, draft RC blog post and submit it for review (or delegate this task) + - [ ] Cut RC1 (or delegate this task to an Approver and coordinate timing) + - [ ] Create new release branch + - [ ] Add the release branch to ReadTheDocs + - [ ] Confirm that tweet and blog post are ready + - [ ] Trigger the release + - [ ] After the release is finished, publish tweet and blog post + - [ ] Post in #argo-cd and #argo-announcements with lots of emojis announcing the release and requesting help testing + - [ ] Monitor support channels for issues, cherry-picking bugfixes and docs 
fixes as appropriate (or delegate this task to an Approver and coordinate timing) + - [ ] At release date, evaluate if any bugs justify delaying the release. If not, cut the release (or delegate this task to an Approver and coordinate timing) + - [ ] If unreleased changes are on the release branch for {current minor version minus 3}, cut a final patch release for that series (or delegate this task to an Approver and coordinate timing) + - [ ] After the release, post in #argo-cd that the {current minor version minus 3} has reached EOL (example: https://cloud-native.slack.com/archives/C01TSERG0KZ/p1667336234059729) + - [ ] (For the next release champion) Review the [items scheduled for the next release](https://github.com/orgs/argoproj/projects/25). If any item does not have an assignee who can commit to finish the feature, move it to the next release. + - [ ] (For the next release champion) Schedule a time mid-way through the release cycle to review items again. \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/security_logs.md b/.github/ISSUE_TEMPLATE/security_logs.md new file mode 100644 index 0000000000000..bfb5d2f339c62 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/security_logs.md @@ -0,0 +1,19 @@ +--- +name: Security log +about: Propose adding security-related logs or tagging existing logs with security fields +title: "seclog: [Event Description]" +labels: security-log +assignees: notfromstatefarm +--- +# Event to be logged + +Specify the event that needs to be logged or existing logs that need to be tagged. + +# Proposed level + +What security level should these events be logged under? Refer to https://argo-cd.readthedocs.io/en/latest/operator-manual/security/#security-field for more info. + +# Common Weakness Enumeration + +Is there an associated [CWE](https://cwe.mitre.org/) that could be tagged as well? 
+ diff --git a/.github/cherry-pick-bot.yml b/.github/cherry-pick-bot.yml new file mode 100644 index 0000000000000..a9de2afd04927 --- /dev/null +++ b/.github/cherry-pick-bot.yml @@ -0,0 +1,3 @@ +enabled: true +preservePullRequestTitle: true + diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000000..5540fb7fd93e6 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,43 @@ +version: 2 +updates: + - package-ecosystem: "gomod" + directory: "/" + schedule: + interval: "daily" + ignore: + - dependency-name: k8s.io/* + + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "daily" + + - package-ecosystem: "npm" + directory: "/ui/" + schedule: + interval: "daily" + + - package-ecosystem: "docker" + directory: "/" + schedule: + interval: "daily" + + - package-ecosystem: "docker" + directory: "/test/container/" + schedule: + interval: "daily" + + - package-ecosystem: "docker" + directory: "/test/e2e/multiarch-container/" + schedule: + interval: "daily" + + - package-ecosystem: "docker" + directory: "/test/remote/" + schedule: + interval: "daily" + + - package-ecosystem: "docker" + directory: "/ui-test/" + schedule: + interval: "daily" diff --git a/.github/pr-title-checker-config.json b/.github/pr-title-checker-config.json new file mode 100644 index 0000000000000..c3437def33834 --- /dev/null +++ b/.github/pr-title-checker-config.json @@ -0,0 +1,15 @@ +{ + "LABEL": { + "name": "title needs formatting", + "color": "EEEEEE" + }, + "CHECKS": { + "prefixes": ["[Bot] docs: "], + "regexp": "^(feat|fix|docs|test|ci|chore)!?(\\(.*\\))?!?:.*" + }, + "MESSAGES": { + "success": "PR title is valid", + "failure": "PR title is invalid", + "notice": "PR Title needs to pass regex '^(feat|fix|docs|test|ci|chore)!?(\\(.*\\))?!?:.*" + } + } diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index e64d61328d9f1..406306bbeca2e 100644 --- a/.github/pull_request_template.md +++ 
b/.github/pull_request_template.md @@ -1,17 +1,23 @@ + Checklist: * [ ] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes. * [ ] The title of the PR states what changed and the related issues number (used for the release note). +* [ ] The title of the PR conforms to the [Toolchain Guide](https://argo-cd.readthedocs.io/en/latest/developer-guide/toolchain-guide/#title-of-the-pr) * [ ] I've included "Closes [ISSUE #]" or "Fixes [ISSUE #]" in the description to automatically close the associated issue. * [ ] I've updated both the CLI and UI to expose my feature, or I plan to submit a second PR with them. * [ ] Does this PR require documentation updates? * [ ] I've updated documentation as required by this PR. * [ ] Optional. My organization is added to USERS.md. -* [ ] I have signed off all my commits as required by [DCO](https://github.com/argoproj/argoproj/tree/master/community#contributing-to-argo) +* [ ] I have signed off all my commits as required by [DCO](https://github.com/argoproj/argoproj/blob/master/community/CONTRIBUTING.md#legal) * [ ] I have written unit and/or e2e tests for my change. PRs without these are unlikely to be merged. -* [ ] My build is green ([troubleshooting builds](https://argo-cd.readthedocs.io/en/latest/developer-guide/ci/)). +* [ ] My build is green ([troubleshooting builds](https://argo-cd.readthedocs.io/en/latest/developer-guide/ci/)). +* [ ] My new feature complies with the [feature status](https://github.com/argoproj/argoproj/blob/master/community/feature-status.md) guidelines. +* [ ] I have added a brief description of why this PR is necessary and/or what this PR solves. 
+ diff --git a/.github/workflows/README.md b/.github/workflows/README.md new file mode 100644 index 0000000000000..6d4302d2b540c --- /dev/null +++ b/.github/workflows/README.md @@ -0,0 +1,38 @@ +# Workflows + +| Workflow | Description | +|--------------------|----------------------------------------------------------------| +| ci-build.yaml | Build, lint, test, codegen, build-ui, analyze, e2e-test | +| codeql.yaml | CodeQL analysis | +| image-reuse.yaml | Build, push, and Sign container images | +| image.yaml | Build container image for PR's & publish for push events | +| pr-title-check.yaml| Lint PR for semantic information | +| init-release.yaml | Build manifests and version then create a PR for release branch| +| release.yaml | Build images, cli-binaries, provenances, and post actions | +| update-snyk.yaml | Scheduled snyk reports | + +# Reusable workflows + +## image-reuse.yaml + +- The reusable workflow can be used to publish or build images with multiple container registries(Quay,GHCR, dockerhub), and then sign them with cosign when an image is published. +- A GO version `must` be specified e.g. 1.21 +- The image name for each registry *must* contain the tag. Note: multiple tags are allowed for each registry using a CSV type. +- Multiple platforms can be specified e.g. linux/amd64,linux/arm64 +- Images are not published by default. A boolean value must be set to `true` to push images. +- An optional target can be specified. 
+ +| Inputs | Description | Type | Required | Defaults | +|-------------------|-------------------------------------|-------------|----------|-----------------| +| go-version | Version of Go to be used | string | true | none | +| quay_image_name | Full image name and tag | CSV, string | false | none | +| ghcr_image_name | Full image name and tag | CSV, string | false | none | +| docker_image_name | Full image name and tag | CSV, string | false | none | +| platforms | Platforms to build (linux/amd64) | CSV, string | false | linux/amd64 | +| push | Whether to push image/s to registry | boolean | false | false | +| target | Target build stage | string | false | none | + +| Outputs | Description | Type | +|-------------|------------------------------------------|-------| +|image-digest | Image digest of image container created | string| + diff --git a/.github/workflows/ci-build.yaml b/.github/workflows/ci-build.yaml index fd40ecff26d62..adffe526da728 100644 --- a/.github/workflows/ci-build.yaml +++ b/.github/workflows/ci-build.yaml @@ -9,28 +9,30 @@ on: pull_request: branches: - 'master' + - 'release-*' + +env: + # Golang version to use across CI steps + GOLANG_VERSION: '1.21' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +permissions: + contents: read jobs: - build-docker: - name: Build Docker image - runs-on: ubuntu-latest - if: github.head_ref != '' - steps: - - name: Checkout code - uses: actions/checkout@v2 - - name: Build Docker image - run: | - make image check-go: name: Ensure Go modules synchronicity - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 - name: Setup Golang - uses: actions/setup-go@v1 + uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.0.0 with: - go-version: '1.16.2' + go-version: ${{ env.GOLANG_VERSION }} - name: Download all Go modules run: | go mod 
download @@ -41,16 +43,16 @@ jobs: build-go: name: Build & cache Go code - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 - name: Setup Golang - uses: actions/setup-go@v1 + uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.0.0 with: - go-version: '1.16.2' + go-version: ${{ env.GOLANG_VERSION }} - name: Restore go build cache - uses: actions/cache@v1 + uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 with: path: ~/.cache/go-build key: ${{ runner.os }}-go-build-v1-${{ github.run_id }} @@ -61,33 +63,43 @@ jobs: run: make build-local lint-go: + permissions: + contents: read # for actions/checkout to fetch code + pull-requests: read # for golangci/golangci-lint-action to fetch pull requests name: Lint Go code - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 + - name: Setup Golang + uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.0.0 + with: + go-version: ${{ env.GOLANG_VERSION }} - name: Run golangci-lint - uses: golangci/golangci-lint-action@v2 + uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0 with: - version: v1.38.0 - args: --timeout 5m --exclude SA5011 + version: v1.54.0 + args: --enable gofmt --timeout 10m --exclude SA5011 --verbose --max-issues-per-linter 0 --max-same-issues 0 test-go: name: Run unit tests for Go packages - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 needs: - build-go + env: + GITHUB_TOKEN: ${{ secrets.E2E_TEST_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} + GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }} steps: - name: Create checkout directory run: mkdir -p ~/go/src/github.com/argoproj - name: Checkout code - uses: actions/checkout@v2 + uses: 
actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 - name: Create symlink in GOPATH run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd - name: Setup Golang - uses: actions/setup-go@v1 + uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.0.0 with: - go-version: '1.16.2' + go-version: ${{ env.GOLANG_VERSION }} - name: Install required packages run: | sudo apt-get install git -y @@ -105,13 +117,17 @@ jobs: run: | echo "/usr/local/bin" >> $GITHUB_PATH - name: Restore go build cache - uses: actions/cache@v1 + uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 with: path: ~/.cache/go-build key: ${{ runner.os }}-go-build-v1-${{ github.run_id }} - name: Install all tools required for building & testing run: | make install-test-tools-local + # We install kustomize in the dist directory + - name: Add dist to PATH + run: | + echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH - name: Setup git username and email run: | git config --global user.name "John Doe" @@ -122,32 +138,35 @@ jobs: - name: Run all unit tests run: make test-local - name: Generate code coverage artifacts - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 with: name: code-coverage path: coverage.out - name: Generate test results artifacts - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 with: name: test-results path: test-results/ test-go-race: - name: Run unit tests with -race, for Go packages - runs-on: ubuntu-latest + name: Run unit tests with -race for Go packages + runs-on: ubuntu-22.04 needs: - build-go + env: + GITHUB_TOKEN: ${{ secrets.E2E_TEST_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} + GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }} steps: - name: Create checkout directory run: mkdir -p ~/go/src/github.com/argoproj - name: Checkout code - uses: actions/checkout@v2 + uses: 
actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 - name: Create symlink in GOPATH run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd - name: Setup Golang - uses: actions/setup-go@v1 + uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.0.0 with: - go-version: '1.16.2' + go-version: ${{ env.GOLANG_VERSION }} - name: Install required packages run: | sudo apt-get install git -y @@ -165,13 +184,17 @@ jobs: run: | echo "/usr/local/bin" >> $GITHUB_PATH - name: Restore go build cache - uses: actions/cache@v1 + uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 with: path: ~/.cache/go-build key: ${{ runner.os }}-go-build-v1-${{ github.run_id }} - name: Install all tools required for building & testing run: | make install-test-tools-local + # We install kustomize in the dist directory + - name: Add dist to PATH + run: | + echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH - name: Setup git username and email run: | git config --global user.name "John Doe" @@ -182,21 +205,21 @@ jobs: - name: Run all unit tests run: make test-race-local - name: Generate test results artifacts - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 with: name: race-results path: test-results/ codegen: name: Check changes to generated code - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 - name: Setup Golang - uses: actions/setup-go@v1 + uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.0.0 with: - go-version: '1.16.2' + go-version: ${{ env.GOLANG_VERSION }} - name: Create symlink in GOPATH run: | mkdir -p ~/go/src/github.com/argoproj @@ -218,9 +241,10 @@ jobs: make install-codegen-tools-local make install-go-tools-local working-directory: /home/runner/go/src/github.com/argoproj/argo-cd - - name: Initialize local Helm 
+ # We install kustomize in the dist directory + - name: Add dist to PATH run: | - helm2 init --client-only + echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH - name: Run codegen run: | set -x @@ -236,17 +260,17 @@ jobs: build-ui: name: Build, test & lint UI code - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 - name: Setup NodeJS - uses: actions/setup-node@v1 + uses: actions/setup-node@5e21ff4d9bc1a8cf6de233a3057d20ec6b3fb69d # v3.8.1 with: - node-version: '12.18.4' + node-version: '20.7.0' - name: Restore node dependency cache id: cache-dependencies - uses: actions/cache@v1 + uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 with: path: ui/node_modules key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }} @@ -260,6 +284,7 @@ jobs: env: NODE_ENV: production NODE_ONLINE_ENV: online + HOST_ARCH: amd64 working-directory: ui/ - name: Run ESLint run: yarn lint @@ -267,7 +292,7 @@ jobs: analyze: name: Process & analyze test artifacts - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 needs: - test-go - build-ui @@ -275,12 +300,12 @@ jobs: sonar_secret: ${{ secrets.SONAR_TOKEN }} steps: - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: fetch-depth: 0 - name: Restore node dependency cache id: cache-dependencies - uses: actions/cache@v1 + uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 with: path: ui/node_modules key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }} @@ -291,16 +316,16 @@ jobs: run: | mkdir -p test-results - name: Get code coverage artifiact - uses: actions/download-artifact@v2 + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 with: name: code-coverage - name: Get test result artifact - uses: actions/download-artifact@v2 + uses: 
actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 with: name: test-results path: test-results - name: Upload code coverage information to codecov.io - uses: codecov/codecov-action@v1 + uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d # v3.1.4 with: file: coverage.out - name: Perform static code analysis using SonarCloud @@ -333,10 +358,11 @@ jobs: test-e2e: name: Run end-to-end tests - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 strategy: + fail-fast: false matrix: - k3s-version: [v1.20.2, v1.19.2, v1.18.9, v1.17.11, v1.16.15] + k3s-version: [v1.28.2, v1.27.6, v1.26.9, v1.25.14] needs: - build-go env: @@ -348,14 +374,17 @@ jobs: ARGOCD_E2E_K3S: "true" ARGOCD_IN_CI: "true" ARGOCD_E2E_APISERVER_PORT: "8088" + ARGOCD_APPLICATION_NAMESPACES: "argocd-e2e-external" ARGOCD_SERVER: "127.0.0.1:8088" + GITHUB_TOKEN: ${{ secrets.E2E_TEST_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} + GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }} steps: - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 - name: Setup Golang - uses: actions/setup-go@v1 + uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.0.0 with: - go-version: '1.16.2' + go-version: ${{ env.GOLANG_VERSION }} - name: GH actions workaround - Kill XSP4 process run: | sudo pkill mono || true @@ -369,9 +398,10 @@ jobs: sudo mkdir -p $HOME/.kube && sudo chown -R runner $HOME/.kube sudo k3s kubectl config view --raw > $HOME/.kube/config sudo chown runner $HOME/.kube/config + sudo chmod go-r $HOME/.kube/config kubectl version - name: Restore go build cache - uses: actions/cache@v1 + uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 with: path: ~/.cache/go-build key: ${{ runner.os }}-go-build-v1-${{ github.run_id }} @@ -381,10 +411,13 @@ jobs: - name: Add /usr/local/bin to PATH run: | echo "/usr/local/bin" >> $GITHUB_PATH + - name: Add ./dist to PATH + run: | + echo 
"$(pwd)/dist" >> $GITHUB_PATH - name: Download Go dependencies run: | go mod download - go get github.com/mattn/goreman + go install github.com/mattn/goreman@latest - name: Install all tools required for building & testing run: | make install-test-tools-local @@ -394,9 +427,9 @@ jobs: git config --global user.email "john.doe@example.com" - name: Pull Docker image required for tests run: | - docker pull quay.io/dexidp/dex:v2.25.0 + docker pull ghcr.io/dexidp/dex:v2.37.0 docker pull argoproj/argo-cd-ci-builder:v1.0.0 - docker pull redis:6.2.1-alpine + docker pull redis:7.0.11-alpine - name: Create target directory for binaries in the build-process run: | mkdir -p dist @@ -413,7 +446,7 @@ jobs: count=1 until curl -f http://127.0.0.1:8088/healthz; do sleep 10; - if test $count -ge 60; then + if test $count -ge 180; then echo "Timeout" exit 1 fi @@ -424,7 +457,7 @@ jobs: set -x make test-e2e-local - name: Upload e2e-server logs - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 with: name: e2e-server-k8s${{ matrix.k3s-version }}.log path: /tmp/e2e-server.log diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 9771ca0f4edde..58426890abcbf 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -2,32 +2,39 @@ name: "Code scanning - action" on: push: + # Secrets aren't available for dependabot on push. 
https://docs.github.com/en/enterprise-cloud@latest/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/troubleshooting-the-codeql-workflow#error-403-resource-not-accessible-by-integration-when-using-dependabot + branches-ignore: + - 'dependabot/**' + - 'cherry-pick-*' pull_request: schedule: - cron: '0 19 * * 0' +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +permissions: + contents: read + jobs: CodeQL-Build: + permissions: + actions: read # for github/codeql-action/init to get workflow details + contents: read # for actions/checkout to fetch code + security-events: write # for github/codeql-action/autobuild to send a status report + if: github.repository == 'argoproj/argo-cd' # CodeQL runs on ubuntu-latest and windows-latest - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Checkout repository - uses: actions/checkout@v2 - with: - # We must fetch at least the immediate parents so that if this is - # a pull request then we can checkout the head. - fetch-depth: 2 - - # If this run was triggered by a pull request event, then checkout - # the head of the pull request instead of the merge commit. - - run: git checkout HEAD^2 - if: ${{ github.event_name == 'pull_request' }} + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v1 + uses: github/codeql-action/init@8aff97f12c99086bdb92ff62ae06dbbcdf07941b # v2.1.33 # Override language selection by uncommenting this and choosing your languages # with: # languages: go, javascript, csharp, python, cpp, java @@ -35,7 +42,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@v1 + uses: github/codeql-action/autobuild@8aff97f12c99086bdb92ff62ae06dbbcdf07941b # v2.1.33 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -49,4 +56,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v1 + uses: github/codeql-action/analyze@8aff97f12c99086bdb92ff62ae06dbbcdf07941b # v2.1.33 diff --git a/.github/workflows/gh-pages.yaml b/.github/workflows/gh-pages.yaml deleted file mode 100644 index 41d775ab6a0ce..0000000000000 --- a/.github/workflows/gh-pages.yaml +++ /dev/null @@ -1,30 +0,0 @@ -name: Deploy - -on: - push: - branches: - - master - pull_request: - branches: - - 'master' - -jobs: - deploy: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v1 - - name: Setup Python - uses: actions/setup-python@v1 - with: - python-version: 3.x - - name: build - run: | - pip install -r docs/requirements.txt - mkdocs build - - name: deploy - if: ${{ github.event_name == 'push' }} - uses: peaceiris/actions-gh-pages@v2.5.0 - env: - PERSONAL_TOKEN: ${{ secrets.PERSONAL_TOKEN }} - PUBLISH_BRANCH: gh-pages - PUBLISH_DIR: ./site \ No newline at end of file diff --git a/.github/workflows/image-reuse.yaml b/.github/workflows/image-reuse.yaml new file mode 100644 index 0000000000000..55d3bc309294a --- /dev/null +++ b/.github/workflows/image-reuse.yaml @@ -0,0 +1,173 @@ +name: Publish and Sign Container Image +on: + workflow_call: + inputs: + go-version: + required: true + type: string + quay_image_name: + required: false + type: string + ghcr_image_name: + required: false + type: string + docker_image_name: + required: false + type: string + platforms: + required: true + type: string + default: linux/amd64 + push: + required: true + type: boolean + default: false + target: + required: false + type: string + + secrets: + quay_username: + required: 
false + quay_password: + required: false + ghcr_username: + required: false + ghcr_password: + required: false + docker_username: + required: false + docker_password: + required: false + + outputs: + image-digest: + description: "sha256 digest of container image" + value: ${{ jobs.publish.outputs.image-digest }} + +permissions: {} + +jobs: + publish: + permissions: + contents: read + packages: write # Used to push images to `ghcr.io` if used. + id-token: write # Needed to create an OIDC token for keyless signing + runs-on: ubuntu-22.04 + outputs: + image-digest: ${{ steps.image.outputs.digest }} + steps: + - name: Checkout code + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 + with: + fetch-depth: 0 + token: ${{ secrets.GITHUB_TOKEN }} + if: ${{ github.ref_type == 'tag'}} + + - name: Checkout code + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 + if: ${{ github.ref_type != 'tag'}} + + - name: Setup Golang + uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0 + with: + go-version: ${{ inputs.go-version }} + + - name: Install cosign + uses: sigstore/cosign-installer@11086d25041f77fe8fe7b9ea4e48e3b9192b8f19 # v3.1.2 + with: + cosign-release: 'v2.0.0' + + - uses: docker/setup-qemu-action@2b82ce82d56a2a04d2637cd93a637ae1b359c0a7 # v2.2.0 + - uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0 + + - name: Setup tags for container image as a CSV type + run: | + IMAGE_TAGS=$(for str in \ + ${{ inputs.quay_image_name }} \ + ${{ inputs.ghcr_image_name }} \ + ${{ inputs.docker_image_name}}; do + echo -n "${str}",;done | sed 's/,$//') + + echo $IMAGE_TAGS + echo "TAGS=$IMAGE_TAGS" >> $GITHUB_ENV + + - name: Setup image namespace for signing, strip off the tag + run: | + TAGS=$(for tag in \ + ${{ inputs.quay_image_name }} \ + ${{ inputs.ghcr_image_name }} \ + ${{ inputs.docker_image_name}}; do + echo -n "${tag}" | awk -F ":" '{print $1}' -;done) + + echo $TAGS + echo 
'SIGNING_TAGS<<EOF' >> $GITHUB_ENV + echo $TAGS >> $GITHUB_ENV + echo 'EOF' >> $GITHUB_ENV + + - name: Login to Quay.io + uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0 + with: + registry: quay.io + username: ${{ secrets.quay_username }} + password: ${{ secrets.quay_password }} + if: ${{ inputs.quay_image_name && inputs.push }} + + - name: Login to GitHub Container Registry + uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0 + with: + registry: ghcr.io + username: ${{ secrets.ghcr_username }} + password: ${{ secrets.ghcr_password }} + if: ${{ inputs.ghcr_image_name && inputs.push }} + + - name: Login to dockerhub Container Registry + uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0 + with: + username: ${{ secrets.docker_username }} + password: ${{ secrets.docker_password }} + if: ${{ inputs.docker_image_name && inputs.push }} + + - name: Set up build args for container image + run: | + echo "GIT_TAG=$(if [ -z "`git status --porcelain`" ]; then git describe --exact-match --tags HEAD 2>/dev/null; fi)" >> $GITHUB_ENV + echo "GIT_COMMIT=$(git rev-parse HEAD)" >> $GITHUB_ENV + echo "BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_ENV + echo "GIT_TREE_STATE=$(if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)" >> $GITHUB_ENV + + - name: Free Disk Space (Ubuntu) + uses: jlumbroso/free-disk-space@4d9e71b726748f254fe64fa44d273194bd18ec91 + with: + large-packages: false + docker-images: false + swap-storage: false + tool-cache: false + + - name: Build and push container image + id: image + uses: docker/build-push-action@2eb1c1961a95fc15694676618e422e8ba1d63825 #v4.1.1 + with: + context: . 
+ platforms: ${{ inputs.platforms }} + push: ${{ inputs.push }} + tags: ${{ env.TAGS }} + target: ${{ inputs.target }} + provenance: false + sbom: false + build-args: | + GIT_TAG=${{env.GIT_TAG}} + GIT_COMMIT=${{env.GIT_COMMIT}} + BUILD_DATE=${{env.BUILD_DATE}} + GIT_TREE_STATE=${{env.GIT_TREE_STATE}} + + - name: Sign container images + run: | + for signing_tag in $SIGNING_TAGS; do + cosign sign \ + -a "repo=${{ github.repository }}" \ + -a "workflow=${{ github.workflow }}" \ + -a "sha=${{ github.sha }}" \ + -y \ + "$signing_tag"@${{ steps.image.outputs.digest }} + done + if: ${{ inputs.push }} diff --git a/.github/workflows/image.yaml b/.github/workflows/image.yaml index 343877b3c649f..1bd674b952ffa 100644 --- a/.github/workflows/image.yaml +++ b/.github/workflows/image.yaml @@ -4,47 +4,114 @@ on: push: branches: - master + pull_request: + branches: + - master + types: [ labeled, unlabeled, opened, synchronize, reopened ] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +permissions: {} jobs: - publish: - runs-on: ubuntu-latest - env: - GOPATH: /home/runner/work/argo-cd/argo-cd + set-vars: + permissions: + contents: read + if: github.repository == 'argoproj/argo-cd' + runs-on: ubuntu-22.04 + outputs: + image-tag: ${{ steps.image.outputs.tag}} + platforms: ${{ steps.platforms.outputs.platforms }} steps: - - uses: actions/setup-go@v1 - with: - go-version: '1.16.2' - - uses: actions/checkout@master - with: - path: src/github.com/argoproj/argo-cd - - # get image tag - - run: echo ::set-output name=tag::$(cat ./VERSION)-${GITHUB_SHA::8} - working-directory: ./src/github.com/argoproj/argo-cd + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 + + - name: Set image tag for ghcr + run: echo "tag=$(cat ./VERSION)-${GITHUB_SHA::8}" >> $GITHUB_OUTPUT id: image - # build - - run: | - docker images -a --format "{{.ID}}" | xargs -I {} docker rmi {} - make image DEV_IMAGE=true DOCKER_PUSH=false 
IMAGE_NAMESPACE=ghcr.io/argoproj IMAGE_TAG=${{ steps.image.outputs.tag }} - working-directory: ./src/github.com/argoproj/argo-cd + - name: Determine image platforms to use + id: platforms + run: | + IMAGE_PLATFORMS=linux/amd64 + if [[ "${{ github.event_name }}" == "push" || "${{ contains(github.event.pull_request.labels.*.name, 'test-multi-image') }}" == "true" ]] + then + IMAGE_PLATFORMS=linux/amd64,linux/arm64,linux/s390x,linux/ppc64le + fi + echo "Building image for platforms: $IMAGE_PLATFORMS" + echo "platforms=$IMAGE_PLATFORMS" >> $GITHUB_OUTPUT - # publish - - run: | - docker login ghcr.io --username $USERNAME --password $PASSWORD - docker push ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }} - env: - USERNAME: ${{ secrets.USERNAME }} - PASSWORD: ${{ secrets.TOKEN }} + build-only: + needs: [set-vars] + permissions: + contents: read + packages: write # for pushing packages to GHCR, which is used by cd.apps.argoproj.io to avoid polluting Quay with tags + id-token: write # for creating OIDC tokens for signing. + if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name != 'push' }} + uses: ./.github/workflows/image-reuse.yaml + with: + # Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations) + go-version: 1.21 + platforms: ${{ needs.set-vars.outputs.platforms }} + push: false + + build-and-publish: + needs: [set-vars] + permissions: + contents: read + packages: write # for pushing packages to GHCR, which is used by cd.apps.argoproj.io to avoid polluting Quay with tags + id-token: write # for creating OIDC tokens for signing. 
+ if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name == 'push' }} + uses: ./.github/workflows/image-reuse.yaml + with: + quay_image_name: quay.io/argoproj/argocd:latest + ghcr_image_name: ghcr.io/argoproj/argo-cd/argocd:${{ needs.set-vars.outputs.image-tag }} + # Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations) + go-version: 1.21 + platforms: ${{ needs.set-vars.outputs.platforms }} + push: true + secrets: + quay_username: ${{ secrets.RELEASE_QUAY_USERNAME }} + quay_password: ${{ secrets.RELEASE_QUAY_TOKEN }} + ghcr_username: ${{ github.actor }} + ghcr_password: ${{ secrets.GITHUB_TOKEN }} - # deploy + build-and-publish-provenance: # Push attestations to GHCR, latest image is polluting quay.io + needs: + - build-and-publish + permissions: + actions: read # for detecting the Github Actions environment. + id-token: write # for creating OIDC tokens for signing. + packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues) + if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name == 'push' }} + # Must be referenced by a tag.
https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.7.0 + with: + image: ghcr.io/argoproj/argo-cd/argocd + digest: ${{ needs.build-and-publish.outputs.image-digest }} + registry-username: ${{ github.actor }} + secrets: + registry-password: ${{ secrets.GITHUB_TOKEN }} + + Deploy: + needs: + - build-and-publish + - set-vars + permissions: + contents: write # for git to push upgrade commit if not already deployed + packages: write # for pushing packages to GHCR, which is used by cd.apps.argoproj.io to avoid polluting Quay with tags + if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name == 'push' }} + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 - run: git clone "https://$TOKEN@github.com/argoproj/argoproj-deployments" env: TOKEN: ${{ secrets.TOKEN }} - run: | - docker run -v $(pwd):/src -w /src --rm -t lyft/kustomizer:v3.3.0 kustomize edit set image quay.io/argoproj/argocd=ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }} + docker run -u $(id -u):$(id -g) -v $(pwd):/src -w /src --rm -t ghcr.io/argoproj/argo-cd/argocd:${{ needs.set-vars.outputs.image-tag }} kustomize edit set image quay.io/argoproj/argocd=ghcr.io/argoproj/argo-cd/argocd:${{ needs.set-vars.outputs.image-tag }} git config --global user.email 'ci@argoproj.com' git config --global user.name 'CI' - git diff --exit-code && echo 'Already deployed' || (git commit -am 'Upgrade argocd to ${{ steps.image.outputs.tag }}' && git push) + git diff --exit-code && echo 'Already deployed' || (git commit -am 'Upgrade argocd to ${{ needs.set-vars.outputs.image-tag }}' && git push) working-directory: argoproj-deployments/argocd - # TODO: clean up old images once github supports it: 
https://github.community/t5/How-to-use-Git-and-GitHub/Deleting-images-from-Github-Package-Registry/m-p/41202/thread-id/9811 + diff --git a/.github/workflows/init-release.yaml b/.github/workflows/init-release.yaml new file mode 100644 index 0000000000000..2cd8111bd87c1 --- /dev/null +++ b/.github/workflows/init-release.yaml @@ -0,0 +1,77 @@ +name: Init ArgoCD Release +on: + workflow_dispatch: + inputs: + TARGET_BRANCH: + description: 'TARGET_BRANCH to checkout (e.g. release-2.5)' + required: true + type: string + + TARGET_VERSION: + description: 'TARGET_VERSION to build manifests (e.g. 2.5.0-rc1) Note: the `v` prefix is not used' + required: true + type: string + +permissions: {} + +jobs: + prepare-release: + permissions: + contents: write # for peter-evans/create-pull-request to create branch + pull-requests: write # for peter-evans/create-pull-request to create a PR + name: Automatically generate version and manifests on ${{ inputs.TARGET_BRANCH }} + runs-on: ubuntu-22.04 + steps: + - name: Checkout code + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 + with: + fetch-depth: 0 + token: ${{ secrets.GITHUB_TOKEN }} + ref: ${{ inputs.TARGET_BRANCH }} + + - name: Check if TARGET_VERSION is well formed. + run: | + set -xue + # Target version must not contain 'v' prefix + if echo "${{ inputs.TARGET_VERSION }}" | grep -e '^v'; then + echo "::error::Target version '${{ inputs.TARGET_VERSION }}' should not begin with a 'v' prefix, refusing to continue." 
>&2 + exit 1 + fi + + - name: Create VERSION information + run: | + set -ue + echo "Bumping version from $(cat VERSION) to ${{ inputs.TARGET_VERSION }}" + echo "${{ inputs.TARGET_VERSION }}" > VERSION + + # We install kustomize in the dist directory + - name: Add dist to PATH + run: | + echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH + + - name: Generate new set of manifests + run: | + set -ue + make install-codegen-tools-local + make manifests-local VERSION=${{ inputs.TARGET_VERSION }} + git diff + + - name: Generate version compatibility table + run: | + git stash + bash hack/update-supported-versions.sh + git add -u . + git stash pop + + - name: Create pull request + uses: peter-evans/create-pull-request@153407881ec5c347639a548ade7d8ad1d6740e38 # v5.0.2 + with: + commit-message: "Bump version to ${{ inputs.TARGET_VERSION }}" + title: "Bump version to ${{ inputs.TARGET_VERSION }} on ${{ inputs.TARGET_BRANCH }} branch" + body: Updating VERSION and manifests to ${{ inputs.TARGET_VERSION }} + branch: update-version + branch-suffix: random + signoff: true + labels: release + + diff --git a/.github/workflows/pr-title-check.yml b/.github/workflows/pr-title-check.yml new file mode 100644 index 0000000000000..020535d7b8afa --- /dev/null +++ b/.github/workflows/pr-title-check.yml @@ -0,0 +1,29 @@ +name: "Lint PR" + +on: + pull_request_target: + types: [opened, edited, reopened, synchronize] + +# IMPORTANT: No checkout actions, scripts, or builds should be added to this workflow. Permissions should always be used +# with extreme caution. https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request_target +permissions: {} + +# PR updates can happen in quick succession leading to this +# workflow being trigger a number of times. This limits it +# to one run per PR. 
+concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + + +jobs: + validate: + permissions: + contents: read + pull-requests: read + name: Validate PR Title + runs-on: ubuntu-latest + steps: + - uses: thehanimo/pr-title-checker@0cf5902181e78341bb97bb06646396e5bd354b3f # v1.4.0 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + configuration_path: ".github/pr-title-checker-config.json" diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index e66091ea27734..7e9303f288ae4 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -1,312 +1,293 @@ -name: Create ArgoCD release +name: Publish ArgoCD Release on: push: tags: - - 'release-v*' - - '!release-v1.5*' - - '!release-v1.4*' - - '!release-v1.3*' - - '!release-v1.2*' - - '!release-v1.1*' - - '!release-v1.0*' - - '!release-v0*' + - 'v*' + - '!v2.4*' + - '!v2.5*' + - '!v2.6*' + +permissions: {} + +env: + GOLANG_VERSION: '1.21' # Note: go-version must also be set in job argocd-image.with.go-version + jobs: - prepare-release: - name: Perform automatic release on trigger ${{ github.ref }} - runs-on: ubuntu-latest - env: - # The name of the tag as supplied by the GitHub event - SOURCE_TAG: ${{ github.ref }} - # The image namespace where Docker image will be published to - IMAGE_NAMESPACE: quay.io/argoproj - # Whether to create & push image and release assets - DRY_RUN: false - # Whether a draft release should be created, instead of public one - DRAFT_RELEASE: false - # Whether to update homebrew with this release as well - # Set RELEASE_HOMEBREW_TOKEN secret in repository for this to work - needs - # access to public repositories - UPDATE_HOMEBREW: false - # Name of the GitHub user for Git config - GIT_USERNAME: argo-bot - # E-Mail of the GitHub user for Git config - GIT_EMAIL: argoproj@gmail.com + argocd-image: + permissions: + contents: read + id-token: write # for creating OIDC tokens for signing. 
+ packages: write # used to push images to `ghcr.io` if used. + if: github.repository == 'argoproj/argo-cd' + uses: ./.github/workflows/image-reuse.yaml + with: + quay_image_name: quay.io/argoproj/argocd:${{ github.ref_name }} + # Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations) + go-version: 1.21 + platforms: linux/amd64,linux/arm64,linux/s390x,linux/ppc64le + push: true + secrets: + quay_username: ${{ secrets.RELEASE_QUAY_USERNAME }} + quay_password: ${{ secrets.RELEASE_QUAY_TOKEN }} + + argocd-image-provenance: + needs: [argocd-image] + permissions: + actions: read # for detecting the Github Actions environment. + id-token: write # for creating OIDC tokens for signing. + packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues) + # Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator + if: github.repository == 'argoproj/argo-cd' + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.7.0 + with: + image: quay.io/argoproj/argocd + digest: ${{ needs.argocd-image.outputs.image-digest }} + secrets: + registry-username: ${{ secrets.RELEASE_QUAY_USERNAME }} + registry-password: ${{ secrets.RELEASE_QUAY_TOKEN }} + + goreleaser: + needs: + - argocd-image + - argocd-image-provenance + permissions: + contents: write # used for uploading assets + if: github.repository == 'argoproj/argo-cd' + runs-on: ubuntu-22.04 + outputs: + hashes: ${{ steps.hash.outputs.hashes }} + steps: - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: fetch-depth: 0 token: ${{ secrets.GITHUB_TOKEN }} - - name: Check if the published tag is well formed and setup vars + - name: Fetch all 
tags + run: git fetch --force --tags + + - name: Set GORELEASER_PREVIOUS_TAG # Workaround, GoReleaser uses 'git-describe' to determine a previous tag. Our tags are created in release branches. run: | set -xue - # Target version must match major.minor.patch and optional -rcX suffix - # where X must be a number. - TARGET_VERSION=${SOURCE_TAG#*release-v} - if ! echo "${TARGET_VERSION}" | egrep '^[0-9]+\.[0-9]+\.[0-9]+(-rc[0-9]+)*$'; then - echo "::error::Target version '${TARGET_VERSION}' is malformed, refusing to continue." >&2 - exit 1 + if echo ${{ github.ref_name }} | grep -E -- '-rc1+$';then + echo "GORELEASER_PREVIOUS_TAG=$(git -c 'versionsort.suffix=-rc' tag --list --sort=version:refname | tail -n 2 | head -n 1)" >> $GITHUB_ENV + else + echo "This is not the first release on the branch, Using GoReleaser defaults" fi - # Target branch is the release branch we're going to operate on - # Its name is 'release-.' - TARGET_BRANCH="release-${TARGET_VERSION%\.[0-9]*}" + - name: Setup Golang + uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.0.0 + with: + go-version: ${{ env.GOLANG_VERSION }} - # The release tag is the source tag, minus the release- prefix - RELEASE_TAG="${SOURCE_TAG#*release-}" + - name: Set environment variables for ldflags + id: set_ldflag + run: | + echo "KUBECTL_VERSION=$(go list -m k8s.io/client-go | head -n 1 | rev | cut -d' ' -f1 | rev)" >> $GITHUB_ENV + echo "GIT_TREE_STATE=$(if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)" >> $GITHUB_ENV - # Whether this is a pre-release (indicated by -rc suffix) - PRE_RELEASE=false - if echo "${RELEASE_TAG}" | egrep -- '-rc[0-9]+$'; then - PRE_RELEASE=true - fi + - name: Run GoReleaser + uses: goreleaser/goreleaser-action@7ec5c2b0c6cdda6e8bbb49444bc797dd33d74dd8 # v5.0.0 + id: run-goreleaser + with: + version: latest + args: release --clean --timeout 55m + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + KUBECTL_VERSION: ${{ env.KUBECTL_VERSION }} + 
GIT_TREE_STATE: ${{ env.GIT_TREE_STATE }} - # We must not have a release trigger within the same release branch, - # because that means a release for this branch is already running. - if git tag -l | grep "release-v${TARGET_VERSION%\.[0-9]*}" | grep -v "release-v${TARGET_VERSION}"; then - echo "::error::Another release for branch ${TARGET_BRANCH} is currently in progress." - exit 1 - fi + - name: Generate subject for provenance + id: hash + env: + ARTIFACTS: "${{ steps.run-goreleaser.outputs.artifacts }}" + run: | + set -euo pipefail - # Ensure that release do not yet exist - if git rev-parse ${RELEASE_TAG}; then - echo "::error::Release tag ${RELEASE_TAG} already exists in repository. Refusing to continue." - exit 1 + hashes=$(echo $ARTIFACTS | jq --raw-output '.[] | {name, "digest": (.extra.Digest // .extra.Checksum)} | select(.digest) | {digest} + {name} | join(" ") | sub("^sha256:";"")' | base64 -w0) + if test "$hashes" = ""; then # goreleaser < v1.13.0 + checksum_file=$(echo "$ARTIFACTS" | jq -r '.[] | select (.type=="Checksum") | .path') + hashes=$(cat $checksum_file | base64 -w0) fi + echo "hashes=$hashes" >> $GITHUB_OUTPUT + + goreleaser-provenance: + needs: [goreleaser] + permissions: + actions: read # for detecting the Github Actions environment + id-token: write # Needed for provenance signing and ID + contents: write # Needed for release uploads + if: github.repository == 'argoproj/argo-cd' + # Must be referenced by a tag.
https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.7.0 + with: + base64-subjects: "${{ needs.goreleaser.outputs.hashes }}" + provenance-name: "argocd-cli.intoto.jsonl" + upload-assets: true + + generate-sbom: + name: Create SBOM and generate hash + needs: + - argocd-image + - goreleaser + permissions: + contents: write # Needed for release uploads + outputs: + hashes: ${{ steps.sbom-hash.outputs.hashes}} + if: github.repository == 'argoproj/argo-cd' + runs-on: ubuntu-22.04 + steps: + - name: Checkout code + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 + with: + fetch-depth: 0 + token: ${{ secrets.GITHUB_TOKEN }} - # Make the variables available in follow-up steps - echo "TARGET_VERSION=${TARGET_VERSION}" >> $GITHUB_ENV - echo "TARGET_BRANCH=${TARGET_BRANCH}" >> $GITHUB_ENV - echo "RELEASE_TAG=${RELEASE_TAG}" >> $GITHUB_ENV - echo "PRE_RELEASE=${PRE_RELEASE}" >> $GITHUB_ENV + - name: Setup Golang + uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0 + with: + go-version: ${{ env.GOLANG_VERSION }} - - name: Check if our release tag has a correct annotation + - name: Generate SBOM (spdx) + id: spdx-builder + env: + # defines the spdx/spdx-sbom-generator version to use. + SPDX_GEN_VERSION: v0.0.13 + # defines the sigs.k8s.io/bom version to use. + SIGS_BOM_VERSION: v0.2.1 + # comma delimited list of project relative folders to inspect for package + # managers (gomod, yarn, npm). 
+ PROJECT_FOLDERS: ".,./ui" + # full qualified name of the docker image to be inspected + DOCKER_IMAGE: quay.io/argoproj/argocd:${{ github.ref_name }} run: | - set -ue - # Fetch all tag information as well - git fetch --prune --tags --force - - echo "=========== BEGIN COMMIT MESSAGE =============" - git show ${SOURCE_TAG} - echo "============ END COMMIT MESSAGE ==============" - - # Quite dirty hack to get the release notes from the annotated tag - # into a temporary file. - RELEASE_NOTES=$(mktemp -p /tmp release-notes.XXXXXX) - - prefix=true - begin=false - git show ${SOURCE_TAG} | while read line; do - # Whatever is in commit history for the tag, we only want that - # annotation from our tag. We discard everything else. - if test "$begin" = "false"; then - if echo "$line" | grep -q "tag ${SOURCE_TAG#refs/tags/}"; then begin="true"; fi - continue - fi - if test "$prefix" = "true"; then - if test -z "$line"; then prefix=false; fi - else - if echo "$line" | egrep -q '^commit [0-9a-f]+'; then - break - fi - echo "$line" >> ${RELEASE_NOTES} - fi + yarn install --cwd ./ui + go install github.com/spdx/spdx-sbom-generator/cmd/generator@$SPDX_GEN_VERSION + go install sigs.k8s.io/bom/cmd/bom@$SIGS_BOM_VERSION + + # Generate SPDX for project dependencies analyzing package managers + for folder in $(echo $PROJECT_FOLDERS | sed "s/,/ /g") + do + generator -p $folder -o /tmp done - # For debug purposes - echo "============BEGIN RELEASE NOTES=================" - cat ${RELEASE_NOTES} - echo "=============END RELEASE NOTES==================" - - # Too short release notes are suspicious. We need at least 100 bytes. - relNoteLen=$(stat -c '%s' $RELEASE_NOTES) - if test $relNoteLen -lt 100; then - echo "::error::No release notes provided in tag annotation (or tag is not annotated)" - exit 1 + # Generate SPDX for binaries analyzing the docker image + if [[ ! 
-z $DOCKER_IMAGE ]]; then + bom generate -o /tmp/bom-docker-image.spdx -i $DOCKER_IMAGE fi - # Check for magic string '## Quick Start' in head of release notes - if ! head -2 ${RELEASE_NOTES} | grep -iq '## Quick Start'; then - echo "::error::Release notes seem invalid, quick start section not found." - exit 1 - fi - - # We store path to temporary release notes file for later reading, we - # need it when creating release. - echo "RELEASE_NOTES=${RELEASE_NOTES}" >> $GITHUB_ENV - - - name: Setup Golang - uses: actions/setup-go@v1 + cd /tmp && tar -zcf sbom.tar.gz *.spdx + + - name: Generate SBOM hash + shell: bash + id: sbom-hash + run: | + # sha256sum generates sha256 hash for sbom. + # base64 -w0 encodes to base64 and outputs on a single line. + # sha256sum /tmp/sbom.tar.gz ... | base64 -w0 + echo "hashes=$(sha256sum /tmp/sbom.tar.gz | base64 -w0)" >> "$GITHUB_OUTPUT" + + - name: Upload SBOM + uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v0.1.15 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + files: | + /tmp/sbom.tar.gz + + sbom-provenance: + needs: [generate-sbom] + permissions: + actions: read # for detecting the Github Actions environment + id-token: write # Needed for provenance signing and ID + contents: write # Needed for release uploads + if: github.repository == 'argoproj/argo-cd' + # Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.7.0 + with: + base64-subjects: "${{ needs.generate-sbom.outputs.hashes }}" + provenance-name: "argocd-sbom.intoto.jsonl" + upload-assets: true + + post-release: + needs: + - argocd-image + - goreleaser + - generate-sbom + permissions: + contents: write # Needed to push commit to update stable tag + pull-requests: write # Needed to create PR for VERSION update. 
+ if: github.repository == 'argoproj/argo-cd' + runs-on: ubuntu-22.04 + steps: + - name: Checkout code + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: - go-version: '1.16.2' + fetch-depth: 0 + token: ${{ secrets.GITHUB_TOKEN }} - name: Setup Git author information run: | set -ue - git config --global user.email "${GIT_EMAIL}" - git config --global user.name "${GIT_USERNAME}" + git config --global user.email 'ci@argoproj.com' + git config --global user.name 'CI' - - name: Checkout corresponding release branch + - name: Check if tag is the latest version and not a pre-release run: | - set -ue - echo "Switching to release branch '${TARGET_BRANCH}'" - if ! git checkout ${TARGET_BRANCH}; then - echo "::error::Checking out release branch '${TARGET_BRANCH}' for target version '${TARGET_VERSION}' (tagged '${RELEASE_TAG}') failed. Does it exist in repo?" - exit 1 - fi + set -xue + # Fetch all tag information + git fetch --prune --tags --force - - name: Create VERSION information - run: | - set -ue - echo "Bumping version from $(cat VERSION) to ${TARGET_VERSION}" - echo "${TARGET_VERSION}" > VERSION - git commit -m "Bump version to ${TARGET_VERSION}" VERSION + LATEST_TAG=$(git -c 'versionsort.suffix=-rc' tag --list --sort=version:refname | tail -n1) - - name: Generate new set of manifests - run: | - set -ue - make install-codegen-tools-local - helm2 init --client-only - make manifests-local VERSION=${TARGET_VERSION} - git diff - git commit manifests/ -m "Bump version to ${TARGET_VERSION}" + PRE_RELEASE=false + # Check if latest tag is a pre-release + if echo $LATEST_TAG | grep -E -- '-rc[0-9]+$';then + PRE_RELEASE=true + fi - - name: Create the release tag - run: | - set -ue - echo "Creating release ${RELEASE_TAG}" - git tag ${RELEASE_TAG} + # Ensure latest tag matches github.ref_name & not a pre-release + if [[ $LATEST_TAG == ${{ github.ref_name }} ]] && [[ $PRE_RELEASE != 'true' ]];then + echo "TAG_STABLE=true" >> $GITHUB_ENV + else + echo 
"TAG_STABLE=false" >> $GITHUB_ENV + fi - - name: Build Docker image for release + - name: Update stable tag to latest version run: | - set -ue - git clean -fd - mkdir -p dist/ - make image IMAGE_TAG="${TARGET_VERSION}" DOCKER_PUSH=false - make release-cli - chmod +x ./dist/argocd-linux-amd64 - ./dist/argocd-linux-amd64 version --client - if: ${{ env.DRY_RUN != 'true' }} + git tag -f stable ${{ github.ref_name }} + git push -f origin stable + if: ${{ env.TAG_STABLE == 'true' }} - - name: Push docker image to repository - env: - DOCKER_USERNAME: ${{ secrets.RELEASE_DOCKERHUB_USERNAME }} - DOCKER_TOKEN: ${{ secrets.RELEASE_DOCKERHUB_TOKEN }} - QUAY_USERNAME: ${{ secrets.RELEASE_QUAY_USERNAME }} - QUAY_TOKEN: ${{ secrets.RELEASE_QUAY_TOKEN }} + - name: Check to see if VERSION should be updated on master branch run: | - set -ue - docker login quay.io --username "${QUAY_USERNAME}" --password "${QUAY_TOKEN}" - docker push ${IMAGE_NAMESPACE}/argocd:v${TARGET_VERSION} - # Remove the following when Docker Hub is gone - docker login --username "${DOCKER_USERNAME}" --password "${DOCKER_TOKEN}" - docker tag ${IMAGE_NAMESPACE}/argocd:v${TARGET_VERSION} argoproj/argocd:v${TARGET_VERSION} - docker push argoproj/argocd:v${TARGET_VERSION} - if: ${{ env.DRY_RUN != 'true' }} - - - name: Read release notes file - id: release-notes - uses: juliangruber/read-file-action@v1 - with: - path: ${{ env.RELEASE_NOTES }} + set -xue + SOURCE_TAG=${{ github.ref_name }} + VERSION_REF="${SOURCE_TAG#*v}" + if echo "$VERSION_REF" | grep -E -- '^[0-9]+\.[0-9]+\.0-rc1';then + VERSION=$(awk 'BEGIN {FS=OFS="."} {$2++; print}' <<< "${VERSION_REF%-rc1}") + echo "Updating VERSION to: $VERSION" + echo "UPDATE_VERSION=true" >> $GITHUB_ENV + echo "NEW_VERSION=$VERSION" >> $GITHUB_ENV + else + echo "Not updating VERSION" + echo "UPDATE_VERSION=false" >> $GITHUB_ENV + fi - - name: Push changes to release branch + - name: Update VERSION on master branch run: | - set -ue - git push origin ${TARGET_BRANCH} - git 
push origin ${RELEASE_TAG} - - - name: Create GitHub release - uses: actions/create-release@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - id: create_release - with: - tag_name: ${{ env.RELEASE_TAG }} - release_name: ${{ env.RELEASE_TAG }} - draft: ${{ env.DRAFT_RELEASE }} - prerelease: ${{ env.PRE_RELEASE }} - body: ${{ steps.release-notes.outputs.content }} - - - name: Upload argocd-linux-amd64 binary to release assets - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} - asset_path: ./dist/argocd-linux-amd64 - asset_name: argocd-linux-amd64 - asset_content_type: application/octet-stream - if: ${{ env.DRY_RUN != 'true' }} + echo ${{ env.NEW_VERSION }} > VERSION + if: ${{ env.UPDATE_VERSION == 'true' }} - - name: Upload argocd-darwin-amd64 binary to release assets - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} - asset_path: ./dist/argocd-darwin-amd64 - asset_name: argocd-darwin-amd64 - asset_content_type: application/octet-stream - if: ${{ env.DRY_RUN != 'true' }} - - - name: Upload argocd-windows-amd64 binary to release assets - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} - asset_path: ./dist/argocd-windows-amd64.exe - asset_name: argocd-windows-amd64.exe - asset_content_type: application/octet-stream - if: ${{ env.DRY_RUN != 'true' }} - - # include argocd-util as part of release artifacts (argoproj/argo-cd#5174) - - name: Upload argocd-util-linux-amd64 binary to release assets - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} - asset_path: ./dist/argocd-linux-amd64 - asset_name: argocd-util-linux-amd64 - 
asset_content_type: application/octet-stream - if: ${{ env.DRY_RUN != 'true' }} - - - name: Upload argocd-util-darwin-amd64 binary to release assets - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} - asset_path: ./dist/argocd-darwin-amd64 - asset_name: argocd-util-darwin-amd64 - asset_content_type: application/octet-stream - if: ${{ env.DRY_RUN != 'true' }} - - - name: Upload argocd-util-windows-amd64 binary to release assets - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Create PR to update VERSION on master branch + uses: peter-evans/create-pull-request@153407881ec5c347639a548ade7d8ad1d6740e38 # v5.0.2 with: - upload_url: ${{ steps.create_release.outputs.upload_url }} - asset_path: ./dist/argocd-windows-amd64.exe - asset_name: argocd-util-windows-amd64.exe - asset_content_type: application/octet-stream - if: ${{ env.DRY_RUN != 'true' }} - - - name: Update homebrew formula - env: - HOMEBREW_TOKEN: ${{ secrets.RELEASE_HOMEBREW_TOKEN }} - uses: dawidd6/action-homebrew-bump-formula@v3 - with: - token: ${{env.HOMEBREW_TOKEN}} - formula: argocd - if: ${{ env.HOMEBREW_TOKEN != '' && env.UPDATE_HOMEBREW == 'true' && env.PRE_RELEASE != 'true' }} - - - name: Delete original request tag from repository - run: | - set -ue - git push --delete origin ${SOURCE_TAG} - if: ${{ always() }} + commit-message: Bump version in master + title: "chore: Bump version in master" + body: All images built from master should indicate which version we are on track for. 
+ signoff: true + branch: update-version + branch-suffix: random + base: master + if: ${{ env.UPDATE_VERSION == 'true' }} diff --git a/.github/workflows/scorecard.yaml b/.github/workflows/scorecard.yaml new file mode 100644 index 0000000000000..e6abc5adc3c0c --- /dev/null +++ b/.github/workflows/scorecard.yaml @@ -0,0 +1,67 @@ +name: Scorecards supply-chain security +on: + # Only the default branch is supported. + branch_protection_rule: + schedule: + - cron: "39 9 * * 2" + push: + branches: ["master"] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +# Declare default permissions as read only. +permissions: read-all + +jobs: + analysis: + name: Scorecards analysis + runs-on: ubuntu-22.04 + permissions: + # Needed to upload the results to code-scanning dashboard. + security-events: write + # Used to receive a badge. (Upcoming feature) + id-token: write + # Needs for private repositories. + contents: read + actions: read + if: github.repository == 'argoproj/argo-cd' + + steps: + - name: "Checkout code" + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 + with: + persist-credentials: false + + - name: "Run analysis" + uses: ossf/scorecard-action@08b4669551908b1024bb425080c797723083c031 # v2.2.0 + with: + results_file: results.sarif + results_format: sarif + # (Optional) Read-only PAT token. Uncomment the `repo_token` line below if: + # - you want to enable the Branch-Protection check on a *public* repository, or + # - you are installing Scorecards on a *private* repository + # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat. + # repo_token: ${{ secrets.SCORECARD_READ_TOKEN }} + + # Publish the results for public repositories to enable scorecard badges. For more details, see + # https://github.com/ossf/scorecard-action#publishing-results. 
+ # For private repositories, `publish_results` will automatically be set to `false`, regardless + # of the value entered here. + publish_results: true + + # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF + # format to the repository Actions tab. + - name: "Upload artifact" + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + with: + name: SARIF file + path: results.sarif + retention-days: 5 + + # Upload the results to GitHub's code scanning dashboard. + - name: "Upload to code-scanning" + uses: github/codeql-action/upload-sarif@3ebbd71c74ef574dbc558c82f70e52732c8b44fe # v2.2.1 + with: + sarif_file: results.sarif diff --git a/.github/workflows/update-snyk.yaml b/.github/workflows/update-snyk.yaml new file mode 100644 index 0000000000000..62655b433d9e4 --- /dev/null +++ b/.github/workflows/update-snyk.yaml @@ -0,0 +1,36 @@ +name: Snyk report update +on: + workflow_dispatch: {} + schedule: + - cron: '0 0 * * 0' # midnight every Sunday + +permissions: + contents: read + +jobs: + snyk-report: + permissions: + contents: write + pull-requests: write + if: github.repository == 'argoproj/argo-cd' + name: Update Snyk report in the docs directory + runs-on: ubuntu-22.04 + steps: + - name: Checkout code + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 + with: + token: ${{ secrets.GITHUB_TOKEN }} + - name: Build reports + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} + run: | + make snyk-report + pr_branch="snyk-update-$(echo $RANDOM | md5sum | head -c 20)" + git checkout -b "$pr_branch" + git config --global user.email 'ci@argoproj.com' + git config --global user.name 'CI' + git add docs/snyk + git commit -m "[Bot] docs: Update Snyk reports" --signoff + git push --set-upstream origin "$pr_branch" + gh pr create -B master -H "$pr_branch" --title '[Bot] docs: Update Snyk report' --body '' diff --git a/.gitignore 
b/.gitignore index aed35e25f6d1e..ab17deb0db139 100644 --- a/.gitignore +++ b/.gitignore @@ -2,7 +2,9 @@ .idea/ .DS_Store vendor/ -dist/ +dist/* +ui/dist/app/* +!ui/dist/app/gitkeep site/ *.iml # delve debug binaries @@ -14,10 +16,12 @@ test-results .scratch node_modules/ .kube/ +./test/cmp/*.sock +.envrc.remote +.*.swp # ignore built binaries cmd/argocd/argocd cmd/argocd-application-controller/argocd-application-controller cmd/argocd-repo-server/argocd-repo-server cmd/argocd-server/argocd-server -cmd/argocd-util/argocd-util \ No newline at end of file diff --git a/.gitpod.Dockerfile b/.gitpod.Dockerfile index 39867cb159156..d105f49fde2b1 100644 --- a/.gitpod.Dockerfile +++ b/.gitpod.Dockerfile @@ -1,17 +1,21 @@ -FROM gitpod/workspace-full +FROM gitpod/workspace-full@sha256:511cecde4dc129ca9eb4cc4c479d61f95e5485ebe320a07f5b902f11899956a3 USER root RUN curl -o /usr/local/bin/kubectl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" && \ chmod +x /usr/local/bin/kubectl -RUN curl -L https://go.kubebuilder.io/dl/2.3.1/$(go env GOOS)/$(go env GOARCH) | \ +RUN curl -L https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.1/kubebuilder_2.3.1_$(go env GOOS)_$(go env GOARCH).tar.gz | \ tar -xz -C /tmp/ && mv /tmp/kubebuilder_2.3.1_$(go env GOOS)_$(go env GOARCH) /usr/local/kubebuilder +ENV GOCACHE=/go-build-cache + RUN apt-get install redis-server -y -RUN go get github.com/mattn/goreman +RUN go install github.com/mattn/goreman@latest + +RUN chown -R gitpod:gitpod /go-build-cache USER gitpod ENV ARGOCD_REDIS_LOCAL=true -ENV KUBECONFIG=/tmp/kubeconfig \ No newline at end of file +ENV KUBECONFIG=/tmp/kubeconfig diff --git a/.gitpod.yml b/.gitpod.yml index 4001e29ffea04..f57f442cade5c 100644 --- a/.gitpod.yml +++ b/.gitpod.yml @@ -2,5 +2,5 @@ image: file: .gitpod.Dockerfile tasks: - - init: make mod-download-local dep-ui-local && GO111MODULE=off go get github.com/mattn/goreman + - init: make 
mod-download-local dep-ui-local && GO111MODULE=off go install github.com/mattn/goreman@latest command: make start-test-k8s \ No newline at end of file diff --git a/.golangci.yml b/.golangci.yml deleted file mode 100644 index ebe2918e8d179..0000000000000 --- a/.golangci.yml +++ /dev/null @@ -1,22 +0,0 @@ -run: - timeout: 2m - skip-files: - - ".*\\.pb\\.go" - skip-dirs: - - pkg/client/ - - vendor/ -linters: - enable: - - vet - - deadcode - - goimports - - varcheck - - structcheck - - ineffassign - - unconvert - - unparam -linters-settings: - goimports: - local-prefixes: github.com/argoproj/argo-cd -service: - golangci-lint-version: 1.21.0 diff --git a/.goreleaser.yaml b/.goreleaser.yaml new file mode 100644 index 0000000000000..26341aa1d80c1 --- /dev/null +++ b/.goreleaser.yaml @@ -0,0 +1,121 @@ +project_name: argocd + +before: + hooks: + - go mod download + - make build-ui + +builds: + - id: argocd-cli + main: ./cmd + binary: argocd-{{ .Os}}-{{ .Arch}} + env: + - CGO_ENABLED=0 + flags: + - -v + ldflags: + - -X github.com/argoproj/argo-cd/v2/common.version={{ .Version }} + - -X github.com/argoproj/argo-cd/v2/common.buildDate={{ .Date }} + - -X github.com/argoproj/argo-cd/v2/common.gitCommit={{ .FullCommit }} + - -X github.com/argoproj/argo-cd/v2/common.gitTreeState={{ .Env.GIT_TREE_STATE }} + - -X github.com/argoproj/argo-cd/v2/common.kubectlVersion={{ .Env.KUBECTL_VERSION }} + - -extldflags="-static" + goos: + - linux + - darwin + - windows + goarch: + - amd64 + - arm64 + - s390x + - ppc64le + ignore: + - goos: darwin + goarch: s390x + - goos: darwin + goarch: ppc64le + - goos: windows + goarch: s390x + - goos: windows + goarch: ppc64le + - goos: windows + goarch: arm64 + +archives: + - id: argocd-archive + builds: + - argocd-cli + name_template: |- + {{ .ProjectName }}-{{ .Os }}-{{ .Arch }} + format: binary + +checksum: + name_template: 'cli_checksums.txt' + algorithm: sha256 + +release: + prerelease: auto + draft: false + header: | + ## Quick Start + + ### Non-HA: 
+ + ```shell + kubectl create namespace argocd + kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/{{.Tag}}/manifests/install.yaml + ``` + + ### HA: + + ```shell + kubectl create namespace argocd + kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/{{.Tag}}/manifests/ha/install.yaml + ``` + + ## Release Signatures and Provenance + + All Argo CD container images are signed by cosign. A Provenance is generated for container images and CLI binaries which meet the SLSA Level 3 specifications. See the [documentation](https://argo-cd.readthedocs.io/en/stable/operator-manual/signed-release-assets) on how to verify. + + + ## Upgrading + + If upgrading from a different minor version, be sure to read the [upgrading](https://argo-cd.readthedocs.io/en/stable/operator-manual/upgrading/overview/) documentation. + footer: | + **Full Changelog**: https://github.com/argoproj/argo-cd/compare/{{ .PreviousTag }}...{{ .Tag }} + + + + +snapshot: #### To be removed for PR + name_template: "2.6.0" + +changelog: + use: + github + sort: asc + abbrev: 0 + groups: # Regex use RE2 syntax as defined here: https://github.com/google/re2/wiki/Syntax. 
+ - title: 'Features' + regexp: '^.*?feat(\([[:word:]]+\))??!?:.+$' + order: 100 + - title: 'Bug fixes' + regexp: '^.*?fix(\([[:word:]]+\))??!?:.+$' + order: 200 + - title: 'Documentation' + regexp: '^.*?docs(\([[:word:]]+\))??!?:.+$' + order: 300 + - title: 'Dependency updates' + regexp: '^.*?(feat|fix|chore)\(deps?.+\)!?:.+$' + order: 400 + - title: 'Other work' + order: 999 + filters: + exclude: + - '^test:' + - '^.*?Bump(\([[:word:]]+\))?.+$' + - '^.*?[Bot](\([[:word:]]+\))?.+$' + + +# yaml-language-server: $schema=https://goreleaser.com/static/schema.json + diff --git a/.readthedocs.yml b/.readthedocs.yml index 7b50ab9415bd7..3d69498d27c09 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -4,4 +4,8 @@ mkdocs: fail_on_warning: false python: install: - - requirements: docs/requirements.txt \ No newline at end of file + - requirements: docs/requirements.txt +build: + os: "ubuntu-22.04" + tools: + python: "3.7" diff --git a/.snyk b/.snyk new file mode 100644 index 0000000000000..1f49cbe7b8c10 --- /dev/null +++ b/.snyk @@ -0,0 +1,40 @@ +# Snyk (https://snyk.io) policy file, patches or ignores known vulnerabilities. +version: v1.22.1 +# ignores vulnerabilities until expiry date; change duration by modifying expiry date +ignore: + SNYK-JS-ANSIREGEX-1583908: + - '*': + reason: >- + Code is only run client-side in the swagger-ui endpoint. No risk of + server-side DoS. + SNYK-CC-K8S-44: + - 'manifests/core-install.yaml > *': + reason: >- + Argo CD needs wide permissions to manage resources. + - 'manifests/install.yaml > *': + reason: >- + Argo CD needs wide permissions to manage resources. + SNYK-JS-MOMENT-2440688: + - '*': + reason: >- + Code is only run client-side. No risk of directory traversal. + SNYK-GOLANG-GITHUBCOMEMICKLEIGORESTFUL-2435653: + - '*': + reason: >- + Argo CD uses go-restful as a transitive dependency of kube-openapi. kube-openapi is used to generate openapi + specs. 
We do not use go-restful at runtime and are therefore not vulnerable to this CORS misconfiguration + issue in go-restful. + SNYK-JS-FORMIDABLE-2838956: + - '*': + reason: >- + Code is only run client-side. No risk of arbitrary file upload. + SNYK-JS-PARSEPATH-2936439: + - '*': + reason: >- + The issue is that, for specific URLs, parse-path may incorrectly identify the "resource" (domain name) + portion. For example, in "http://127.0.0.1#@example.com", it identifies "example.com" as the "resource". + + We use parse-path on the client side, but permissions for git URLs are checked server-side. This is a + potential usability issue, but it is not a security issue. +patch: {} + diff --git a/CHANGELOG.md b/CHANGELOG.md index fd8735d333896..d371bf2fe26bf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,945 @@ # Changelog -## v2.0.0 (Unreleased) +## v2.4.8 (2022-07-29) + +### Bug fixes + +- feat: support application level extensions (#9923) +- feat: support multiple extensions per resource group/kind (#9834) +- fix: extensions is not loading for ConfigMap/Pods (#10010) +- fix: upgrade moment from 2.29.2 to 2.29.3 (#9330) +- fix: skip redirect url validation when it's the base href (#10058) (#10116) +- fix: avoid CVE-2022-28948 (#10093) +- fix: Set HOST_ARCH for yarn build from platform (#10018) + +### Other changes + +- chore(deps): bump moment from 2.29.3 to 2.29.4 in /ui (#9897) +- docs: add OpenSSH breaking change notes (#10104) +- chore: update parse-url (#10101) +- docs: add api field example in the appset security doc (#10087) +- chore: update redis to 7.0.4 avoid CVE-2022-30065 (#10059) +- docs: add argocd-server grpc metric usage (#10007) +- chore: upgrade Dex to 2.32.0 (#10036) (#10042) +- chore: update redis to avoid CVE-2022-2097 (#10031) +- chore: update haproxy to 2.0.29 for redis-ha (#10045) + +## v2.4.7 (2022-07-18) + +### Bug fixes + +fix: Support files in argocd.argoproj.io/manifest-generate-paths annotation (#9908) +fix: terminal websocket write 
lock to avoid races (#10011) +fix: updated all a tags to Link tags in app summary (#9777) +fix: e2e test to use func from clusterauth instead creating one with old logic (#9989) +fix: add missing download CLI tool URL response for ppc64le, s390x (#9983) + +### Other + +chore: upgrade parse-url to avoid SNYK-JS-PARSEURL-2936249 (#9826) +docs: use quotes to emphasize that ConfigMap value is a string (#9995) +docs: document directory app include/exclude fields (#9997) +docs: simplify Docker toolchain docs (#9966) (#10006) +docs: supported versions (#9876) + +## v2.4.6 (2022-07-12) + +### Features + +* feat: Treat connection reset as a retryable error (#9739) + +### Bug fixes + +* fix: 'unexpected reserved bits' breaking web terminal (#9605) (#9895) +* fix: argocd login just hangs on 2.4.0 #9679 (#9935) +* fix: CMP manifest generation fails with ENHANCE_YOUR_CALM if over 40s (#9922) +* fix: NotAfter is not set when ValidFor is set (#9911) +* fix: add missing download CLI tool link for ppc64le, s390x (#9649) +* fix: Check tracking annotation for being self-referencing (#9791) +* fix: Make change of tracking method work at runtime (#9820) +* fix: argo-cd git submodule is using SSH auth instead of HTTPs (#3118) (#9821) + +### Other + +* docs: fix typo in Generators-Git.md (#9949) +* docs: add terminal documentation (#9948) +* test: Use dedicated multi-arch workloads in e2e tests (#9921) +* docs: Adding blank line so list is formatted correctly (#9880) +* docs: small fix for plugin stream filtering (#9871) +* docs: Document the possibility of rendering Helm charts with Kustomize (#9841) +* docs: getting started notes on self-signed cert (#9429) (#9784) +* test: check for error messages from CI env (#9953) + +## v2.4.5 (2022-07-12) + +### Security fixes + +* HIGH: Certificate verification is skipped for connections to OIDC providers ([GHSA-7943-82jg-wmw5](https://github.com/argoproj/argo-cd/security/advisories/GHSA-7943-82jg-wmw5)) +* LOW: A leaked API server encryption key 
can allow XSS for SSO users ([GHSA-pmjg-52h9-72qv](https://github.com/argoproj/argo-cd/security/advisories/GHSA-pmjg-52h9-72qv)) + +### Potentially-breaking changes + +The fix for GHSA-7943-82jg-wmw5 enables TLS certificate validation by default for connections to OIDC providers. If +connections to your OIDC provider fails validation, SSO will be broken for your Argo CD instance. You should test 2.4.5 +before upgrading it to production. From the new documentation: + +> By default, all connections made by the API server to OIDC providers (either external providers or the bundled Dex +> instance) must pass certificate validation. These connections occur when getting the OIDC provider's well-known +> configuration, when getting the OIDC provider's keys, and when exchanging an authorization code or verifying an ID +> token as part of an OIDC login flow. +> +> Disabling certificate verification might make sense if: +> * You are using the bundled Dex instance **and** your Argo CD instance has TLS configured with a self-signed certificate + > **and** you understand and accept the risks of skipping OIDC provider cert verification. +> * You are using an external OIDC provider **and** that provider uses an invalid certificate **and** you cannot solve + > the problem by setting `oidcConfig.rootCA` **and** you understand and accept the risks of skipping OIDC provider cert + > verification. +> +> If either of those two applies, then you can disable OIDC provider certificate verification by setting +> `oidc.tls.insecure.skip.verify` to `"true"` in the `argocd-cm` ConfigMap. 
+ +### Bug fixes + +* fix: webhook typo in case of error in GetManifests (#9671) + +## v2.4.4 (2022-07-07) + +### Bug fixes + +- fix: missing path segments for git file generator (#9839) +- fix: make sure api server informer does not stop after setting change (#9842) +- fix: support resource logs and exec (#9833) +- fix: configurable CMP tar exclusions (#9675) (#9789) +- fix: prune any deleted refs before fetching (#9504) + +### Other + +- test: Remove circular symlinks from testdata (#9886) +- docs: custom secret must be labeled (#9835) +- docs: update archlinux install with official package (#9718) +- docs: explain rightmost git generator path parameter behavior (#9799) + +## v2.4.3 (2022-06-27) + +### Bug fixes + +- fix: respect OIDC providers' supported token signing algorithms (#9433) (#9761) +- fix websockets for terminal not working on subPath (#9795) +- fix: avoid closing and re-opening port of api server settings change (#9778) +- fix: [ArgoCD] Fixing webhook typo in case of error in GetManifests (#9671) +- fix: overrides should not appear in the manifest cache key (#9601) + +## v2.4.2 (2022-06-21) + +### Bug fixes + +* fix: project filter (#9651) (#9709) +* fix: broken symlink in Dockerfile (#9674) +* fix: updated baseHRefRegex to perform lazy match (#9724) +* fix: updated config file permission requirements for windows (#9666) + +### Other + +* docs: Update sync-options.md (#9687) +* test/remote: Allow override of base image (#9734) + +## v2.4.1 (2022-06-21) + +### Security fixes + +* CRITICAL: External URLs for Deployments can include javascript ([GHSA-h4w9-6x78-8vrj](https://github.com/argoproj/argo-cd/security/advisories/GHSA-h4w9-6x78-8vrj)) +* HIGH: Insecure entropy in PKCE/Oauth2/OIDC params ([GHSA-2m7h-86qq-fp4v](https://github.com/argoproj/argo-cd/security/advisories/GHSA-2m7h-86qq-fp4v)) +* MODERATE: DoS through large directory app manifest files ([GHSA-jhqp-vf4w-rpwq](https://github.com/argoproj/argo-cd/security/advisories/GHSA-jhqp-vf4w-rpwq)) 
+* MODERATE: Symlink following allows leaking out-of-bounds YAML files from Argo CD repo-server ([GHSA-q4w5-4gq2-98vm](https://github.com/argoproj/argo-cd/security/advisories/GHSA-q4w5-4gq2-98vm)) + +### Potentially-breaking changes + +From the [GHSA-2m7h-86qq-fp4v](https://github.com/argoproj/argo-cd/security/advisories/GHSA-2m7h-86qq-fp4v) description: + +> The patch introduces a new `reposerver.max.combined.directory.manifests.size` config parameter, which you should tune before upgrading in production. It caps the maximum total file size of .yaml/.yml/.json files in directory-type (raw manifest) Applications. The default max is 10M per Application. This max is designed to keep any single app from consuming more than 3G of memory in the repo-server (manifests consume more space in memory than on disk). The 300x ratio assumes a maliciously-crafted manifest file. If you only want to protect against accidental excessive memory use, it is probably safe to use a smaller ratio. +> +> If your organization uses directory-type Applications with very many manifests or very large manifests then check the size of those manifests and tune the config parameter before deploying this change to production. When testing, make sure to do a "hard refresh" in either the CLI or UI to test your directory-type App. That will make sure you're using the new max logic instead of relying on cached manifest responses from Redis. 
+ +### Other + +* test: directory app manifest generation (#9503) +* chore: Implement tests to validate aws auth retry (#9627) +* chore: Implement a retry in aws auth command (#9618) +* test: Remove temp directories from repo server tests (#9501) +* test: Make context tests idempodent (#9502) +* test: fix plugin var test for OSX (#9590) +* docs: Document how to deploy from the root of the git repository (#9632) +* docs: added environment variables documentation (#8680) + +## v2.4.0 (2022-06-10) + +### Web Terminal In Argo CD UI + +Feature enables engineers to start a shell in the running application container without leaving the web interface. Just find the required Kubernetes +Pod using the Application Details page, click on it and select the Terminal tab. The shell starts automatically and enables you to execute the required +commands, and helps to troubleshoot the application state. + +### Access Control For Pod Logs & Web Terminal + +Argo CD is used to manage the critical infrastructure of multiple organizations, which makes security the top priority of the project. We've listened to +your feedback and introduced additional access control settings that control access to Kubernetes Pod logs and the new Web Terminal feature. + +#### Pod Logs UI + +Since 2.4.9, the LOGS tab in pod view is visible in the UI only for users with explicit allow get logs policy. + +#### Known pod logs UI issue prior to 2.4.9 + +Upon pressing the "LOGS" tab in pod view by users who don't have an explicit allow get logs policy, the red "unable to load data: Internal error" is received in the bottom of the screen, and "Failed to load data, please try again" is displayed. + +### OpenTelemetry Tracing Integration + +The new feature allows emitting richer telemetry data that might make identifying performance bottlenecks easier. The new feature is available for argocd-server +and argocd-repo-server components and can be enabled using the --otlp-address flag. 
+ +### Power PC and IBM Z Support + +The list of supported architectures has been expanded, and now includes IBM Z (s390x) and PowerPC (ppc64le). Starting with the v2.4 release the official quay.io +repository is going to have images for amd64, arm64, ppc64le, and s390x architectures. + +### Other Notable Changes + +Overall v2.4 release includes more than 300 hundred commits from nearly 90 contributors. Here is a short sample of the contributions: + +* Enforce the deployment to remote clusters only +* Native support of GCP authentication for GKE +* Secured Redis connection +* ApplicationSet Gitea support + +## v2.3.7 (2022-07-29) + +### Notes + +This is mainly a security related release and updates compatibility with Kubernetes 1.24. + +**Attention:** The base image for 2.3.x reached end-of-life on July 14, 2022. This release upgraded the base image to Ubuntu 22.04 LTS. The change should have no effect on the majority of users. But if any of your git providers only supports now-deprecated key hash algorithms, then Application syncing might break. See the [2.2-to-2.3 upgrade notes](https://argo-cd.readthedocs.io/en/latest/operator-manual/upgrading/2.2-2.3/#support-for-private-repo-ssh-keys-using-the-sha-1-signature-hash-algorithm-is-removed-in-237) for details and workaround instructions. 
+ +### Bug fixes + +- fix: skip redirect url validation when it's the base href (#10058) (#10116) +- fix: upgrade moment from 2.29.2 to 2.29.3 (#9330) +- fix: avoid CVE-2022-28948 (#10093) +- fix: use serviceaccount name instead of struct (#9614) +- fix: create serviceaccount token for v1.24 clusters (#9546) + +### Other changes + +- test: Remove cluster e2e tests not intended for release-2.3 +- test: Remove circular symlinks from testdata (#9886) +- chore(deps): bump moment from 2.29.3 to 2.29.4 in /ui (#9897) +- chore: upgrade moment to latest version to fix CVE (#9005) +- chore: move dependencies to dev dependencies (#8541) +- docs: add OpenSSH breaking change notes (#10104) +- chore: update parse-url (#10101) +- chore: upgrade base image to 22.04 (#10103) +- docs: simplify Docker toolchain docs (#9966) (#10006) +- chore: update redis to 6.2.7 avoid CVE-2022-30065/CVE-2022-2097 (#10062) +- chore: upgrade Dex to 2.32.0 (#10036) (#10042) +- chore: update haproxy to 2.0.29 for redis-ha (#10045) +- test: check for error messages from CI env (#9953) + +## v2.3.6 (2022-07-12) + +### Security fixes + +* HIGH: Certificate verification is skipped for connections to OIDC providers ([GHSA-7943-82jg-wmw5](https://github.com/argoproj/argo-cd/security/advisories/GHSA-7943-82jg-wmw5)) +* LOW: A leaked API server encryption key can allow XSS for SSO users ([GHSA-pmjg-52h9-72qv](https://github.com/argoproj/argo-cd/security/advisories/GHSA-pmjg-52h9-72qv)) + +### Potentially-breaking changes + +The fix for GHSA-7943-82jg-wmw5 enables TLS certificate validation by default for connections to OIDC providers. If +connections to your OIDC provider fails validation, SSO will be broken for your Argo CD instance. You should test 2.3.6 +before upgrading it to production. From the new documentation: + +> By default, all connections made by the API server to OIDC providers (either external providers or the bundled Dex +> instance) must pass certificate validation. 
These connections occur when getting the OIDC provider's well-known +> configuration, when getting the OIDC provider's keys, and when exchanging an authorization code or verifying an ID +> token as part of an OIDC login flow. +> +> Disabling certificate verification might make sense if: +> * You are using the bundled Dex instance **and** your Argo CD instance has TLS configured with a self-signed certificate + > **and** you understand and accept the risks of skipping OIDC provider cert verification. +> * You are using an external OIDC provider **and** that provider uses an invalid certificate **and** you cannot solve + > the problem by setting `oidcConfig.rootCA` **and** you understand and accept the risks of skipping OIDC provider cert + > verification. +> +> If either of those two applies, then you can disable OIDC provider certificate verification by setting +> `oidc.tls.insecure.skip.verify` to `"true"` in the `argocd-cm` ConfigMap. + +### Bug fixes + +* fix: webhook typo in case of error in GetManifests (#9671) + +## v2.3.5 (2022-06-21) + +### Security fixes + +* CRITICAL: External URLs for Deployments can include javascript ([GHSA-h4w9-6x78-8vrj](https://github.com/argoproj/argo-cd/security/advisories/GHSA-h4w9-6x78-8vrj)) +* HIGH: Insecure entropy in PKCE/Oauth2/OIDC params ([GHSA-2m7h-86qq-fp4v](https://github.com/argoproj/argo-cd/security/advisories/GHSA-2m7h-86qq-fp4v)) +* MODERATE: DoS through large directory app manifest files ([GHSA-jhqp-vf4w-rpwq](https://github.com/argoproj/argo-cd/security/advisories/GHSA-jhqp-vf4w-rpwq)) +* MODERATE: Symlink following allows leaking out-of-bounds YAML files from Argo CD repo-server ([GHSA-q4w5-4gq2-98vm](https://github.com/argoproj/argo-cd/security/advisories/GHSA-q4w5-4gq2-98vm)) + +### Potentially-breaking changes + +From the [GHSA-2m7h-86qq-fp4v](https://github.com/argoproj/argo-cd/security/advisories/GHSA-2m7h-86qq-fp4v) description: + +> The patch introduces a new 
`reposerver.max.combined.directory.manifests.size` config parameter, which you should tune before upgrading in production. It caps the maximum total file size of .yaml/.yml/.json files in directory-type (raw manifest) Applications. The default max is 10M per Application. This max is designed to keep any single app from consuming more than 3G of memory in the repo-server (manifests consume more space in memory than on disk). The 300x ratio assumes a maliciously-crafted manifest file. If you only want to protect against accidental excessive memory use, it is probably safe to use a smaller ratio. +> +> If your organization uses directory-type Applications with very many manifests or very large manifests then check the size of those manifests and tune the config parameter before deploying this change to production. When testing, make sure to do a "hard refresh" in either the CLI or UI to test your directory-type App. That will make sure you're using the new max logic instead of relying on cached manifest responses from Redis. 
+ +### Bug fixes + +* fix: missing Helm params (#9565) (#9566) + +### Other + +* test: directory app manifest generation (#9503) +* chore: eliminate go-mpatch dependency (#9045) +* chore: Make unit tests run on platforms other than amd64 (#8995) +* chore: remove obsolete repo-server unit test (#9559) +* chore: update golangci-lint (#8988) +* fix: test race (#9469) +* chore: upgrade golangci-lint to v1.46.2 (#9448) +* test: fix ErrorContains (#9445) + +## v2.3.4 (2022-05-18) + +### Security fixes + +- CRITICAL: Argo CD will trust invalid JWT claims if anonymous access is enabled (https://github.com/argoproj/argo-cd/security/advisories/GHSA-r642-gv9p-2wjj) +- LOW: Login screen allows message spoofing if SSO is enabled (https://github.com/argoproj/argo-cd/security/advisories/GHSA-xmg8-99r8-jc2j) +- MODERATE: Symlink following allows leaking out-of-bound manifests and JSON files from Argo CD repo-server (https://github.com/argoproj/argo-cd/security/advisories/GHSA-6gcg-hp2x-q54h) + +### Bug Fixes + +- fix: Fix docs build error (#8895) +- fix: fix broken monaco editor collapse icons (#8709) +- chore: upgrade to go 1.17.8 (#8866) (#9004) +- fix: allow cli/ui to follow logs (#8987) (#9065) + +## v2.3.3 (2022-03-29) + +- fix: prevent excessive repo-server disk usage for large repos (#8845) (#8897) +- fix: Set QPS and burst rate for resource ops client (#8915) + +## v2.3.2 (2022-03-22) + +- fix: application resource APIs must enforce project restrictions + +## v2.3.1 (2022-03-10) + +- fix: Retry checkbox unchecked unexpectedly; Sync up with YAML (#8682) (#8720) +- chore: Bump stable version of application set addon (#8744) +- fix: correct jsonnet paths resolution (#8721) +- fix(ui): Applications page incorrectly resets to tiles view. 
Fixes #8702 (#8718) + +## v2.3.0 (2022-03-05) + +### Argo CD ApplicationSet and Notifications are now part of Argo CD + +Two popular [Argoproj Labs](https://github.com/argoproj-labs) projects [Argo CD ApplicationSet](https://github.com/argoproj/applicationset) and +[Argo CD Notifications](https://github.com/argoproj-labs/argocd-notifications) are now part of Argo CD! The default Argo CD installation manifests now +bundle both projects out of the box. Going forward you can expect more tightened integration of these projects into Argo CD. + +### New sync and diff strategies + +Users can now configure the Application resource to instruct Argo CD to consider the ignore difference setup during the sync process. +In order to do so, add the new sync option RespectIgnoreDifferences=true in the Application resource. Once the sync option is added, +Argo CD won't change ignored fields during the syncing process. + +Configuring ignored fields is also easier now. Instead of listing fields one by one users can now leverage the +managedFields metadata to instruct Argo CD about trusted managers and automatically ignore any fields owned by them. A new diff customization +(managedFieldsManagers) is now available allowing users to specify managers the application should trust and to ignore all fields owned by those managers. +Read more about these changes at [New sync and diff strategies in ArgoCD](https://blog.argoproj.io/new-sync-and-diff-strategies-in-argocd-44195d3f8b8c) blog post. + +### ARM Images + +An officially supported ARM 64 image is now available. Enjoy running Argo CD on your Raspberry Pi! Additionally, the image size was reduced by nearly ~50% +and is only 200MB now. The ARM version of `argocd` CLI is also available and published as a Github release artifact. + +### Compact Tree View And Click Application Navigation + +The application details page now supports compact application resources tree visualization. 
Using the "Group Nodes" button, you can collapse the similar resources +into a single group node to remove the clutter and make it easier to understand the state of application resources. You still can get detailed information about the collapsed resources by clicking on the group node. The list of collapsed resources will be available in a sliding panel. Compact resource tree is still too big? +You can use the zoom in and zoom out feature to make it smaller - or even larger! + +You no longer need to move back and forth between the application details page and the application list page. Instead you can navigate directly to the required application by clicking the search icon in the application details page title. + +### Upgraded Config Management Tools + +Both bundled Helm and Kustomize binaries have been upgraded to the latest versions. Kustomize has been upgraded from 4.2.0 to 4.4.1 and Helm has been upgraded from 3.7.1 to 3.8.0. + +### Bug Fixes and Performance Enhancements + +* Config management tools enhancements: +* The skipCrds flag and ability to ignore missing values files for Helm (#8012, #8003) +* Additional environment variables for Kustomize (#8096) +* Argo CD CLI follows the XDG Base directory standard (#7638) +* Redis is no longer used during SSO login (#8241) + + +### Features + +- feat: Add app list and details page views to navigation history (#7776) (#7937) +- feat: Add skipCrds flag for helm charts (#8012) +- feat: Add visual indicator for newly created pods (#8006) +- feat: Added a new Helm option ignoreMissingValueFiles (#7767) (#8003) +- feat: Allow configuring system wide ignore differences for all resources (#8224) +- feat: Allow escaping dollar in Envsubst (#7961) +- feat: Allow external links on Application (#3487) (#8231) +- feat: Allow selecting application on detail page (#8176) +- feat: Bundle applicationset-controller with argocd (#8148) +- feat: Enable specifying root ca for oidc (#6712) +- feat: Expose ARGOCD_APP_NAME to the 
`kustomize build` command (#8096) +- feat: Ignore differences owned by trusted managers from managedFields (#7869) +- feat: New sync option to use ignore diff configs during sync (#8078) +- feat: Provide address flag for admin dashboard command (#8095) +- feat: Store "Group Nodes" button state in application details preferences (#8036) +- feat: Support specifying cluster by name in addition to API server URL in Cluster API (#8077) +- feat: Support XDG Base directory standard (#7638) (#7791) +- feat: Use encrypted cookie to store OAuth2 state nonce (instead of redis) (#8241) +- feat: Build images on PR and conditionally build arm64 image on push (#8108) + +### Bug Fixes + +- fix: Add "Restarting MinIO" status to MiniO Tenant health check (#8191) +- fix: Add all resources in list view (#7295) +- fix: Adding pagination to grouped nodes sliding panel#7837 (#7915) +- fix: Allow all resources to add external links (#7923) +- fix: Always call ValidateDestination (#7976) +- fix: Application exist panic when execute api call (#8188) +- fix: Application-icons-alignment (#8054) +- fix: Controller panics if resource manifest has incorrect annotation (#8022) +- fix: Correctly handle project field during partial cluster update (#7994) +- fix: Default value for retry validation #8055 (#8064) +- fix: Fix a possible crash when parsing RBAC (#8165) +- fix: Grouped node list missing resources on Compact resources view #8014 (#8018) +- fix: Issue with headless installation (#7958) +- fix: Issue with project scoped resources (#8048) +- fix: Kubernetes labels normalization for Prometheus (#7925) +- fix: Nested Refresh dropdown does not work on Application Details page #1524 (#7950) +- fix: Network line colors and menu icon alignment (#8059) +- fix: Opening app details shows UI error on some apps (#8016) (#8019) +- fix: Parse to correct uint32 type (#8177) +- fix: Prevent possible nil-pointer deref in normalizer (#8185) +- fix: Prevent possible out-of-bounds access when loading policies 
(#8186) +- fix: Provide a semantic version parsed version for KUBE_VERSION (#8250) +- fix: Refreshing label toast (#7979) +- fix: Resource details page crashes when resource is not deployed and hide managed fields is selected (#7971) +- fix: Retry disabled text (#8004) +- fix: Route health check stuck in 'Progressing' (#8170) +- fix: Sync window panel is crashed if resource name not contain letters (#8053) +- fix: Targetervision compatible without prefix refs/heads or refs/tags (#7939) +- fix: Trailing line in Filter Dropdown Menus #7821 (#8001) +- fix: Webhook URL matching edge cases (#7981) +- fix(ui): Use consistent case for diff modes (#7945) +- fix: Use gRPC timeout for sidecar CMPs (#8131) (#8236) + +### Other + +- chore: Bump go-jsonnet to v0.18.0 (#8011) +- chore: Escape proj in regex (#7985) +- chore: Exclude argocd-server rbac for core-install (#8234) +- chore: Log out the resource triggering reconciliation (#8192) +- chore: Migrate to use golang-jwt/jwt v4.2.0 (#8136) +- chore: Move resolveRevision from api-server to repo-server (#7966) +- chore: Update notifications version (#8267) +- chore: Update slack version (#8299) +- chore: Update to Redis 6.2.4 (#8157) +- chore: Upgrade awscli to 2.4.6 and remove python deps (#7947) +- chore: Upgrade base image to ubuntu:21.10 (#8230) +- chore: Upgrade dex to v2.30.2 (https://github.com/dexidp/dex/issues/2326) (#8237) +- chore: Upgrade gitops engine (#8288) +- chore: Upgrade golang to 1.17.6 (#8229) +- chore: Upgrade helm to most recent version (v3.7.2) (#8226) +- chore: Upgrade k8s client to v1.23 (#8213) +- chore: Upgrade kustomize to most recent version (v4.4.1) (#8227) +- refactor: Introduce 'byClusterName' secret index to speedup cluster server URL lookup (#8133) +- refactor: Move project filtering to server side (#8102) + +## v2.2.12 (2022-07-29) + +### Notes + +This is mainly a security related release and updates compatibility with Kubernetes 1.24. 
+
+**Attention:** The base image for 2.2.x reached end-of-life on January 20, 2022. This release upgraded the base image to Ubuntu 22.04 LTS. The change should have no effect on the majority of users. But if any of your git providers only supports now-deprecated key hash algorithms, then Application syncing might break. See the [2.1-to-2.2 upgrade notes](https://argo-cd.readthedocs.io/en/latest/operator-manual/upgrading/2.1-2.2/#support-for-private-repo-ssh-keys-using-the-sha-1-signature-hash-algorithm-is-removed-in-2212) for details and workaround instructions.
+
+### Bug fixes
+
+- fix: create serviceaccount token for v1.24 clusters (#9546)
+- fix: upgrade moment from 2.29.2 to 2.29.3 (#9330)
+- fix: avoid CVE-2022-28948 (#10093)
+
+### Other changes
+
+- chore: Remove deprecated K8s versions from test matrix
+- chore: Go mod tidy
+- test: Remove circular symlinks from testdata (#9886)
+- test: Fix e2e tests for release-2.2 branch
+- chore: bump redoc version to avoid CVE-2021-23820 (#8604)
+- chore(deps): bump moment from 2.29.3 to 2.29.4 in /ui (#9897)
+- chore: upgrade moment to latest version to fix CVE (#9005)
+- chore: move dependencies to dev dependencies (#8541)
+- docs: add OpenSSH breaking change notes (#10104)
+- chore: update parse-url (#10101)
+- chore: fix codegen
+- chore: fix codegen
+- chore: upgrade base image to 22.04 (#10105)
+- docs: simplify Docker toolchain docs (#9966) (#10006)
+- chore: update redis to 6.2.7 avoid CVE-2022-30065/CVE-2022-2097 (#10068)
+- chore: upgrade Dex to 2.32.0 (#10036) (#10042)
+- chore: update haproxy to 2.0.29 for redis-ha (#10045)
+- test: check for error messages from CI env (#9953)
+
+## v2.2.11 (2022-07-12)
+
+### Security fixes
+
+* HIGH: Certificate verification is skipped for connections to OIDC providers ([GHSA-7943-82jg-wmw5](https://github.com/argoproj/argo-cd/security/advisories/GHSA-7943-82jg-wmw5))
+* LOW: A leaked API server encryption key can allow XSS for SSO users 
([GHSA-pmjg-52h9-72qv](https://github.com/argoproj/argo-cd/security/advisories/GHSA-pmjg-52h9-72qv)) + +### Potentially-breaking changes + +The fix for GHSA-7943-82jg-wmw5 enables TLS certificate validation by default for connections to OIDC providers. If +connections to your OIDC provider fails validation, SSO will be broken for your Argo CD instance. You should test 2.2.11 +before upgrading it to production. From the new documentation: + +> By default, all connections made by the API server to OIDC providers (either external providers or the bundled Dex +> instance) must pass certificate validation. These connections occur when getting the OIDC provider's well-known +> configuration, when getting the OIDC provider's keys, and when exchanging an authorization code or verifying an ID +> token as part of an OIDC login flow. +> +> Disabling certificate verification might make sense if: +> * You are using the bundled Dex instance **and** your Argo CD instance has TLS configured with a self-signed certificate + > **and** you understand and accept the risks of skipping OIDC provider cert verification. +> * You are using an external OIDC provider **and** that provider uses an invalid certificate **and** you cannot solve + > the problem by setting `oidcConfig.rootCA` **and** you understand and accept the risks of skipping OIDC provider cert + > verification. +> +> If either of those two applies, then you can disable OIDC provider certificate verification by setting +> `oidc.tls.insecure.skip.verify` to `"true"` in the `argocd-cm` ConfigMap. 
+ +### Features + +* feat: enable specifying root ca for oidc (#6712) + +### Bug fixes + +* fix: webhook typo in case of error in GetManifests (#9671) + +## v2.2.10 (2022-06-21) + +### Security fixes + +* CRITICAL: External URLs for Deployments can include javascript ([GHSA-h4w9-6x78-8vrj](https://github.com/argoproj/argo-cd/security/advisories/GHSA-h4w9-6x78-8vrj)) +* HIGH: Insecure entropy in PKCE/Oauth2/OIDC params ([GHSA-2m7h-86qq-fp4v](https://github.com/argoproj/argo-cd/security/advisories/GHSA-2m7h-86qq-fp4v)) +* MODERATE: DoS through large directory app manifest files ([GHSA-jhqp-vf4w-rpwq](https://github.com/argoproj/argo-cd/security/advisories/GHSA-jhqp-vf4w-rpwq)) +* MODERATE: Symlink following allows leaking out-of-bounds YAML files from Argo CD repo-server ([GHSA-q4w5-4gq2-98vm](https://github.com/argoproj/argo-cd/security/advisories/GHSA-q4w5-4gq2-98vm)) + +### Potentially-breaking changes + +From the [GHSA-2m7h-86qq-fp4v](https://github.com/argoproj/argo-cd/security/advisories/GHSA-2m7h-86qq-fp4v) description: + +> The patch introduces a new `reposerver.max.combined.directory.manifests.size` config parameter, which you should tune before upgrading in production. It caps the maximum total file size of .yaml/.yml/.json files in directory-type (raw manifest) Applications. The default max is 10M per Application. This max is designed to keep any single app from consuming more than 3G of memory in the repo-server (manifests consume more space in memory than on disk). The 300x ratio assumes a maliciously-crafted manifest file. If you only want to protect against accidental excessive memory use, it is probably safe to use a smaller ratio. +> +> If your organization uses directory-type Applications with very many manifests or very large manifests then check the size of those manifests and tune the config parameter before deploying this change to production. When testing, make sure to do a "hard refresh" in either the CLI or UI to test your directory-type App. 
That will make sure you're using the new max logic instead of relying on cached manifest responses from Redis. + +### Bug fixes + +* fix: missing Helm params (#9565) (#9566) + +### Other + +* test: directory app manifest generation (#9503) +* test: fix erroneous test change +* chore: eliminate go-mpatch dependency (#9045) +* chore: Make unit tests run on platforms other than amd64 (#8995) +* chore: remove obsolete repo-server unit test (#9559) +* chore: upgrade golangci-lint to v1.46.2 (#9448) +* chore: update golangci-lint (#8988) + +## v2.2.9 (2022-05-18) + +### Notes + +This is a security release. We urge all users of the 2.2.z branch to update as soon as possible. Please refer to the _Security fixes_ section below for more details. + +### Security fixes + +- CRITICAL: Argo CD will trust invalid JWT claims if anonymous access is enabled (https://github.com/argoproj/argo-cd/security/advisories/GHSA-r642-gv9p-2wjj) +- LOW: Login screen allows message spoofing if SSO is enabled (https://github.com/argoproj/argo-cd/security/advisories/GHSA-xmg8-99r8-jc2j) +- MODERATE: Symlink following allows leaking out-of-bound manifests and JSON files from Argo CD repo-server (https://github.com/argoproj/argo-cd/security/advisories/GHSA-6gcg-hp2x-q54h) + +## v2.2.8 (2022-03-22) + +### Special notes + +This release contains the fix for a security issue with critical severity. We recommend users on the 2.2 release branch to update to this release as soon as possible. + +More information can be found in the related +[security advisory](https://github.com/argoproj/argo-cd/security/advisories/GHSA-2f5v-8r3f-8pww). + +### Changes + +As part of the security fix, the Argo CD UI no longer automatically presents child resources of allow-listed resources unless the child resources are also allow-listed. For example, Pods are not going to show up if only Deployment is added to the allow-list. 
+ +If you have [projects](https://argo-cd.readthedocs.io/en/stable/user-guide/projects/) configured with allow-lists, make sure the allow-lists include all the resources you want users to be able to view/manage through the UI. For example, if your project allows `Deployments`, you would add `ReplicaSets` and `Pods`. + +#### Bug Fixes + +- fix: application resource APIs must enforce project restrictions + +## v2.2.7 (2022-03-08) + +### Bug Fixes + +- fix: correct jsonnet paths resolution (#8721) + +## v2.2.6 (2022-03-06) + +### Bug Fixes + +- fix: prevent file traversal using helm file values param and application details api (#8606) +- fix!: enforce app create/update privileges when getting repo details (#8558) +- feat: support custom helm values file schemes (#8535) + +## v2.2.5 (2022-02-04) + +- fix: Resolve symlinked value files correctly (#8387) + +## v2.2.4 (2022-02-03) + +### Special notes + +This release contains the fix for a security issue with high severity. We recommend users on the 2.2 release branch to update to this release as soon as possible. 
+ +More information can be found in the related +[security advisory](https://github.com/argoproj/argo-cd/security/advisories/GHSA-63qx-x74g-jcr7) + +### Bug Fixes + +- fix: Prevent value files outside repository root + +### Other changes + +- chore: upgrade dex to v2.30.2 (backport of #8237) (#8257) + +## v2.2.3 (2022-01-18) + +- fix: Application exist panic when execute api call (#8188) +- fix: Route health check stuck in 'Progressing' (#8170) +- refactor: Introduce 'byClusterName' secret index to speedup cluster server URL lookup (#8133) +- chore: Update to Redis 6.2.4 (#8157) (#8158) + +## v2.2.2 (2021-12-31) + +- fix: Issue with project scoped resources (#8048) +- fix: Escape proj in regex (#7985) +- fix: Default value for retry validation #8055 (#8064) +- fix: Sync window panel is crashed if resource name not contain letters (#8053) +- fix: Upgrade github.com/argoproj/gitops-engine to v0.5.2 +- fix: Retry disabled text (#8004) +- fix: Opening app details shows UI error on some apps (#8016) (#8019) +- fix: Correctly handle project field during partial cluster update (#7994) +- fix: Cluster API does not support updating labels and annotations (#7901) + +## v2.2.1 (2021-12-16) + +- fix: Resource details page crashes when resource is not deployed and hide managed fields is selected (#7971) +- fix: Issue with headless installation (#7958) +- fix: Nil pointer (#7905) + +## v2.2.0 (2021-12-14) + +> [Upgrade instructions](./docs/operator-manual/upgrading/2.1-2.2.md) + +### Project Scoped repositories and clusters + +The project scoped repositories and clusters is a feature that simplifies registering the repositories and cluster credentials. +Instead of requiring operators to set up in advance all clusters and git repositories that can be used, developers can now do +this on their own in a self-service manner. + +### Config Management Plugins V2 + +The Config Management Plugins V2 is set of enhancement of the existing config management plugins feature. 
+The list includes an improved installation experience, the ability to package a plugin into a separate image and
+improved plugin manifests discovery.
+
+### Resource tracking
+
+Argo CD has traditionally tracked the resources it manages by the well-known "app.kubernetes.io/instance" property.
+While using this property works ok in simple scenarios, it also has several limitations. Argo CD now allows you to use
+a new annotation (argocd.argoproj.io/tracking-id) for tracking your resources. Using this annotation is a much more flexible approach
+as there are no conflicts with other Kubernetes tools, and you can easily install multiple Argo CD instances on the same clusters.
+
+### Bug Fixes and Performance Enhancements
+
+* Argo CD API server caches RBAC checks, which significantly improves the GET /api/v1/applications API performance (#7587)
+* Argo CD RBAC supports regex matches (#7165)
+* Health check support for KubeVirt (#7176), Cassandra (#7017), Openshift Route (#7112), DeploymentConfig (#7114), Confluent (#6957) and SparkApplication (#7434) CRDs. 
+* Persistent banner (#7312) with custom positioning (#7462) +* Cluster name support in project destinations (#7198) +* around 30 more features and a total of 84 bug fixes + +## v2.1.16 (2022-06-21) + +### Security fixes + +* CRITICAL: External URLs for Deployments can include javascript ([GHSA-h4w9-6x78-8vrj](https://github.com/argoproj/argo-cd/security/advisories/GHSA-h4w9-6x78-8vrj)) +* HIGH: Insecure entropy in PKCE/Oauth2/OIDC params ([GHSA-2m7h-86qq-fp4v](https://github.com/argoproj/argo-cd/security/advisories/GHSA-2m7h-86qq-fp4v)) +* MODERATE: DoS through large directory app manifest files ([GHSA-jhqp-vf4w-rpwq](https://github.com/argoproj/argo-cd/security/advisories/GHSA-jhqp-vf4w-rpwq)) +* MODERATE: Symlink following allows leaking out-of-bounds YAML files from Argo CD repo-server ([GHSA-q4w5-4gq2-98vm](https://github.com/argoproj/argo-cd/security/advisories/GHSA-q4w5-4gq2-98vm)) + +**Note:** This will be the last security fix release in the 2.1.x series. Please [upgrade to a newer minor version](https://argo-cd.readthedocs.io/en/latest/operator-manual/upgrading/overview/) to continue to get security fixes. + +### Potentially-breaking changes + +From the [GHSA-2m7h-86qq-fp4v](https://github.com/argoproj/argo-cd/security/advisories/GHSA-2m7h-86qq-fp4v) description: + +> The patch introduces a new `reposerver.max.combined.directory.manifests.size` config parameter, which you should tune before upgrading in production. It caps the maximum total file size of .yaml/.yml/.json files in directory-type (raw manifest) Applications. The default max is 10M per Application. This max is designed to keep any single app from consuming more than 3G of memory in the repo-server (manifests consume more space in memory than on disk). The 300x ratio assumes a maliciously-crafted manifest file. If you only want to protect against accidental excessive memory use, it is probably safe to use a smaller ratio. 
+> +> If your organization uses directory-type Applications with very many manifests or very large manifests then check the size of those manifests and tune the config parameter before deploying this change to production. When testing, make sure to do a "hard refresh" in either the CLI or UI to test your directory-type App. That will make sure you're using the new max logic instead of relying on cached manifest responses from Redis. + +### Bug fixes + +* fix: missing Helm params (#9565) (#9566) + +### Other + +* test: directory app manifest generation (#9503) +* test: fix erroneous test change +* chore: eliminate go-mpatch dependency (#9045) +* chore: Make unit tests run on platforms other than amd64 (#8995) +* chore: remove obsolete repo-server unit test (#9559) +* chore: upgrade golangci-lint to v1.46.2 (#9448) +* chore: update golangci-lint (#8988) +* test: fix ErrorContains (#9445) + +## v2.1.15 (2022-05-18) + +### Notes + +This is a security release. We urge all users of the 2.1.z branch to update as soon as possible. Please refer to the _Security fixes_ section below for more details. + +### Security fixes + +- CRITICAL: Argo CD will trust invalid JWT claims if anonymous access is enabled (https://github.com/argoproj/argo-cd/security/advisories/GHSA-r642-gv9p-2wjj) +- LOW: Login screen allows message spoofing if SSO is enabled (https://github.com/argoproj/argo-cd/security/advisories/GHSA-xmg8-99r8-jc2j) +- MODERATE: Symlink following allows leaking out-of-bound manifests and JSON files from Argo CD repo-server (https://github.com/argoproj/argo-cd/security/advisories/GHSA-6gcg-hp2x-q54h) + +## v2.1.14 (2022-03-22) + +### Special notes + +This release contains the fix for a security issue with critical severity. We recommend users on the 2.1 release branch to update to this release as soon as possible. + +More information can be found in the related +[security advisory](https://github.com/argoproj/argo-cd/security/advisories/GHSA-2f5v-8r3f-8pww). 
+ +### Changes + +As part of the security fix, the Argo CD UI no longer automatically presents child resources of allow-listed resources unless the child resources are also allow-listed. For example, Pods are not going to show up if only Deployment is added to the allow-list. + +If you have [projects](https://argo-cd.readthedocs.io/en/stable/user-guide/projects/) configured with allow-lists, make sure the allow-lists include all the resources you want users to be able to view/manage through the UI. For example, if your project allows `Deployments`, you would add `ReplicaSets` and `Pods`. + +#### Bug Fixes + +- fix: application resource APIs must enforce project restrictions + +## v2.1.13 (2022-03-22) + +Unused release number. + +## v2.1.12 (2022-03-08) + +### Bug Fixes + +- fix: correct jsonnet paths resolution (#8721) + +## v2.1.11 (2022-03-06) + +### Bug Fixes + +- fix: prevent file traversal using helm file values param and application details api (#8606) +- fix!: enforce app create/update privileges when getting repo details (#8558) +- feat: support custom helm values file schemes (#8535) + +## v2.1.10 (2022-02-04) + +### Bug Fixes + +- fix: Resolve symlinked value files correctly (#8387) + +## v2.1.9 (2022-02-03) + +### Special notes + +This release contains the fix for a security issue with high severity. We recommend users on the 2.1 release branch to update to this release as soon as possible. 
+ +More information can be found in the related +[security advisory](https://github.com/argoproj/argo-cd/security/advisories/GHSA-63qx-x74g-jcr7) + +### Bug Fixes + +- fix: Prevent value files outside repository root + +## v2.1.8 (2021-12-13) + +### Bug Fixes + +- fix: issue with keepalive (#7861) +- fix nil pointer dereference error (#7905) +- fix: env vars to tune cluster cache were broken (#7779) +- fix: upgraded gitops engine to v0.4.2 (fixes #7561) + +## v2.1.7 (2021-12-14) + +- fix: issue with keepalive (#7861) +- fix nil pointer dereference error (#7905) +- fix: env vars to tune cluster cache were broken (#7779) +- fix: upgraded gitops engine to v0.4.2 (fixes #7561) + + +## v2.1.6 (2021-11-16) + +- fix: don't use revision caching during app creation (#7508) +- fix: supporting OCI dependencies. Fixes #6062 (#6994) + +## v2.1.5 (2021-11-16) + +- fix: Invalid memory address or nil pointer dereference in processRequestedAppOperation (#7501) + +## v2.1.4 (2021-11-15) + +- fix: Operation has completed with phase: Running (#7482) +- fix: Application status panel shows Syncing instead of Deleting (#7486) +- fix(ui): Add Error Boundary around Extensions and comply with new Extensions API (#7215) + + +## v2.1.3 (2021-10-29) + +- fix: core-install.yaml always refers to latest argocd image (#7321) +- fix: handle applicationset backup forbidden error (#7306) +- fix: Argo CD should not use cached git/helm revision during app creation/update validation (#7244) + +## v2.1.2 (2021-10-02) + +- fix: cluster filter popping out of box (#7135) +- fix: gracefully shutdown metrics server when dex config changes (#7138) +- fix: upgrade gitops engine version to v0.4.1 (#7088) +- fix: repository name already exists when multiple helm dependencies (#7096) + + +## v2.1.1 (2021-08-25) + +### Bug Fixes + +- fix: password reset requirements (#7071) +- fix: Custom Styles feature is broken (#7067) +- fix(ui): Add State to props passed to Extensions (#7045) +- fix: keep uid_entrypoint.sh for 
backward compatibility (#7047)
+
+## v2.1.0 (2021-08-20)
+
+> [Upgrade instructions](./docs/operator-manual/upgrading/2.0-2.1.md)
+
+### Argo CD Core
+
+Argo CD Core - lightweight Argo CD distribution that packages only core GitOps features and relies
+on Kubernetes API/RBAC to power UI and CLI.
+
+### Core Features
+
+* The synchronization process became much, much faster and requires significantly less memory.
+* An additional caching that ensures that each repository's target revisions are queried only once per
+  reconciliation cycle. This dramatically reduces the number of Git requests.
+* Improved Diffing Customizations: use JQ path expressions to exclude required fields from the diffing.
+* Health assessment support for new CRDs: introduced health assessment of CRDs from trident.netapp.io,
+  elasticsearch.k8s.elastic.co, cluster.x-k8s.io, and minio.min.io API groups.
+
+### Improved Settings
+
+A set of changes has been implemented to simplify configuring Argo CD.
+
+* Simplified Repository Registration: you no longer need to modify the argocd-cm ConfigMap to register a
+  new Git or Helm repository.
+* Enhanced Resource Customizations: the resource.customizations key has been deprecated in favor of
+  a separate ConfigMap key per resource.
+* Reference secret values from any Kubernetes secret: starting v2.1 you can use sensitive data stored in
+  any Kubernetes secret to configure Argo CD.
+* Simplify parametrization of Argo CD server processes: an additional optional ConfigMap argocd-cmd-params-cm
+  has been introduced.
+
+### Refreshed User Interface
+
+* Enhanced and more consistent filters on Applications List and Applications Details pages.
+* Status bar on the Application List page.
+* The redesigned search box on the Application List page and more.
+
+### The argocd-util CLI deprecation
+
+The argocd-util CLI is now available under the argocd admin subcommand. 
+ +## v2.0.5 (2021-07-22) + +* fix: allow argocd-notification ingress to repo-server (#6746) +* fix: argocd-server crashes due to nil pointer dereference (#6757) +* fix: WebUI failure when loading pod view 't.parentRefs is undefined' (#6490) (#6535) +* fix: prevent 'cannot read property "filter" of undefined' during nodes filtering (#6453) +* fix: download Pod Logs button not honouring argocd-server rootpath (#6548) (#6627) +* fix: Version warning banner in docs (#6682) +* fix: upgrade gitops engine to fix workflow health check + +## v2.0.4 (2021-06-22) + +* fix: typo in networkPolicy definition in manifests (#6532) +* fix: Update redis to 6.2.4 (#6475) +* fix: allows access to dex metrics from any pod (#6420) +* fix: add client side retry to prevent 'transport is closing' errors (#6402) +* fix: Update documentation Argocd app CRD health with app of apps (#6281) +* fix(ui): Crash on application pod view (#6384) +* chore: pin mkdocs version to fix docs build (#6421) +* chore: regenerate manifests using codegen (#6422) +* refactor: use RLock and RUnlock for project to improve performance (#6225) +* chore: Update Golang to v1.16.4 (#6358) + +## v2.0.3 (2021-05-27) + +### Bug Fixes + +* fix: add missing --container flag to 'argocd app logs' command (#6320) +* fix: grpc web proxy must ensure to read full header (#6319) +* fix: controller should refresh app before running sync operation (#6294) + +## v2.0.2 (2021-05-20) + +### Bug Fixes + +* fix: enable access to metrics port in embedded network policies (#6277) +* fix: display log streaming error in logs viewer (#6100) (#6273) +* fix: Don't count errored or completed neighbor pods toward resource consumption (#6259) +* fix: Enable kex algo diffie-hellman-group-exchange-sha256 for go-git ssh (#6256) +* fix: copy github app key from repocreds (#6140, #6197) +* fix(ui): UI crashes after reinstalling ArgoCD (#6218) +* fix: add network policies to restrict traffic flow between argocd components (#6156) +* fix: Revert "feat: 
Add health checks for kubernetes-external-secrets (#5435)" +* chore: Allow ingress traffic to argocd-server by default (#6179) + +## v2.0.1 (2021-04-15) + +### Bug Fixes + +* fix: spark application check fails on missing section (#6036) +* fix: Adding explicit bind to redis and sentinel for IPv4 clusters #5957 (#6005) +* fix: fix: use correct field for evaluating whether or not GitHub Enterprise is selected (#5987) + +## v2.0.0 (2021-04-07) > [Upgrade instructions](./docs/operator-manual/upgrading/1.8-2.0.md) @@ -69,7 +1008,7 @@ resources, you will have to adapt your cluster resources allow lists to explicit ## v1.8.4 (2021-02-05) - feat: set X-XSS-Protection while serving static content (#5412) -- fix: version info should be avaialble if anonymous access is enabled (#5422) +- fix: version info should be available if anonymous access is enabled (#5422) - fix: disable jwt claim audience validation #5381 (#5413) - fix: /api/version should not return tools version for unauthenticated requests (#5415) - fix: account tokens should be rejected if required capability is disabled (#5414) @@ -198,7 +1137,7 @@ In addition to new features and enhancements, we’ve fixed more than 50 bugs an ## v1.7.5 (2020-09-15) - fix: app create with -f should not ignore other options (#4322) -- fix: limit concurrent list requests accross all clusters (#4328) +- fix: limit concurrent list requests across all clusters (#4328) - fix: fix possible deadlock in /v1/api/stream/applications and /v1/api/application APIs (#4315) - fix: WatchResourceTree does not enforce RBAC (#4311) - fix: app refresh API should use app resource version (#4303) @@ -332,7 +1271,7 @@ use cases, such as bootstrapping a Kubernetes cluster, or decentralized manageme #### Other -- refactoring: Gitops engine (#3066) +- refactoring: GitOps engine (#3066) ## v1.5.8 (2020-06-16) @@ -395,7 +1334,7 @@ customizations, custom resource health checks, and more. 
### Other * New Project and Application CRD settings ([#2900](https://github.com/argoproj/argo-cd/issues/2900), [#2873](https://github.com/argoproj/argo-cd/issues/2873)) that allows customizing Argo CD behavior. -* Upgraded Dex (v2.22.0) enables seamless [SSO integration](https://www.openshift.com/blog/openshift-authentication-integration-with-argocd) with Openshift. +* Upgraded Dex (v2.22.0) enables seamless [SSO integration](https://www.openshift.com/blog/openshift-authentication-integration-with-argocd) with OpenShift. #### Enhancements @@ -427,7 +1366,7 @@ customizations, custom resource health checks, and more. * fix for helm repo add with flag --insecure-skip-server-verification (#3420) * fix: app diff --local support for helm repo. #3151 (#3407) * fix: Syncing apps incorrectly states "app synced", but this is not true (#3286) -* fix: for jsonnet when it is localed in nested subdirectory and uses import (#3372) +* fix: for jsonnet when it is located in nested subdirectory and uses import (#3372) * fix: Update 4.5.3 redis-ha helm manifest (#3370) * fix: return 401 error code if username does not exist (#3369) * fix: Do not panic while running hooks with short revision (#3368) @@ -543,7 +1482,7 @@ Last-minute bugs that will be addressed in 1.5.1 shortly: - fix: argocd-util backup produced truncated backups. import app status (#3096) - fix: upgrade redis-ha chart and enable haproxy (#3147) - fix: make dex server deployment init container resilient to restarts (#3136) -- fix: reduct secret values of manifests stored in git (#3088) +- fix: redact secret values of manifests stored in git (#3088) - fix: labels not being deleted via UI (#3081) - fix: HTTP|HTTPS|NO_PROXY env variable reading #3055 (#3063) - fix: Correct usage text for repo add command regarding insecure repos (#3068) @@ -650,10 +1589,10 @@ More documentation and tools are coming in patch releases. The Argo CD deletes all **in-flight** hooks if you terminate running sync operation. 
The hook state assessment change implemented in this release the Argo CD enables detection of an in-flight state for all Kubernetes resources including `Deployment`, `PVC`, `StatefulSet`, `ReplicaSet` etc. So if you terminate the sync operation that has, for example, `StatefulSet` hook that is `Progressing` it will be deleted. The long-running jobs are not supposed to be used as a sync hook and you should consider using -[Sync Waves](https://argoproj.github.io/argo-cd/user-guide/sync-waves/) instead. +[Sync Waves](https://argo-cd.readthedocs.io/en/stable/user-guide/sync-waves/) instead. #### Enhancements -* feat: Add custom healthchecks for cert-manager v0.11.0 (#2689) +* feat: Add custom health checks for cert-manager v0.11.0 (#2689) * feat: add git submodule support (#2495) * feat: Add repository credential management API and CLI (addresses #2136) (#2207) * feat: add support for --additional-headers cli flag (#2467) @@ -838,7 +1777,7 @@ There may be instances when you want to control the times during which an Argo C #### Bug Fixes - failed parsing on parameters with comma (#1660) -- Statefulset with OnDelete Update Strategy stuck progressing (#1881) +- StatefulSet with OnDelete Update Strategy stuck progressing (#1881) - Warning during secret diffing (#1923) - Error message "Unable to load data: key is missing" is confusing (#1944) - OIDC group bindings are truncated (#2006) @@ -920,7 +1859,7 @@ There may be instances when you want to control the times during which an Argo C ## v1.2.3 (2019-10-1) * Make argo-cd docker images openshift friendly (#2362) (@duboisf) * Add dest-server and dest-namespace field to reconciliation logs (#2354) -- Stop loggin /repository.RepositoryService/ValidateAccess parameters (#2386) +- Stop logging /repository.RepositoryService/ValidateAccess parameters (#2386) ## v1.2.2 (2019-09-26) + Resource action equivalent to `kubectl rollout restart` (#2177) @@ -1005,7 +1944,7 @@ Support for Git LFS enabled repositories - now you can store 
Helm charts as tar - Wait for CRD creation during sync process (#1940) - Added a button to select out of sync items in the sync panel (#1902) - Proper handling of an excluded resource in an application (#1621) -- Stop repeating logs on stoped container (#1614) +- Stop repeating logs on stopped container (#1614) - Fix git repo url parsing on application list view (#2174) - Fix nil pointer dereference error during app reconciliation (#2146) - Fix history api fallback implementation to support app names with dots (#2114) @@ -1061,7 +2000,7 @@ optimized which significantly reduced the number of Git requests. With v1.1 rele #### User Defined Application Metadata User-defined Application metadata enables the user to define a list of useful URLs for their specific application and expose those links on the UI -(e.g. reference tp a CI pipeline or an application-specific management tool). These links should provide helpful shortcuts that make easier to integrate Argo CD into existing +(e.g. reference to a CI pipeline or an application-specific management tool). These links should provide helpful shortcuts that make easier to integrate Argo CD into existing systems by making it easier to find other components inside and outside Argo CD. ### Deprecation Notice @@ -1425,7 +2364,7 @@ has a minimum client version of v0.12.0. Older CLI clients will be rejected. 
* Deprecate componentParameterOverrides in favor of source specific config (#1207) * Support talking to Dex using local cluster address instead of public address (#1211) * Use Recreate deployment strategy for controller (#1315) -* Honor os environment variables for helm commands (#1306) (@1337andre) +* Honor OS environment variables for helm commands (#1306) (@1337andre) * Disable CGO_ENABLED for server/controller binaries (#1286) * Documentation fixes and improvements (@twz123, @yann-soubeyrand, @OmerKahani, @dulltz) - Fix CRD creation/deletion handling (#1249) @@ -1917,8 +2856,8 @@ RBAC policy rules, need to be rewritten to include one extra column with the eff + Override parameters ## v0.1.0 (2018-03-12) -+ Define app in Github with dev and preprod environment using KSonnet ++ Define app in GitHub with dev and preprod environment using KSonnet + Add cluster Diff App with a cluster Deploy app in a cluster + Deploy a new version of the app in the cluster -+ App sync based on Github app config change - polling only ++ App sync based on GitHub app config change - polling only + Basic UI: App diff between Git and k8s cluster for all environments Basic GUI diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 0000000000000..507193dad5611 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,10 @@ +# All +** @argoproj/argocd-approvers + +# Docs +/docs/** @argoproj/argocd-approvers @argoproj/argocd-approvers-docs +/USERS.md @argoproj/argocd-approvers @argoproj/argocd-approvers-docs + +# CI +/.github/** @argoproj/argocd-approvers @argoproj/argocd-approvers-ci +/.goreleaser.yaml @argoproj/argocd-approvers @argoproj/argocd-approvers-ci diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000000..0cef196dca5a1 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1 @@ +Please refer to [the Contribution Guide](https://argo-cd.readthedocs.io/en/latest/developer-guide/code-contributions/) diff --git a/Dockerfile b/Dockerfile index ffe9ea16fcf8b..2c31b5077f67e 
100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,140 +1,146 @@ -ARG BASE_IMAGE=docker.io/library/ubuntu:20.10 +ARG BASE_IMAGE=docker.io/library/ubuntu:22.04@sha256:0bced47fffa3361afa981854fcabcd4577cd43cebbb808cea2b1f33a3dd7f508 #################################################################################################### # Builder image # Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image # Also used as the image in CI jobs so needs all dependencies #################################################################################################### -FROM docker.io/library/golang:1.16.2 as builder +FROM docker.io/library/golang:1.21.3@sha256:02d7116222536a5cf0fcf631f90b507758b669648e0f20186d2dc94a9b419a9b AS builder RUN echo 'deb http://deb.debian.org/debian buster-backports main' >> /etc/apt/sources.list -RUN apt-get update && apt-get install -y \ +RUN apt-get update && apt-get install --no-install-recommends -y \ openssh-server \ nginx \ + unzip \ fcgiwrap \ git \ git-lfs \ make \ wget \ gcc \ + sudo \ zip && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* WORKDIR /tmp -ADD hack/install.sh . -ADD hack/installers installers -ADD hack/tool-versions.sh . 
+COPY hack/install.sh hack/tool-versions.sh ./ +COPY hack/installers installers -RUN ./install.sh packr-linux -RUN ./install.sh ksonnet-linux -RUN ./install.sh helm2-linux -RUN ./install.sh helm-linux -RUN ./install.sh kustomize-linux +RUN ./install.sh helm-linux && \ + INSTALL_PATH=/usr/local/bin ./install.sh kustomize #################################################################################################### # Argo CD Base - used as the base for both the release and dev argocd images #################################################################################################### -FROM $BASE_IMAGE as argocd-base +FROM $BASE_IMAGE AS argocd-base + +LABEL org.opencontainers.image.source="https://github.com/argoproj/argo-cd" USER root +ENV ARGOCD_USER_ID=999 ENV DEBIAN_FRONTEND=noninteractive -RUN groupadd -g 999 argocd && \ - useradd -r -u 999 -g argocd argocd && \ +RUN groupadd -g $ARGOCD_USER_ID argocd && \ + useradd -r -u $ARGOCD_USER_ID -g argocd argocd && \ mkdir -p /home/argocd && \ chown argocd:0 /home/argocd && \ chmod g=u /home/argocd && \ - chmod g=u /etc/passwd && \ apt-get update && \ apt-get dist-upgrade -y && \ - apt-get install -y git git-lfs python3-pip tini gpg && \ + apt-get install -y \ + git git-lfs tini gpg tzdata && \ apt-get clean && \ - pip3 install awscli==1.18.80 && \ rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* -COPY hack/git-ask-pass.sh /usr/local/bin/git-ask-pass.sh COPY hack/gpg-wrapper.sh /usr/local/bin/gpg-wrapper.sh COPY hack/git-verify-wrapper.sh /usr/local/bin/git-verify-wrapper.sh -COPY --from=builder /usr/local/bin/ks /usr/local/bin/ks -COPY --from=builder /usr/local/bin/helm2 /usr/local/bin/helm2 COPY --from=builder /usr/local/bin/helm /usr/local/bin/helm COPY --from=builder /usr/local/bin/kustomize /usr/local/bin/kustomize -# script to add current (possibly arbitrary) user to /etc/passwd at runtime -# (if it's not already there, to be openshift friendly) -COPY uid_entrypoint.sh /usr/local/bin/uid_entrypoint.sh 
+COPY entrypoint.sh /usr/local/bin/entrypoint.sh +# keep uid_entrypoint.sh for backward compatibility +RUN ln -s /usr/local/bin/entrypoint.sh /usr/local/bin/uid_entrypoint.sh # support for mounting configuration from a configmap -RUN mkdir -p /app/config/ssh && \ - touch /app/config/ssh/ssh_known_hosts && \ - ln -s /app/config/ssh/ssh_known_hosts /etc/ssh/ssh_known_hosts +WORKDIR /app/config/ssh +RUN touch ssh_known_hosts && \ + ln -s /app/config/ssh/ssh_known_hosts /etc/ssh/ssh_known_hosts -RUN mkdir -p /app/config/tls -RUN mkdir -p /app/config/gpg/source && \ - mkdir -p /app/config/gpg/keys && \ - chown argocd /app/config/gpg/keys && \ - chmod 0700 /app/config/gpg/keys +WORKDIR /app/config +RUN mkdir -p tls && \ + mkdir -p gpg/source && \ + mkdir -p gpg/keys && \ + chown argocd gpg/keys && \ + chmod 0700 gpg/keys -# workaround ksonnet issue https://github.com/ksonnet/ksonnet/issues/298 ENV USER=argocd -USER 999 +USER $ARGOCD_USER_ID WORKDIR /home/argocd #################################################################################################### # Argo CD UI stage #################################################################################################### -FROM docker.io/library/node:12.18.4 as argocd-ui +FROM --platform=$BUILDPLATFORM docker.io/library/node:20.6.1@sha256:14bd39208dbc0eb171cbfb26ccb9ac09fa1b2eba04ccd528ab5d12983fd9ee24 AS argocd-ui WORKDIR /src -ADD ["ui/package.json", "ui/yarn.lock", "./"] +COPY ["ui/package.json", "ui/yarn.lock", "./"] -RUN yarn install +RUN yarn install --network-timeout 200000 && \ + yarn cache clean -ADD ["ui/", "."] +COPY ["ui/", "."] ARG ARGO_VERSION=latest ENV ARGO_VERSION=$ARGO_VERSION -RUN NODE_ENV='production' NODE_ONLINE_ENV='online' yarn build +ARG TARGETARCH +RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OPTIONS=--max_old_space_size=8192 yarn build #################################################################################################### # Argo CD Build stage 
which performs the actual build of Argo CD binaries #################################################################################################### -FROM golang:1.16.0 as argocd-build - -COPY --from=builder /usr/local/bin/packr /usr/local/bin/packr +FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.21.3@sha256:02d7116222536a5cf0fcf631f90b507758b669648e0f20186d2dc94a9b419a9b AS argocd-build WORKDIR /go/src/github.com/argoproj/argo-cd -COPY go.mod go.mod -COPY go.sum go.sum - +COPY go.* ./ RUN go mod download # Perform the build COPY . . -RUN make argocd-all - -ARG BUILD_ALL_CLIS=true -RUN if [ "$BUILD_ALL_CLIS" = "true" ] ; then \ - make BIN_NAME=argocd-darwin-amd64 GOOS=darwin argocd-all && \ - make BIN_NAME=argocd-windows-amd64.exe GOOS=windows argocd-all \ - ; fi +COPY --from=argocd-ui /src/dist/app /go/src/github.com/argoproj/argo-cd/ui/dist/app +ARG TARGETOS +ARG TARGETARCH +# These build args are optional; if not specified the defaults will be taken from the Makefile +ARG GIT_TAG +ARG BUILD_DATE +ARG GIT_TREE_STATE +ARG GIT_COMMIT +RUN GIT_COMMIT=$GIT_COMMIT \ + GIT_TREE_STATE=$GIT_TREE_STATE \ + GIT_TAG=$GIT_TAG \ + BUILD_DATE=$BUILD_DATE \ + GOOS=$TARGETOS \ + GOARCH=$TARGETARCH \ + make argocd-all #################################################################################################### # Final image #################################################################################################### FROM argocd-base COPY --from=argocd-build /go/src/github.com/argoproj/argo-cd/dist/argocd* /usr/local/bin/ -COPY --from=argocd-ui ./src/dist/app /shared/app USER root -RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-util -RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-server -RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-repo-server -RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-application-controller -RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-dex - -USER 999 \ No newline at end of file +RUN ln -s 
/usr/local/bin/argocd /usr/local/bin/argocd-server && \ + ln -s /usr/local/bin/argocd /usr/local/bin/argocd-repo-server && \ + ln -s /usr/local/bin/argocd /usr/local/bin/argocd-cmp-server && \ + ln -s /usr/local/bin/argocd /usr/local/bin/argocd-application-controller && \ + ln -s /usr/local/bin/argocd /usr/local/bin/argocd-dex && \ + ln -s /usr/local/bin/argocd /usr/local/bin/argocd-notifications && \ + ln -s /usr/local/bin/argocd /usr/local/bin/argocd-applicationset-controller && \ + ln -s /usr/local/bin/argocd /usr/local/bin/argocd-k8s-auth + +USER $ARGOCD_USER_ID +ENTRYPOINT ["/usr/bin/tini", "--"] diff --git a/Dockerfile.dev b/Dockerfile.dev index df7cc471d4670..978304f80fc44 100644 --- a/Dockerfile.dev +++ b/Dockerfile.dev @@ -3,17 +3,13 @@ #################################################################################################### FROM argocd-base COPY argocd /usr/local/bin/ -COPY argocd-darwin-amd64 /usr/local/bin/ -COPY argocd-windows-amd64.exe /usr/local/bin/ USER root -RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-server -RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-repo-server -RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-application-controller -RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-dex -RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-util -RUN ln -s /usr/local/bin/argocd-darwin-amd64 /usr/local/bin/argocd-util-darwin-amd64 -RUN ln -s /usr/local/bin/argocd-windows-amd64.exe /usr/local/bin/argocd-util-windows-amd64.exe -USER 999 +RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-server && \ + ln -s /usr/local/bin/argocd /usr/local/bin/argocd-repo-server && \ + ln -s /usr/local/bin/argocd /usr/local/bin/argocd-application-controller && \ + ln -s /usr/local/bin/argocd /usr/local/bin/argocd-dex && \ + ln -s /usr/local/bin/argocd /usr/local/bin/argocd-notifications && \ + ln -s /usr/local/bin/argocd /usr/local/bin/argocd-applicationset-controller -COPY --from=argocd-ui ./src/dist/app /shared/app 
+USER 999 diff --git a/Makefile b/Makefile index f59965c0be7c8..4d245b9bf15b5 100644 --- a/Makefile +++ b/Makefile @@ -2,20 +2,22 @@ PACKAGE=github.com/argoproj/argo-cd/v2/common CURRENT_DIR=$(shell pwd) DIST_DIR=${CURRENT_DIR}/dist CLI_NAME=argocd -UTIL_CLI_NAME=argocd-util BIN_NAME=argocd +GEN_RESOURCES_CLI_NAME=argocd-resources-gen + HOST_OS:=$(shell go env GOOS) HOST_ARCH:=$(shell go env GOARCH) +TARGET_ARCH?=linux/amd64 + VERSION=$(shell cat ${CURRENT_DIR}/VERSION) -BUILD_DATE=$(shell date -u +'%Y-%m-%dT%H:%M:%SZ') -GIT_COMMIT=$(shell git rev-parse HEAD) -GIT_TAG=$(shell if [ -z "`git status --porcelain`" ]; then git describe --exact-match --tags HEAD 2>/dev/null; fi) -GIT_TREE_STATE=$(shell if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi) -PACKR_CMD=$(shell if [ "`which packr`" ]; then echo "packr"; else echo "go run github.com/gobuffalo/packr/packr"; fi) +BUILD_DATE:=$(if $(BUILD_DATE),$(BUILD_DATE),$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')) +GIT_COMMIT:=$(if $(GIT_COMMIT),$(GIT_COMMIT),$(shell git rev-parse HEAD)) +GIT_TAG:=$(if $(GIT_TAG),$(GIT_TAG),$(shell if [ -z "`git status --porcelain`" ]; then git describe --exact-match --tags HEAD 2>/dev/null; fi)) +GIT_TREE_STATE:=$(if $(GIT_TREE_STATE),$(GIT_TREE_STATE),$(shell if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)) VOLUME_MOUNT=$(shell if test "$(go env GOOS)" = "darwin"; then echo ":delegated"; elif test selinuxenabled; then echo ":delegated"; else echo ""; fi) -KUBECTL_VERSION=$(shell go list -m all | grep k8s.io/client-go | cut -d ' ' -f5) +KUBECTL_VERSION=$(shell go list -m k8s.io/client-go | head -n 1 | rev | cut -d' ' -f1 | rev) GOPATH?=$(shell if test -x `which go`; then go env GOPATH; else echo "$(HOME)/go"; fi) GOCACHE?=$(HOME)/.cache/go-build @@ -25,7 +27,7 @@ DOCKER_WORKDIR?=/go/src/github.com/argoproj/argo-cd ARGOCD_PROCFILE?=Procfile -# Strict mode has been disabled in latest versions of mkdocs-material. 
+# Strict mode has been disabled in latest versions of mkdocs-material. # Thus pointing to the older image of mkdocs-material matching the version used by argo-cd. MKDOCS_DOCKER_IMAGE?=squidfunk/mkdocs-material:4.1.1 MKDOCS_RUN_ARGS?= @@ -47,10 +49,11 @@ ARGOCD_E2E_DEX_PORT?=5556 ARGOCD_E2E_YARN_HOST?=localhost ARGOCD_E2E_DISABLE_AUTH?= -ARGOCD_E2E_TEST_TIMEOUT?=20m +ARGOCD_E2E_TEST_TIMEOUT?=60m ARGOCD_IN_CI?=false ARGOCD_TEST_E2E?=true +ARGOCD_BIN_MODE?=true ARGOCD_LINT_GOGC?=20 @@ -63,13 +66,20 @@ else DOCKER_SRC_MOUNT="$(PWD):/go/src/github.com/argoproj/argo-cd$(VOLUME_MOUNT)" endif +# User and group IDs to map to the test container +CONTAINER_UID=$(shell id -u) +CONTAINER_GID=$(shell id -g) + +# Set SUDO to sudo to run privileged commands with sudo +SUDO?= + # Runs any command in the argocd-test-utils container in server mode # Server mode container will start with uid 0 and drop privileges during runtime define run-in-test-server - docker run --rm -it \ + $(SUDO) docker run --rm -it \ --name argocd-test-server \ - -u $(shell id -u):$(shell id -g) \ - -e USER_ID=$(shell id -u) \ + -u $(CONTAINER_UID):$(CONTAINER_GID) \ + -e USER_ID=$(CONTAINER_UID) \ -e HOME=/home/user \ -e GOPATH=/go \ -e GOCACHE=/tmp/go-build-cache \ @@ -77,6 +87,11 @@ define run-in-test-server -e ARGOCD_E2E_TEST=$(ARGOCD_E2E_TEST) \ -e ARGOCD_E2E_YARN_HOST=$(ARGOCD_E2E_YARN_HOST) \ -e ARGOCD_E2E_DISABLE_AUTH=$(ARGOCD_E2E_DISABLE_AUTH) \ + -e ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} \ + -e ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} \ + -e ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} \ + -e ARGOCD_APPLICATION_NAMESPACES \ + -e GITHUB_TOKEN \ -v ${DOCKER_SRC_MOUNT} \ -v ${GOPATH}/pkg/mod:/go/pkg/mod${VOLUME_MOUNT} \ -v ${GOCACHE}:/tmp/go-build-cache${VOLUME_MOUNT} \ @@ -92,12 +107,13 @@ endef # Runs any command in the argocd-test-utils container in client mode define run-in-test-client - docker run --rm -it \ + 
$(SUDO) docker run --rm -it \ --name argocd-test-client \ - -u $(shell id -u):$(shell id -g) \ + -u $(CONTAINER_UID):$(CONTAINER_GID) \ -e HOME=/home/user \ -e GOPATH=/go \ -e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) \ + -e GITHUB_TOKEN \ -e GOCACHE=/tmp/go-build-cache \ -e ARGOCD_LINT_GOGC=$(ARGOCD_LINT_GOGC) \ -v ${DOCKER_SRC_MOUNT} \ @@ -110,9 +126,9 @@ define run-in-test-client bash -c "$(1)" endef -# +# define exec-in-test-server - docker exec -it -u $(shell id -u):$(shell id -g) -e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) argocd-test-server $(1) + $(SUDO) docker exec -it -u $(CONTAINER_UID):$(CONTAINER_GID) -e ARGOCD_E2E_RECORD=$(ARGOCD_E2E_RECORD) -e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) argocd-test-server $(1) endef PATH:=$(PATH):$(PWD)/hack @@ -132,8 +148,8 @@ override LDFLAGS += \ -X ${PACKAGE}.buildDate=${BUILD_DATE} \ -X ${PACKAGE}.gitCommit=${GIT_COMMIT} \ -X ${PACKAGE}.gitTreeState=${GIT_TREE_STATE}\ - -X ${PACKAGE}.gitTreeState=${GIT_TREE_STATE}\ - -X ${PACKAGE}.kubectlVersion=${KUBECTL_VERSION} + -X ${PACKAGE}.kubectlVersion=${KUBECTL_VERSION}\ + -X "${PACKAGE}.extraBuildInfo=${EXTRA_BUILD_INFO}" ifeq (${STATIC_BUILD}, true) override LDFLAGS += -extldflags "-static" @@ -157,7 +173,7 @@ IMAGE_PREFIX=${IMAGE_NAMESPACE}/ endif .PHONY: all -all: cli image argocd-util +all: cli image # We have some legacy requirements for being checked out within $GOPATH. # The ensure-gopath target can be used as dependency to ensure we are running @@ -176,7 +192,7 @@ gogen: ensure-gopath go generate ./util/argo/... 
.PHONY: protogen -protogen: ensure-gopath +protogen: ensure-gopath mod-vendor-local export GO111MODULE=off ./hack/generate-proto.sh @@ -185,6 +201,16 @@ openapigen: ensure-gopath export GO111MODULE=off ./hack/update-openapi.sh +.PHONY: notification-catalog +notification-catalog: + go run ./hack/gen-catalog catalog + +.PHONY: notification-docs +notification-docs: + go run ./hack/gen-docs + go run ./hack/gen-catalog docs + + .PHONY: clientgen clientgen: ensure-gopath export GO111MODULE=off @@ -192,10 +218,11 @@ clientgen: ensure-gopath .PHONY: clidocsgen clidocsgen: ensure-gopath - go run tools/cmd-docs/main.go + go run tools/cmd-docs/main.go + .PHONY: codegen-local -codegen-local: ensure-gopath mod-vendor-local gogen protogen clientgen openapigen clidocsgen manifests-local +codegen-local: ensure-gopath mod-vendor-local gogen protogen clientgen openapigen clidocsgen manifests-local notification-docs notification-catalog rm -rf vendor/ .PHONY: codegen @@ -208,34 +235,28 @@ cli: test-tools-image .PHONY: cli-local cli-local: clean-debug - CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${CLI_NAME} ./cmd + CGO_ENABLED=0 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${CLI_NAME} ./cmd -.PHONY: cli-argocd -cli-argocd: - go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${CLI_NAME} ./cmd +.PHONY: gen-resources-cli-local +gen-resources-cli-local: clean-debug + CGO_ENABLED=0 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${GEN_RESOURCES_CLI_NAME} ./hack/gen-resources/cmd .PHONY: release-cli -release-cli: clean-debug image - docker create --name tmp-argocd-linux $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) - docker cp tmp-argocd-linux:/usr/local/bin/argocd ${DIST_DIR}/argocd-linux-amd64 - docker cp tmp-argocd-linux:/usr/local/bin/argocd-darwin-amd64 ${DIST_DIR}/argocd-darwin-amd64 - docker cp tmp-argocd-linux:/usr/local/bin/argocd-windows-amd64.exe 
${DIST_DIR}/argocd-windows-amd64.exe - docker rm tmp-argocd-linux - -.PHONY: argocd-util -argocd-util: clean-debug - # Build argocd-util as a statically linked binary, so it could run within the alpine-based dex container (argoproj/argo-cd#844) - CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${UTIL_CLI_NAME} ./cmd - -# .PHONY: dev-tools-image -# dev-tools-image: -# docker build -t $(DEV_TOOLS_PREFIX)$(DEV_TOOLS_IMAGE) . -f hack/Dockerfile.dev-tools -# docker tag $(DEV_TOOLS_PREFIX)$(DEV_TOOLS_IMAGE) $(DEV_TOOLS_PREFIX)$(DEV_TOOLS_IMAGE):$(DEV_TOOLS_VERSION) +release-cli: clean-debug build-ui + make BIN_NAME=argocd-darwin-amd64 GOOS=darwin argocd-all + make BIN_NAME=argocd-darwin-arm64 GOOS=darwin GOARCH=arm64 argocd-all + make BIN_NAME=argocd-linux-amd64 GOOS=linux argocd-all + make BIN_NAME=argocd-linux-arm64 GOOS=linux GOARCH=arm64 argocd-all + make BIN_NAME=argocd-linux-ppc64le GOOS=linux GOARCH=ppc64le argocd-all + make BIN_NAME=argocd-linux-s390x GOOS=linux GOARCH=s390x argocd-all + make BIN_NAME=argocd-windows-amd64.exe GOOS=windows argocd-all .PHONY: test-tools-image test-tools-image: - docker build --build-arg UID=$(shell id -u) -t $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) -f test/container/Dockerfile . - docker tag $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG) +ifndef SKIP_TEST_TOOLS_IMAGE + $(SUDO) docker build --build-arg UID=$(CONTAINER_UID) -t $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) -f test/container/Dockerfile . 
+ $(SUDO) docker tag $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG) +endif .PHONY: manifests-local manifests-local: @@ -248,25 +269,25 @@ manifests: test-tools-image # consolidated binary for cli, util, server, repo-server, controller .PHONY: argocd-all argocd-all: clean-debug - CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${BIN_NAME} ./cmd + CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${BIN_NAME} ./cmd -# NOTE: we use packr to do the build instead of go, since we embed swagger files and policy.csv -# files into the go binary .PHONY: server server: clean-debug - CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-server ./cmd + CGO_ENABLED=0 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-server ./cmd .PHONY: repo-server repo-server: - CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-repo-server ./cmd + CGO_ENABLED=0 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-repo-server ./cmd .PHONY: controller controller: - CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-application-controller ./cmd + CGO_ENABLED=0 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-application-controller ./cmd -.PHONY: packr -packr: - go build -o ${DIST_DIR}/packr github.com/gobuffalo/packr/packr/ +.PHONY: build-ui +build-ui: + DOCKER_BUILDKIT=1 docker build -t argocd-ui --platform=$(TARGET_ARCH) --target argocd-ui . 
+ find ./ui/dist -type f -not -name gitkeep -delete + docker run -v ${CURRENT_DIR}/ui/dist/app:/tmp/app --rm -t argocd-ui sh -c 'cp -r ./dist/app/* /tmp/app/' .PHONY: image ifeq ($(DEV_IMAGE), true) @@ -274,32 +295,25 @@ ifeq ($(DEV_IMAGE), true) # which speeds up builds. Dockerfile.dev needs to be copied into dist to perform the build, since # the dist directory is under .dockerignore. IMAGE_TAG="dev-$(shell git describe --always --dirty)" -image: packr - docker build -t argocd-base --target argocd-base . - docker build -t argocd-ui --target argocd-ui . - CGO_ENABLED=0 GOOS=linux GOARCH=amd64 dist/packr build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd ./cmd - CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 dist/packr build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-darwin-amd64 ./cmd - CGO_ENABLED=0 GOOS=windows GOARCH=amd64 dist/packr build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-windows-amd64.exe ./cmd +image: build-ui + DOCKER_BUILDKIT=1 docker build --platform=$(TARGET_ARCH) -t argocd-base --target argocd-base . 
+ CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd ./cmd ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-server ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-application-controller ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-repo-server + ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-cmp-server ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-dex - ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-util - ln -sfn ${DIST_DIR}/argocd-darwin-amd64 ${DIST_DIR}/argocd-util-darwin-amd64 - ln -sfn ${DIST_DIR}/argocd-windows-amd64.exe ${DIST_DIR}/argocd-util-windows-amd64.exe cp Dockerfile.dev dist - docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) -f dist/Dockerfile.dev dist + DOCKER_BUILDKIT=1 docker build --platform=$(TARGET_ARCH) -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) -f dist/Dockerfile.dev dist else image: - docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) . + DOCKER_BUILDKIT=1 docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) --platform=$(TARGET_ARCH) . endif @if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) ; fi .PHONY: armimage -# The "BUILD_ALL_CLIS" argument is to skip building the CLIs for darwin and windows -# which would take a really long time. armimage: - docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG)-arm . --build-arg BUILD_ALL_CLIS="false" + docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG)-arm . 
.PHONY: builder-image builder-image: @@ -322,7 +336,7 @@ mod-vendor: test-tools-image mod-vendor-local: mod-download-local go mod vendor -# Deprecated - replace by install-local-tools +# Deprecated - replace by install-tools-local .PHONY: install-lint-tools install-lint-tools: ./hack/install.sh lint-tools @@ -338,7 +352,7 @@ lint-local: golangci-lint --version # NOTE: If you get a "Killed" OOM message, try reducing the value of GOGC # See https://github.com/golangci/golangci-lint#memory-usage-of-golangci-lint - GOGC=$(ARGOCD_LINT_GOGC) GOMAXPROCS=2 golangci-lint run --fix --verbose --timeout 300s + GOGC=$(ARGOCD_LINT_GOGC) GOMAXPROCS=2 golangci-lint run --enable gofmt --fix --verbose --timeout 3000s --max-issues-per-linter 0 --max-same-issues 0 .PHONY: lint-ui lint-ui: test-tools-image @@ -357,7 +371,7 @@ build: test-tools-image # Build all Go code (local version) .PHONY: build-local build-local: - go build -v `go list ./... | grep -v 'resource_customizations\|test/e2e'` + GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v `go list ./... | grep -v 'resource_customizations\|test/e2e'` # Run all unit tests # @@ -372,9 +386,9 @@ test: test-tools-image .PHONY: test-local test-local: if test "$(TEST_MODULE)" = ""; then \ - ./hack/test.sh -coverprofile=coverage.out `go list ./... | grep -v 'test/e2e'`; \ + DIST_DIR=${DIST_DIR} RERUN_FAILS=0 PACKAGES=`go list ./... | grep -v 'test/e2e'` ./hack/test.sh -coverprofile=coverage.out; \ else \ - ./hack/test.sh -coverprofile=coverage.out "$(TEST_MODULE)"; \ + DIST_DIR=${DIST_DIR} RERUN_FAILS=0 PACKAGES="$(TEST_MODULE)" ./hack/test.sh -coverprofile=coverage.out "$(TEST_MODULE)"; \ fi .PHONY: test-race @@ -386,9 +400,9 @@ test-race: test-tools-image .PHONY: test-race-local test-race-local: if test "$(TEST_MODULE)" = ""; then \ - ./hack/test.sh -race -coverprofile=coverage.out `go list ./... | grep -v 'test/e2e'`; \ + DIST_DIR=${DIST_DIR} RERUN_FAILS=0 PACKAGES=`go list ./... 
| grep -v 'test/e2e'` ./hack/test.sh -race -coverprofile=coverage.out; \ else \ - ./hack/test.sh -race -coverprofile=coverage.out "$(TEST_MODULE)"; \ + DIST_DIR=${DIST_DIR} RERUN_FAILS=0 PACKAGES="$(TEST_MODULE)" ./hack/test.sh -race -coverprofile=coverage.out; \ fi # Run the E2E test suite. E2E test servers (see start-e2e target) must be @@ -402,7 +416,7 @@ test-e2e: test-e2e-local: cli-local # NO_PROXY ensures all tests don't go out through a proxy if one is configured on the test system export GO111MODULE=off - ARGOCD_GPG_ENABLED=true NO_PROXY=* ./hack/test.sh -timeout $(ARGOCD_E2E_TEST_TIMEOUT) -v ./test/e2e + DIST_DIR=${DIST_DIR} RERUN_FAILS=5 PACKAGES="./test/e2e" ARGOCD_E2E_RECORD=${ARGOCD_E2E_RECORD} ARGOCD_GPG_ENABLED=true NO_PROXY=* ./hack/test.sh -timeout $(ARGOCD_E2E_TEST_TIMEOUT) -v # Spawns a shell in the test server container for debugging purposes debug-test-server: test-tools-image @@ -421,30 +435,39 @@ start-e2e: test-tools-image # Starts e2e server locally (or within a container) .PHONY: start-e2e-local -start-e2e-local: +start-e2e-local: mod-vendor-local dep-ui-local cli-local kubectl create ns argocd-e2e || true + kubectl create ns argocd-e2e-external || true kubectl config set-context --current --namespace=argocd-e2e kustomize build test/manifests/base | kubectl apply -f - + kubectl apply -f https://raw.githubusercontent.com/open-cluster-management/api/a6845f2ebcb186ec26b832f60c988537a58f3859/cluster/v1alpha1/0000_04_clusters.open-cluster-management.io_placementdecisions.crd.yaml # Create GPG keys and source directories if test -d /tmp/argo-e2e/app/config/gpg; then rm -rf /tmp/argo-e2e/app/config/gpg/*; fi mkdir -p /tmp/argo-e2e/app/config/gpg/keys && chmod 0700 /tmp/argo-e2e/app/config/gpg/keys mkdir -p /tmp/argo-e2e/app/config/gpg/source && chmod 0700 /tmp/argo-e2e/app/config/gpg/source + mkdir -p /tmp/argo-e2e/app/config/plugin && chmod 0700 /tmp/argo-e2e/app/config/plugin # set paths for locally managed ssh known hosts and tls certs data 
ARGOCD_SSH_DATA_PATH=/tmp/argo-e2e/app/config/ssh \ ARGOCD_TLS_DATA_PATH=/tmp/argo-e2e/app/config/tls \ ARGOCD_GPG_DATA_PATH=/tmp/argo-e2e/app/config/gpg/source \ ARGOCD_GNUPGHOME=/tmp/argo-e2e/app/config/gpg/keys \ - ARGOCD_GPG_ENABLED=true \ + ARGOCD_GPG_ENABLED=$(ARGOCD_GPG_ENABLED) \ + ARGOCD_PLUGINCONFIGFILEPATH=/tmp/argo-e2e/app/config/plugin \ + ARGOCD_PLUGINSOCKFILEPATH=/tmp/argo-e2e/app/config/plugin \ ARGOCD_E2E_DISABLE_AUTH=false \ ARGOCD_ZJWT_FEATURE_FLAG=always \ ARGOCD_IN_CI=$(ARGOCD_IN_CI) \ + BIN_MODE=$(ARGOCD_BIN_MODE) \ + ARGOCD_APPLICATION_NAMESPACES=argocd-e2e-external \ + ARGOCD_APPLICATIONSET_CONTROLLER_NAMESPACES=argocd-e2e-external \ + ARGOCD_APPLICATIONSET_CONTROLLER_ALLOWED_SCM_PROVIDERS=http://127.0.0.1:8341,http://127.0.0.1:8342,http://127.0.0.1:8343,http://127.0.0.1:8344 \ ARGOCD_E2E_TEST=true \ goreman -f $(ARGOCD_PROCFILE) start ${ARGOCD_START} -# Cleans VSCode debug.test files from sub-dirs to prevent them from being included in packr boxes +# Cleans VSCode debug.test files from sub-dirs to prevent them from being included in by golang embed .PHONY: clean-debug clean-debug: - -find ${CURRENT_DIR} -name debug.test | xargs rm -f + -find ${CURRENT_DIR} -name debug.test -exec rm -f {} + .PHONY: clean clean: clean-debug @@ -457,7 +480,7 @@ start: test-tools-image # Starts a local instance of ArgoCD .PHONY: start-local -start-local: mod-vendor-local dep-ui-local +start-local: mod-vendor-local dep-ui-local cli-local # check we can connect to Docker to start Redis killall goreman || true kubectl create ns argocd || true @@ -467,10 +490,17 @@ start-local: mod-vendor-local dep-ui-local mkdir -p /tmp/argocd-local/gpg/source ARGOCD_ZJWT_FEATURE_FLAG=always \ ARGOCD_IN_CI=false \ - ARGOCD_GPG_ENABLED=true \ + ARGOCD_GPG_ENABLED=$(ARGOCD_GPG_ENABLED) \ ARGOCD_E2E_TEST=false \ + ARGOCD_APPLICATION_NAMESPACES=$(ARGOCD_APPLICATION_NAMESPACES) \ goreman -f $(ARGOCD_PROCFILE) start ${ARGOCD_START} +# Run goreman start with exclude option , provide 
exclude env variable with list of services +.PHONY: run +run: + bash ./hack/goreman-start.sh + + # Runs pre-commit validation with the virtualized toolchain .PHONY: pre-commit pre-commit: codegen build lint test @@ -494,7 +524,7 @@ build-docs-local: .PHONY: build-docs build-docs: - docker run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}:/docs ${MKDOCS_DOCKER_IMAGE} build + docker run ${MKDOCS_RUN_ARGS} --rm -it -v ${CURRENT_DIR}:/docs --entrypoint "" ${MKDOCS_DOCKER_IMAGE} sh -c 'pip install -r docs/requirements.txt; mkdocs build' .PHONY: serve-docs-local serve-docs-local: @@ -502,12 +532,8 @@ serve-docs-local: .PHONY: serve-docs serve-docs: - docker run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}:/docs ${MKDOCS_DOCKER_IMAGE} serve -a 0.0.0.0:8000 + docker run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}/site:/site -w /site --entrypoint "" ${MKDOCS_DOCKER_IMAGE} python3 -m http.server --bind 0.0.0.0 8000 -.PHONY: lint-docs -lint-docs: - # https://github.com/dkhamsing/awesome_bot - find docs -name '*.md' -exec grep -l http {} + | xargs docker run --rm -v $(PWD):/mnt:ro dkhamsing/awesome_bot -t 3 --allow-dupe --allow-redirect --white-list `cat white-list | grep -v "#" | tr "\n" ','` --skip-save-results -- # Verify that kubectl can connect to your K8s cluster from Docker .PHONY: verify-kube-connect @@ -529,16 +555,14 @@ install-tools-local: install-test-tools-local install-codegen-tools-local instal # Installs all tools required for running unit & end-to-end tests (Linux packages) .PHONY: install-test-tools-local install-test-tools-local: - sudo ./hack/install.sh packr-linux - sudo ./hack/install.sh kustomize-linux - sudo ./hack/install.sh ksonnet-linux - sudo ./hack/install.sh helm2-linux - sudo ./hack/install.sh helm-linux + ./hack/install.sh kustomize + ./hack/install.sh helm-linux + ./hack/install.sh gotestsum # Installs all tools required for running codegen (Linux packages) .PHONY: install-codegen-tools-local 
install-codegen-tools-local: - sudo ./hack/install.sh codegen-tools + ./hack/install.sh codegen-tools # Installs all tools required for running codegen (Go packages) .PHONY: install-go-tools-local @@ -554,3 +578,79 @@ dep-ui-local: start-test-k8s: go run ./hack/k8s + +.PHONY: list +list: + @LC_ALL=C $(MAKE) -pRrq -f $(lastword $(MAKEFILE_LIST)) : 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | sort | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' + +.PHONY: applicationset-controller +applicationset-controller: + GODEBUG="tarinsecurepath=0,zipinsecurepath=0" CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-applicationset-controller ./cmd + +.PHONY: checksums +checksums: + sha256sum ./dist/$(BIN_NAME)-* | awk -F './dist/' '{print $$1 $$2}' > ./dist/$(BIN_NAME)-$(TARGET_VERSION)-checksums.txt + +.PHONY: snyk-container-tests +snyk-container-tests: + ./hack/snyk-container-tests.sh + +.PHONY: snyk-non-container-tests +snyk-non-container-tests: + ./hack/snyk-non-container-tests.sh + +.PHONY: snyk-report +snyk-report: + ./hack/snyk-report.sh $(target_branch) + +.PHONY: help +help: + @echo 'Note: Generally an item w/ (-local) will run inside docker unless you use the -local variant' + @echo + @echo 'Common targets' + @echo + @echo 'all -- make cli and image' + @echo + @echo 'components:' + @echo ' applicationset-controller -- applicationset controller' + @echo ' cli(-local) -- argocd cli program' + @echo ' controller -- controller (orchestrator)' + @echo ' repo-server -- repo server (manage repository instances)' + @echo ' server -- argocd web application' + @echo + @echo 'build:' + @echo ' image -- make image of the following items' + @echo ' build(-local) -- compile go' + @echo ' build-docs(-local) -- build docs' + @echo ' build-ui -- compile typescript' + @echo + @echo 'run:' + @echo ' run -- run the components locally' + @echo ' serve-docs(-local) -- expose the documents for viewing in a browser' + 
@echo + @echo 'release:' + @echo ' release-cli' + @echo ' release-precheck' + @echo ' checksums' + @echo + @echo 'docs:' + @echo ' build-docs(-local)' + @echo ' serve-docs(-local)' + @echo ' notification-docs' + @echo ' clidocsgen' + @echo + @echo 'testing:' + @echo ' test(-local)' + @echo ' start-e2e(-local)' + @echo ' test-e2e(-local)' + @echo ' test-race(-local)' + @echo + @echo 'debug:' + @echo ' list -- list all make targets' + @echo ' install-tools-local -- install all the tools below' + @echo ' install-lint-tools(-local)' + @echo + @echo 'codegen:' + @echo ' codegen(-local) -- if using -local, run the following targets first' + @echo ' install-codegen-tools-local -- run this to install the codegen tools' + @echo ' install-go-tools-local -- run this to install go libraries for codegen' diff --git a/OWNERS b/OWNERS index 15cd6333dca21..d8532c550005a 100644 --- a/OWNERS +++ b/OWNERS @@ -8,9 +8,26 @@ approvers: - jannfis - jessesuen - jgwest +- keithchong - mayzhang2000 +- rbreeze +- leoluz +- crenshaw-dev +- pasha-codefresh reviewers: - dthomson25 - tetchel +- terrytangyuan - wtam2018 +- ishitasequeira +- reginapizza +- hblixt +- chetan-rns +- wanghong230 +- ciiay +- saumeya +- zachaller +- 34fathombelow +- alexef +- gdsoumya diff --git a/Procfile b/Procfile index 02714e7f3b4ec..2bb26a086fb1d 100644 --- a/Procfile +++ b/Procfile @@ -1,9 +1,12 @@ -controller: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller go run ./cmd/main.go --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}" -api-server: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} 
ARGOCD_BINARY_NAME=argocd-server go run ./cmd/main.go --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --staticassets ui/dist/app" -dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v2/cmd gendexcfg -o `pwd`/dist/dex.yaml && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:v2.27.0 serve /dex.yaml" -redis: bash -c "if [ $ARGOCD_REDIS_LOCAL == 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} redis:6.2.1-alpine --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi" -repo-server: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server go run ./cmd/main.go --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379}" +controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} 
--otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''}" +api-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''}" +dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v2/cmd gendexcfg -o `pwd`/dist/dex.yaml && (test -f dist/dex.yaml || { echo 'Failed to generate dex configuration'; exit 1; }) && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:$(grep "image: ghcr.io/dexidp/dex" manifests/base/dex/argocd-dex-server-deployment.yaml | cut -d':' -f3) dex serve /dex.yaml" +redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" = 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} docker.io/library/redis:$(grep "image: redis" manifests/base/redis/argocd-redis-deployment.yaml | cut -d':' -f3) --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi" +repo-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} 
ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --otlp-address=${ARGOCD_OTLP_ADDRESS}" +cmp-server: [ "$ARGOCD_E2E_TEST" = 'true' ] && exit 0 || [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_BINARY_NAME=argocd-cmp-server ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} $COMMAND --config-dir-path ./test/cmp --loglevel debug --otlp-address=${ARGOCD_OTLP_ADDRESS}" ui: sh -c 'cd ui && ${ARGOCD_E2E_YARN_CMD:-yarn} start' git-server: test/fixture/testrepos/start-git.sh helm-registry: test/fixture/testrepos/start-helm-registry.sh dev-mounter: [[ "$ARGOCD_E2E_TEST" != "true" ]] && go run hack/dev-mounter/main.go --configmap argocd-ssh-known-hosts-cm=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} --configmap argocd-tls-certs-cm=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} --configmap argocd-gpg-keys-cm=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} +applicationset-controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-applicationset-controller $COMMAND --loglevel debug --metrics-addr localhost:12345 --probe-addr localhost:12346 --argocd-repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}" +notification: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || 
COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_BINARY_NAME=argocd-notifications $COMMAND --loglevel debug" diff --git a/README.md b/README.md index ef40fe8781a7d..ef5664de5b5b7 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,18 @@ +**Releases:** +[![Release Version](https://img.shields.io/github/v/release/argoproj/argo-cd?label=argo-cd)](https://github.com/argoproj/argo-cd/releases/latest) +[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/argo-cd)](https://artifacthub.io/packages/helm/argo/argo-cd) +[![SLSA 3](https://slsa.dev/images/gh-badge-level3.svg)](https://slsa.dev) + +**Code:** [![Integration tests](https://github.com/argoproj/argo-cd/workflows/Integration%20tests/badge.svg?branch=master)](https://github.com/argoproj/argo-cd/actions?query=workflow%3A%22Integration+tests%22) -[![slack](https://img.shields.io/badge/slack-argoproj-brightgreen.svg?logo=slack)](https://argoproj.github.io/community/join-slack) [![codecov](https://codecov.io/gh/argoproj/argo-cd/branch/master/graph/badge.svg)](https://codecov.io/gh/argoproj/argo-cd) -[![Release Version](https://img.shields.io/github/v/release/argoproj/argo-cd?label=argo-cd)](https://github.com/argoproj/argo-cd/releases/latest) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4486/badge)](https://bestpractices.coreinfrastructure.org/projects/4486) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/argoproj/argo-cd/badge)](https://api.securityscorecards.dev/projects/github.com/argoproj/argo-cd) +[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fargoproj%2Fargo-cd.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Fargoproj%2Fargo-cd?ref=badge_shield) + +**Social:** +[![Twitter 
Follow](https://img.shields.io/twitter/follow/argoproj?style=social)](https://twitter.com/argoproj) +[![Slack](https://img.shields.io/badge/slack-argoproj-brightgreen.svg?logo=slack)](https://argoproj.github.io/community/join-slack) # Argo CD - Declarative Continuous Delivery for Kubernetes @@ -36,8 +46,8 @@ Check live demo at https://cd.apps.argoproj.io/. * Q & A : [Github Discussions](https://github.com/argoproj/argo-cd/discussions) * Chat : [The #argo-cd Slack channel](https://argoproj.github.io/community/join-slack) -* Contributors Office Hours: [Every Thursday](https://calendar.google.com/calendar/u/0/embed?src=argoproj@gmail.com) | [Agenda](https://docs.google.com/document/d/1ttgw98MO45Dq7ZUHpIiOIEfbyeitKHNfMjbY5dLLMKQ) -* User Community meeting: [Every other Wednesday](https://calendar.google.com/calendar/u/0/embed?src=argoproj@gmail.com) | [Agenda](https://docs.google.com/document/d/1xkoFkVviB70YBzSEa4bDnu-rUZ1sIFtwKKG1Uw8XsY8) +* Contributors Office Hours: [Every Thursday](https://calendar.google.com/calendar/u/0/embed?src=argoproj@gmail.com) | [Agenda](https://docs.google.com/document/d/1xkoFkVviB70YBzSEa4bDnu-rUZ1sIFtwKKG1Uw8XsY8) +* User Community meeting: [First Wednesday of the month](https://calendar.google.com/calendar/u/0/embed?src=argoproj@gmail.com) | [Agenda](https://docs.google.com/document/d/1ttgw98MO45Dq7ZUHpIiOIEfbyeitKHNfMjbY5dLLMKQ) Participation in the Argo CD project is governed by the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md) @@ -45,6 +55,10 @@ Participation in the Argo CD project is governed by the [CNCF Code of Conduct](h ### Blogs and Presentations +1. [Awesome-Argo: A Curated List of Awesome Projects and Resources Related to Argo](https://github.com/terrytangyuan/awesome-argo) +1. 
[Unveil the Secret Ingredients of Continuous Delivery at Enterprise Scale with Argo CD](https://akuity.io/blog/unveil-the-secret-ingredients-of-continuous-delivery-at-enterprise-scale-with-argocd-kubecon-china-2021/) +1. [GitOps Without Pipelines With ArgoCD Image Updater](https://youtu.be/avPUQin9kzU) +1. [Combining Argo CD (GitOps), Crossplane (Control Plane), And KubeVela (OAM)](https://youtu.be/eEcgn_gU3SM) 1. [How to Apply GitOps to Everything - Combining Argo CD and Crossplane](https://youtu.be/yrj4lmScKHQ) 1. [Couchbase - How To Run a Database Cluster in Kubernetes Using Argo CD](https://youtu.be/nkPoPaVzExY) 1. [Automation of Everything - How To Combine Argo Events, Workflows & Pipelines, CD, and Rollouts](https://youtu.be/XNXJtxkUKeY) @@ -53,10 +67,9 @@ Participation in the Argo CD project is governed by the [CNCF Code of Conduct](h 1. [Creating Temporary Preview Environments Based On Pull Requests With Argo CD And Codefresh](https://codefresh.io/continuous-deployment/creating-temporary-preview-environments-based-pull-requests-argo-cd-codefresh/) 1. [Tutorial: Everything You Need To Become A GitOps Ninja](https://www.youtube.com/watch?v=r50tRQjisxw) 90m tutorial on GitOps and Argo CD. 1. [Comparison of Argo CD, Spinnaker, Jenkins X, and Tekton](https://www.inovex.de/blog/spinnaker-vs-argo-cd-vs-tekton-vs-jenkins-x/) -1. [Simplify and Automate Deployments Using GitOps with IBM Multicloud Manager 3.1.2](https://medium.com/ibm-cloud/simplify-and-automate-deployments-using-gitops-with-ibm-multicloud-manager-3-1-2-4395af317359) +1. [Simplify and Automate Deployments Using GitOps with IBM Multicloud Manager 3.1.2](https://www.ibm.com/cloud/blog/simplify-and-automate-deployments-using-gitops-with-ibm-multicloud-manager-3-1-2) 1. [GitOps for Kubeflow using Argo CD](https://v0-6.kubeflow.org/docs/use-cases/gitops-for-kubeflow/) 1. 
[GitOps Toolsets on Kubernetes with CircleCI and Argo CD](https://www.digitalocean.com/community/tutorials/webinar-series-gitops-tool-sets-on-kubernetes-with-circleci-and-argo-cd) -1. [Simplify and Automate Deployments Using GitOps with IBM Multicloud Manager](https://www.ibm.com/blogs/bluemix/2019/02/simplify-and-automate-deployments-using-gitops-with-ibm-multicloud-manager-3-1-2/) 1. [CI/CD in Light Speed with K8s and Argo CD](https://www.youtube.com/watch?v=OdzH82VpMwI&feature=youtu.be) 1. [Machine Learning as Code](https://www.youtube.com/watch?v=VXrGp5er1ZE&t=0s&index=135&list=PLj6h78yzYM2PZf9eA7bhWnIh_mK1vyOfU). Among other things, describes how Kubeflow uses Argo CD to implement GitOPs for ML 1. [Argo CD - GitOps Continuous Delivery for Kubernetes](https://www.youtube.com/watch?v=aWDIQMbp1cc&feature=youtu.be&t=1m4s) @@ -68,3 +81,8 @@ Participation in the Argo CD project is governed by the [CNCF Code of Conduct](h 1. [Setting up Argo CD with Helm](https://www.arthurkoziel.com/setting-up-argocd-with-helm/) 1. [Applied GitOps with Argo CD](https://thenewstack.io/applied-gitops-with-argocd/) 1. [Solving configuration drift using GitOps with Argo CD](https://www.cncf.io/blog/2020/12/17/solving-configuration-drift-using-gitops-with-argo-cd/) +1. [Decentralized GitOps over environments](https://blogs.sap.com/2021/05/06/decentralized-gitops-over-environments/) +1. [Getting Started with ArgoCD for GitOps Deployments](https://youtu.be/AvLuplh1skA) +1. [Using Argo CD & Datree for Stable Kubernetes CI/CD Deployments](https://youtu.be/17894DTru2Y) +1. [How to create Argo CD Applications Automatically using ApplicationSet? 
"Automation of GitOps"](https://amralaayassen.medium.com/how-to-create-argocd-applications-automatically-using-applicationset-automation-of-the-gitops-59455eaf4f72) + diff --git a/SECURITY.md b/SECURITY.md index 1181cf58d469e..479cd5ef29c97 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -1,22 +1,45 @@ # Security Policy for Argo CD -Version: **v1.0 (2020-02-26)** +Version: **v1.5 (2023-03-06)** ## Preface As a deployment tool, Argo CD needs to have production access which makes security a very important topic. The Argoproj team takes security very -seriously and is continuously working on improving it. +seriously and is continuously working on improving it. + +## A word about security scanners + +Many organisations these days employ security scanners to validate their +container images before letting them on their clusters, and that is a good +thing. However, the quality and results of these scanners vary greatly, +many of them produce false positives and require people to look at the +issues reported and validate them for correctness. A great example of that +is, that some scanners report kernel vulnerabilities for container images +just because they are derived from some distribution. + +We kindly ask you to not raise issues or contact us regarding any issues +that are found by your security scanner. Many of those produce a lot of false +positives, and many of these issues don't affect Argo CD. We do have scanners +in place for our code, dependencies and container images that we publish. We +are well aware of the issues that may affect Argo CD and are constantly +working on the remediation of those that affect Argo CD and our users. + +If you believe that we might have missed an issue that we should take a look +at (that can happen), then please discuss it with us. 
If there is a CVE +assigned to the issue, please do open an issue on our GitHub tracker instead +of writing to the security contact e-mail, since things reported by scanners +are public already and the discussion that might emerge is of benefit to the +general community. However, please validate your scanner results and their +impact on Argo CD at least roughly before opening an issue. ## Supported Versions -We currently support the most recent release (`N`, e.g. `1.8`) and the release -previous to the most recent one (`N-1`, e.g. `1.7`). With the release of -`N+1`, `N-1` drops out of support and `N` becomes `N-1`. +We currently support the last 3 minor versions of Argo CD with security and bug fixes. We regularly perform patch releases (e.g. `1.8.5` and `1.7.12`) for the supported versions, which will contain fixes for security vulnerabilities and -important bugs. Prior releases might receive critical security fixes on a best +important bugs. Prior releases might receive critical security fixes on a best effort basis, however, it cannot be guaranteed that security fixes get back-ported to these unsupported versions. @@ -27,7 +50,7 @@ of releasing it within a patch branch for the currently supported releases. ## Reporting a Vulnerability -If you find a security related bug in ArgoCD, we kindly ask you for responsible +If you find a security related bug in Argo CD, we kindly ask you for responsible disclosure and for giving us appropriate time to react, analyze and develop a fix to mitigate the found security vulnerability. @@ -35,13 +58,32 @@ We will do our best to react quickly on your inquiry, and to coordinate a fix and disclosure with you. Sometimes, it might take a little longer for us to react (e.g. out of office conditions), so please bear with us in these cases. -We will publish security advisiories using the Git Hub SA feature to keep our -community well informed, and will credit you for your findings (unless you -prefer to stay anonymous, of course).
+We will publish security advisories using the +[GitHub Security Advisories](https://github.com/argoproj/argo-cd/security/advisories) +feature to keep our community well-informed, and will credit you for your +findings (unless you prefer to stay anonymous, of course). + +There are two ways to report a vulnerability to the Argo CD team: + +* By opening a draft GitHub security advisory: https://github.com/argoproj/argo-cd/security/advisories/new +* By e-mail to the following address: cncf-argo-security@lists.cncf.io + +## Internet Bug Bounty collaboration + +We're happy to announce that the Argo project is collaborating with the great +folks over at +[Hacker One](https://hackerone.com/) and their +[Internet Bug Bounty program](https://hackerone.com/ibb) +to reward the awesome people who find security vulnerabilities in the four +main Argo projects (CD, Events, Rollouts and Workflows) and then work with +us to fix and disclose them in a responsible manner. + +If you report a vulnerability to us as outlined in this security policy, we +will work together with you to find out whether your finding is eligible for +claiming a bounty, and also on how to claim it. -Please report vulnerabilities by e-mail to all of the following people: +## Securing your Argo CD Instance -* jfischer@redhat.com -* Jesse_Suen@intuit.com -* Alexander_Matyushentsev@intuit.com -* Edward_Lee@intuit.com +See the [operator manual security page](docs/operator-manual/security.md) for +additional information about Argo CD's security features and how to make your +Argo CD production ready. diff --git a/SECURITY_CONTACTS b/SECURITY_CONTACTS index 1d5575609323e..e4651b11531dc 100644 --- a/SECURITY_CONTACTS +++ b/SECURITY_CONTACTS @@ -1,7 +1,7 @@ # Defined below are the security contacts for this repo. 
# # DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE -# INSTRUCTIONS AT https://argo-cd.readthedocs.io/en/latest/security_considerations/#reporting-vulnerabilities +# INSTRUCTIONS AT https://github.com/argoproj/argo-cd/security/policy alexmt edlee2121 diff --git a/USERS.md b/USERS.md index 76364536821a6..652a68c6e679f 100644 --- a/USERS.md +++ b/USERS.md @@ -1,130 +1,323 @@ ## Who uses Argo CD? -As the Argo Community grows, we'd like to keep track of our users. Please send a PR with your organization name if you are using Argo CD. +As the Argo Community grows, we'd like to keep track of our users. Please send a +PR with your organization name if you are using Argo CD. Currently, the following organizations are **officially** using Argo CD: 1. [127Labs](https://127labs.com/) 1. [3Rein](https://www.3rein.com/) +1. [4data](https://4data.ch/) 1. [7shifts](https://www.7shifts.com/) 1. [Adevinta](https://www.adevinta.com/) +1. [Adfinis](https://adfinis.com) 1. [Adventure](https://jp.adventurekk.com/) +1. [Adyen](https://www.adyen.com) +1. [AirQo](https://airqo.net/) +1. [Akuity](https://akuity.io/) +1. [Albert Heijn](https://ah.nl/) +1. [Alibaba Group](https://www.alibabagroup.com/) +1. [Allianz Direct](https://www.allianzdirect.de/) +1. [Amadeus IT Group](https://amadeus.com/) 1. [Ambassador Labs](https://www.getambassador.io/) -1. [Ant Group](https://www.antgroup.com/) 1. [ANSTO - Australian Synchrotron](https://www.synchrotron.org.au/) +1. [Ant Group](https://www.antgroup.com/) 1. [AppDirect](https://www.appdirect.com) 1. [Arctiq Inc.](https://www.arctiq.ca) -1. [ARZ Allgemeines Rechenzentrum GmbH ](https://www.arz.at/) +1. [ARZ Allgemeines Rechenzentrum GmbH](https://www.arz.at/) +2. [Autodesk](https://www.autodesk.com) +1. [Axual B.V.](https://axual.com) +1. [Back Market](https://www.backmarket.com) 1. [Baloise](https://www.baloise.com) 1. [BCDevExchange DevOps Platform](https://bcdevexchange.org/DevOpsPlatform) 1. 
[Beat](https://thebeat.co/en/) 1. [Beez Innovation Labs](https://www.beezlabs.com/) +1. [Beleza Na Web](https://www.belezanaweb.com.br/) +1. [BigPanda](https://bigpanda.io) 1. [BioBox Analytics](https://biobox.io) +1. [BMW Group](https://www.bmwgroup.com/) +1. [Boozt](https://www.booztgroup.com/) +1. [Boticario](https://www.boticario.com.br/) +1. [Bulder Bank](https://bulderbank.no) 1. [Camptocamp](https://camptocamp.com) +1. [Candis](https://www.candis.io) +1. [Capital One](https://www.capitalone.com) 1. [CARFAX](https://www.carfax.com) +1. [CARFAX Europe](https://www.carfax.eu) +1. [Carrefour Group](https://www.carrefour.com) +1. [Casavo](https://casavo.com) 1. [Celonis](https://www.celonis.com/) +1. [CERN](https://home.cern/) +1. [Chargetrip](https://chargetrip.com) +1. [Chainnodes](https://chainnodes.org) +1. [Chime](https://www.chime.com) +1. [Cisco ET&I](https://eti.cisco.com/) +1. [Cloud Posse](https://www.cloudposse.com/) +1. [Cloud Scale](https://cloudscaleinc.com/) +1. [Cloudmate](https://cloudmt.co.kr/) +1. [Cloudogu](https://cloudogu.com/) +1. [Cobalt](https://www.cobalt.io/) 1. [Codefresh](https://www.codefresh.io/) 1. [Codility](https://www.codility.com/) 1. [Commonbond](https://commonbond.co/) +1. [Coralogix](https://coralogix.com/) +1. [Crédit Agricole CIB](https://www.ca-cib.com) 1. [CROZ d.o.o.](https://croz.net/) 1. [CyberAgent](https://www.cyberagent.co.jp/en/) 1. [Cybozu](https://cybozu-global.com) 1. [D2iQ](https://www.d2iq.com) +1. [DaoCloud](https://daocloud.io/) +1. [Datarisk](https://www.datarisk.io/) +1. [Deloitte](https://www.deloitte.com/) +1. [Deutsche Telekom AG](https://telekom.com) +1. [Devopsi - Poland Software/DevOps Consulting](https://devopsi.pl/) 1. [Devtron Labs](https://github.com/devtron-labs/devtron) +1. [DigitalOcean](https://www.digitalocean.com) +1. [Divistant](https://divistant.com) +1. [Dott](https://ridedott.com) +1. [Doximity](https://www.doximity.com/) 1. [EDF Renewables](https://www.edf-re.com/) 1. 
[edX](https://edx.org) -1. [Electronic Arts Inc. ](https://www.ea.com) +1. [Elastic](https://elastic.co/) +1. [Electronic Arts Inc.](https://www.ea.com) +1. [Elementor](https://elementor.com/) 1. [Elium](https://www.elium.com) 1. [END.](https://www.endclothing.com/) 1. [Energisme](https://energisme.com/) +1. [enigmo](https://enigmo.co.jp/) +1. [Envoy](https://envoy.com/) +1. [Factorial](https://factorialhr.com/) +1. [Farfetch](https://www.farfetch.com) +1. [Faro](https://www.faro.com/) 1. [Fave](https://myfave.com) +1. [Flexport](https://www.flexport.com/) +1. [Flip](https://flip.id) +1. [Fonoa](https://www.fonoa.com/) +1. [freee](https://corp.freee.co.jp/en/company/) +1. [Freshop, Inc](https://www.freshop.com/) 1. [Future PLC](https://www.futureplc.com/) +1. [G DATA CyberDefense AG](https://www.gdata-software.com/) 1. [Garner](https://www.garnercorp.com) +1. [Generali Deutschland AG](https://www.generali.de/) +1. [Gepardec](https://gepardec.com/) +1. [GetYourGuide](https://www.getyourguide.com/) +1. [Gitpod](https://www.gitpod.io) +1. [Gllue](https://gllue.com) +1. [gloat](https://gloat.com/) +1. [GLOBIS](https://globis.com) +1. [Glovo](https://www.glovoapp.com) +1. [GlueOps](https://glueops.dev) 1. [GMETRI](https://gmetri.com/) 1. [Gojek](https://www.gojek.io/) +1. [GoTo](https://www.goto.com/) +1. [GoTo Financial](https://gotofinancial.com/) 1. [Greenpass](https://www.greenpass.com.br/) +1. [Gridfuse](https://gridfuse.com/) +1. [Groww](https://groww.in) +1. [Grupo MasMovil](https://grupomasmovil.com/en/) 1. [Handelsbanken](https://www.handelsbanken.se) 1. [Healy](https://www.healyworld.net) +1. [Helio](https://helio.exchange) +1. [Hetki](https://hetki.ai) 1. [hipages](https://hipages.com.au/) +1. [Hiya](https://hiya.com) 1. [Honestbank](https://honestbank.com) +1. [Hostinger](https://www.hostinger.com) 1. [IBM](https://www.ibm.com/) +1. [Ibotta](https://home.ibotta.com) +1. [IITS-Consulting](https://iits-consulting.de) +1. [imaware](https://imaware.health) +1. 
[Indeed](https://indeed.com) +1. [Index Exchange](https://www.indexexchange.com/) +1. [Info Support](https://www.infosupport.com/) 1. [InsideBoard](https://www.insideboard.com) 1. [Intuit](https://www.intuit.com/) +1. [Jellysmack](https://www.jellysmack.com) +1. [Joblift](https://joblift.com/) 1. [JovianX](https://www.jovianx.com/) +1. [Kaltura](https://corp.kaltura.com/) +1. [Kandji](https://www.kandji.io/) 1. [Karrot](https://www.daangn.com/) +1. [KarrotPay](https://www.daangnpay.com/) 1. [Kasa](https://kasa.co.kr/) +1. [Keeeb](https://www.keeeb.com/) +1. [KelkooGroup](https://www.kelkoogroup.com) 1. [Keptn](https://keptn.sh) 1. [Kinguin](https://www.kinguin.net/) 1. [KintoHub](https://www.kintohub.com/) 1. [KompiTech GmbH](https://www.kompitech.com/) +1. [KPMG](https://kpmg.com/uk) +1. [KubeSphere](https://github.com/kubesphere) +1. [Kurly](https://www.kurly.com/) +1. [Kvist](https://kvistsolutions.com) 1. [LexisNexis](https://www.lexisnexis.com/) +1. [Lian Chu Securities](https://lczq.com) +1. [Liatrio](https://www.liatrio.com) +1. [Lightricks](https://www.lightricks.com/) 1. [LINE](https://linecorp.com/en/) +1. [Loom](https://www.loom.com/) +1. [Lucid Motors](https://www.lucidmotors.com/) 1. [Lytt](https://www.lytt.co/) +1. [Magic Leap](https://www.magicleap.com/) +1. [Majid Al Futtaim](https://www.majidalfuttaim.com/) 1. [Major League Baseball](https://mlb.com) 1. [Mambu](https://www.mambu.com/) +1. [MariaDB](https://mariadb.com) +1. [Mattermost](https://www.mattermost.com) 1. [Max Kelsen](https://www.maxkelsen.com/) +1. [MeDirect](https://medirect.com.mt/) +1. [Meican](https://meican.com/) +1. [Meilleurs Agents](https://www.meilleursagents.com/) +1. [Mercedes-Benz Tech Innovation](https://www.mercedes-benz-techinnovation.com/) +1. [Mercedes-Benz.io](https://www.mercedes-benz.io/) +1. [Metanet](http://www.metanet.co.kr/en/) 1. [MindSpore](https://mindspore.cn) 1. [Mirantis](https://mirantis.com/) +1. [Mission Lane](https://missionlane.com) +1. 
[mixi Group](https://mixi.co.jp/) 1. [Moengage](https://www.moengage.com/) 1. [Money Forward](https://corp.moneyforward.com/en/) 1. [MOO Print](https://www.moo.com/) 1. [MTN Group](https://www.mtn.com/) +1. [Natura &Co](https://naturaeco.com/) +1. [Nethopper](https://nethopper.io) 1. [New Relic](https://newrelic.com/) 1. [Nextdoor](https://nextdoor.com/) 1. [Nikkei](https://www.nikkei.co.jp/nikkeiinfo/en/) +1. [Nitro](https://gonitro.com) +1. [NYCU, CS IT Center](https://it.cs.nycu.edu.tw) +1. [Objective](https://www.objective.com.br/) +1. [OCCMundial](https://occ.com.mx) 1. [Octadesk](https://octadesk.com) +1. [Olfeo](https://www.olfeo.com/) +1. [omegaUp](https://omegaUp.com) +1. [Omni](https://omni.se/) 1. [openEuler](https://openeuler.org) 1. [openGauss](https://opengauss.org/) +1. [OpenGov](https://opengov.com) 1. [openLooKeng](https://openlookeng.io) 1. [OpenSaaS Studio](https://opensaas.studio) 1. [Opensurvey](https://www.opensurvey.co.kr/) +1. [OpsMx](https://opsmx.io) +1. [OpsVerse](https://opsverse.io) 1. [Optoro](https://www.optoro.com/) 1. [Orbital Insight](https://orbitalinsight.com/) +1. [Oscar Health Insurance](https://hioscar.com/) +1. [p3r](https://www.p3r.one/) +1. [Packlink](https://www.packlink.com/) +1. [PagerDuty](https://www.pagerduty.com/) +1. [Pandosearch](https://www.pandosearch.com/en/home) +1. [Patreon](https://www.patreon.com/) 1. [PayPay](https://paypay.ne.jp/) 1. [Peloton Interactive](https://www.onepeloton.com/) +1. [PGS](https://www.pgs.com) +1. [Pigment](https://www.gopigment.com/) 1. [Pipefy](https://www.pipefy.com/) +1. [Pismo](https://pismo.io/) +1. [Platform9 Systems](https://platform9.com/) 1. [Polarpoint.io](https://polarpoint.io) +1. [PostFinance](https://github.com/postfinance) 1. [Preferred Networks](https://preferred.jp/en/) +1. [Previder BV](https://previder.nl) +1. [Procore](https://www.procore.com) +1. [Productboard](https://www.productboard.com/) 1. [Prudential](https://prudential.com.sg) +1. 
[PT Boer Technology (Btech)](https://btech.id/) 1. [PUBG](https://www.pubg.com) +1. [Puzzle ITC](https://www.puzzle.ch/) 1. [Qonto](https://qonto.com) 1. [QuintoAndar](https://quintoandar.com.br) 1. [Quipper](https://www.quipper.com/) +1. [RapidAPI](https://www.rapidapi.com/) 1. [Recreation.gov](https://www.recreation.gov/) 1. [Red Hat](https://www.redhat.com/) +1. [Redpill Linpro](https://www.redpill-linpro.com/) +1. [Reenigne Cloud](https://reenigne.ca) +1. [reev.com](https://www.reev.com/) +1. [RightRev](https://rightrev.com/) +1. [Rise](https://www.risecard.eu/) 1. [Riskified](https://www.riskified.com/) 1. [Robotinfra](https://www.robotinfra.com) +1. [Rubin Observatory](https://www.lsst.org) 1. [Saildrone](https://www.saildrone.com/) 1. [Saloodo! GmbH](https://www.saloodo.com) +1. [Sap Labs](http://sap.com) +1. [Sauce Labs](https://saucelabs.com/) 1. [Schwarz IT](https://jobs.schwarz/it-mission) +1. [SCRM Lidl International Hub](https://scrm.lidl) +1. [SEEK](https://seek.com.au) +1. [SI Analytics](https://si-analytics.ai) +1. [Skit](https://skit.ai/) +1. [Skyscanner](https://www.skyscanner.net/) +1. [Smart Pension](https://www.smartpension.co.uk/) +1. [Smilee.io](https://smilee.io) +1. [Smood.ch](https://www.smood.ch/) +1. [Snapp](https://snapp.ir/) +1. [Snyk](https://snyk.io/) +1. [Softway Medical](https://www.softwaymedical.fr/) +1. [South China Morning Post (SCMP)](https://www.scmp.com/) 1. [Speee](https://speee.jp/) 1. [Spendesk](https://spendesk.com/) +1. [Splunk](https://splunk.com/) +1. [Spores Labs](https://spores.app) +1. [StreamNative](https://streamnative.io) +1. [Stuart](https://stuart.com/) 1. [Sumo Logic](https://sumologic.com/) +1. [Sutpc](http://www.sutpc.com/) +1. [Swiss Post](https://github.com/swisspost) 1. [Swisscom](https://www.swisscom.ch) 1. [Swissquote](https://github.com/swissquote) 1. [Syncier](https://syncier.com/) 1. [TableCheck](https://tablecheck.com/) 1. [Tailor Brands](https://www.tailorbrands.com) +1. 
[Tamkeen Technologies](https://tamkeentech.sa/) +1. [Techcombank](https://www.techcombank.com.vn/trang-chu) +1. [Technacy](https://www.technacy.it/) 1. [Tesla](https://tesla.com/) +1. [The Scale Factory](https://www.scalefactory.com/) 1. [ThousandEyes](https://www.thousandeyes.com/) 1. [Ticketmaster](https://ticketmaster.com) 1. [Tiger Analytics](https://www.tigeranalytics.com/) +1. [Tigera](https://www.tigera.io/) 1. [Toss](https://toss.im/en) +1. [Trendyol](https://www.trendyol.com/) 1. [tru.ID](https://tru.id) +1. [Trusting Social](https://trustingsocial.com/) +1. [Twilio Segment](https://segment.com/) 1. [Twilio SendGrid](https://sendgrid.com) 1. [tZERO](https://www.tzero.com/) +1. [U.S. Veterans Affairs Department](https://www.va.gov/) 1. [UBIO](https://ub.io/) 1. [UFirstGroup](https://www.ufirstgroup.com/en/) +1. [ungleich.ch](https://ungleich.ch/) +1. [Unifonic Inc](https://www.unifonic.com/) 1. [Universidad Mesoamericana](https://www.umes.edu.gt/) +1. [Upsider Inc.](https://up-sider.com/lp/) +1. [Urbantz](https://urbantz.com/) +1. [Vectra](https://www.vectra.ai) +1. [Veepee](https://www.veepee.com) 1. [Viaduct](https://www.viaduct.ai/) +1. [VietMoney](https://vietmoney.vn/) +1. [Vinted](https://vinted.com/) 1. [Virtuo](https://www.govirtuo.com/) 1. [VISITS Technologies](https://visits.world/en) 1. [Volvo Cars](https://www.volvocars.com/) +1. [Voyager Digital](https://www.investvoyager.com/) 1. [VSHN - The DevOps Company](https://vshn.ch/) 1. [Walkbase](https://www.walkbase.com/) +1. [Webstores](https://www.webstores.nl) +1. [Wehkamp](https://www.wehkamp.nl/) +1. [WeMaintain](https://www.wemaintain.com/) 1. [WeMo Scooter](https://www.wemoscooter.com/) 1. [Whitehat Berlin](https://whitehat.berlin) by Guido Maria Serra +Fenaroli +1. [Witick](https://witick.io/) +1. [Wolffun Game](https://www.wolffungame.com/) +1. [WooliesX](https://wooliesx.com.au/) +1. [Woolworths Group](https://www.woolworthsgroup.com.au/) +1. [WSpot](https://www.wspot.com.br/) 1. 
[Yieldlab](https://www.yieldlab.de/) -1. [Sap Labs](http://sap.com) -1. [Smilee.io](https://smilee.io) -1. [Metanet](http://www.metanet.co.kr/en/) +1. [Youverify](https://youverify.co/) +1. [Yubo](https://www.yubo.live/) +1. [ZDF](https://www.zdf.de/) +1. [Zimpler](https://www.zimpler.com/) +1. [ZOZO](https://corp.zozo.com/) diff --git a/VERSION b/VERSION index 227cea215648b..c8e38b614057b 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0.0 +2.9.0 diff --git a/applicationset/controllers/applicationset_controller.go b/applicationset/controllers/applicationset_controller.go new file mode 100644 index 0000000000000..60bab2564d92c --- /dev/null +++ b/applicationset/controllers/applicationset_controller.go @@ -0,0 +1,1524 @@ +/* +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "time" + + log "github.com/sirupsen/logrus" + corev1 "k8s.io/api/core/v1" + apierr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/kubernetes" + k8scache "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/source" + + "github.com/argoproj/argo-cd/v2/applicationset/generators" + "github.com/argoproj/argo-cd/v2/applicationset/utils" + "github.com/argoproj/argo-cd/v2/common" + argodiff "github.com/argoproj/argo-cd/v2/util/argo/diff" + "github.com/argoproj/argo-cd/v2/util/db" + "github.com/argoproj/argo-cd/v2/util/glob" + + argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned" + argoutil "github.com/argoproj/argo-cd/v2/util/argo" + + "github.com/argoproj/argo-cd/v2/pkg/apis/application" +) + +const ( + // Rather than importing the whole argocd-notifications controller, just copying the const here + // https://github.com/argoproj-labs/argocd-notifications/blob/33d345fa838829bb50fca5c08523aba380d2c12b/pkg/controller/subscriptions.go#L12 + // https://github.com/argoproj-labs/argocd-notifications/blob/33d345fa838829bb50fca5c08523aba380d2c12b/pkg/controller/state.go#L17 + NotifiedAnnotationKey 
= "notified.notifications.argoproj.io" + ReconcileRequeueOnValidationError = time.Minute * 3 +) + +var ( + defaultPreservedAnnotations = []string{ + NotifiedAnnotationKey, + argov1alpha1.AnnotationKeyRefresh, + } +) + +// ApplicationSetReconciler reconciles a ApplicationSet object +type ApplicationSetReconciler struct { + client.Client + Scheme *runtime.Scheme + Recorder record.EventRecorder + Generators map[string]generators.Generator + ArgoDB db.ArgoDB + ArgoAppClientset appclientset.Interface + KubeClientset kubernetes.Interface + Policy argov1alpha1.ApplicationsSyncPolicy + EnablePolicyOverride bool + utils.Renderer + ArgoCDNamespace string + ApplicationSetNamespaces []string + EnableProgressiveSyncs bool + SCMRootCAPath string + GlobalPreservedAnnotations []string + GlobalPreservedLabels []string + Cache cache.Cache +} + +// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets/status,verbs=get;update;patch + +func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logCtx := log.WithField("applicationset", req.NamespacedName) + + var applicationSetInfo argov1alpha1.ApplicationSet + parametersGenerated := false + + if err := r.Get(ctx, req.NamespacedName, &applicationSetInfo); err != nil { + if client.IgnoreNotFound(err) != nil { + logCtx.WithError(err).Infof("unable to get ApplicationSet: '%v' ", err) + } + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // Do not attempt to further reconcile the ApplicationSet if it is being deleted. + if applicationSetInfo.ObjectMeta.DeletionTimestamp != nil { + return ctrl.Result{}, nil + } + + // Log a warning if there are unrecognized generators + _ = utils.CheckInvalidGenerators(&applicationSetInfo) + // desiredApplications is the main list of all expected Applications from all generators in this appset. 
+ desiredApplications, applicationSetReason, err := r.generateApplications(applicationSetInfo) + if err != nil { + _ = r.setApplicationSetStatusCondition(ctx, + &applicationSetInfo, + argov1alpha1.ApplicationSetCondition{ + Type: argov1alpha1.ApplicationSetConditionErrorOccurred, + Message: err.Error(), + Reason: string(applicationSetReason), + Status: argov1alpha1.ApplicationSetConditionStatusTrue, + }, parametersGenerated, + ) + return ctrl.Result{}, err + } + + parametersGenerated = true + + validateErrors, err := r.validateGeneratedApplications(ctx, desiredApplications, applicationSetInfo) + if err != nil { + // While some generators may return an error that requires user intervention, + // other generators reference external resources that may change to cause + // the error to no longer occur. We thus log the error and requeue + // with a timeout to give this another shot at a later time. + // + // Changes to watched resources will cause this to be reconciled sooner than + // the RequeueAfter time. + logCtx.Errorf("error occurred during application validation: %s", err.Error()) + + _ = r.setApplicationSetStatusCondition(ctx, + &applicationSetInfo, + argov1alpha1.ApplicationSetCondition{ + Type: argov1alpha1.ApplicationSetConditionErrorOccurred, + Message: err.Error(), + Reason: argov1alpha1.ApplicationSetReasonApplicationValidationError, + Status: argov1alpha1.ApplicationSetConditionStatusTrue, + }, parametersGenerated, + ) + return ctrl.Result{RequeueAfter: ReconcileRequeueOnValidationError}, nil + } + + // appMap is a name->app collection of Applications in this ApplicationSet. + appMap := map[string]argov1alpha1.Application{} + // appSyncMap tracks which apps will be synced during this reconciliation. 
+ appSyncMap := map[string]bool{} + + if r.EnableProgressiveSyncs { + if applicationSetInfo.Spec.Strategy == nil && len(applicationSetInfo.Status.ApplicationStatus) > 0 { + log.Infof("Removing %v unnecessary AppStatus entries from ApplicationSet %v", len(applicationSetInfo.Status.ApplicationStatus), applicationSetInfo.Name) + + err := r.setAppSetApplicationStatus(ctx, &applicationSetInfo, []argov1alpha1.ApplicationSetApplicationStatus{}) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to clear previous AppSet application statuses for %v: %w", applicationSetInfo.Name, err) + } + } else { + applications, err := r.getCurrentApplications(ctx, applicationSetInfo) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to get current applications for application set: %w", err) + } + + for _, app := range applications { + appMap[app.Name] = app + } + + appSyncMap, err = r.performProgressiveSyncs(ctx, applicationSetInfo, applications, desiredApplications, appMap) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to perform progressive sync reconciliation for application set: %w", err) + } + } + } + + var validApps []argov1alpha1.Application + for i := range desiredApplications { + if validateErrors[i] == nil { + validApps = append(validApps, desiredApplications[i]) + } + } + + if len(validateErrors) > 0 { + var message string + for _, v := range validateErrors { + message = v.Error() + logCtx.Errorf("validation error found during application validation: %s", message) + } + if len(validateErrors) > 1 { + // Only the last message gets added to the appset status, to keep the size reasonable. 
+ message = fmt.Sprintf("%s (and %d more)", message, len(validateErrors)-1) + } + _ = r.setApplicationSetStatusCondition(ctx, + &applicationSetInfo, + argov1alpha1.ApplicationSetCondition{ + Type: argov1alpha1.ApplicationSetConditionErrorOccurred, + Message: message, + Reason: argov1alpha1.ApplicationSetReasonApplicationValidationError, + Status: argov1alpha1.ApplicationSetConditionStatusTrue, + }, parametersGenerated, + ) + } + + if r.EnableProgressiveSyncs { + // trigger appropriate application syncs if RollingSync strategy is enabled + if progressiveSyncsStrategyEnabled(&applicationSetInfo, "RollingSync") { + validApps, err = r.syncValidApplications(ctx, &applicationSetInfo, appSyncMap, appMap, validApps) + + if err != nil { + _ = r.setApplicationSetStatusCondition(ctx, + &applicationSetInfo, + argov1alpha1.ApplicationSetCondition{ + Type: argov1alpha1.ApplicationSetConditionErrorOccurred, + Message: err.Error(), + Reason: argov1alpha1.ApplicationSetReasonSyncApplicationError, + Status: argov1alpha1.ApplicationSetConditionStatusTrue, + }, parametersGenerated, + ) + return ctrl.Result{}, err + } + } + } + + if utils.DefaultPolicy(applicationSetInfo.Spec.SyncPolicy, r.Policy, r.EnablePolicyOverride).AllowUpdate() { + err = r.createOrUpdateInCluster(ctx, applicationSetInfo, validApps) + if err != nil { + _ = r.setApplicationSetStatusCondition(ctx, + &applicationSetInfo, + argov1alpha1.ApplicationSetCondition{ + Type: argov1alpha1.ApplicationSetConditionErrorOccurred, + Message: err.Error(), + Reason: argov1alpha1.ApplicationSetReasonUpdateApplicationError, + Status: argov1alpha1.ApplicationSetConditionStatusTrue, + }, parametersGenerated, + ) + return ctrl.Result{}, err + } + } else { + err = r.createInCluster(ctx, applicationSetInfo, validApps) + if err != nil { + _ = r.setApplicationSetStatusCondition(ctx, + &applicationSetInfo, + argov1alpha1.ApplicationSetCondition{ + Type: argov1alpha1.ApplicationSetConditionErrorOccurred, + Message: err.Error(), + Reason: 
argov1alpha1.ApplicationSetReasonCreateApplicationError, + Status: argov1alpha1.ApplicationSetConditionStatusTrue, + }, parametersGenerated, + ) + return ctrl.Result{}, err + } + } + + if utils.DefaultPolicy(applicationSetInfo.Spec.SyncPolicy, r.Policy, r.EnablePolicyOverride).AllowDelete() { + err = r.deleteInCluster(ctx, applicationSetInfo, desiredApplications) + if err != nil { + _ = r.setApplicationSetStatusCondition(ctx, + &applicationSetInfo, + argov1alpha1.ApplicationSetCondition{ + Type: argov1alpha1.ApplicationSetConditionResourcesUpToDate, + Message: err.Error(), + Reason: argov1alpha1.ApplicationSetReasonDeleteApplicationError, + Status: argov1alpha1.ApplicationSetConditionStatusTrue, + }, parametersGenerated, + ) + return ctrl.Result{}, err + } + } + + if applicationSetInfo.RefreshRequired() { + delete(applicationSetInfo.Annotations, common.AnnotationApplicationSetRefresh) + err := r.Client.Update(ctx, &applicationSetInfo) + if err != nil { + logCtx.Warnf("error occurred while updating ApplicationSet: %v", err) + _ = r.setApplicationSetStatusCondition(ctx, + &applicationSetInfo, + argov1alpha1.ApplicationSetCondition{ + Type: argov1alpha1.ApplicationSetConditionErrorOccurred, + Message: err.Error(), + Reason: argov1alpha1.ApplicationSetReasonRefreshApplicationError, + Status: argov1alpha1.ApplicationSetConditionStatusTrue, + }, parametersGenerated, + ) + return ctrl.Result{}, err + } + } + + requeueAfter := r.getMinRequeueAfter(&applicationSetInfo) + + if len(validateErrors) == 0 { + if err := r.setApplicationSetStatusCondition(ctx, + &applicationSetInfo, + argov1alpha1.ApplicationSetCondition{ + Type: argov1alpha1.ApplicationSetConditionResourcesUpToDate, + Message: "All applications have been generated successfully", + Reason: argov1alpha1.ApplicationSetReasonApplicationSetUpToDate, + Status: argov1alpha1.ApplicationSetConditionStatusTrue, + }, parametersGenerated, + ); err != nil { + return ctrl.Result{}, err + } + } else if requeueAfter == 
time.Duration(0) { + // Ensure that the request is requeued if there are validation errors. + requeueAfter = ReconcileRequeueOnValidationError + } + + logCtx.WithField("requeueAfter", requeueAfter).Info("end reconcile") + + return ctrl.Result{ + RequeueAfter: requeueAfter, + }, nil +} + +func getParametersGeneratedCondition(parametersGenerated bool, message string) argov1alpha1.ApplicationSetCondition { + var paramtersGeneratedCondition argov1alpha1.ApplicationSetCondition + if parametersGenerated { + paramtersGeneratedCondition = argov1alpha1.ApplicationSetCondition{ + Type: argov1alpha1.ApplicationSetConditionParametersGenerated, + Message: "Successfully generated parameters for all Applications", + Reason: argov1alpha1.ApplicationSetReasonParametersGenerated, + Status: argov1alpha1.ApplicationSetConditionStatusTrue, + } + } else { + paramtersGeneratedCondition = argov1alpha1.ApplicationSetCondition{ + Type: argov1alpha1.ApplicationSetConditionParametersGenerated, + Message: message, + Reason: argov1alpha1.ApplicationSetReasonErrorOccurred, + Status: argov1alpha1.ApplicationSetConditionStatusFalse, + } + } + return paramtersGeneratedCondition +} + +func getResourceUpToDateCondition(errorOccurred bool, message string, reason string) argov1alpha1.ApplicationSetCondition { + var resourceUpToDateCondition argov1alpha1.ApplicationSetCondition + if errorOccurred { + resourceUpToDateCondition = argov1alpha1.ApplicationSetCondition{ + Type: argov1alpha1.ApplicationSetConditionResourcesUpToDate, + Message: message, + Reason: reason, + Status: argov1alpha1.ApplicationSetConditionStatusFalse, + } + } else { + resourceUpToDateCondition = argov1alpha1.ApplicationSetCondition{ + Type: argov1alpha1.ApplicationSetConditionResourcesUpToDate, + Message: "ApplicationSet up to date", + Reason: argov1alpha1.ApplicationSetReasonApplicationSetUpToDate, + Status: argov1alpha1.ApplicationSetConditionStatusTrue, + } + } + return resourceUpToDateCondition +} + +func (r 
*ApplicationSetReconciler) setApplicationSetStatusCondition(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, condition argov1alpha1.ApplicationSetCondition, paramtersGenerated bool) error { + // check if error occurred during reconcile process + errOccurred := condition.Type == argov1alpha1.ApplicationSetConditionErrorOccurred + + var errOccurredCondition argov1alpha1.ApplicationSetCondition + + if errOccurred { + errOccurredCondition = condition + } else { + errOccurredCondition = argov1alpha1.ApplicationSetCondition{ + Type: argov1alpha1.ApplicationSetConditionErrorOccurred, + Message: "Successfully generated parameters for all Applications", + Reason: argov1alpha1.ApplicationSetReasonApplicationSetUpToDate, + Status: argov1alpha1.ApplicationSetConditionStatusFalse, + } + } + + paramtersGeneratedCondition := getParametersGeneratedCondition(paramtersGenerated, condition.Message) + resourceUpToDateCondition := getResourceUpToDateCondition(errOccurred, condition.Message, condition.Reason) + + newConditions := []argov1alpha1.ApplicationSetCondition{errOccurredCondition, paramtersGeneratedCondition, resourceUpToDateCondition} + + needToUpdateConditions := false + for _, condition := range newConditions { + // do nothing if appset already has same condition + for _, c := range applicationSet.Status.Conditions { + if c.Type == condition.Type && (c.Reason != condition.Reason || c.Status != condition.Status || c.Message != condition.Message) { + needToUpdateConditions = true + break + } + } + } + evaluatedTypes := map[argov1alpha1.ApplicationSetConditionType]bool{ + argov1alpha1.ApplicationSetConditionErrorOccurred: true, + argov1alpha1.ApplicationSetConditionParametersGenerated: true, + argov1alpha1.ApplicationSetConditionResourcesUpToDate: true, + } + + if needToUpdateConditions || len(applicationSet.Status.Conditions) < 3 { + // fetch updated Application Set object before updating it + namespacedName := types.NamespacedName{Namespace: 
applicationSet.Namespace, Name: applicationSet.Name} + if err := r.Get(ctx, namespacedName, applicationSet); err != nil { + if client.IgnoreNotFound(err) != nil { + return nil + } + return fmt.Errorf("error fetching updated application set: %v", err) + } + + applicationSet.Status.SetConditions( + newConditions, evaluatedTypes, + ) + + // Update the newly fetched object with new set of conditions + err := r.Client.Status().Update(ctx, applicationSet) + if err != nil && !apierr.IsNotFound(err) { + return fmt.Errorf("unable to set application set condition: %v", err) + } + } + + return nil +} + +// validateGeneratedApplications uses the Argo CD validation functions to verify the correctness of the +// generated applications. +func (r *ApplicationSetReconciler) validateGeneratedApplications(ctx context.Context, desiredApplications []argov1alpha1.Application, applicationSetInfo argov1alpha1.ApplicationSet) (map[int]error, error) { + errorsByIndex := map[int]error{} + namesSet := map[string]bool{} + for i, app := range desiredApplications { + + if !namesSet[app.Name] { + namesSet[app.Name] = true + } else { + errorsByIndex[i] = fmt.Errorf("ApplicationSet %s contains applications with duplicate name: %s", applicationSetInfo.Name, app.Name) + continue + } + _, err := r.ArgoAppClientset.ArgoprojV1alpha1().AppProjects(r.ArgoCDNamespace).Get(ctx, app.Spec.GetProject(), metav1.GetOptions{}) + if err != nil { + if apierr.IsNotFound(err) { + errorsByIndex[i] = fmt.Errorf("application references project %s which does not exist", app.Spec.Project) + continue + } + return nil, err + } + + if err := utils.ValidateDestination(ctx, &app.Spec.Destination, r.KubeClientset, r.ArgoCDNamespace); err != nil { + errorsByIndex[i] = fmt.Errorf("application destination spec is invalid: %s", err.Error()) + continue + } + + } + + return errorsByIndex, nil +} + +func (r *ApplicationSetReconciler) getMinRequeueAfter(applicationSetInfo *argov1alpha1.ApplicationSet) time.Duration { + var res 
time.Duration + for _, requestedGenerator := range applicationSetInfo.Spec.Generators { + + relevantGenerators := generators.GetRelevantGenerators(&requestedGenerator, r.Generators) + + for _, g := range relevantGenerators { + t := g.GetRequeueAfter(&requestedGenerator) + + if res == 0 { + res = t + } else if t != 0 && t < res { + res = t + } + } + } + + return res +} + +func getTempApplication(applicationSetTemplate argov1alpha1.ApplicationSetTemplate) *argov1alpha1.Application { + var tmplApplication argov1alpha1.Application + tmplApplication.Annotations = applicationSetTemplate.Annotations + tmplApplication.Labels = applicationSetTemplate.Labels + tmplApplication.Namespace = applicationSetTemplate.Namespace + tmplApplication.Name = applicationSetTemplate.Name + tmplApplication.Spec = applicationSetTemplate.Spec + tmplApplication.Finalizers = applicationSetTemplate.Finalizers + + return &tmplApplication +} + +func (r *ApplicationSetReconciler) generateApplications(applicationSetInfo argov1alpha1.ApplicationSet) ([]argov1alpha1.Application, argov1alpha1.ApplicationSetReasonType, error) { + var res []argov1alpha1.Application + + var firstError error + var applicationSetReason argov1alpha1.ApplicationSetReasonType + + for _, requestedGenerator := range applicationSetInfo.Spec.Generators { + t, err := generators.Transform(requestedGenerator, r.Generators, applicationSetInfo.Spec.Template, &applicationSetInfo, map[string]interface{}{}) + if err != nil { + log.WithError(err).WithField("generator", requestedGenerator). 
+ Error("error generating application from params") + if firstError == nil { + firstError = err + applicationSetReason = argov1alpha1.ApplicationSetReasonApplicationParamsGenerationError + } + continue + } + + for _, a := range t { + tmplApplication := getTempApplication(a.Template) + + for _, p := range a.Params { + app, err := r.Renderer.RenderTemplateParams(tmplApplication, applicationSetInfo.Spec.SyncPolicy, p, applicationSetInfo.Spec.GoTemplate, applicationSetInfo.Spec.GoTemplateOptions) + if err != nil { + log.WithError(err).WithField("params", a.Params).WithField("generator", requestedGenerator). + Error("error generating application from params") + + if firstError == nil { + firstError = err + applicationSetReason = argov1alpha1.ApplicationSetReasonRenderTemplateParamsError + } + continue + } + res = append(res, *app) + } + } + + log.WithField("generator", requestedGenerator).Infof("generated %d applications", len(res)) + log.WithField("generator", requestedGenerator).Debugf("apps from generator: %+v", res) + } + + return res, applicationSetReason, firstError +} + +func ignoreNotAllowedNamespaces(namespaces []string) predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return glob.MatchStringInList(namespaces, e.Object.GetNamespace(), false) + }, + } +} + +func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager, enableProgressiveSyncs bool, maxConcurrentReconciliations int) error { + if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &argov1alpha1.Application{}, ".metadata.controller", func(rawObj client.Object) []string { + // grab the job object, extract the owner... + app := rawObj.(*argov1alpha1.Application) + owner := metav1.GetControllerOf(app) + if owner == nil { + return nil + } + // ...make sure it's a application set... 
+ if owner.APIVersion != argov1alpha1.SchemeGroupVersion.String() || owner.Kind != "ApplicationSet" { + return nil + } + + // ...and if so, return it + return []string{owner.Name} + }); err != nil { + return fmt.Errorf("error setting up with manager: %w", err) + } + + ownsHandler := getOwnsHandlerPredicates(enableProgressiveSyncs) + + return ctrl.NewControllerManagedBy(mgr).WithOptions(controller.Options{ + MaxConcurrentReconciles: maxConcurrentReconciliations, + }).For(&argov1alpha1.ApplicationSet{}). + Owns(&argov1alpha1.Application{}, builder.WithPredicates(ownsHandler)). + WithEventFilter(ignoreNotAllowedNamespaces(r.ApplicationSetNamespaces)). + Watches( + &source.Kind{Type: &corev1.Secret{}}, + &clusterSecretEventHandler{ + Client: mgr.GetClient(), + Log: log.WithField("type", "createSecretEventHandler"), + }). + // TODO: also watch Applications and respond on changes if we own them. + Complete(r) +} + +func (r *ApplicationSetReconciler) updateCache(ctx context.Context, obj client.Object, logger *log.Entry) { + informer, err := r.Cache.GetInformer(ctx, obj) + if err != nil { + logger.Errorf("failed to get informer: %v", err) + return + } + // The controller runtime abstract away informers creation + // so unfortunately could not find any other way to access informer store. + k8sInformer, ok := informer.(k8scache.SharedInformer) + if !ok { + logger.Error("informer is not a kubernetes informer") + return + } + if err := k8sInformer.GetStore().Update(obj); err != nil { + logger.Errorf("failed to update cache: %v", err) + return + } +} + +// createOrUpdateInCluster will create / update application resources in the cluster. +// - For new applications, it will call create +// - For existing application, it will call update +// The function also adds owner reference to all applications, and uses it to delete them. 
+func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error { + + var firstError error + // Creates or updates the application in appList + for _, generatedApp := range desiredApplications { + + appLog := log.WithFields(log.Fields{"app": generatedApp.Name, "appSet": applicationSet.Name}) + generatedApp.Namespace = applicationSet.Namespace + + // Normalize to avoid fighting with the application controller. + generatedApp.Spec = *argoutil.NormalizeApplicationSpec(&generatedApp.Spec) + + found := &argov1alpha1.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: generatedApp.Name, + Namespace: generatedApp.Namespace, + }, + TypeMeta: metav1.TypeMeta{ + Kind: application.ApplicationKind, + APIVersion: "argoproj.io/v1alpha1", + }, + } + + action, err := utils.CreateOrUpdate(ctx, r.Client, found, func() error { + // Copy only the Application/ObjectMeta fields that are significant, from the generatedApp + found.Spec = generatedApp.Spec + + // allow setting the Operation field to trigger a sync operation on an Application + if generatedApp.Operation != nil { + found.Operation = generatedApp.Operation + } + + preservedAnnotations := make([]string, 0) + preservedLabels := make([]string, 0) + + if applicationSet.Spec.PreservedFields != nil { + preservedAnnotations = append(preservedAnnotations, applicationSet.Spec.PreservedFields.Annotations...) + preservedLabels = append(preservedLabels, applicationSet.Spec.PreservedFields.Labels...) + } + + if len(r.GlobalPreservedAnnotations) > 0 { + preservedAnnotations = append(preservedAnnotations, r.GlobalPreservedAnnotations...) + } + + if len(r.GlobalPreservedLabels) > 0 { + preservedLabels = append(preservedLabels, r.GlobalPreservedLabels...) 
+ } + + // Preserve specially treated argo cd annotations: + // * https://github.com/argoproj/applicationset/issues/180 + // * https://github.com/argoproj/argo-cd/issues/10500 + preservedAnnotations = append(preservedAnnotations, defaultPreservedAnnotations...) + + for _, key := range preservedAnnotations { + if state, exists := found.ObjectMeta.Annotations[key]; exists { + if generatedApp.Annotations == nil { + generatedApp.Annotations = map[string]string{} + } + generatedApp.Annotations[key] = state + } + } + + for _, key := range preservedLabels { + if state, exists := found.ObjectMeta.Labels[key]; exists { + if generatedApp.Labels == nil { + generatedApp.Labels = map[string]string{} + } + generatedApp.Labels[key] = state + } + } + + found.ObjectMeta.Annotations = generatedApp.Annotations + + found.ObjectMeta.Finalizers = generatedApp.Finalizers + found.ObjectMeta.Labels = generatedApp.Labels + + if found != nil && len(found.Spec.IgnoreDifferences) > 0 { + err := applyIgnoreDifferences(applicationSet.Spec.IgnoreApplicationDifferences, found, generatedApp) + if err != nil { + return fmt.Errorf("failed to apply ignore differences: %w", err) + } + } + + return controllerutil.SetControllerReference(&applicationSet, found, r.Scheme) + }) + + if err != nil { + appLog.WithError(err).WithField("action", action).Errorf("failed to %s Application", action) + if firstError == nil { + firstError = err + } + continue + } + r.updateCache(ctx, found, appLog) + + if action != controllerutil.OperationResultNone { + // Don't pollute etcd with "unchanged Application" events + r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, fmt.Sprint(action), "%s Application %q", action, generatedApp.Name) + appLog.Logf(log.InfoLevel, "%s Application", action) + } else { + // "unchanged Application" can be inferred by Reconcile Complete with no action being listed + // Or enable debug logging + appLog.Logf(log.DebugLevel, "%s Application", action) + } + } + return firstError +} + +// 
applyIgnoreDifferences applies the ignore differences rules to the found application. It modifies the found application in place. +func applyIgnoreDifferences(applicationSetIgnoreDifferences argov1alpha1.ApplicationSetIgnoreDifferences, found *argov1alpha1.Application, generatedApp argov1alpha1.Application) error { + diffConfig, err := argodiff.NewDiffConfigBuilder(). + WithDiffSettings(applicationSetIgnoreDifferences.ToApplicationIgnoreDifferences(), nil, false). + WithNoCache(). + Build() + if err != nil { + return fmt.Errorf("failed to build diff config: %w", err) + } + unstructuredFound, err := appToUnstructured(found) + if err != nil { + return fmt.Errorf("failed to convert found application to unstructured: %w", err) + } + unstructuredGenerated, err := appToUnstructured(&generatedApp) + if err != nil { + return fmt.Errorf("failed to convert found application to unstructured: %w", err) + } + result, err := argodiff.Normalize([]*unstructured.Unstructured{unstructuredFound}, []*unstructured.Unstructured{unstructuredGenerated}, diffConfig) + if err != nil { + return fmt.Errorf("failed to normalize application spec: %w", err) + } + if len(result.Targets) != 1 { + return fmt.Errorf("expected 1 normalized application, got %d", len(result.Targets)) + } + jsonNormalized, err := json.Marshal(result.Targets[0].Object) + if err != nil { + return fmt.Errorf("failed to marshal normalized app to json: %w", err) + } + err = json.Unmarshal(jsonNormalized, &found) + if err != nil { + return fmt.Errorf("failed to unmarshal normalized app json to structured app: %w", err) + } + // Prohibit jq queries from mutating silly things. 
+ found.TypeMeta = generatedApp.TypeMeta + found.Name = generatedApp.Name + found.Namespace = generatedApp.Namespace + found.Operation = generatedApp.Operation + return nil +} + +func appToUnstructured(app *argov1alpha1.Application) (*unstructured.Unstructured, error) { + u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(app) + if err != nil { + return nil, fmt.Errorf("failed to convert app object to unstructured: %w", err) + } + return &unstructured.Unstructured{Object: u}, nil +} + +// createInCluster will filter from the desiredApplications only the application that needs to be created +// Then it will call createOrUpdateInCluster to do the actual create +func (r *ApplicationSetReconciler) createInCluster(ctx context.Context, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error { + + var createApps []argov1alpha1.Application + current, err := r.getCurrentApplications(ctx, applicationSet) + if err != nil { + return fmt.Errorf("error getting current applications: %w", err) + } + + m := make(map[string]bool) // Will holds the app names that are current in the cluster + + for _, app := range current { + m[app.Name] = true + } + + // filter applications that are not in m[string]bool (new to the cluster) + for _, app := range desiredApplications { + _, exists := m[app.Name] + + if !exists { + createApps = append(createApps, app) + } + } + + return r.createOrUpdateInCluster(ctx, applicationSet, createApps) +} + +func (r *ApplicationSetReconciler) getCurrentApplications(_ context.Context, applicationSet argov1alpha1.ApplicationSet) ([]argov1alpha1.Application, error) { + // TODO: Should this use the context param? 
+ var current argov1alpha1.ApplicationList + err := r.Client.List(context.Background(), &current, client.MatchingFields{".metadata.controller": applicationSet.Name}) + + if err != nil { + return nil, fmt.Errorf("error retrieving applications: %w", err) + } + + return current.Items, nil +} + +// deleteInCluster will delete Applications that are currently on the cluster, but not in appList. +// The function must be called after all generators had been called and generated applications +func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error { + // settingsMgr := settings.NewSettingsManager(context.TODO(), r.KubeClientset, applicationSet.Namespace) + // argoDB := db.NewDB(applicationSet.Namespace, settingsMgr, r.KubeClientset) + // clusterList, err := argoDB.ListClusters(ctx) + clusterList, err := utils.ListClusters(ctx, r.KubeClientset, r.ArgoCDNamespace) + if err != nil { + return fmt.Errorf("error listing clusters: %w", err) + } + + // Save current applications to be able to delete the ones that are not in appList + current, err := r.getCurrentApplications(ctx, applicationSet) + if err != nil { + return fmt.Errorf("error getting current applications: %w", err) + } + + m := make(map[string]bool) // Will hold the app names in appList for the deletion process + + for _, app := range desiredApplications { + m[app.Name] = true + } + + // Delete apps that are not in m[string]bool + var firstError error + for _, app := range current { + appLog := log.WithFields(log.Fields{"app": app.Name, "appSet": applicationSet.Name}) + _, exists := m[app.Name] + + if !exists { + + // Removes the Argo CD resources finalizer if the application contains an invalid target (eg missing cluster) + err := r.removeFinalizerOnInvalidDestination(ctx, applicationSet, &app, clusterList, appLog) + if err != nil { + appLog.WithError(err).Error("failed to update Application") + if firstError == 
nil { + firstError = err + } + continue + } + + err = r.Client.Delete(ctx, &app) + if err != nil { + appLog.WithError(err).Error("failed to delete Application") + if firstError == nil { + firstError = err + } + continue + } + r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, "Deleted", "Deleted Application %q", app.Name) + appLog.Log(log.InfoLevel, "Deleted application") + } + } + return firstError +} + +// removeFinalizerOnInvalidDestination removes the Argo CD resources finalizer if the application contains an invalid target (eg missing cluster) +func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx context.Context, applicationSet argov1alpha1.ApplicationSet, app *argov1alpha1.Application, clusterList *argov1alpha1.ClusterList, appLog *log.Entry) error { + + // Only check if the finalizers need to be removed IF there are finalizers to remove + if len(app.Finalizers) == 0 { + return nil + } + + var validDestination bool + + // Detect if the destination is invalid (name doesn't correspond to a matching cluster) + if err := utils.ValidateDestination(ctx, &app.Spec.Destination, r.KubeClientset, r.ArgoCDNamespace); err != nil { + appLog.Warnf("The destination cluster for %s couldn't be found: %v", app.Name, err) + validDestination = false + } else { + + // Detect if the destination's server field does not match an existing cluster + + matchingCluster := false + for _, cluster := range clusterList.Items { + + // Server fields must match. Note that ValidateDestination ensures that the server field is set, if applicable. 
+ if app.Spec.Destination.Server != cluster.Server { + continue + } + + // The name must match, if it is not empty + if app.Spec.Destination.Name != "" && cluster.Name != app.Spec.Destination.Name { + continue + } + + matchingCluster = true + break + } + + if !matchingCluster { + appLog.Warnf("A match for the destination cluster for %s, by server url, couldn't be found.", app.Name) + } + + validDestination = matchingCluster + } + // If the destination is invalid (for example the cluster is no longer defined), then remove + // the application finalizers to avoid triggering Argo CD bug #5817 + if !validDestination { + + // Filter out the Argo CD finalizer from the finalizer list + var newFinalizers []string + for _, existingFinalizer := range app.Finalizers { + if existingFinalizer != argov1alpha1.ResourcesFinalizerName { // only remove this one + newFinalizers = append(newFinalizers, existingFinalizer) + } + } + + // If the finalizer length changed (due to filtering out an Argo finalizer), update the finalizer list on the app + if len(newFinalizers) != len(app.Finalizers) { + updated := app.DeepCopy() + updated.Finalizers = newFinalizers + if err := r.Client.Patch(ctx, updated, client.MergeFrom(app)); err != nil { + return fmt.Errorf("error updating finalizers: %w", err) + } + r.updateCache(ctx, updated, appLog) + // Application must have updated list of finalizers + updated.DeepCopyInto(app) + + r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, "Updated", "Updated Application %q finalizer before deletion, because application has an invalid destination", app.Name) + appLog.Log(log.InfoLevel, "Updating application finalizer before deletion, because application has an invalid destination") + } + } + + return nil +} + +func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context, appset argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, desiredApplications []argov1alpha1.Application, appMap 
map[string]argov1alpha1.Application) (map[string]bool, error) { + + appDependencyList, appStepMap, err := r.buildAppDependencyList(ctx, appset, desiredApplications) + if err != nil { + return nil, fmt.Errorf("failed to build app dependency list: %w", err) + } + + _, err = r.updateApplicationSetApplicationStatus(ctx, &appset, applications, appStepMap) + if err != nil { + return nil, fmt.Errorf("failed to update applicationset app status: %w", err) + } + + log.Infof("ApplicationSet %v step list:", appset.Name) + for i, step := range appDependencyList { + log.Infof("step %v: %+v", i+1, step) + } + + appSyncMap, err := r.buildAppSyncMap(ctx, appset, appDependencyList, appMap) + if err != nil { + return nil, fmt.Errorf("failed to build app sync map: %w", err) + } + + log.Infof("Application allowed to sync before maxUpdate?: %+v", appSyncMap) + + _, err = r.updateApplicationSetApplicationStatusProgress(ctx, &appset, appSyncMap, appStepMap, appMap) + if err != nil { + return nil, fmt.Errorf("failed to update applicationset application status progress: %w", err) + } + + _, err = r.updateApplicationSetApplicationStatusConditions(ctx, &appset) + if err != nil { + return nil, fmt.Errorf("failed to update applicationset application status conditions: %w", err) + } + + return appSyncMap, nil +} + +// this list tracks which Applications belong to each RollingUpdate step +func (r *ApplicationSetReconciler) buildAppDependencyList(ctx context.Context, applicationSet argov1alpha1.ApplicationSet, applications []argov1alpha1.Application) ([][]string, map[string]int, error) { + + if applicationSet.Spec.Strategy == nil || applicationSet.Spec.Strategy.Type == "" || applicationSet.Spec.Strategy.Type == "AllAtOnce" { + return [][]string{}, map[string]int{}, nil + } + + steps := []argov1alpha1.ApplicationSetRolloutStep{} + if progressiveSyncsStrategyEnabled(&applicationSet, "RollingSync") { + steps = applicationSet.Spec.Strategy.RollingSync.Steps + } + + appDependencyList := 
make([][]string, 0) + for range steps { + appDependencyList = append(appDependencyList, make([]string, 0)) + } + + appStepMap := map[string]int{} + + // use applicationLabelSelectors to filter generated Applications into steps and status by name + for _, app := range applications { + for i, step := range steps { + + selected := true // default to true, assuming the current Application is a match for the given step matchExpression + + for _, matchExpression := range step.MatchExpressions { + + if val, ok := app.Labels[matchExpression.Key]; ok { + valueMatched := labelMatchedExpression(val, matchExpression) + + if !valueMatched { // none of the matchExpression values was a match with the Application'ss labels + selected = false + break + } + } else if matchExpression.Operator == "In" { + selected = false // no matching label key with "In" operator means this Application will not be included in the current step + break + } + } + + if selected { + appDependencyList[i] = append(appDependencyList[i], app.Name) + if val, ok := appStepMap[app.Name]; ok { + log.Warnf("AppSet '%v' has a invalid matchExpression that selects Application '%v' label twice, in steps %v and %v", applicationSet.Name, app.Name, val+1, i+1) + } else { + appStepMap[app.Name] = i + } + } + } + } + + return appDependencyList, appStepMap, nil +} + +func labelMatchedExpression(val string, matchExpression argov1alpha1.ApplicationMatchExpression) bool { + if matchExpression.Operator != "In" && matchExpression.Operator != "NotIn" { + log.Errorf("skipping AppSet rollingUpdate step Application selection, invalid matchExpression operator provided: %q ", matchExpression.Operator) + return false + } + + // if operator == In, default to false + // if operator == NotIn, default to true + valueMatched := matchExpression.Operator == "NotIn" + + for _, value := range matchExpression.Values { + if val == value { + // first "In" match returns true + // first "NotIn" match returns false + return matchExpression.Operator 
== "In" + } + } + return valueMatched +} + +// this map is used to determine which stage of Applications are ready to be updated in the reconciler loop +func (r *ApplicationSetReconciler) buildAppSyncMap(ctx context.Context, applicationSet argov1alpha1.ApplicationSet, appDependencyList [][]string, appMap map[string]argov1alpha1.Application) (map[string]bool, error) { + appSyncMap := map[string]bool{} + syncEnabled := true + + // healthy stages and the first non-healthy stage should have sync enabled + // every stage after should have sync disabled + + for i := range appDependencyList { + // set the syncEnabled boolean for every Application in the current step + for _, appName := range appDependencyList[i] { + appSyncMap[appName] = syncEnabled + } + + // detect if we need to halt before progressing to the next step + for _, appName := range appDependencyList[i] { + + idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, appName) + if idx == -1 { + // no Application status found, likely because the Application is being newly created + syncEnabled = false + break + } + + appStatus := applicationSet.Status.ApplicationStatus[idx] + + if app, ok := appMap[appName]; ok { + + syncEnabled = appSyncEnabledForNextStep(&applicationSet, app, appStatus) + if !syncEnabled { + break + } + } else { + // application name not found in the list of applications managed by this ApplicationSet, maybe because it's being deleted + syncEnabled = false + break + } + } + } + + return appSyncMap, nil +} + +func appSyncEnabledForNextStep(appset *argov1alpha1.ApplicationSet, app argov1alpha1.Application, appStatus argov1alpha1.ApplicationSetApplicationStatus) bool { + + if progressiveSyncsStrategyEnabled(appset, "RollingSync") { + // we still need to complete the current step if the Application is not yet Healthy or there are still pending Application changes + return isApplicationHealthy(app) && appStatus.Status == "Healthy" + } + + return true +} + +func 
progressiveSyncsStrategyEnabled(appset *argov1alpha1.ApplicationSet, strategyType string) bool { + if appset.Spec.Strategy == nil || appset.Spec.Strategy.Type != strategyType { + return false + } + + if strategyType == "RollingSync" && appset.Spec.Strategy.RollingSync == nil { + return false + } + + return true +} + +func isApplicationHealthy(app argov1alpha1.Application) bool { + healthStatusString, syncStatusString, operationPhaseString := statusStrings(app) + + if healthStatusString == "Healthy" && syncStatusString != "OutOfSync" && (operationPhaseString == "Succeeded" || operationPhaseString == "") { + return true + } + return false +} + +func statusStrings(app argov1alpha1.Application) (string, string, string) { + healthStatusString := string(app.Status.Health.Status) + syncStatusString := string(app.Status.Sync.Status) + operationPhaseString := "" + if app.Status.OperationState != nil { + operationPhaseString = string(app.Status.OperationState.Phase) + } + + return healthStatusString, syncStatusString, operationPhaseString +} + +// check the status of each Application's status and promote Applications to the next status if needed +func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, appStepMap map[string]int) ([]argov1alpha1.ApplicationSetApplicationStatus, error) { + + now := metav1.Now() + appStatuses := make([]argov1alpha1.ApplicationSetApplicationStatus, 0, len(applications)) + + for _, app := range applications { + + healthStatusString, syncStatusString, operationPhaseString := statusStrings(app) + + idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, app.Name) + + currentAppStatus := argov1alpha1.ApplicationSetApplicationStatus{} + + if idx == -1 { + // AppStatus not found, set default status of "Waiting" + currentAppStatus = argov1alpha1.ApplicationSetApplicationStatus{ + Application: app.Name, + 
LastTransitionTime: &now, + Message: "No Application status found, defaulting status to Waiting.", + Status: "Waiting", + Step: fmt.Sprint(appStepMap[app.Name] + 1), + } + } else { + // we have an existing AppStatus + currentAppStatus = applicationSet.Status.ApplicationStatus[idx] + } + + appOutdated := false + if progressiveSyncsStrategyEnabled(applicationSet, "RollingSync") { + appOutdated = syncStatusString == "OutOfSync" + } + + if appOutdated && currentAppStatus.Status != "Waiting" && currentAppStatus.Status != "Pending" { + log.Infof("Application %v is outdated, updating its ApplicationSet status to Waiting", app.Name) + currentAppStatus.LastTransitionTime = &now + currentAppStatus.Status = "Waiting" + currentAppStatus.Message = "Application has pending changes, setting status to Waiting." + currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1) + } + + if currentAppStatus.Status == "Pending" { + // check for successful syncs started less than 10s before the Application transitioned to Pending + // this covers race conditions where syncs initiated by RollingSync miraculously have a sync time before the transition to Pending state occurred (could be a few seconds) + if operationPhaseString == "Succeeded" && app.Status.OperationState.StartedAt.Add(time.Duration(10)*time.Second).After(currentAppStatus.LastTransitionTime.Time) { + if !app.Status.OperationState.StartedAt.After(currentAppStatus.LastTransitionTime.Time) { + log.Warnf("Application %v was synced less than 10s prior to entering Pending status, we'll assume the AppSet controller triggered this sync and update its status to Progressing", app.Name) + } + log.Infof("Application %v has completed a sync successfully, updating its ApplicationSet status to Progressing", app.Name) + currentAppStatus.LastTransitionTime = &now + currentAppStatus.Status = "Progressing" + currentAppStatus.Message = "Application resource completed a sync successfully, updating status from Pending to 
Progressing." + currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1) + } else if operationPhaseString == "Running" || healthStatusString == "Progressing" { + log.Infof("Application %v has entered Progressing status, updating its ApplicationSet status to Progressing", app.Name) + currentAppStatus.LastTransitionTime = &now + currentAppStatus.Status = "Progressing" + currentAppStatus.Message = "Application resource became Progressing, updating status from Pending to Progressing." + currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1) + } + } + + if currentAppStatus.Status == "Waiting" && isApplicationHealthy(app) { + log.Infof("Application %v is already synced and healthy, updating its ApplicationSet status to Healthy", app.Name) + currentAppStatus.LastTransitionTime = &now + currentAppStatus.Status = healthStatusString + currentAppStatus.Message = "Application resource is already Healthy, updating status from Waiting to Healthy." + currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1) + } + + if currentAppStatus.Status == "Progressing" && isApplicationHealthy(app) { + log.Infof("Application %v has completed Progressing status, updating its ApplicationSet status to Healthy", app.Name) + currentAppStatus.LastTransitionTime = &now + currentAppStatus.Status = healthStatusString + currentAppStatus.Message = "Application resource became Healthy, updating status from Progressing to Healthy." 
+ currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1) + } + + appStatuses = append(appStatuses, currentAppStatus) + } + + err := r.setAppSetApplicationStatus(ctx, applicationSet, appStatuses) + if err != nil { + return nil, fmt.Errorf("failed to set AppSet application statuses: %w", err) + } + + return appStatuses, nil +} + +// check Applications that are in Waiting status and promote them to Pending if needed +func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appStepMap map[string]int, appMap map[string]argov1alpha1.Application) ([]argov1alpha1.ApplicationSetApplicationStatus, error) { + now := metav1.Now() + + appStatuses := make([]argov1alpha1.ApplicationSetApplicationStatus, 0, len(applicationSet.Status.ApplicationStatus)) + + // if we have no RollingUpdate steps, clear out the existing ApplicationStatus entries + if applicationSet.Spec.Strategy != nil && applicationSet.Spec.Strategy.Type != "" && applicationSet.Spec.Strategy.Type != "AllAtOnce" { + updateCountMap := []int{} + totalCountMap := []int{} + + length := 0 + if progressiveSyncsStrategyEnabled(applicationSet, "RollingSync") { + length = len(applicationSet.Spec.Strategy.RollingSync.Steps) + } + for s := 0; s < length; s++ { + updateCountMap = append(updateCountMap, 0) + totalCountMap = append(totalCountMap, 0) + } + + // populate updateCountMap with counts of existing Pending and Progressing Applications + for _, appStatus := range applicationSet.Status.ApplicationStatus { + totalCountMap[appStepMap[appStatus.Application]] += 1 + + if progressiveSyncsStrategyEnabled(applicationSet, "RollingSync") { + if appStatus.Status == "Pending" || appStatus.Status == "Progressing" { + updateCountMap[appStepMap[appStatus.Application]] += 1 + } + } + } + + for _, appStatus := range applicationSet.Status.ApplicationStatus { + + maxUpdateAllowed := true + maxUpdate 
:= &intstr.IntOrString{} + if progressiveSyncsStrategyEnabled(applicationSet, "RollingSync") { + maxUpdate = applicationSet.Spec.Strategy.RollingSync.Steps[appStepMap[appStatus.Application]].MaxUpdate + } + + // by default allow all applications to update if maxUpdate is unset + if maxUpdate != nil { + maxUpdateVal, err := intstr.GetScaledValueFromIntOrPercent(maxUpdate, totalCountMap[appStepMap[appStatus.Application]], false) + if err != nil { + log.Warnf("AppSet '%v' has a invalid maxUpdate value '%+v', ignoring maxUpdate logic for this step: %v", applicationSet.Name, maxUpdate, err) + } + + // ensure that percentage values greater than 0% always result in at least 1 Application being selected + if maxUpdate.Type == intstr.String && maxUpdate.StrVal != "0%" && maxUpdateVal < 1 { + maxUpdateVal = 1 + } + + if updateCountMap[appStepMap[appStatus.Application]] >= maxUpdateVal { + maxUpdateAllowed = false + log.Infof("Application %v is not allowed to update yet, %v/%v Applications already updating in step %v in AppSet %v", appStatus.Application, updateCountMap[appStepMap[appStatus.Application]], maxUpdateVal, appStepMap[appStatus.Application]+1, applicationSet.Name) + } + + } + + if appStatus.Status == "Waiting" && appSyncMap[appStatus.Application] && maxUpdateAllowed { + log.Infof("Application %v moved to Pending status, watching for the Application to start Progressing", appStatus.Application) + appStatus.LastTransitionTime = &now + appStatus.Status = "Pending" + appStatus.Message = "Application moved to Pending status, watching for the Application resource to start Progressing." 
+ appStatus.Step = fmt.Sprint(appStepMap[appStatus.Application] + 1) + + updateCountMap[appStepMap[appStatus.Application]] += 1 + } + + appStatuses = append(appStatuses, appStatus) + } + } + + err := r.setAppSetApplicationStatus(ctx, applicationSet, appStatuses) + if err != nil { + return nil, fmt.Errorf("failed to set AppSet app status: %w", err) + } + + return appStatuses, nil +} + +func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusConditions(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet) ([]argov1alpha1.ApplicationSetCondition, error) { + + appSetProgressing := false + for _, appStatus := range applicationSet.Status.ApplicationStatus { + if appStatus.Status != "Healthy" { + appSetProgressing = true + break + } + } + + appSetConditionProgressing := false + for _, appSetCondition := range applicationSet.Status.Conditions { + if appSetCondition.Type == argov1alpha1.ApplicationSetConditionRolloutProgressing && appSetCondition.Status == argov1alpha1.ApplicationSetConditionStatusTrue { + appSetConditionProgressing = true + break + } + } + + if appSetProgressing && !appSetConditionProgressing { + _ = r.setApplicationSetStatusCondition(ctx, + applicationSet, + argov1alpha1.ApplicationSetCondition{ + Type: argov1alpha1.ApplicationSetConditionRolloutProgressing, + Message: "ApplicationSet Rollout Rollout started", + Reason: argov1alpha1.ApplicationSetReasonApplicationSetModified, + Status: argov1alpha1.ApplicationSetConditionStatusTrue, + }, false, + ) + } else if !appSetProgressing && appSetConditionProgressing { + _ = r.setApplicationSetStatusCondition(ctx, + applicationSet, + argov1alpha1.ApplicationSetCondition{ + Type: argov1alpha1.ApplicationSetConditionRolloutProgressing, + Message: "ApplicationSet Rollout Rollout complete", + Reason: argov1alpha1.ApplicationSetReasonApplicationSetRolloutComplete, + Status: argov1alpha1.ApplicationSetConditionStatusFalse, + }, false, + ) + } + + return applicationSet.Status.Conditions, nil 
+} + +func findApplicationStatusIndex(appStatuses []argov1alpha1.ApplicationSetApplicationStatus, application string) int { + for i := range appStatuses { + if appStatuses[i].Application == application { + return i + } + } + return -1 +} + +// setApplicationSetApplicationStatus updates the ApplicatonSet's status field +// with any new/changed Application statuses. +func (r *ApplicationSetReconciler) setAppSetApplicationStatus(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, applicationStatuses []argov1alpha1.ApplicationSetApplicationStatus) error { + needToUpdateStatus := false + + if len(applicationStatuses) != len(applicationSet.Status.ApplicationStatus) { + needToUpdateStatus = true + } else { + for i := range applicationStatuses { + appStatus := applicationStatuses[i] + idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, appStatus.Application) + if idx == -1 { + needToUpdateStatus = true + break + } + currentStatus := applicationSet.Status.ApplicationStatus[idx] + if currentStatus.Message != appStatus.Message || currentStatus.Status != appStatus.Status || currentStatus.Step != appStatus.Step { + needToUpdateStatus = true + break + } + } + } + + if needToUpdateStatus { + namespacedName := types.NamespacedName{Namespace: applicationSet.Namespace, Name: applicationSet.Name} + + // rebuild ApplicationStatus from scratch, we don't need any previous status history + applicationSet.Status.ApplicationStatus = []argov1alpha1.ApplicationSetApplicationStatus{} + for i := range applicationStatuses { + applicationSet.Status.SetApplicationStatus(applicationStatuses[i]) + } + + // Update the newly fetched object with new set of ApplicationStatus + err := r.Client.Status().Update(ctx, applicationSet) + if err != nil { + + log.Errorf("unable to set application set status: %v", err) + return fmt.Errorf("unable to set application set status: %v", err) + } + + if err := r.Get(ctx, namespacedName, applicationSet); err != nil { + if 
client.IgnoreNotFound(err) != nil { + return nil + } + return fmt.Errorf("error fetching updated application set: %v", err) + } + } + + return nil +} + +func (r *ApplicationSetReconciler) syncValidApplications(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appMap map[string]argov1alpha1.Application, validApps []argov1alpha1.Application) ([]argov1alpha1.Application, error) { + rolloutApps := []argov1alpha1.Application{} + for i := range validApps { + pruneEnabled := false + + // ensure that Applications generated with RollingSync do not have an automated sync policy, since the AppSet controller will handle triggering the sync operation instead + if validApps[i].Spec.SyncPolicy != nil && validApps[i].Spec.SyncPolicy.Automated != nil { + pruneEnabled = validApps[i].Spec.SyncPolicy.Automated.Prune + validApps[i].Spec.SyncPolicy.Automated = nil + } + + appSetStatusPending := false + idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, validApps[i].Name) + if idx > -1 && applicationSet.Status.ApplicationStatus[idx].Status == "Pending" { + // only trigger a sync for Applications that are in Pending status, since this is governed by maxUpdate + appSetStatusPending = true + } + + // check appSyncMap to determine which Applications are ready to be updated and which should be skipped + if appSyncMap[validApps[i].Name] && appMap[validApps[i].Name].Status.Sync.Status == "OutOfSync" && appSetStatusPending { + log.Infof("triggering sync for application: %v, prune enabled: %v", validApps[i].Name, pruneEnabled) + validApps[i], _ = syncApplication(validApps[i], pruneEnabled) + } + rolloutApps = append(rolloutApps, validApps[i]) + } + return rolloutApps, nil +} + +// used by the RollingSync Progressive Sync strategy to trigger a sync of a particular Application resource +func syncApplication(application argov1alpha1.Application, prune bool) (argov1alpha1.Application, error) { + + operation := argov1alpha1.Operation{ 
+ InitiatedBy: argov1alpha1.OperationInitiator{ + Username: "applicationset-controller", + Automated: true, + }, + Info: []*argov1alpha1.Info{ + { + Name: "Reason", + Value: "ApplicationSet RollingSync triggered a sync of this Application resource.", + }, + }, + Sync: &argov1alpha1.SyncOperation{}, + } + + if application.Spec.SyncPolicy != nil { + if application.Spec.SyncPolicy.Retry != nil { + operation.Retry = *application.Spec.SyncPolicy.Retry + } + if application.Spec.SyncPolicy.SyncOptions != nil { + operation.Sync.SyncOptions = application.Spec.SyncPolicy.SyncOptions + } + operation.Sync.Prune = prune + } + application.Operation = &operation + + return application, nil +} + +func getOwnsHandlerPredicates(enableProgressiveSyncs bool) predicate.Funcs { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + // if we are the owner and there is a create event, we most likely created it and do not need to + // re-reconcile + log.Debugln("received create event from owning an application") + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + log.Debugln("received delete event from owning an application") + return true + }, + UpdateFunc: func(e event.UpdateEvent) bool { + log.Debugln("received update event from owning an application") + appOld, isApp := e.ObjectOld.(*argov1alpha1.Application) + if !isApp { + return false + } + appNew, isApp := e.ObjectNew.(*argov1alpha1.Application) + if !isApp { + return false + } + requeue := shouldRequeueApplicationSet(appOld, appNew, enableProgressiveSyncs) + log.Debugf("requeue: %t caused by application %s\n", requeue, appNew.Name) + return requeue + }, + GenericFunc: func(e event.GenericEvent) bool { + log.Debugln("received generic event from owning an application") + return true + }, + } +} + +// shouldRequeueApplicationSet determines when we want to requeue an ApplicationSet for reconciling based on an owned +// application change +// The applicationset controller owns a subset of the 
Application CR. +// We do not need to re-reconcile if parts of the application change outside the applicationset's control. +// An example being, Application.ApplicationStatus.ReconciledAt which gets updated by the application controller. +// Additionally, Application.ObjectMeta.ResourceVersion and Application.ObjectMeta.Generation which are set by K8s. +func shouldRequeueApplicationSet(appOld *argov1alpha1.Application, appNew *argov1alpha1.Application, enableProgressiveSyncs bool) bool { + if appOld == nil || appNew == nil { + return false + } + + // the applicationset controller owns the application spec, labels, annotations, and finalizers on the applications + if !reflect.DeepEqual(appOld.Spec, appNew.Spec) || + !reflect.DeepEqual(appOld.ObjectMeta.GetAnnotations(), appNew.ObjectMeta.GetAnnotations()) || + !reflect.DeepEqual(appOld.ObjectMeta.GetLabels(), appNew.ObjectMeta.GetLabels()) || + !reflect.DeepEqual(appOld.ObjectMeta.GetFinalizers(), appNew.ObjectMeta.GetFinalizers()) { + return true + } + + // progressive syncs use the application status for updates. 
if they differ, requeue to trigger the next progression + if enableProgressiveSyncs { + if appOld.Status.Health.Status != appNew.Status.Health.Status || appOld.Status.Sync.Status != appNew.Status.Sync.Status { + return true + } + + if appOld.Status.OperationState != nil && appNew.Status.OperationState != nil { + if appOld.Status.OperationState.Phase != appNew.Status.OperationState.Phase || + appOld.Status.OperationState.StartedAt != appNew.Status.OperationState.StartedAt { + return true + } + } + } + + return false +} + +var _ handler.EventHandler = &clusterSecretEventHandler{} diff --git a/applicationset/controllers/applicationset_controller_test.go b/applicationset/controllers/applicationset_controller_test.go new file mode 100644 index 0000000000000..7c3721e2ee6ed --- /dev/null +++ b/applicationset/controllers/applicationset_controller_test.go @@ -0,0 +1,5891 @@ +package controllers + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + "time" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + kubefake "k8s.io/client-go/kubernetes/fake" + k8scache "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + crtclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" + + "github.com/argoproj/gitops-engine/pkg/health" + "github.com/argoproj/gitops-engine/pkg/sync/common" + + 
"github.com/argoproj/argo-cd/v2/applicationset/generators" + "github.com/argoproj/argo-cd/v2/applicationset/utils" + + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/fake" + "github.com/argoproj/argo-cd/v2/util/collections" + dbmocks "github.com/argoproj/argo-cd/v2/util/db/mocks" + + "github.com/argoproj/argo-cd/v2/pkg/apis/application" +) + +type fakeStore struct { + k8scache.Store +} + +func (f *fakeStore) Update(obj interface{}) error { + return nil +} + +type fakeInformer struct { + k8scache.SharedInformer +} + +func (f *fakeInformer) AddIndexers(indexers k8scache.Indexers) error { + return nil +} + +func (f *fakeInformer) GetStore() k8scache.Store { + return &fakeStore{} +} + +type fakeCache struct { + cache.Cache +} + +func (f *fakeCache) GetInformer(ctx context.Context, obj crtclient.Object) (cache.Informer, error) { + return &fakeInformer{}, nil +} + +type generatorMock struct { + mock.Mock +} + +func (g *generatorMock) GetTemplate(appSetGenerator *v1alpha1.ApplicationSetGenerator) *v1alpha1.ApplicationSetTemplate { + args := g.Called(appSetGenerator) + + return args.Get(0).(*v1alpha1.ApplicationSetTemplate) +} + +func (g *generatorMock) GenerateParams(appSetGenerator *v1alpha1.ApplicationSetGenerator, _ *v1alpha1.ApplicationSet) ([]map[string]interface{}, error) { + args := g.Called(appSetGenerator) + + return args.Get(0).([]map[string]interface{}), args.Error(1) +} + +type rendererMock struct { + mock.Mock +} + +func (g *generatorMock) GetRequeueAfter(appSetGenerator *v1alpha1.ApplicationSetGenerator) time.Duration { + args := g.Called(appSetGenerator) + + return args.Get(0).(time.Duration) +} + +func (r *rendererMock) RenderTemplateParams(tmpl *v1alpha1.Application, syncPolicy *v1alpha1.ApplicationSetSyncPolicy, params map[string]interface{}, useGoTemplate bool, goTemplateOptions []string) (*v1alpha1.Application, error) { + args := r.Called(tmpl, params, 
useGoTemplate, goTemplateOptions) + + if args.Error(1) != nil { + return nil, args.Error(1) + } + + return args.Get(0).(*v1alpha1.Application), args.Error(1) + +} + +func TestExtractApplications(t *testing.T) { + scheme := runtime.NewScheme() + err := v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + err = v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + for _, c := range []struct { + name string + params []map[string]interface{} + template v1alpha1.ApplicationSetTemplate + generateParamsError error + rendererError error + expectErr bool + expectedReason v1alpha1.ApplicationSetReasonType + }{ + { + name: "Generate two applications", + params: []map[string]interface{}{{"name": "app1"}, {"name": "app2"}}, + template: v1alpha1.ApplicationSetTemplate{ + ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{ + Name: "name", + Namespace: "namespace", + Labels: map[string]string{"label_name": "label_value"}, + }, + Spec: v1alpha1.ApplicationSpec{}, + }, + expectedReason: "", + }, + { + name: "Handles error from the generator", + generateParamsError: fmt.Errorf("error"), + expectErr: true, + expectedReason: v1alpha1.ApplicationSetReasonApplicationParamsGenerationError, + }, + { + name: "Handles error from the render", + params: []map[string]interface{}{{"name": "app1"}, {"name": "app2"}}, + template: v1alpha1.ApplicationSetTemplate{ + ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{ + Name: "name", + Namespace: "namespace", + Labels: map[string]string{"label_name": "label_value"}, + }, + Spec: v1alpha1.ApplicationSpec{}, + }, + rendererError: fmt.Errorf("error"), + expectErr: true, + expectedReason: v1alpha1.ApplicationSetReasonRenderTemplateParamsError, + }, + } { + cc := c + app := v1alpha1.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + } + + t.Run(cc.name, func(t *testing.T) { + + appSet := &v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + } + + client := 
fake.NewClientBuilder().WithScheme(scheme).WithObjects(appSet).Build() + + generatorMock := generatorMock{} + generator := v1alpha1.ApplicationSetGenerator{ + List: &v1alpha1.ListGenerator{}, + } + + generatorMock.On("GenerateParams", &generator). + Return(cc.params, cc.generateParamsError) + + generatorMock.On("GetTemplate", &generator). + Return(&v1alpha1.ApplicationSetTemplate{}) + + rendererMock := rendererMock{} + + var expectedApps []v1alpha1.Application + + if cc.generateParamsError == nil { + for _, p := range cc.params { + + if cc.rendererError != nil { + rendererMock.On("RenderTemplateParams", getTempApplication(cc.template), p, false, []string(nil)). + Return(nil, cc.rendererError) + } else { + rendererMock.On("RenderTemplateParams", getTempApplication(cc.template), p, false, []string(nil)). + Return(&app, nil) + expectedApps = append(expectedApps, app) + } + } + } + + r := ApplicationSetReconciler{ + Client: client, + Scheme: scheme, + Recorder: record.NewFakeRecorder(1), + Generators: map[string]generators.Generator{ + "List": &generatorMock, + }, + Renderer: &rendererMock, + KubeClientset: kubefake.NewSimpleClientset(), + Cache: &fakeCache{}, + } + + got, reason, err := r.generateApplications(v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Generators: []v1alpha1.ApplicationSetGenerator{generator}, + Template: cc.template, + }, + }) + + if cc.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + assert.Equal(t, expectedApps, got) + assert.Equal(t, cc.expectedReason, reason) + generatorMock.AssertNumberOfCalls(t, "GenerateParams", 1) + + if cc.generateParamsError == nil { + rendererMock.AssertNumberOfCalls(t, "RenderTemplateParams", len(cc.params)) + } + + }) + } + +} + +func TestMergeTemplateApplications(t *testing.T) { + scheme := runtime.NewScheme() + _ = v1alpha1.AddToScheme(scheme) + _ = v1alpha1.AddToScheme(scheme) + + client := 
fake.NewClientBuilder().WithScheme(scheme).Build() + + for _, c := range []struct { + name string + params []map[string]interface{} + template v1alpha1.ApplicationSetTemplate + overrideTemplate v1alpha1.ApplicationSetTemplate + expectedMerged v1alpha1.ApplicationSetTemplate + expectedApps []v1alpha1.Application + }{ + { + name: "Generate app", + params: []map[string]interface{}{{"name": "app1"}}, + template: v1alpha1.ApplicationSetTemplate{ + ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{ + Name: "name", + Namespace: "namespace", + Labels: map[string]string{"label_name": "label_value"}, + }, + Spec: v1alpha1.ApplicationSpec{}, + }, + overrideTemplate: v1alpha1.ApplicationSetTemplate{ + ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{ + Name: "test", + Labels: map[string]string{"foo": "bar"}, + }, + Spec: v1alpha1.ApplicationSpec{}, + }, + expectedMerged: v1alpha1.ApplicationSetTemplate{ + ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{ + Name: "test", + Namespace: "namespace", + Labels: map[string]string{"label_name": "label_value", "foo": "bar"}, + }, + Spec: v1alpha1.ApplicationSpec{}, + }, + expectedApps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + Labels: map[string]string{"foo": "bar"}, + }, + Spec: v1alpha1.ApplicationSpec{}, + }, + }, + }, + } { + cc := c + + t.Run(cc.name, func(t *testing.T) { + + generatorMock := generatorMock{} + generator := v1alpha1.ApplicationSetGenerator{ + List: &v1alpha1.ListGenerator{}, + } + + generatorMock.On("GenerateParams", &generator). + Return(cc.params, nil) + + generatorMock.On("GetTemplate", &generator). + Return(&cc.overrideTemplate) + + rendererMock := rendererMock{} + + rendererMock.On("RenderTemplateParams", getTempApplication(cc.expectedMerged), cc.params[0], false, []string(nil)). 
+ Return(&cc.expectedApps[0], nil) + + r := ApplicationSetReconciler{ + Client: client, + Scheme: scheme, + Recorder: record.NewFakeRecorder(1), + Generators: map[string]generators.Generator{ + "List": &generatorMock, + }, + Renderer: &rendererMock, + KubeClientset: kubefake.NewSimpleClientset(), + } + + got, _, _ := r.generateApplications(v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Generators: []v1alpha1.ApplicationSetGenerator{generator}, + Template: cc.template, + }, + }, + ) + + assert.Equal(t, cc.expectedApps, got) + }) + } + +} + +func TestCreateOrUpdateInCluster(t *testing.T) { + + scheme := runtime.NewScheme() + err := v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + err = v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + for _, c := range []struct { + // name is human-readable test name + name string + // appSet is the ApplicationSet we are generating resources for + appSet v1alpha1.ApplicationSet + // existingApps are the apps that already exist on the cluster + existingApps []v1alpha1.Application + // desiredApps are the generated apps to create/update + desiredApps []v1alpha1.Application + // expected is what we expect the cluster Applications to look like, after createOrUpdateInCluster + expected []v1alpha1.Application + }{ + { + name: "Create an app that doesn't exist", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + }, + existingApps: nil, + desiredApps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + }, + }, + expected: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{ + Kind: application.ApplicationKind, + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + Namespace: "namespace", + ResourceVersion: "1", + }, + Spec: v1alpha1.ApplicationSpec{Project: "default"}, + }, + }, + }, + { + name: 
"Update an existing app with a different project name", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Template: v1alpha1.ApplicationSetTemplate{ + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + }, + existingApps: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{ + Kind: application.ApplicationKind, + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + Namespace: "namespace", + ResourceVersion: "2", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "test", + }, + }, + }, + desiredApps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + expected: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{ + Kind: application.ApplicationKind, + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + Namespace: "namespace", + ResourceVersion: "3", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + }, + { + name: "Create a new app and check it doesn't replace the existing app", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Template: v1alpha1.ApplicationSetTemplate{ + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + }, + existingApps: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{ + Kind: application.ApplicationKind, + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + Namespace: "namespace", + ResourceVersion: "2", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "test", + }, + }, + }, + desiredApps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app2", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + 
expected: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{ + Kind: application.ApplicationKind, + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "app2", + Namespace: "namespace", + ResourceVersion: "1", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + }, + { + name: "Ensure that labels and annotations are added (via update) into an exiting application", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Template: v1alpha1.ApplicationSetTemplate{ + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + }, + existingApps: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{ + Kind: application.ApplicationKind, + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + Namespace: "namespace", + ResourceVersion: "2", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + desiredApps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + Labels: map[string]string{"label-key": "label-value"}, + Annotations: map[string]string{"annot-key": "annot-value"}, + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + expected: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{ + Kind: application.ApplicationKind, + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + Namespace: "namespace", + Labels: map[string]string{"label-key": "label-value"}, + Annotations: map[string]string{"annot-key": "annot-value"}, + ResourceVersion: "3", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + }, + { + name: "Ensure that labels and annotations are removed from an existing app", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + Spec: v1alpha1.ApplicationSetSpec{ 
+ Template: v1alpha1.ApplicationSetTemplate{ + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + }, + existingApps: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{ + Kind: application.ApplicationKind, + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + Namespace: "namespace", + ResourceVersion: "2", + Labels: map[string]string{"label-key": "label-value"}, + Annotations: map[string]string{"annot-key": "annot-value"}, + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + desiredApps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + expected: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{ + Kind: application.ApplicationKind, + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + Namespace: "namespace", + ResourceVersion: "3", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + }, + { + name: "Ensure that status and operation fields are not overridden by an update, when removing labels/annotations", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Template: v1alpha1.ApplicationSetTemplate{ + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + }, + existingApps: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{ + Kind: application.ApplicationKind, + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + Namespace: "namespace", + ResourceVersion: "2", + Labels: map[string]string{"label-key": "label-value"}, + Annotations: map[string]string{"annot-key": "annot-value"}, + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + Status: v1alpha1.ApplicationStatus{ + Resources: []v1alpha1.ResourceStatus{{Name: "sample-name"}}, + 
}, + Operation: &v1alpha1.Operation{ + Sync: &v1alpha1.SyncOperation{Revision: "sample-revision"}, + }, + }, + }, + desiredApps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + expected: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{ + Kind: application.ApplicationKind, + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + Namespace: "namespace", + ResourceVersion: "3", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + Status: v1alpha1.ApplicationStatus{ + Resources: []v1alpha1.ResourceStatus{{Name: "sample-name"}}, + }, + Operation: &v1alpha1.Operation{ + Sync: &v1alpha1.SyncOperation{Revision: "sample-revision"}, + }, + }, + }, + }, + { + name: "Ensure that status and operation fields are not overridden by an update, when removing labels/annotations and adding other fields", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Template: v1alpha1.ApplicationSetTemplate{ + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + Source: &v1alpha1.ApplicationSource{Path: "path", TargetRevision: "revision", RepoURL: "repoURL"}, + Destination: v1alpha1.ApplicationDestination{Server: "server", Namespace: "namespace"}, + }, + }, + }, + }, + existingApps: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{ + Kind: application.ApplicationKind, + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + Namespace: "namespace", + ResourceVersion: "2", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + Status: v1alpha1.ApplicationStatus{ + Resources: []v1alpha1.ResourceStatus{{Name: "sample-name"}}, + }, + Operation: &v1alpha1.Operation{ + Sync: &v1alpha1.SyncOperation{Revision: "sample-revision"}, + }, + }, + }, + desiredApps: 
[]v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + Labels: map[string]string{"label-key": "label-value"}, + Annotations: map[string]string{"annot-key": "annot-value"}, + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + Source: &v1alpha1.ApplicationSource{Path: "path", TargetRevision: "revision", RepoURL: "repoURL"}, + Destination: v1alpha1.ApplicationDestination{Server: "server", Namespace: "namespace"}, + }, + }, + }, + expected: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{ + Kind: application.ApplicationKind, + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + Namespace: "namespace", + Labels: map[string]string{"label-key": "label-value"}, + Annotations: map[string]string{"annot-key": "annot-value"}, + ResourceVersion: "3", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + Source: &v1alpha1.ApplicationSource{Path: "path", TargetRevision: "revision", RepoURL: "repoURL"}, + Destination: v1alpha1.ApplicationDestination{Server: "server", Namespace: "namespace"}, + }, + Status: v1alpha1.ApplicationStatus{ + Resources: []v1alpha1.ResourceStatus{{Name: "sample-name"}}, + }, + Operation: &v1alpha1.Operation{ + Sync: &v1alpha1.SyncOperation{Revision: "sample-revision"}, + }, + }, + }, + }, + { + name: "Ensure that argocd notifications state and refresh annotation is preserved from an existing app", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Template: v1alpha1.ApplicationSetTemplate{ + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + }, + existingApps: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{ + Kind: application.ApplicationKind, + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + Namespace: "namespace", + ResourceVersion: "2", + Labels: map[string]string{"label-key": 
"label-value"}, + Annotations: map[string]string{ + "annot-key": "annot-value", + NotifiedAnnotationKey: `{"b620d4600c771a6f4cxxxxxxx:on-deployed:[0].y7b5sbwa2Q329JYHxxxxxx-fBs:slack:slack-test":1617144614}`, + v1alpha1.AnnotationKeyRefresh: string(v1alpha1.RefreshTypeNormal), + }, + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + desiredApps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + expected: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{ + Kind: application.ApplicationKind, + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + Namespace: "namespace", + ResourceVersion: "3", + Annotations: map[string]string{ + NotifiedAnnotationKey: `{"b620d4600c771a6f4cxxxxxxx:on-deployed:[0].y7b5sbwa2Q329JYHxxxxxx-fBs:slack:slack-test":1617144614}`, + v1alpha1.AnnotationKeyRefresh: string(v1alpha1.RefreshTypeNormal), + }, + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + }, { + name: "Ensure that configured preserved annotations are preserved from an existing app", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Template: v1alpha1.ApplicationSetTemplate{ + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + PreservedFields: &v1alpha1.ApplicationPreservedFields{ + Annotations: []string{"preserved-annot-key"}, + }, + }, + }, + existingApps: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{ + Kind: "Application", + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + Namespace: "namespace", + ResourceVersion: "2", + Annotations: map[string]string{ + "annot-key": "annot-value", + "preserved-annot-key": "preserved-annot-value", + }, + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + 
}, + desiredApps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + expected: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{ + Kind: "Application", + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + Namespace: "namespace", + ResourceVersion: "3", + Annotations: map[string]string{ + "preserved-annot-key": "preserved-annot-value", + }, + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + }, { + name: "Ensure that the app spec is normalized before applying", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Template: v1alpha1.ApplicationSetTemplate{ + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + Source: &v1alpha1.ApplicationSource{ + Directory: &v1alpha1.ApplicationSourceDirectory{ + Jsonnet: v1alpha1.ApplicationSourceJsonnet{}, + }, + }, + }, + }, + }, + }, + desiredApps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + Source: &v1alpha1.ApplicationSource{ + Directory: &v1alpha1.ApplicationSourceDirectory{ + Jsonnet: v1alpha1.ApplicationSourceJsonnet{}, + }, + }, + }, + }, + }, + expected: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{ + Kind: "Application", + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + Namespace: "namespace", + ResourceVersion: "1", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + Source: &v1alpha1.ApplicationSource{ + // Directory and jsonnet block are removed + }, + }, + }, + }, + }, + } { + + t.Run(c.name, func(t *testing.T) { + + initObjs := []crtclient.Object{&c.appSet} + + for _, a := range c.existingApps { + err = controllerutil.SetControllerReference(&c.appSet, &a, scheme) + assert.Nil(t, 
err) + initObjs = append(initObjs, &a) + } + + client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build() + + r := ApplicationSetReconciler{ + Client: client, + Scheme: scheme, + Recorder: record.NewFakeRecorder(len(initObjs) + len(c.expected)), + Cache: &fakeCache{}, + } + + err = r.createOrUpdateInCluster(context.TODO(), c.appSet, c.desiredApps) + assert.Nil(t, err) + + for _, obj := range c.expected { + got := &v1alpha1.Application{} + _ = client.Get(context.Background(), crtclient.ObjectKey{ + Namespace: obj.Namespace, + Name: obj.Name, + }, got) + + err = controllerutil.SetControllerReference(&c.appSet, &obj, r.Scheme) + assert.Nil(t, err) + assert.Equal(t, obj, *got) + } + }) + } +} + +func TestRemoveFinalizerOnInvalidDestination_FinalizerTypes(t *testing.T) { + + scheme := runtime.NewScheme() + err := v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + err = v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + for _, c := range []struct { + // name is human-readable test name + name string + existingFinalizers []string + expectedFinalizers []string + }{ + { + name: "no finalizers", + existingFinalizers: []string{}, + expectedFinalizers: nil, + }, + { + name: "contains only argo finalizer", + existingFinalizers: []string{v1alpha1.ResourcesFinalizerName}, + expectedFinalizers: nil, + }, + { + name: "contains only non-argo finalizer", + existingFinalizers: []string{"non-argo-finalizer"}, + expectedFinalizers: []string{"non-argo-finalizer"}, + }, + { + name: "contains both argo and non-argo finalizer", + existingFinalizers: []string{"non-argo-finalizer", v1alpha1.ResourcesFinalizerName}, + expectedFinalizers: []string{"non-argo-finalizer"}, + }, + } { + t.Run(c.name, func(t *testing.T) { + + appSet := v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Template: v1alpha1.ApplicationSetTemplate{ + Spec: v1alpha1.ApplicationSpec{ + Project: 
"project", + }, + }, + }, + } + + app := v1alpha1.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + Finalizers: c.existingFinalizers, + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + Source: &v1alpha1.ApplicationSource{Path: "path", TargetRevision: "revision", RepoURL: "repoURL"}, + // Destination is always invalid, for this test: + Destination: v1alpha1.ApplicationDestination{Name: "my-cluster", Namespace: "namespace"}, + }, + } + + initObjs := []crtclient.Object{&app, &appSet} + + client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build() + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-secret", + Namespace: "namespace", + Labels: map[string]string{ + generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster, + }, + }, + Data: map[string][]byte{ + // Since this test requires the cluster to be an invalid destination, we + // always return a cluster named 'my-cluster2' (different from app 'my-cluster', above) + "name": []byte("mycluster2"), + "server": []byte("https://kubernetes.default.svc"), + "config": []byte("{\"username\":\"foo\",\"password\":\"foo\"}"), + }, + } + + objects := append([]runtime.Object{}, secret) + kubeclientset := kubefake.NewSimpleClientset(objects...) 
+ + r := ApplicationSetReconciler{ + Client: client, + Scheme: scheme, + Recorder: record.NewFakeRecorder(10), + KubeClientset: kubeclientset, + Cache: &fakeCache{}, + } + //settingsMgr := settings.NewSettingsManager(context.TODO(), kubeclientset, "namespace") + //argoDB := db.NewDB("namespace", settingsMgr, r.KubeClientset) + //clusterList, err := argoDB.ListClusters(context.Background()) + clusterList, err := utils.ListClusters(context.Background(), kubeclientset, "namespace") + assert.NoError(t, err, "Unexpected error") + + appLog := log.WithFields(log.Fields{"app": app.Name, "appSet": ""}) + + appInputParam := app.DeepCopy() + + err = r.removeFinalizerOnInvalidDestination(context.Background(), appSet, appInputParam, clusterList, appLog) + assert.NoError(t, err, "Unexpected error") + + retrievedApp := v1alpha1.Application{} + err = client.Get(context.Background(), crtclient.ObjectKeyFromObject(&app), &retrievedApp) + assert.NoError(t, err, "Unexpected error") + + // App on the cluster should have the expected finalizers + assert.ElementsMatch(t, c.expectedFinalizers, retrievedApp.Finalizers) + + // App object passed in as a parameter should have the expected finaliers + assert.ElementsMatch(t, c.expectedFinalizers, appInputParam.Finalizers) + + bytes, _ := json.MarshalIndent(retrievedApp, "", " ") + t.Log("Contents of app after call:", string(bytes)) + + }) + } +} + +func TestRemoveFinalizerOnInvalidDestination_DestinationTypes(t *testing.T) { + + scheme := runtime.NewScheme() + err := v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + err = v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + for _, c := range []struct { + // name is human-readable test name + name string + destinationField v1alpha1.ApplicationDestination + expectFinalizerRemoved bool + }{ + { + name: "invalid cluster: empty destination", + destinationField: v1alpha1.ApplicationDestination{ + Namespace: "namespace", + }, + expectFinalizerRemoved: true, + }, + { + name: "invalid cluster: 
invalid server url", + destinationField: v1alpha1.ApplicationDestination{ + Namespace: "namespace", + Server: "https://1.2.3.4", + }, + expectFinalizerRemoved: true, + }, + { + name: "invalid cluster: invalid cluster name", + destinationField: v1alpha1.ApplicationDestination{ + Namespace: "namespace", + Name: "invalid-cluster", + }, + expectFinalizerRemoved: true, + }, + { + name: "invalid cluster by both valid", + destinationField: v1alpha1.ApplicationDestination{ + Namespace: "namespace", + Name: "mycluster2", + Server: "https://kubernetes.default.svc", + }, + expectFinalizerRemoved: true, + }, + { + name: "invalid cluster by both invalid", + destinationField: v1alpha1.ApplicationDestination{ + Namespace: "namespace", + Name: "mycluster3", + Server: "https://4.5.6.7", + }, + expectFinalizerRemoved: true, + }, + { + name: "valid cluster by name", + destinationField: v1alpha1.ApplicationDestination{ + Namespace: "namespace", + Name: "mycluster2", + }, + expectFinalizerRemoved: false, + }, + { + name: "valid cluster by server", + destinationField: v1alpha1.ApplicationDestination{ + Namespace: "namespace", + Server: "https://kubernetes.default.svc", + }, + expectFinalizerRemoved: false, + }, + } { + + t.Run(c.name, func(t *testing.T) { + + appSet := v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Template: v1alpha1.ApplicationSetTemplate{ + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + } + + app := v1alpha1.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + Finalizers: []string{v1alpha1.ResourcesFinalizerName}, + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + Source: &v1alpha1.ApplicationSource{Path: "path", TargetRevision: "revision", RepoURL: "repoURL"}, + Destination: c.destinationField, + }, + } + + initObjs := []crtclient.Object{&app, &appSet} + + client := 
fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build() + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-secret", + Namespace: "namespace", + Labels: map[string]string{ + generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster, + }, + }, + Data: map[string][]byte{ + // Since this test requires the cluster to be an invalid destination, we + // always return a cluster named 'my-cluster2' (different from app 'my-cluster', above) + "name": []byte("mycluster2"), + "server": []byte("https://kubernetes.default.svc"), + "config": []byte("{\"username\":\"foo\",\"password\":\"foo\"}"), + }, + } + + objects := append([]runtime.Object{}, secret) + kubeclientset := kubefake.NewSimpleClientset(objects...) + + r := ApplicationSetReconciler{ + Client: client, + Scheme: scheme, + Recorder: record.NewFakeRecorder(10), + KubeClientset: kubeclientset, + Cache: &fakeCache{}, + } + // settingsMgr := settings.NewSettingsManager(context.TODO(), kubeclientset, "argocd") + // argoDB := db.NewDB("argocd", settingsMgr, r.KubeClientset) + // clusterList, err := argoDB.ListClusters(context.Background()) + clusterList, err := utils.ListClusters(context.Background(), kubeclientset, "namespace") + assert.NoError(t, err, "Unexpected error") + + appLog := log.WithFields(log.Fields{"app": app.Name, "appSet": ""}) + + appInputParam := app.DeepCopy() + + err = r.removeFinalizerOnInvalidDestination(context.Background(), appSet, appInputParam, clusterList, appLog) + assert.NoError(t, err, "Unexpected error") + + retrievedApp := v1alpha1.Application{} + err = client.Get(context.Background(), crtclient.ObjectKeyFromObject(&app), &retrievedApp) + assert.NoError(t, err, "Unexpected error") + + finalizerRemoved := len(retrievedApp.Finalizers) == 0 + + assert.True(t, c.expectFinalizerRemoved == finalizerRemoved) + + bytes, _ := json.MarshalIndent(retrievedApp, "", " ") + t.Log("Contents of app after call:", string(bytes)) + + }) + } +} + +func 
TestCreateApplications(t *testing.T) { + + scheme := runtime.NewScheme() + err := v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + err = v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + testCases := []struct { + name string + appSet v1alpha1.ApplicationSet + existsApps []v1alpha1.Application + apps []v1alpha1.Application + expected []v1alpha1.Application + }{ + { + name: "no existing apps", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + }, + existsApps: nil, + apps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + }, + }, + expected: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{ + Kind: application.ApplicationKind, + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + Namespace: "namespace", + ResourceVersion: "1", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "default", + }, + }, + }, + }, + { + name: "existing apps", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Template: v1alpha1.ApplicationSetTemplate{ + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + }, + existsApps: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{ + Kind: application.ApplicationKind, + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + Namespace: "namespace", + ResourceVersion: "2", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "test", + }, + }, + }, + apps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + expected: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{ + Kind: application.ApplicationKind, + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + Namespace: "namespace", + 
ResourceVersion: "2", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "test", + }, + }, + }, + }, + { + name: "existing apps with different project", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Template: v1alpha1.ApplicationSetTemplate{ + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + }, + existsApps: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{ + Kind: application.ApplicationKind, + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + Namespace: "namespace", + ResourceVersion: "2", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "test", + }, + }, + }, + apps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app2", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + expected: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{ + Kind: application.ApplicationKind, + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "app2", + Namespace: "namespace", + ResourceVersion: "1", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + }, + } + + for _, c := range testCases { + t.Run(c.name, func(t *testing.T) { + initObjs := []crtclient.Object{&c.appSet} + for _, a := range c.existsApps { + err = controllerutil.SetControllerReference(&c.appSet, &a, scheme) + assert.Nil(t, err) + initObjs = append(initObjs, &a) + } + + client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build() + + r := ApplicationSetReconciler{ + Client: client, + Scheme: scheme, + Recorder: record.NewFakeRecorder(len(initObjs) + len(c.expected)), + Cache: &fakeCache{}, + } + + err = r.createInCluster(context.TODO(), c.appSet, c.apps) + assert.Nil(t, err) + + for _, obj := range c.expected { + got := &v1alpha1.Application{} + _ = client.Get(context.Background(), crtclient.ObjectKey{ + 
Namespace: obj.Namespace, + Name: obj.Name, + }, got) + + err = controllerutil.SetControllerReference(&c.appSet, &obj, r.Scheme) + assert.Nil(t, err) + + assert.Equal(t, obj, *got) + } + }) + } +} + +func TestDeleteInCluster(t *testing.T) { + + scheme := runtime.NewScheme() + err := v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + err = v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + for _, c := range []struct { + // appSet is the application set on which the delete function is called + appSet v1alpha1.ApplicationSet + // existingApps is the current state of Applications on the cluster + existingApps []v1alpha1.Application + // desiredApps is the list of apps generated by the generator that we wish to keep alive + desiredApps []v1alpha1.Application + // expected is the list of applications that we expect to exist after calling delete + expected []v1alpha1.Application + // notExpected is the list of applications that we expect not to exist after calling delete + notExpected []v1alpha1.Application + }{ + { + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Template: v1alpha1.ApplicationSetTemplate{ + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + }, + existingApps: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{ + Kind: application.ApplicationKind, + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "delete", + Namespace: "namespace", + ResourceVersion: "2", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + { + TypeMeta: metav1.TypeMeta{ + Kind: application.ApplicationKind, + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "keep", + Namespace: "namespace", + ResourceVersion: "2", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + desiredApps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "keep", + 
}, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + expected: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{ + Kind: application.ApplicationKind, + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "keep", + Namespace: "namespace", + ResourceVersion: "2", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + notExpected: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{ + Kind: application.ApplicationKind, + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "delete", + Namespace: "namespace", + ResourceVersion: "1", + }, + Spec: v1alpha1.ApplicationSpec{ + Project: "project", + }, + }, + }, + }, + } { + initObjs := []crtclient.Object{&c.appSet} + for _, a := range c.existingApps { + temp := a + err = controllerutil.SetControllerReference(&c.appSet, &temp, scheme) + assert.Nil(t, err) + initObjs = append(initObjs, &temp) + } + + client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build() + + r := ApplicationSetReconciler{ + Client: client, + Scheme: scheme, + Recorder: record.NewFakeRecorder(len(initObjs) + len(c.expected)), + KubeClientset: kubefake.NewSimpleClientset(), + } + + err = r.deleteInCluster(context.TODO(), c.appSet, c.desiredApps) + assert.Nil(t, err) + + // For each of the expected objects, verify they exist on the cluster + for _, obj := range c.expected { + got := &v1alpha1.Application{} + _ = client.Get(context.Background(), crtclient.ObjectKey{ + Namespace: obj.Namespace, + Name: obj.Name, + }, got) + + err = controllerutil.SetControllerReference(&c.appSet, &obj, r.Scheme) + assert.Nil(t, err) + + assert.Equal(t, obj, *got) + } + + // Verify each of the unexpected objs cannot be found + for _, obj := range c.notExpected { + got := &v1alpha1.Application{} + err := client.Get(context.Background(), crtclient.ObjectKey{ + Namespace: obj.Namespace, + Name: obj.Name, + }, got) + + assert.EqualError(t, 
err, fmt.Sprintf("applications.argoproj.io \"%s\" not found", obj.Name)) + } + } +} + +func TestGetMinRequeueAfter(t *testing.T) { + scheme := runtime.NewScheme() + err := v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + err = v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + client := fake.NewClientBuilder().WithScheme(scheme).Build() + + generator := v1alpha1.ApplicationSetGenerator{ + List: &v1alpha1.ListGenerator{}, + Git: &v1alpha1.GitGenerator{}, + Clusters: &v1alpha1.ClusterGenerator{}, + } + + generatorMock0 := generatorMock{} + generatorMock0.On("GetRequeueAfter", &generator). + Return(generators.NoRequeueAfter) + + generatorMock1 := generatorMock{} + generatorMock1.On("GetRequeueAfter", &generator). + Return(time.Duration(1) * time.Second) + + generatorMock10 := generatorMock{} + generatorMock10.On("GetRequeueAfter", &generator). + Return(time.Duration(10) * time.Second) + + r := ApplicationSetReconciler{ + Client: client, + Scheme: scheme, + Recorder: record.NewFakeRecorder(0), + Cache: &fakeCache{}, + Generators: map[string]generators.Generator{ + "List": &generatorMock10, + "Git": &generatorMock1, + "Clusters": &generatorMock1, + }, + } + + got := r.getMinRequeueAfter(&v1alpha1.ApplicationSet{ + Spec: v1alpha1.ApplicationSetSpec{ + Generators: []v1alpha1.ApplicationSetGenerator{generator}, + }, + }) + + assert.Equal(t, time.Duration(1)*time.Second, got) +} + +func TestValidateGeneratedApplications(t *testing.T) { + + scheme := runtime.NewScheme() + err := v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + err = v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + client := fake.NewClientBuilder().WithScheme(scheme).Build() + + // Valid cluster + myCluster := v1alpha1.Cluster{ + Server: "https://kubernetes.default.svc", + Name: "my-cluster", + } + + // Valid project + myProject := &v1alpha1.AppProject{ + ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "namespace"}, + Spec: v1alpha1.AppProjectSpec{ + SourceRepos: []string{"*"}, + 
Destinations: []v1alpha1.ApplicationDestination{ + { + Namespace: "*", + Server: "*", + }, + }, + ClusterResourceWhitelist: []metav1.GroupKind{ + { + Group: "*", + Kind: "*", + }, + }, + }, + } + + // Test a subset of the validations that 'validateGeneratedApplications' performs + for _, cc := range []struct { + name string + apps []v1alpha1.Application + expectedErrors []string + validationErrors map[int]error + }{ + { + name: "valid app should return true", + apps: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, + Spec: v1alpha1.ApplicationSpec{ + Project: "default", + Source: &v1alpha1.ApplicationSource{ + RepoURL: "https://url", + Path: "/", + TargetRevision: "HEAD", + }, + Destination: v1alpha1.ApplicationDestination{ + Namespace: "namespace", + Name: "my-cluster", + }, + }, + }, + }, + expectedErrors: []string{}, + validationErrors: map[int]error{}, + }, + { + name: "can't have both name and server defined", + apps: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, + Spec: v1alpha1.ApplicationSpec{ + Project: "default", + Source: &v1alpha1.ApplicationSource{ + RepoURL: "https://url", + Path: "/", + TargetRevision: "HEAD", + }, + Destination: v1alpha1.ApplicationDestination{ + Namespace: "namespace", + Server: "my-server", + Name: "my-cluster", + }, + }, + }, + }, + expectedErrors: []string{"application destination can't have both name and server defined"}, + validationErrors: map[int]error{0: fmt.Errorf("application destination spec is invalid: application destination can't have both name and server defined: my-cluster my-server")}, + }, + { + name: "project mismatch should return error", + apps: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, + Spec: v1alpha1.ApplicationSpec{ + Project: "DOES-NOT-EXIST", + Source: &v1alpha1.ApplicationSource{ + RepoURL: "https://url", + Path: "/", + TargetRevision: "HEAD", + }, + 
Destination: v1alpha1.ApplicationDestination{ + Namespace: "namespace", + Name: "my-cluster", + }, + }, + }, + }, + expectedErrors: []string{"application references project DOES-NOT-EXIST which does not exist"}, + validationErrors: map[int]error{0: fmt.Errorf("application references project DOES-NOT-EXIST which does not exist")}, + }, + { + name: "valid app should return true", + apps: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, + Spec: v1alpha1.ApplicationSpec{ + Project: "default", + Source: &v1alpha1.ApplicationSource{ + RepoURL: "https://url", + Path: "/", + TargetRevision: "HEAD", + }, + Destination: v1alpha1.ApplicationDestination{ + Namespace: "namespace", + Name: "my-cluster", + }, + }, + }, + }, + expectedErrors: []string{}, + validationErrors: map[int]error{}, + }, + { + name: "cluster should match", + apps: []v1alpha1.Application{ + { + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, + Spec: v1alpha1.ApplicationSpec{ + Project: "default", + Source: &v1alpha1.ApplicationSource{ + RepoURL: "https://url", + Path: "/", + TargetRevision: "HEAD", + }, + Destination: v1alpha1.ApplicationDestination{ + Namespace: "namespace", + Name: "nonexistent-cluster", + }, + }, + }, + }, + expectedErrors: []string{"there are no clusters with this name: nonexistent-cluster"}, + validationErrors: map[int]error{0: fmt.Errorf("application destination spec is invalid: unable to find destination server: there are no clusters with this name: nonexistent-cluster")}, + }, + } { + + t.Run(cc.name, func(t *testing.T) { + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-secret", + Namespace: "namespace", + Labels: map[string]string{ + generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster, + }, + }, + Data: map[string][]byte{ + "name": []byte("my-cluster"), + "server": []byte("https://kubernetes.default.svc"), + "config": []byte("{\"username\":\"foo\",\"password\":\"foo\"}"), + }, + 
} + + objects := append([]runtime.Object{}, secret) + kubeclientset := kubefake.NewSimpleClientset(objects...) + + argoDBMock := dbmocks.ArgoDB{} + argoDBMock.On("GetCluster", mock.Anything, "https://kubernetes.default.svc").Return(&myCluster, nil) + argoDBMock.On("ListClusters", mock.Anything).Return(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{ + myCluster, + }}, nil) + + argoObjs := []runtime.Object{myProject} + for _, app := range cc.apps { + argoObjs = append(argoObjs, &app) + } + + r := ApplicationSetReconciler{ + Client: client, + Scheme: scheme, + Recorder: record.NewFakeRecorder(1), + Cache: &fakeCache{}, + Generators: map[string]generators.Generator{}, + ArgoDB: &argoDBMock, + ArgoCDNamespace: "namespace", + ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...), + KubeClientset: kubeclientset, + } + + appSetInfo := v1alpha1.ApplicationSet{} + + validationErrors, _ := r.validateGeneratedApplications(context.TODO(), cc.apps, appSetInfo) + var errorMessages []string + for _, v := range validationErrors { + errorMessages = append(errorMessages, v.Error()) + } + + if len(errorMessages) == 0 { + assert.Equal(t, len(cc.expectedErrors), 0, "Expected errors but none were seen") + } else { + // An error was returned: it should be expected + matched := false + for _, expectedErr := range cc.expectedErrors { + foundMatch := strings.Contains(strings.Join(errorMessages, ";"), expectedErr) + assert.True(t, foundMatch, "Unable to locate expected error: %s", cc.expectedErrors) + matched = matched || foundMatch + } + assert.True(t, matched, "An unexpected error occurred: %v", err) + // validation message was returned: it should be expected + matched = false + foundMatch := reflect.DeepEqual(validationErrors, cc.validationErrors) + var message string + for _, v := range validationErrors { + message = v.Error() + break + } + assert.True(t, foundMatch, "Unable to locate validation message: %s", message) + matched = matched || foundMatch + assert.True(t, matched, 
"An unexpected error occurred: %v", err) + } + }) + } +} + +func TestReconcilerValidationProjectErrorBehaviour(t *testing.T) { + + scheme := runtime.NewScheme() + err := v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + err = v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + project := v1alpha1.AppProject{ + ObjectMeta: metav1.ObjectMeta{Name: "good-project", Namespace: "argocd"}, + } + appSet := v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + GoTemplate: true, + Generators: []v1alpha1.ApplicationSetGenerator{ + { + List: &v1alpha1.ListGenerator{ + Elements: []apiextensionsv1.JSON{{ + Raw: []byte(`{"project": "good-project"}`), + }, { + Raw: []byte(`{"project": "bad-project"}`), + }}, + }, + }, + }, + Template: v1alpha1.ApplicationSetTemplate{ + ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{ + Name: "{{.project}}", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSpec{ + Source: &v1alpha1.ApplicationSource{RepoURL: "https://github.com/argoproj/argocd-example-apps", Path: "guestbook"}, + Project: "{{.project}}", + Destination: v1alpha1.ApplicationDestination{Server: "https://kubernetes.default.svc"}, + }, + }, + }, + } + + kubeclientset := kubefake.NewSimpleClientset() + argoDBMock := dbmocks.ArgoDB{} + argoObjs := []runtime.Object{&project} + + client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build() + r := ApplicationSetReconciler{ + Client: client, + Scheme: scheme, + Renderer: &utils.Render{}, + Recorder: record.NewFakeRecorder(1), + Cache: &fakeCache{}, + Generators: map[string]generators.Generator{ + "List": generators.NewListGenerator(), + }, + ArgoDB: &argoDBMock, + ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...), + KubeClientset: kubeclientset, + Policy: v1alpha1.ApplicationsSyncPolicySync, + ArgoCDNamespace: "argocd", + } + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + 
Namespace: "argocd", + Name: "name", + }, + } + + // Verify that on validation error, no error is returned, but the object is requeued + res, err := r.Reconcile(context.Background(), req) + assert.Nil(t, err) + assert.True(t, res.RequeueAfter == ReconcileRequeueOnValidationError) + + var app v1alpha1.Application + + // make sure good app got created + err = r.Client.Get(context.TODO(), crtclient.ObjectKey{Namespace: "argocd", Name: "good-project"}, &app) + assert.NoError(t, err) + assert.Equal(t, app.Name, "good-project") + + // make sure bad app was not created + err = r.Client.Get(context.TODO(), crtclient.ObjectKey{Namespace: "argocd", Name: "bad-project"}, &app) + assert.Error(t, err) +} + +func TestSetApplicationSetStatusCondition(t *testing.T) { + scheme := runtime.NewScheme() + err := v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + err = v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + appSet := v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Generators: []v1alpha1.ApplicationSetGenerator{ + {List: &v1alpha1.ListGenerator{ + Elements: []apiextensionsv1.JSON{{ + Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`), + }}, + }}, + }, + Template: v1alpha1.ApplicationSetTemplate{}, + }, + } + + appCondition := v1alpha1.ApplicationSetCondition{ + Type: v1alpha1.ApplicationSetConditionResourcesUpToDate, + Message: "All applications have been generated successfully", + Reason: v1alpha1.ApplicationSetReasonApplicationSetUpToDate, + Status: v1alpha1.ApplicationSetConditionStatusTrue, + } + + kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...) 
+ argoDBMock := dbmocks.ArgoDB{} + argoObjs := []runtime.Object{} + + client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build() + + r := ApplicationSetReconciler{ + Client: client, + Scheme: scheme, + Renderer: &utils.Render{}, + Recorder: record.NewFakeRecorder(1), + Cache: &fakeCache{}, + Generators: map[string]generators.Generator{ + "List": generators.NewListGenerator(), + }, + ArgoDB: &argoDBMock, + ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...), + KubeClientset: kubeclientset, + } + + err = r.setApplicationSetStatusCondition(context.TODO(), &appSet, appCondition, true) + assert.Nil(t, err) + + assert.Len(t, appSet.Status.Conditions, 3) +} + +func applicationsUpdateSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alpha1.ApplicationsSyncPolicy, recordBuffer int, allowPolicyOverride bool) v1alpha1.Application { + + scheme := runtime.NewScheme() + err := v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + err = v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + defaultProject := v1alpha1.AppProject{ + ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "argocd"}, + Spec: v1alpha1.AppProjectSpec{SourceRepos: []string{"*"}, Destinations: []v1alpha1.ApplicationDestination{{Namespace: "*", Server: "https://good-cluster"}}}, + } + appSet := v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Generators: []v1alpha1.ApplicationSetGenerator{ + { + List: &v1alpha1.ListGenerator{ + Elements: []apiextensionsv1.JSON{{ + Raw: []byte(`{"cluster": "good-cluster","url": "https://good-cluster"}`), + }}, + }, + }, + }, + SyncPolicy: &v1alpha1.ApplicationSetSyncPolicy{ + ApplicationsSync: &applicationsSyncPolicy, + }, + Template: v1alpha1.ApplicationSetTemplate{ + ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{ + Name: "{{cluster}}", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSpec{ + Source: 
&v1alpha1.ApplicationSource{RepoURL: "https://github.com/argoproj/argocd-example-apps", Path: "guestbook"}, + Project: "default", + Destination: v1alpha1.ApplicationDestination{Server: "{{url}}"}, + }, + }, + }, + } + + kubeclientset := kubefake.NewSimpleClientset() + argoDBMock := dbmocks.ArgoDB{} + argoObjs := []runtime.Object{&defaultProject} + + client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build() + goodCluster := v1alpha1.Cluster{Server: "https://good-cluster", Name: "good-cluster"} + argoDBMock.On("GetCluster", mock.Anything, "https://good-cluster").Return(&goodCluster, nil) + argoDBMock.On("ListClusters", mock.Anything).Return(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{ + goodCluster, + }}, nil) + + r := ApplicationSetReconciler{ + Client: client, + Scheme: scheme, + Renderer: &utils.Render{}, + Recorder: record.NewFakeRecorder(recordBuffer), + Cache: &fakeCache{}, + Generators: map[string]generators.Generator{ + "List": generators.NewListGenerator(), + }, + ArgoDB: &argoDBMock, + ArgoCDNamespace: "argocd", + ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...), + KubeClientset: kubeclientset, + Policy: v1alpha1.ApplicationsSyncPolicySync, + EnablePolicyOverride: allowPolicyOverride, + } + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "argocd", + Name: "name", + }, + } + + // Verify that on validation error, no error is returned, but the object is requeued + resCreate, err := r.Reconcile(context.Background(), req) + assert.Nil(t, err) + assert.True(t, resCreate.RequeueAfter == 0) + + var app v1alpha1.Application + + // make sure good app got created + err = r.Client.Get(context.TODO(), crtclient.ObjectKey{Namespace: "argocd", Name: "good-cluster"}, &app) + assert.Nil(t, err) + assert.Equal(t, app.Name, "good-cluster") + + // Update resource + var retrievedApplicationSet v1alpha1.ApplicationSet + err = r.Client.Get(context.TODO(), crtclient.ObjectKey{Namespace: "argocd", Name: "name"}, 
&retrievedApplicationSet) + assert.Nil(t, err) + + retrievedApplicationSet.Spec.Template.Annotations = map[string]string{"annotation-key": "annotation-value"} + retrievedApplicationSet.Spec.Template.Labels = map[string]string{"label-key": "label-value"} + + retrievedApplicationSet.Spec.Template.Spec.Source.Helm = &v1alpha1.ApplicationSourceHelm{ + Values: "global.test: test", + } + + err = r.Client.Update(context.TODO(), &retrievedApplicationSet) + assert.Nil(t, err) + + resUpdate, err := r.Reconcile(context.Background(), req) + assert.Nil(t, err) + + err = r.Client.Get(context.TODO(), crtclient.ObjectKey{Namespace: "argocd", Name: "good-cluster"}, &app) + assert.Nil(t, err) + assert.True(t, resUpdate.RequeueAfter == 0) + assert.Equal(t, app.Name, "good-cluster") + + return app +} + +func TestUpdateNotPerformedWithSyncPolicyCreateOnly(t *testing.T) { + + applicationsSyncPolicy := v1alpha1.ApplicationsSyncPolicyCreateOnly + + app := applicationsUpdateSyncPolicyTest(t, applicationsSyncPolicy, 1, true) + + assert.Nil(t, app.Spec.Source.Helm) + assert.Nil(t, app.ObjectMeta.Annotations) +} + +func TestUpdateNotPerformedWithSyncPolicyCreateDelete(t *testing.T) { + + applicationsSyncPolicy := v1alpha1.ApplicationsSyncPolicyCreateDelete + + app := applicationsUpdateSyncPolicyTest(t, applicationsSyncPolicy, 1, true) + + assert.Nil(t, app.Spec.Source.Helm) + assert.Nil(t, app.ObjectMeta.Annotations) +} + +func TestUpdatePerformedWithSyncPolicyCreateUpdate(t *testing.T) { + + applicationsSyncPolicy := v1alpha1.ApplicationsSyncPolicyCreateUpdate + + app := applicationsUpdateSyncPolicyTest(t, applicationsSyncPolicy, 2, true) + + assert.Equal(t, "global.test: test", app.Spec.Source.Helm.Values) + assert.Equal(t, map[string]string{"annotation-key": "annotation-value"}, app.ObjectMeta.Annotations) + assert.Equal(t, map[string]string{"label-key": "label-value"}, app.ObjectMeta.Labels) +} + +func TestUpdatePerformedWithSyncPolicySync(t *testing.T) { + + applicationsSyncPolicy := 
v1alpha1.ApplicationsSyncPolicySync + + app := applicationsUpdateSyncPolicyTest(t, applicationsSyncPolicy, 2, true) + + assert.Equal(t, "global.test: test", app.Spec.Source.Helm.Values) + assert.Equal(t, map[string]string{"annotation-key": "annotation-value"}, app.ObjectMeta.Annotations) + assert.Equal(t, map[string]string{"label-key": "label-value"}, app.ObjectMeta.Labels) +} + +func TestUpdatePerformedWithSyncPolicyCreateOnlyAndAllowPolicyOverrideFalse(t *testing.T) { + + applicationsSyncPolicy := v1alpha1.ApplicationsSyncPolicyCreateOnly + + app := applicationsUpdateSyncPolicyTest(t, applicationsSyncPolicy, 2, false) + + assert.Equal(t, "global.test: test", app.Spec.Source.Helm.Values) + assert.Equal(t, map[string]string{"annotation-key": "annotation-value"}, app.ObjectMeta.Annotations) + assert.Equal(t, map[string]string{"label-key": "label-value"}, app.ObjectMeta.Labels) +} + +func applicationsDeleteSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alpha1.ApplicationsSyncPolicy, recordBuffer int, allowPolicyOverride bool) v1alpha1.ApplicationList { + + scheme := runtime.NewScheme() + err := v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + err = v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + defaultProject := v1alpha1.AppProject{ + ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "argocd"}, + Spec: v1alpha1.AppProjectSpec{SourceRepos: []string{"*"}, Destinations: []v1alpha1.ApplicationDestination{{Namespace: "*", Server: "https://good-cluster"}}}, + } + appSet := v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Generators: []v1alpha1.ApplicationSetGenerator{ + { + List: &v1alpha1.ListGenerator{ + Elements: []apiextensionsv1.JSON{{ + Raw: []byte(`{"cluster": "good-cluster","url": "https://good-cluster"}`), + }}, + }, + }, + }, + SyncPolicy: &v1alpha1.ApplicationSetSyncPolicy{ + ApplicationsSync: &applicationsSyncPolicy, + }, + Template: 
v1alpha1.ApplicationSetTemplate{ + ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{ + Name: "{{cluster}}", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSpec{ + Source: &v1alpha1.ApplicationSource{RepoURL: "https://github.com/argoproj/argocd-example-apps", Path: "guestbook"}, + Project: "default", + Destination: v1alpha1.ApplicationDestination{Server: "{{url}}"}, + }, + }, + }, + } + + kubeclientset := kubefake.NewSimpleClientset() + argoDBMock := dbmocks.ArgoDB{} + argoObjs := []runtime.Object{&defaultProject} + + client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build() + goodCluster := v1alpha1.Cluster{Server: "https://good-cluster", Name: "good-cluster"} + argoDBMock.On("GetCluster", mock.Anything, "https://good-cluster").Return(&goodCluster, nil) + argoDBMock.On("ListClusters", mock.Anything).Return(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{ + goodCluster, + }}, nil) + + r := ApplicationSetReconciler{ + Client: client, + Scheme: scheme, + Renderer: &utils.Render{}, + Recorder: record.NewFakeRecorder(recordBuffer), + Cache: &fakeCache{}, + Generators: map[string]generators.Generator{ + "List": generators.NewListGenerator(), + }, + ArgoDB: &argoDBMock, + ArgoCDNamespace: "argocd", + ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...), + KubeClientset: kubeclientset, + Policy: v1alpha1.ApplicationsSyncPolicySync, + EnablePolicyOverride: allowPolicyOverride, + } + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "argocd", + Name: "name", + }, + } + + // Verify that on validation error, no error is returned, but the object is requeued + resCreate, err := r.Reconcile(context.Background(), req) + assert.Nil(t, err) + assert.True(t, resCreate.RequeueAfter == 0) + + var app v1alpha1.Application + + // make sure good app got created + err = r.Client.Get(context.TODO(), crtclient.ObjectKey{Namespace: "argocd", Name: "good-cluster"}, &app) + assert.Nil(t, err) + assert.Equal(t, 
app.Name, "good-cluster") + + // Update resource + var retrievedApplicationSet v1alpha1.ApplicationSet + err = r.Client.Get(context.TODO(), crtclient.ObjectKey{Namespace: "argocd", Name: "name"}, &retrievedApplicationSet) + assert.Nil(t, err) + retrievedApplicationSet.Spec.Generators = []v1alpha1.ApplicationSetGenerator{ + { + List: &v1alpha1.ListGenerator{ + Elements: []apiextensionsv1.JSON{}, + }, + }, + } + + err = r.Client.Update(context.TODO(), &retrievedApplicationSet) + assert.Nil(t, err) + + resUpdate, err := r.Reconcile(context.Background(), req) + assert.Nil(t, err) + + var apps v1alpha1.ApplicationList + + err = r.Client.List(context.TODO(), &apps) + assert.Nil(t, err) + assert.True(t, resUpdate.RequeueAfter == 0) + + return apps +} + +func TestDeleteNotPerformedWithSyncPolicyCreateOnly(t *testing.T) { + + applicationsSyncPolicy := v1alpha1.ApplicationsSyncPolicyCreateOnly + + apps := applicationsDeleteSyncPolicyTest(t, applicationsSyncPolicy, 1, true) + + assert.Equal(t, "good-cluster", apps.Items[0].Name) +} + +func TestDeleteNotPerformedWithSyncPolicyCreateUpdate(t *testing.T) { + + applicationsSyncPolicy := v1alpha1.ApplicationsSyncPolicyCreateUpdate + + apps := applicationsDeleteSyncPolicyTest(t, applicationsSyncPolicy, 2, true) + + assert.Equal(t, "good-cluster", apps.Items[0].Name) +} + +func TestDeletePerformedWithSyncPolicyCreateDelete(t *testing.T) { + + applicationsSyncPolicy := v1alpha1.ApplicationsSyncPolicyCreateDelete + + apps := applicationsDeleteSyncPolicyTest(t, applicationsSyncPolicy, 3, true) + + assert.Equal(t, 0, len(apps.Items)) +} + +func TestDeletePerformedWithSyncPolicySync(t *testing.T) { + + applicationsSyncPolicy := v1alpha1.ApplicationsSyncPolicySync + + apps := applicationsDeleteSyncPolicyTest(t, applicationsSyncPolicy, 3, true) + + assert.Equal(t, 0, len(apps.Items)) +} + +func TestDeletePerformedWithSyncPolicyCreateOnlyAndAllowPolicyOverrideFalse(t *testing.T) { + + applicationsSyncPolicy := 
v1alpha1.ApplicationsSyncPolicyCreateOnly + + apps := applicationsDeleteSyncPolicyTest(t, applicationsSyncPolicy, 3, false) + + assert.Equal(t, 0, len(apps.Items)) +} + +// Test app generation from a go template application set using a pull request generator +func TestGenerateAppsUsingPullRequestGenerator(t *testing.T) { + scheme := runtime.NewScheme() + client := fake.NewClientBuilder().WithScheme(scheme).Build() + + for _, cases := range []struct { + name string + params []map[string]interface{} + template v1alpha1.ApplicationSetTemplate + expectedApp []v1alpha1.Application + }{ + { + name: "Generate an application from a go template application set manifest using a pull request generator", + params: []map[string]interface{}{{ + "number": "1", + "branch": "branch1", + "branch_slug": "branchSlug1", + "head_sha": "089d92cbf9ff857a39e6feccd32798ca700fb958", + "head_short_sha": "089d92cb", + "labels": []string{"label1"}}}, + template: v1alpha1.ApplicationSetTemplate{ + ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{ + Name: "AppSet-{{.branch}}-{{.number}}", + Labels: map[string]string{ + "app1": "{{index .labels 0}}", + }, + }, + Spec: v1alpha1.ApplicationSpec{ + Source: &v1alpha1.ApplicationSource{ + RepoURL: "https://testurl/testRepo", + TargetRevision: "{{.head_short_sha}}", + }, + Destination: v1alpha1.ApplicationDestination{ + Server: "https://kubernetes.default.svc", + Namespace: "AppSet-{{.branch_slug}}-{{.head_sha}}", + }, + }, + }, + expectedApp: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "AppSet-branch1-1", + Labels: map[string]string{ + "app1": "label1", + }, + }, + Spec: v1alpha1.ApplicationSpec{ + Source: &v1alpha1.ApplicationSource{ + RepoURL: "https://testurl/testRepo", + TargetRevision: "089d92cb", + }, + Destination: v1alpha1.ApplicationDestination{ + Server: "https://kubernetes.default.svc", + Namespace: "AppSet-branchSlug1-089d92cbf9ff857a39e6feccd32798ca700fb958", + }, + }, + }, + }, + }, + } { + + 
t.Run(cases.name, func(t *testing.T) { + + generatorMock := generatorMock{} + generator := v1alpha1.ApplicationSetGenerator{ + PullRequest: &v1alpha1.PullRequestGenerator{}, + } + + generatorMock.On("GenerateParams", &generator). + Return(cases.params, nil) + + generatorMock.On("GetTemplate", &generator). + Return(&cases.template, nil) + + appSetReconciler := ApplicationSetReconciler{ + Client: client, + Scheme: scheme, + Recorder: record.NewFakeRecorder(1), + Cache: &fakeCache{}, + Generators: map[string]generators.Generator{ + "PullRequest": &generatorMock, + }, + Renderer: &utils.Render{}, + KubeClientset: kubefake.NewSimpleClientset(), + } + + gotApp, _, _ := appSetReconciler.generateApplications(v1alpha1.ApplicationSet{ + Spec: v1alpha1.ApplicationSetSpec{ + GoTemplate: true, + Generators: []v1alpha1.ApplicationSetGenerator{{ + PullRequest: &v1alpha1.PullRequestGenerator{}, + }}, + Template: cases.template, + }, + }, + ) + assert.EqualValues(t, cases.expectedApp[0].ObjectMeta.Name, gotApp[0].ObjectMeta.Name) + assert.EqualValues(t, cases.expectedApp[0].Spec.Source.TargetRevision, gotApp[0].Spec.Source.TargetRevision) + assert.EqualValues(t, cases.expectedApp[0].Spec.Destination.Namespace, gotApp[0].Spec.Destination.Namespace) + assert.True(t, collections.StringMapsEqual(cases.expectedApp[0].ObjectMeta.Labels, gotApp[0].ObjectMeta.Labels)) + }) + } +} + +func TestPolicies(t *testing.T) { + scheme := runtime.NewScheme() + err := v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + err = v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + defaultProject := v1alpha1.AppProject{ + ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "argocd"}, + Spec: v1alpha1.AppProjectSpec{SourceRepos: []string{"*"}, Destinations: []v1alpha1.ApplicationDestination{{Namespace: "*", Server: "https://kubernetes.default.svc"}}}, + } + myCluster := v1alpha1.Cluster{ + Server: "https://kubernetes.default.svc", + Name: "my-cluster", + } + + kubeclientset := 
kubefake.NewSimpleClientset() + argoDBMock := dbmocks.ArgoDB{} + argoDBMock.On("GetCluster", mock.Anything, "https://kubernetes.default.svc").Return(&myCluster, nil) + argoObjs := []runtime.Object{&defaultProject} + + for _, c := range []struct { + name string + policyName string + allowedUpdate bool + allowedDelete bool + }{ + { + name: "Apps are allowed to update and delete", + policyName: "sync", + allowedUpdate: true, + allowedDelete: true, + }, + { + name: "Apps are not allowed to update and delete", + policyName: "create-only", + allowedUpdate: false, + allowedDelete: false, + }, + { + name: "Apps are allowed to update, not allowed to delete", + policyName: "create-update", + allowedUpdate: true, + allowedDelete: false, + }, + { + name: "Apps are allowed to delete, not allowed to update", + policyName: "create-delete", + allowedUpdate: false, + allowedDelete: true, + }, + } { + t.Run(c.name, func(t *testing.T) { + policy := utils.Policies[c.policyName] + assert.NotNil(t, policy) + + appSet := v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + GoTemplate: true, + Generators: []v1alpha1.ApplicationSetGenerator{ + { + List: &v1alpha1.ListGenerator{ + Elements: []apiextensionsv1.JSON{ + { + Raw: []byte(`{"name": "my-app"}`), + }, + }, + }, + }, + }, + Template: v1alpha1.ApplicationSetTemplate{ + ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{ + Name: "{{.name}}", + Namespace: "argocd", + Annotations: map[string]string{ + "key": "value", + }, + }, + Spec: v1alpha1.ApplicationSpec{ + Source: &v1alpha1.ApplicationSource{RepoURL: "https://github.com/argoproj/argocd-example-apps", Path: "guestbook"}, + Project: "default", + Destination: v1alpha1.ApplicationDestination{Server: "https://kubernetes.default.svc"}, + }, + }, + }, + } + + client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build() + + r := ApplicationSetReconciler{ + Client: 
client, + Scheme: scheme, + Renderer: &utils.Render{}, + Recorder: record.NewFakeRecorder(10), + Cache: &fakeCache{}, + Generators: map[string]generators.Generator{ + "List": generators.NewListGenerator(), + }, + ArgoDB: &argoDBMock, + ArgoCDNamespace: "argocd", + ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...), + KubeClientset: kubeclientset, + Policy: policy, + } + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "argocd", + Name: "name", + }, + } + + // Check if Application is created + res, err := r.Reconcile(context.Background(), req) + assert.Nil(t, err) + assert.True(t, res.RequeueAfter == 0) + + var app v1alpha1.Application + err = r.Client.Get(context.TODO(), crtclient.ObjectKey{Namespace: "argocd", Name: "my-app"}, &app) + assert.NoError(t, err) + assert.Equal(t, app.Annotations["key"], "value") + + // Check if Application is updated + app.Annotations["key"] = "edited" + err = r.Client.Update(context.TODO(), &app) + assert.NoError(t, err) + + res, err = r.Reconcile(context.Background(), req) + assert.Nil(t, err) + assert.True(t, res.RequeueAfter == 0) + + err = r.Client.Get(context.TODO(), crtclient.ObjectKey{Namespace: "argocd", Name: "my-app"}, &app) + assert.NoError(t, err) + + if c.allowedUpdate { + assert.Equal(t, app.Annotations["key"], "value") + } else { + assert.Equal(t, app.Annotations["key"], "edited") + } + + // Check if Application is deleted + err = r.Client.Get(context.TODO(), crtclient.ObjectKey{Namespace: "argocd", Name: "name"}, &appSet) + assert.NoError(t, err) + appSet.Spec.Generators[0] = v1alpha1.ApplicationSetGenerator{ + List: &v1alpha1.ListGenerator{ + Elements: []apiextensionsv1.JSON{}, + }, + } + err = r.Client.Update(context.TODO(), &appSet) + assert.NoError(t, err) + + res, err = r.Reconcile(context.Background(), req) + assert.Nil(t, err) + assert.True(t, res.RequeueAfter == 0) + + err = r.Client.Get(context.TODO(), crtclient.ObjectKey{Namespace: "argocd", Name: "my-app"}, &app) + 
assert.NoError(t, err) + if c.allowedDelete { + assert.NotNil(t, app.DeletionTimestamp) + } else { + assert.Nil(t, app.DeletionTimestamp) + } + }) + } +} + +func TestSetApplicationSetApplicationStatus(t *testing.T) { + scheme := runtime.NewScheme() + err := v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + err = v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...) + argoDBMock := dbmocks.ArgoDB{} + argoObjs := []runtime.Object{} + + for _, cc := range []struct { + name string + appSet v1alpha1.ApplicationSet + appStatuses []v1alpha1.ApplicationSetApplicationStatus + expectedAppStatuses []v1alpha1.ApplicationSetApplicationStatus + }{ + { + name: "sets a single appstatus", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Generators: []v1alpha1.ApplicationSetGenerator{ + {List: &v1alpha1.ListGenerator{ + Elements: []apiextensionsv1.JSON{{ + Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`), + }}, + }}, + }, + Template: v1alpha1.ApplicationSetTemplate{}, + }, + }, + appStatuses: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "testing SetApplicationSetApplicationStatus to Healthy", + Status: "Healthy", + }, + }, + expectedAppStatuses: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "testing SetApplicationSetApplicationStatus to Healthy", + Status: "Healthy", + }, + }, + }, + { + name: "removes an appstatus", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Generators: []v1alpha1.ApplicationSetGenerator{ + {List: &v1alpha1.ListGenerator{ + Elements: []apiextensionsv1.JSON{{ + Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`), + }}, + }}, + }, + Template: 
v1alpha1.ApplicationSetTemplate{}, + }, + Status: v1alpha1.ApplicationSetStatus{ + ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "testing SetApplicationSetApplicationStatus to Healthy", + Status: "Healthy", + }, + }, + }, + }, + appStatuses: []v1alpha1.ApplicationSetApplicationStatus{}, + expectedAppStatuses: nil, + }, + } { + + t.Run(cc.name, func(t *testing.T) { + + client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&cc.appSet).Build() + + r := ApplicationSetReconciler{ + Client: client, + Scheme: scheme, + Renderer: &utils.Render{}, + Recorder: record.NewFakeRecorder(1), + Cache: &fakeCache{}, + Generators: map[string]generators.Generator{ + "List": generators.NewListGenerator(), + }, + ArgoDB: &argoDBMock, + ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...), + KubeClientset: kubeclientset, + } + + err = r.setAppSetApplicationStatus(context.TODO(), &cc.appSet, cc.appStatuses) + assert.Nil(t, err) + + assert.Equal(t, cc.expectedAppStatuses, cc.appSet.Status.ApplicationStatus) + }) + } +} + +func TestBuildAppDependencyList(t *testing.T) { + + scheme := runtime.NewScheme() + err := v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + err = v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + client := fake.NewClientBuilder().WithScheme(scheme).Build() + + for _, cc := range []struct { + name string + appSet v1alpha1.ApplicationSet + apps []v1alpha1.Application + expectedList [][]string + expectedStepMap map[string]int + }{ + { + name: "handles an empty set of applications and no strategy", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{}, + }, + apps: []v1alpha1.Application{}, + expectedList: [][]string{}, + expectedStepMap: map[string]int{}, + }, + { + name: "handles an empty set of applications and ignores AllAtOnce strategy", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "AllAtOnce", + }, + }, + }, + apps: []v1alpha1.Application{}, + expectedList: [][]string{}, + expectedStepMap: map[string]int{}, + }, + { + name: "handles an empty set of applications with good 'In' selectors", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{ + Steps: []v1alpha1.ApplicationSetRolloutStep{ + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{ + { + Key: "env", + Operator: "In", + Values: []string{ + "dev", + }, + }, + }, + }, + }, + }, + }, + }, + }, + apps: []v1alpha1.Application{}, + expectedList: [][]string{ + {}, + }, + expectedStepMap: map[string]int{}, + }, + { + name: "handles selecting 1 application with 1 'In' selector", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{ + Steps: []v1alpha1.ApplicationSetRolloutStep{ + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{ + { + Key: "env", + Operator: "In", + Values: []string{ + "dev", + }, + }, + }, + }, + }, + }, + }, + }, + }, + apps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app-dev", + Labels: map[string]string{ + "env": "dev", + }, + }, + }, + }, + expectedList: [][]string{ + {"app-dev"}, + }, + expectedStepMap: map[string]int{ + "app-dev": 0, + }, + }, + { + name: "handles 'In' selectors that select no applications", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: 
v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{ + Steps: []v1alpha1.ApplicationSetRolloutStep{ + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{ + { + Key: "env", + Operator: "In", + Values: []string{ + "dev", + }, + }, + }, + }, + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{ + { + Key: "env", + Operator: "In", + Values: []string{ + "qa", + }, + }, + }, + }, + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{ + { + Key: "env", + Operator: "In", + Values: []string{ + "prod", + }, + }, + }, + }, + }, + }, + }, + }, + }, + apps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app-qa", + Labels: map[string]string{ + "env": "qa", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app-prod", + Labels: map[string]string{ + "env": "prod", + }, + }, + }, + }, + expectedList: [][]string{ + {}, + {"app-qa"}, + {"app-prod"}, + }, + expectedStepMap: map[string]int{ + "app-qa": 1, + "app-prod": 2, + }, + }, + { + name: "multiple 'In' selectors in the same matchExpression only select Applications that match all selectors", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{ + Steps: []v1alpha1.ApplicationSetRolloutStep{ + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{ + { + Key: "region", + Operator: "In", + Values: []string{ + "us-east-2", + }, + }, + { + Key: "env", + Operator: "In", + Values: []string{ + "qa", + }, + }, + }, + }, + }, + }, + }, + }, + }, + apps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app-qa1", + Labels: map[string]string{ + "env": "qa", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app-qa2", + Labels: 
map[string]string{ + "env": "qa", + "region": "us-east-2", + }, + }, + }, + }, + expectedList: [][]string{ + {"app-qa2"}, + }, + expectedStepMap: map[string]int{ + "app-qa2": 0, + }, + }, + { + name: "multiple values in the same 'In' matchExpression can match on any value", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{ + Steps: []v1alpha1.ApplicationSetRolloutStep{ + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{ + { + Key: "env", + Operator: "In", + Values: []string{ + "qa", + "prod", + }, + }, + }, + }, + }, + }, + }, + }, + }, + apps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app-dev", + Labels: map[string]string{ + "env": "dev", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app-qa", + Labels: map[string]string{ + "env": "qa", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app-prod", + Labels: map[string]string{ + "env": "prod", + "region": "us-east-2", + }, + }, + }, + }, + expectedList: [][]string{ + {"app-qa", "app-prod"}, + }, + expectedStepMap: map[string]int{ + "app-qa": 0, + "app-prod": 0, + }, + }, + { + name: "handles an empty set of applications with good 'NotIn' selectors", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{ + Steps: []v1alpha1.ApplicationSetRolloutStep{ + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{ + { + Key: "env", + Operator: "In", + Values: []string{ + "dev", + }, + }, + }, + }, + }, + }, + }, + }, + }, + apps: []v1alpha1.Application{}, + expectedList: [][]string{ + {}, + }, + expectedStepMap: 
map[string]int{},
+		},
+		{
+			name: "selects 1 application with 1 'NotIn' selector",
+			appSet: v1alpha1.ApplicationSet{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "name",
+					Namespace: "argocd",
+				},
+				Spec: v1alpha1.ApplicationSetSpec{
+					Strategy: &v1alpha1.ApplicationSetStrategy{
+						Type: "RollingSync",
+						RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
+							Steps: []v1alpha1.ApplicationSetRolloutStep{
+								{
+									MatchExpressions: []v1alpha1.ApplicationMatchExpression{
+										{
+											Key:      "env",
+											Operator: "NotIn",
+											Values: []string{
+												"qa",
+											},
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+			apps: []v1alpha1.Application{
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "app-dev",
+						Labels: map[string]string{
+							"env": "dev",
+						},
+					},
+				},
+			},
+			expectedList: [][]string{
+				{"app-dev"},
+			},
+			expectedStepMap: map[string]int{
+				"app-dev": 0,
+			},
+		},
+		{
+			name: "'NotIn' selectors that select no applications",
+			appSet: v1alpha1.ApplicationSet{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "name",
+					Namespace: "argocd",
+				},
+				Spec: v1alpha1.ApplicationSetSpec{
+					Strategy: &v1alpha1.ApplicationSetStrategy{
+						Type: "RollingSync",
+						RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
+							Steps: []v1alpha1.ApplicationSetRolloutStep{
+								{
+									MatchExpressions: []v1alpha1.ApplicationMatchExpression{
+										{
+											Key:      "env",
+											Operator: "NotIn",
+											Values: []string{
+												"dev",
+											},
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+			apps: []v1alpha1.Application{
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "app-qa",
+						Labels: map[string]string{
+							"env": "qa",
+						},
+					},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "app-prod",
+						Labels: map[string]string{
+							"env": "prod",
+						},
+					},
+				},
+			},
+			expectedList: [][]string{
+				{"app-qa", "app-prod"},
+			},
+			expectedStepMap: map[string]int{
+				"app-qa":   0,
+				"app-prod": 0,
+			},
+		},
+		{
+			name: "multiple 'NotIn' selectors remove Applications with missing labels on any match",
+			appSet: v1alpha1.ApplicationSet{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "name",
+					Namespace: 
"argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{ + Steps: []v1alpha1.ApplicationSetRolloutStep{ + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{ + { + Key: "region", + Operator: "NotIn", + Values: []string{ + "us-east-2", + }, + }, + { + Key: "env", + Operator: "NotIn", + Values: []string{ + "qa", + }, + }, + }, + }, + }, + }, + }, + }, + }, + apps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app-qa1", + Labels: map[string]string{ + "env": "qa", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app-qa2", + Labels: map[string]string{ + "env": "qa", + "region": "us-east-2", + }, + }, + }, + }, + expectedList: [][]string{ + {}, + }, + expectedStepMap: map[string]int{}, + }, + { + name: "multiple 'NotIn' selectors filter all matching Applications", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{ + Steps: []v1alpha1.ApplicationSetRolloutStep{ + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{ + { + Key: "region", + Operator: "NotIn", + Values: []string{ + "us-east-2", + }, + }, + { + Key: "env", + Operator: "NotIn", + Values: []string{ + "qa", + }, + }, + }, + }, + }, + }, + }, + }, + }, + apps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app-qa1", + Labels: map[string]string{ + "env": "qa", + "region": "us-east-1", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app-qa2", + Labels: map[string]string{ + "env": "qa", + "region": "us-east-2", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app-prod1", + Labels: map[string]string{ + "env": "prod", + "region": "us-east-1", + }, + }, + }, + { + ObjectMeta: 
metav1.ObjectMeta{ + Name: "app-prod2", + Labels: map[string]string{ + "env": "prod", + "region": "us-east-2", + }, + }, + }, + }, + expectedList: [][]string{ + {"app-prod1"}, + }, + expectedStepMap: map[string]int{ + "app-prod1": 0, + }, + }, + { + name: "multiple values in the same 'NotIn' matchExpression exclude a match from any value", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{ + Steps: []v1alpha1.ApplicationSetRolloutStep{ + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{ + { + Key: "env", + Operator: "NotIn", + Values: []string{ + "qa", + "prod", + }, + }, + }, + }, + }, + }, + }, + }, + }, + apps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app-dev", + Labels: map[string]string{ + "env": "dev", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app-qa", + Labels: map[string]string{ + "env": "qa", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app-prod", + Labels: map[string]string{ + "env": "prod", + "region": "us-east-2", + }, + }, + }, + }, + expectedList: [][]string{ + {"app-dev"}, + }, + expectedStepMap: map[string]int{ + "app-dev": 0, + }, + }, + { + name: "in a mix of 'In' and 'NotIn' selectors, 'NotIn' takes precedence", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{ + Steps: []v1alpha1.ApplicationSetRolloutStep{ + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{ + { + Key: "env", + Operator: "In", + Values: []string{ + "qa", + "prod", + }, + }, + { + Key: "region", + Operator: "NotIn", + Values: []string{ + "us-west-2", + }, + 
},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+			apps: []v1alpha1.Application{
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "app-dev",
+						Labels: map[string]string{
+							"env": "dev",
+						},
+					},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "app-qa1",
+						Labels: map[string]string{
+							"env":    "qa",
+							"region": "us-west-2",
+						},
+					},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "app-qa2",
+						Labels: map[string]string{
+							"env":    "qa",
+							"region": "us-east-2",
+						},
+					},
+				},
+			},
+			expectedList: [][]string{
+				{"app-qa2"},
+			},
+			expectedStepMap: map[string]int{
+				"app-qa2": 0,
+			},
+		},
+	} {
+
+		t.Run(cc.name, func(t *testing.T) {
+
+			kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
+			argoDBMock := dbmocks.ArgoDB{}
+			argoObjs := []runtime.Object{}
+
+			r := ApplicationSetReconciler{
+				Client:           client,
+				Scheme:           scheme,
+				Recorder:         record.NewFakeRecorder(1),
+				Cache:            &fakeCache{},
+				Generators:       map[string]generators.Generator{},
+				ArgoDB:           &argoDBMock,
+				ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
+				KubeClientset:    kubeclientset,
+			}
+
+			appDependencyList, appStepMap, err := r.buildAppDependencyList(context.TODO(), cc.appSet, cc.apps)
+			assert.Equal(t, err, nil, "expected no errors, but errors occurred")
+			assert.Equal(t, cc.expectedList, appDependencyList, "expected appDependencyList did not match actual")
+			assert.Equal(t, cc.expectedStepMap, appStepMap, "expected appStepMap did not match actual")
+		})
+	}
+}
+
+func TestBuildAppSyncMap(t *testing.T) {
+
+	scheme := runtime.NewScheme()
+	err := v1alpha1.AddToScheme(scheme)
+	assert.Nil(t, err)
+
+	err = v1alpha1.AddToScheme(scheme)
+	assert.Nil(t, err)
+
+	client := fake.NewClientBuilder().WithScheme(scheme).Build()
+
+	for _, cc := range []struct {
+		name              string
+		appSet            v1alpha1.ApplicationSet
+		appMap            map[string]v1alpha1.Application
+		appDependencyList [][]string
+		expectedMap       map[string]bool
+	}{
+		{
+			name: "handles an empty app dependency list",
+			appSet: v1alpha1.ApplicationSet{
+				
ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{}, + }, + }, + }, + appDependencyList: [][]string{}, + expectedMap: map[string]bool{}, + }, + { + name: "handles two applications with no statuses", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{}, + }, + }, + }, + appDependencyList: [][]string{ + {"app1"}, + {"app2"}, + }, + expectedMap: map[string]bool{ + "app1": true, + "app2": false, + }, + }, + { + name: "handles applications after an empty selection", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{}, + }, + }, + }, + appDependencyList: [][]string{ + {}, + {"app1", "app2"}, + }, + expectedMap: map[string]bool{ + "app1": true, + "app2": true, + }, + }, + { + name: "handles RollingSync applications that are healthy and have no changes", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{}, + }, + }, + Status: v1alpha1.ApplicationSetStatus{ + ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Status: "Healthy", + }, + { + Application: "app2", + Status: "Healthy", + }, + }, + }, + }, + appMap: map[string]v1alpha1.Application{ + "app1": { + ObjectMeta: metav1.ObjectMeta{ + Name: 
"app1", + }, + Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusHealthy, + }, + OperationState: &v1alpha1.OperationState{ + Phase: common.OperationSucceeded, + }, + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeSynced, + }, + }, + }, + "app2": { + ObjectMeta: metav1.ObjectMeta{ + Name: "app2", + }, + Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusHealthy, + }, + OperationState: &v1alpha1.OperationState{ + Phase: common.OperationSucceeded, + }, + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeSynced, + }, + }, + }, + }, + appDependencyList: [][]string{ + {"app1"}, + {"app2"}, + }, + expectedMap: map[string]bool{ + "app1": true, + "app2": true, + }, + }, + { + name: "blocks RollingSync applications that are healthy and have no changes, but are still pending", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{}, + }, + }, + Status: v1alpha1.ApplicationSetStatus{ + ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Status: "Pending", + }, + { + Application: "app2", + Status: "Healthy", + }, + }, + }, + }, + appMap: map[string]v1alpha1.Application{ + "app1": { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusHealthy, + }, + OperationState: &v1alpha1.OperationState{ + Phase: common.OperationSucceeded, + }, + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeSynced, + }, + }, + }, + "app2": { + ObjectMeta: metav1.ObjectMeta{ + Name: "app2", + }, + Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusHealthy, + }, + OperationState: 
&v1alpha1.OperationState{ + Phase: common.OperationSucceeded, + }, + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeSynced, + }, + }, + }, + }, + appDependencyList: [][]string{ + {"app1"}, + {"app2"}, + }, + expectedMap: map[string]bool{ + "app1": true, + "app2": false, + }, + }, + { + name: "handles RollingSync applications that are up to date and healthy, but still syncing", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{}, + }, + }, + Status: v1alpha1.ApplicationSetStatus{ + ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Status: "Progressing", + }, + { + Application: "app2", + Status: "Progressing", + }, + }, + }, + }, + appMap: map[string]v1alpha1.Application{ + "app1": { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusHealthy, + }, + OperationState: &v1alpha1.OperationState{ + Phase: common.OperationRunning, + }, + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeSynced, + }, + }, + }, + "app2": { + ObjectMeta: metav1.ObjectMeta{ + Name: "app2", + }, + Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusHealthy, + }, + OperationState: &v1alpha1.OperationState{ + Phase: common.OperationRunning, + }, + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeSynced, + }, + }, + }, + }, + appDependencyList: [][]string{ + {"app1"}, + {"app2"}, + }, + expectedMap: map[string]bool{ + "app1": true, + "app2": false, + }, + }, + { + name: "handles RollingSync applications that are up to date and synced, but degraded", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, 
+ Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{}, + }, + }, + Status: v1alpha1.ApplicationSetStatus{ + ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Status: "Progressing", + }, + { + Application: "app2", + Status: "Progressing", + }, + }, + }, + }, + appMap: map[string]v1alpha1.Application{ + "app1": { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusDegraded, + }, + OperationState: &v1alpha1.OperationState{ + Phase: common.OperationRunning, + }, + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeSynced, + }, + }, + }, + "app2": { + ObjectMeta: metav1.ObjectMeta{ + Name: "app2", + }, + Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusDegraded, + }, + OperationState: &v1alpha1.OperationState{ + Phase: common.OperationRunning, + }, + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeSynced, + }, + }, + }, + }, + appDependencyList: [][]string{ + {"app1"}, + {"app2"}, + }, + expectedMap: map[string]bool{ + "app1": true, + "app2": false, + }, + }, + { + name: "handles RollingSync applications that are OutOfSync and healthy", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{}, + }, + }, + Status: v1alpha1.ApplicationSetStatus{ + ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Status: "Healthy", + }, + { + Application: "app2", + Status: "Healthy", + }, + }, + }, + }, + appDependencyList: [][]string{ + {"app1"}, + {"app2"}, + }, + appMap: map[string]v1alpha1.Application{ + "app1": 
{ + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusHealthy, + }, + OperationState: &v1alpha1.OperationState{ + Phase: common.OperationSucceeded, + }, + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeOutOfSync, + }, + }, + }, + "app2": { + ObjectMeta: metav1.ObjectMeta{ + Name: "app2", + }, + Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusHealthy, + }, + OperationState: &v1alpha1.OperationState{ + Phase: common.OperationSucceeded, + }, + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeOutOfSync, + }, + }, + }, + }, + expectedMap: map[string]bool{ + "app1": true, + "app2": false, + }, + }, + { + name: "handles a lot of applications", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{}, + }, + }, + Status: v1alpha1.ApplicationSetStatus{ + ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Status: "Healthy", + }, + { + Application: "app2", + Status: "Healthy", + }, + { + Application: "app3", + Status: "Healthy", + }, + { + Application: "app4", + Status: "Healthy", + }, + { + Application: "app5", + Status: "Healthy", + }, + { + Application: "app7", + Status: "Healthy", + }, + }, + }, + }, + appMap: map[string]v1alpha1.Application{ + "app1": { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusHealthy, + }, + OperationState: &v1alpha1.OperationState{ + Phase: common.OperationSucceeded, + }, + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeSynced, + }, + }, + }, + "app2": { + ObjectMeta: metav1.ObjectMeta{ + Name: "app2", 
+ }, + Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusHealthy, + }, + OperationState: &v1alpha1.OperationState{ + Phase: common.OperationSucceeded, + }, + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeSynced, + }, + }, + }, + "app3": { + ObjectMeta: metav1.ObjectMeta{ + Name: "app3", + }, + Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusHealthy, + }, + OperationState: &v1alpha1.OperationState{ + Phase: common.OperationSucceeded, + }, + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeSynced, + }, + }, + }, + "app5": { + ObjectMeta: metav1.ObjectMeta{ + Name: "app5", + }, + Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusHealthy, + }, + OperationState: &v1alpha1.OperationState{ + Phase: common.OperationSucceeded, + }, + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeSynced, + }, + }, + }, + "app6": { + ObjectMeta: metav1.ObjectMeta{ + Name: "app6", + }, + Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusDegraded, + }, + OperationState: &v1alpha1.OperationState{ + Phase: common.OperationSucceeded, + }, + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeSynced, + }, + }, + }, + }, + appDependencyList: [][]string{ + {"app1", "app2", "app3"}, + {"app4", "app5", "app6"}, + {"app7", "app8", "app9"}, + }, + expectedMap: map[string]bool{ + "app1": true, + "app2": true, + "app3": true, + "app4": true, + "app5": true, + "app6": true, + "app7": false, + "app8": false, + "app9": false, + }, + }, + } { + + t.Run(cc.name, func(t *testing.T) { + + kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...) 
+ argoDBMock := dbmocks.ArgoDB{} + argoObjs := []runtime.Object{} + + r := ApplicationSetReconciler{ + Client: client, + Scheme: scheme, + Recorder: record.NewFakeRecorder(1), + Cache: &fakeCache{}, + Generators: map[string]generators.Generator{}, + ArgoDB: &argoDBMock, + ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...), + KubeClientset: kubeclientset, + } + + appSyncMap, err := r.buildAppSyncMap(context.TODO(), cc.appSet, cc.appDependencyList, cc.appMap) + assert.Equal(t, err, nil, "expected no errors, but errors occured") + assert.Equal(t, cc.expectedMap, appSyncMap, "expected appSyncMap did not match actual") + }) + } +} + +func TestUpdateApplicationSetApplicationStatus(t *testing.T) { + + scheme := runtime.NewScheme() + err := v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + err = v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + for _, cc := range []struct { + name string + appSet v1alpha1.ApplicationSet + apps []v1alpha1.Application + appStepMap map[string]int + expectedAppStatus []v1alpha1.ApplicationSetApplicationStatus + }{ + { + name: "handles a nil list of statuses and no applications", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{}, + }, + }, + }, + apps: []v1alpha1.Application{}, + expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{}, + }, + { + name: "handles a nil list of statuses with a healthy application", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{}, + }, + }, + }, + apps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + Status: 
v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusHealthy, + }, + OperationState: &v1alpha1.OperationState{ + Phase: common.OperationSucceeded, + }, + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeSynced, + }, + }, + }, + }, + expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "Application resource is already Healthy, updating status from Waiting to Healthy.", + Status: "Healthy", + Step: "1", + }, + }, + }, + { + name: "handles an empty list of statuses with a healthy application", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{}, + }, + }, + Status: v1alpha1.ApplicationSetStatus{}, + }, + apps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusHealthy, + }, + OperationState: &v1alpha1.OperationState{ + Phase: common.OperationSucceeded, + }, + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeSynced, + }, + }, + }, + }, + expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "Application resource is already Healthy, updating status from Waiting to Healthy.", + Status: "Healthy", + Step: "1", + }, + }, + }, + { + name: "progresses an OutOfSync RollingSync application to waiting", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{}, + }, + }, + Status: v1alpha1.ApplicationSetStatus{ + ApplicationStatus: 
[]v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "", + Status: "Healthy", + Step: "1", + }, + }, + }, + }, + apps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + Status: v1alpha1.ApplicationStatus{ + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeOutOfSync, + }, + }, + }, + }, + expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "Application has pending changes, setting status to Waiting.", + Status: "Waiting", + Step: "1", + }, + }, + }, + { + name: "progresses a pending progressing application to progressing", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{}, + }, + }, + Status: v1alpha1.ApplicationSetStatus{ + ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "", + Status: "Pending", + Step: "1", + }, + }, + }, + }, + apps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusProgressing, + }, + }, + }, + }, + expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "Application resource became Progressing, updating status from Pending to Progressing.", + Status: "Progressing", + Step: "1", + }, + }, + }, + { + name: "progresses a pending syncing application to progressing", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{}, + }, + }, + Status: 
v1alpha1.ApplicationSetStatus{ + ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "", + Status: "Pending", + Step: "1", + }, + }, + }, + }, + apps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusHealthy, + }, + OperationState: &v1alpha1.OperationState{ + Phase: common.OperationRunning, + }, + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeSynced, + }, + }, + }, + }, + expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "Application resource became Progressing, updating status from Pending to Progressing.", + Status: "Progressing", + Step: "1", + }, + }, + }, + { + name: "progresses a progressing application to healthy", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{}, + }, + }, + Status: v1alpha1.ApplicationSetStatus{ + ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "", + Status: "Progressing", + Step: "1", + }, + }, + }, + }, + apps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusHealthy, + }, + OperationState: &v1alpha1.OperationState{ + Phase: common.OperationSucceeded, + }, + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeSynced, + }, + }, + }, + }, + expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "Application resource became Healthy, updating status from Progressing to Healthy.", + Status: "Healthy", + Step: "1", + }, + }, + }, + { + name: 
"progresses a waiting healthy application to healthy", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{}, + }, + }, + Status: v1alpha1.ApplicationSetStatus{ + ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "", + Status: "Waiting", + Step: "1", + }, + }, + }, + }, + apps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusHealthy, + }, + OperationState: &v1alpha1.OperationState{ + Phase: common.OperationSucceeded, + }, + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeSynced, + }, + }, + }, + }, + expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "Application resource is already Healthy, updating status from Waiting to Healthy.", + Status: "Healthy", + Step: "1", + }, + }, + }, + { + name: "progresses a new outofsync application in a later step to waiting", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{}, + }, + }, + }, + apps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusHealthy, + }, + OperationState: &v1alpha1.OperationState{ + Phase: common.OperationSucceeded, + }, + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeOutOfSync, + }, + }, + }, + }, + appStepMap: map[string]int{ + "app1": 1, + "app2": 0, + }, + expectedAppStatus: 
[]v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "No Application status found, defaulting status to Waiting.", + Status: "Waiting", + Step: "2", + }, + }, + }, + { + name: "progresses a pending application with a successful sync to progressing", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{}, + }, + }, + Status: v1alpha1.ApplicationSetStatus{ + ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + LastTransitionTime: &metav1.Time{ + Time: time.Now().Add(time.Duration(-1) * time.Minute), + }, + Message: "", + Status: "Pending", + Step: "1", + }, + }, + }, + }, + apps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusDegraded, + }, + OperationState: &v1alpha1.OperationState{ + Phase: common.OperationSucceeded, + StartedAt: metav1.Time{ + Time: time.Now(), + }, + }, + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeSynced, + }, + }, + }, + }, + expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "Application resource completed a sync successfully, updating status from Pending to Progressing.", + Status: "Progressing", + Step: "1", + }, + }, + }, + { + name: "progresses a pending application with a successful sync <1s ago to progressing", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{}, + }, + }, + Status: v1alpha1.ApplicationSetStatus{ + ApplicationStatus: 
[]v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + LastTransitionTime: &metav1.Time{ + Time: time.Now(), + }, + Message: "", + Status: "Pending", + Step: "1", + }, + }, + }, + }, + apps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusDegraded, + }, + OperationState: &v1alpha1.OperationState{ + Phase: common.OperationSucceeded, + StartedAt: metav1.Time{ + Time: time.Now().Add(time.Duration(-1) * time.Second), + }, + }, + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeSynced, + }, + }, + }, + }, + expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "Application resource completed a sync successfully, updating status from Pending to Progressing.", + Status: "Progressing", + Step: "1", + }, + }, + }, + { + name: "does not progresses a pending application with an old successful sync to progressing", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{}, + }, + }, + Status: v1alpha1.ApplicationSetStatus{ + ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + LastTransitionTime: &metav1.Time{ + Time: time.Now(), + }, + Message: "Application moved to Pending status, watching for the Application resource to start Progressing.", + Status: "Pending", + Step: "1", + }, + }, + }, + }, + apps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusDegraded, + }, + OperationState: &v1alpha1.OperationState{ + Phase: common.OperationSucceeded, + StartedAt: metav1.Time{ + Time: 
time.Now().Add(time.Duration(-11) * time.Second), + }, + }, + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeSynced, + }, + }, + }, + }, + expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "Application moved to Pending status, watching for the Application resource to start Progressing.", + Status: "Pending", + Step: "1", + }, + }, + }, + { + name: "removes the appStatus for applications that no longer exist", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{}, + }, + }, + Status: v1alpha1.ApplicationSetStatus{ + ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "Application has pending changes, setting status to Waiting.", + Status: "Waiting", + Step: "1", + }, + { + Application: "app2", + Message: "Application has pending changes, setting status to Waiting.", + Status: "Waiting", + Step: "1", + }, + }, + }, + }, + apps: []v1alpha1.Application{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusHealthy, + }, + OperationState: &v1alpha1.OperationState{ + Phase: common.OperationSucceeded, + }, + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeSynced, + }, + }, + }, + }, + expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "Application resource is already Healthy, updating status from Waiting to Healthy.", + Status: "Healthy", + Step: "1", + }, + }, + }, + } { + + t.Run(cc.name, func(t *testing.T) { + + kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...) 
+ argoDBMock := dbmocks.ArgoDB{} + argoObjs := []runtime.Object{} + + client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&cc.appSet).Build() + + r := ApplicationSetReconciler{ + Client: client, + Scheme: scheme, + Recorder: record.NewFakeRecorder(1), + Cache: &fakeCache{}, + Generators: map[string]generators.Generator{}, + ArgoDB: &argoDBMock, + ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...), + KubeClientset: kubeclientset, + } + + appStatuses, err := r.updateApplicationSetApplicationStatus(context.TODO(), &cc.appSet, cc.apps, cc.appStepMap) + + // opt out of testing the LastTransitionTime is accurate + for i := range appStatuses { + appStatuses[i].LastTransitionTime = nil + } + + assert.Equal(t, err, nil, "expected no errors, but errors occured") + assert.Equal(t, cc.expectedAppStatus, appStatuses, "expected appStatuses did not match actual") + }) + } +} + +func TestUpdateApplicationSetApplicationStatusProgress(t *testing.T) { + + scheme := runtime.NewScheme() + err := v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + err = v1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + for _, cc := range []struct { + name string + appSet v1alpha1.ApplicationSet + appSyncMap map[string]bool + appStepMap map[string]int + appMap map[string]v1alpha1.Application + expectedAppStatus []v1alpha1.ApplicationSetApplicationStatus + }{ + { + name: "handles an empty appSync and appStepMap", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{ + Steps: []v1alpha1.ApplicationSetRolloutStep{ + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{}, + }, + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{}, + }, + }, + }, + }, + }, + Status: v1alpha1.ApplicationSetStatus{ + ApplicationStatus: 
[]v1alpha1.ApplicationSetApplicationStatus{}, + }, + }, + appSyncMap: map[string]bool{}, + appStepMap: map[string]int{}, + expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{}, + }, + { + name: "handles an empty strategy", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{}, + Status: v1alpha1.ApplicationSetStatus{ + ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{}, + }, + }, + appSyncMap: map[string]bool{}, + appStepMap: map[string]int{}, + expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{}, + }, + { + name: "handles an empty applicationset strategy", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{}, + }, + Status: v1alpha1.ApplicationSetStatus{ + ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{}, + }, + }, + appSyncMap: map[string]bool{}, + appStepMap: map[string]int{}, + expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{}, + }, + { + name: "handles an appSyncMap with no existing statuses", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Status: v1alpha1.ApplicationSetStatus{ + ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{}, + }, + }, + appSyncMap: map[string]bool{ + "app1": true, + "app2": false, + }, + appStepMap: map[string]int{ + "app1": 0, + "app2": 1, + }, + expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{}, + }, + { + name: "handles updating a RollingSync status from Waiting to Pending", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{ 
+ Steps: []v1alpha1.ApplicationSetRolloutStep{ + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{}, + }, + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{}, + }, + }, + }, + }, + }, + Status: v1alpha1.ApplicationSetStatus{ + ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "Application is out of date with the current AppSet generation, setting status to Waiting.", + Status: "Waiting", + }, + }, + }, + }, + appSyncMap: map[string]bool{ + "app1": true, + }, + appStepMap: map[string]int{ + "app1": 0, + }, + expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + LastTransitionTime: nil, + Message: "Application moved to Pending status, watching for the Application resource to start Progressing.", + Status: "Pending", + Step: "1", + }, + }, + }, + { + name: "does not update a RollingSync status if appSyncMap is false", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{ + Steps: []v1alpha1.ApplicationSetRolloutStep{ + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{}, + }, + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{}, + }, + }, + }, + }, + }, + Status: v1alpha1.ApplicationSetStatus{ + ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "Application is out of date with the current AppSet generation, setting status to Waiting.", + Status: "Waiting", + Step: "1", + }, + }, + }, + }, + appSyncMap: map[string]bool{ + "app1": false, + }, + appStepMap: map[string]int{ + "app1": 0, + }, + expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + LastTransitionTime: nil, + Message: "Application is out of date with the current AppSet 
generation, setting status to Waiting.", + Status: "Waiting", + Step: "1", + }, + }, + }, + { + name: "does not update a status if status is not pending", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{ + Steps: []v1alpha1.ApplicationSetRolloutStep{ + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{}, + }, + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{}, + }, + }, + }, + }, + }, + Status: v1alpha1.ApplicationSetStatus{ + ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "Application Pending status timed out while waiting to become Progressing, reset status to Healthy.", + Status: "Healthy", + Step: "1", + }, + }, + }, + }, + appSyncMap: map[string]bool{ + "app1": true, + }, + appStepMap: map[string]int{ + "app1": 0, + }, + expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + LastTransitionTime: nil, + Message: "Application Pending status timed out while waiting to become Progressing, reset status to Healthy.", + Status: "Healthy", + Step: "1", + }, + }, + }, + { + name: "does not update a status if maxUpdate has already been reached with RollingSync", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{ + Steps: []v1alpha1.ApplicationSetRolloutStep{ + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{}, + MaxUpdate: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 3, + }, + }, + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{}, + }, + }, + }, + }, + }, + Status: 
v1alpha1.ApplicationSetStatus{ + ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "Application resource became Progressing, updating status from Pending to Progressing.", + Status: "Progressing", + Step: "1", + }, + { + Application: "app2", + Message: "Application is out of date with the current AppSet generation, setting status to Waiting.", + Status: "Waiting", + Step: "1", + }, + { + Application: "app3", + Message: "Application is out of date with the current AppSet generation, setting status to Waiting.", + Status: "Waiting", + Step: "1", + }, + { + Application: "app4", + Message: "Application moved to Pending status, watching for the Application resource to start Progressing.", + Status: "Pending", + Step: "1", + }, + }, + }, + }, + appSyncMap: map[string]bool{ + "app1": true, + "app2": true, + "app3": true, + "app4": true, + }, + appStepMap: map[string]int{ + "app1": 0, + "app2": 0, + "app3": 0, + "app4": 0, + }, + appMap: map[string]v1alpha1.Application{ + "app1": { + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + }, + Status: v1alpha1.ApplicationStatus{ + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeOutOfSync, + }, + }, + }, + "app2": { + ObjectMeta: metav1.ObjectMeta{ + Name: "app2", + }, + Status: v1alpha1.ApplicationStatus{ + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeOutOfSync, + }, + }, + }, + "app3": { + ObjectMeta: metav1.ObjectMeta{ + Name: "app3", + }, + Status: v1alpha1.ApplicationStatus{ + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeOutOfSync, + }, + }, + }, + "app4": { + ObjectMeta: metav1.ObjectMeta{ + Name: "app4", + }, + Status: v1alpha1.ApplicationStatus{ + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeOutOfSync, + }, + }, + }, + }, + expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + LastTransitionTime: nil, + Message: "Application resource became Progressing, updating status from 
Pending to Progressing.", + Status: "Progressing", + Step: "1", + }, + { + Application: "app2", + LastTransitionTime: nil, + Message: "Application moved to Pending status, watching for the Application resource to start Progressing.", + Status: "Pending", + Step: "1", + }, + { + Application: "app3", + LastTransitionTime: nil, + Message: "Application is out of date with the current AppSet generation, setting status to Waiting.", + Status: "Waiting", + Step: "1", + }, + { + Application: "app4", + LastTransitionTime: nil, + Message: "Application moved to Pending status, watching for the Application resource to start Progressing.", + Status: "Pending", + Step: "1", + }, + }, + }, + { + name: "rounds down for maxUpdate set to percentage string", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{ + Steps: []v1alpha1.ApplicationSetRolloutStep{ + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{}, + MaxUpdate: &intstr.IntOrString{ + Type: intstr.String, + StrVal: "50%", + }, + }, + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{}, + }, + }, + }, + }, + }, + Status: v1alpha1.ApplicationSetStatus{ + ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "Application is out of date with the current AppSet generation, setting status to Waiting.", + Status: "Waiting", + Step: "1", + }, + { + Application: "app2", + Message: "Application is out of date with the current AppSet generation, setting status to Waiting.", + Status: "Waiting", + Step: "1", + }, + { + Application: "app3", + Message: "Application is out of date with the current AppSet generation, setting status to Waiting.", + Status: "Waiting", + Step: "1", + }, + }, + }, + }, + appSyncMap: map[string]bool{ + "app1": true, + "app2": 
true, + "app3": true, + }, + appStepMap: map[string]int{ + "app1": 0, + "app2": 0, + "app3": 0, + }, + expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + LastTransitionTime: nil, + Message: "Application moved to Pending status, watching for the Application resource to start Progressing.", + Status: "Pending", + Step: "1", + }, + { + Application: "app2", + LastTransitionTime: nil, + Message: "Application is out of date with the current AppSet generation, setting status to Waiting.", + Status: "Waiting", + Step: "1", + }, + { + Application: "app3", + LastTransitionTime: nil, + Message: "Application is out of date with the current AppSet generation, setting status to Waiting.", + Status: "Waiting", + Step: "1", + }, + }, + }, + { + name: "does not update any applications with maxUpdate set to 0", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{ + Steps: []v1alpha1.ApplicationSetRolloutStep{ + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{}, + MaxUpdate: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 0, + }, + }, + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{}, + }, + }, + }, + }, + }, + Status: v1alpha1.ApplicationSetStatus{ + ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "Application is out of date with the current AppSet generation, setting status to Waiting.", + Status: "Waiting", + Step: "1", + }, + { + Application: "app2", + Message: "Application is out of date with the current AppSet generation, setting status to Waiting.", + Status: "Waiting", + Step: "1", + }, + { + Application: "app3", + Message: "Application is out of date with the current AppSet generation, setting status to Waiting.", + Status: "Waiting", + 
Step: "1", + }, + }, + }, + }, + appSyncMap: map[string]bool{ + "app1": true, + "app2": true, + "app3": true, + }, + appStepMap: map[string]int{ + "app1": 0, + "app2": 0, + "app3": 0, + }, + expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + LastTransitionTime: nil, + Message: "Application is out of date with the current AppSet generation, setting status to Waiting.", + Status: "Waiting", + Step: "1", + }, + { + Application: "app2", + LastTransitionTime: nil, + Message: "Application is out of date with the current AppSet generation, setting status to Waiting.", + Status: "Waiting", + Step: "1", + }, + { + Application: "app3", + LastTransitionTime: nil, + Message: "Application is out of date with the current AppSet generation, setting status to Waiting.", + Status: "Waiting", + Step: "1", + }, + }, + }, + { + name: "updates all applications with maxUpdate set to 100%", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{ + Steps: []v1alpha1.ApplicationSetRolloutStep{ + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{}, + MaxUpdate: &intstr.IntOrString{ + Type: intstr.String, + StrVal: "100%", + }, + }, + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{}, + }, + }, + }, + }, + }, + Status: v1alpha1.ApplicationSetStatus{ + ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "Application is out of date with the current AppSet generation, setting status to Waiting.", + Status: "Waiting", + Step: "1", + }, + { + Application: "app2", + Message: "Application is out of date with the current AppSet generation, setting status to Waiting.", + Status: "Waiting", + Step: "1", + }, + { + Application: "app3", + Message: "Application is out of date with 
the current AppSet generation, setting status to Waiting.", + Status: "Waiting", + Step: "1", + }, + }, + }, + }, + appSyncMap: map[string]bool{ + "app1": true, + "app2": true, + "app3": true, + }, + appStepMap: map[string]int{ + "app1": 0, + "app2": 0, + "app3": 0, + }, + expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + LastTransitionTime: nil, + Message: "Application moved to Pending status, watching for the Application resource to start Progressing.", + Status: "Pending", + Step: "1", + }, + { + Application: "app2", + LastTransitionTime: nil, + Message: "Application moved to Pending status, watching for the Application resource to start Progressing.", + Status: "Pending", + Step: "1", + }, + { + Application: "app3", + LastTransitionTime: nil, + Message: "Application moved to Pending status, watching for the Application resource to start Progressing.", + Status: "Pending", + Step: "1", + }, + }, + }, + { + name: "updates at least 1 application with maxUpdate >0%", + appSet: v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Strategy: &v1alpha1.ApplicationSetStrategy{ + Type: "RollingSync", + RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{ + Steps: []v1alpha1.ApplicationSetRolloutStep{ + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{}, + MaxUpdate: &intstr.IntOrString{ + Type: intstr.String, + StrVal: "1%", + }, + }, + { + MatchExpressions: []v1alpha1.ApplicationMatchExpression{}, + }, + }, + }, + }, + }, + Status: v1alpha1.ApplicationSetStatus{ + ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + Message: "Application is out of date with the current AppSet generation, setting status to Waiting.", + Status: "Waiting", + Step: "1", + }, + { + Application: "app2", + Message: "Application is out of date with the current AppSet generation, setting status to Waiting.", + Status: 
"Waiting", + Step: "1", + }, + { + Application: "app3", + Message: "Application is out of date with the current AppSet generation, setting status to Waiting.", + Status: "Waiting", + Step: "1", + }, + }, + }, + }, + appSyncMap: map[string]bool{ + "app1": true, + "app2": true, + "app3": true, + }, + appStepMap: map[string]int{ + "app1": 0, + "app2": 0, + "app3": 0, + }, + expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{ + { + Application: "app1", + LastTransitionTime: nil, + Message: "Application moved to Pending status, watching for the Application resource to start Progressing.", + Status: "Pending", + Step: "1", + }, + { + Application: "app2", + LastTransitionTime: nil, + Message: "Application is out of date with the current AppSet generation, setting status to Waiting.", + Status: "Waiting", + Step: "1", + }, + { + Application: "app3", + LastTransitionTime: nil, + Message: "Application is out of date with the current AppSet generation, setting status to Waiting.", + Status: "Waiting", + Step: "1", + }, + }, + }, + } { + + t.Run(cc.name, func(t *testing.T) { + + kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...) 
+ argoDBMock := dbmocks.ArgoDB{} + argoObjs := []runtime.Object{} + + client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&cc.appSet).Build() + + r := ApplicationSetReconciler{ + Client: client, + Scheme: scheme, + Recorder: record.NewFakeRecorder(1), + Cache: &fakeCache{}, + Generators: map[string]generators.Generator{}, + ArgoDB: &argoDBMock, + ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...), + KubeClientset: kubeclientset, + } + + appStatuses, err := r.updateApplicationSetApplicationStatusProgress(context.TODO(), &cc.appSet, cc.appSyncMap, cc.appStepMap, cc.appMap) + + // opt out of testing the LastTransitionTime is accurate + for i := range appStatuses { + appStatuses[i].LastTransitionTime = nil + } + + assert.Equal(t, err, nil, "expected no errors, but errors occured") + assert.Equal(t, cc.expectedAppStatus, appStatuses, "expected appStatuses did not match actual") + }) + } +} + +func TestOwnsHandler(t *testing.T) { + // progressive syncs do not affect create, delete, or generic + ownsHandler := getOwnsHandlerPredicates(true) + assert.False(t, ownsHandler.CreateFunc(event.CreateEvent{})) + assert.True(t, ownsHandler.DeleteFunc(event.DeleteEvent{})) + assert.True(t, ownsHandler.GenericFunc(event.GenericEvent{})) + ownsHandler = getOwnsHandlerPredicates(false) + assert.False(t, ownsHandler.CreateFunc(event.CreateEvent{})) + assert.True(t, ownsHandler.DeleteFunc(event.DeleteEvent{})) + assert.True(t, ownsHandler.GenericFunc(event.GenericEvent{})) + + now := metav1.Now() + type args struct { + e event.UpdateEvent + enableProgressiveSyncs bool + } + tests := []struct { + name string + args args + want bool + }{ + {name: "SameApplicationReconciledAtDiff", args: args{e: event.UpdateEvent{ + ObjectOld: &v1alpha1.Application{Status: v1alpha1.ApplicationStatus{ReconciledAt: &now}}, + ObjectNew: &v1alpha1.Application{Status: v1alpha1.ApplicationStatus{ReconciledAt: &now}}, + }}, want: false}, + {name: "SameApplicationResourceVersionDiff", 
args: args{e: event.UpdateEvent{ + ObjectOld: &v1alpha1.Application{ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "foo", + }}, + ObjectNew: &v1alpha1.Application{ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "bar", + }}, + }}, want: false}, + {name: "ApplicationHealthStatusDiff", args: args{e: event.UpdateEvent{ + ObjectOld: &v1alpha1.Application{Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: "Unknown", + }, + }}, + ObjectNew: &v1alpha1.Application{Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{ + Status: "Healthy", + }, + }}, + }, + enableProgressiveSyncs: true, + }, want: true}, + {name: "ApplicationSyncStatusDiff", args: args{e: event.UpdateEvent{ + ObjectOld: &v1alpha1.Application{Status: v1alpha1.ApplicationStatus{ + Sync: v1alpha1.SyncStatus{ + Status: "OutOfSync", + }, + }}, + ObjectNew: &v1alpha1.Application{Status: v1alpha1.ApplicationStatus{ + Sync: v1alpha1.SyncStatus{ + Status: "Synced", + }, + }}, + }, + enableProgressiveSyncs: true, + }, want: true}, + {name: "ApplicationOperationStateDiff", args: args{e: event.UpdateEvent{ + ObjectOld: &v1alpha1.Application{Status: v1alpha1.ApplicationStatus{ + OperationState: &v1alpha1.OperationState{ + Phase: "foo", + }, + }}, + ObjectNew: &v1alpha1.Application{Status: v1alpha1.ApplicationStatus{ + OperationState: &v1alpha1.OperationState{ + Phase: "bar", + }, + }}, + }, + enableProgressiveSyncs: true, + }, want: true}, + {name: "ApplicationOperationStartedAtDiff", args: args{e: event.UpdateEvent{ + ObjectOld: &v1alpha1.Application{Status: v1alpha1.ApplicationStatus{ + OperationState: &v1alpha1.OperationState{ + StartedAt: now, + }, + }}, + ObjectNew: &v1alpha1.Application{Status: v1alpha1.ApplicationStatus{ + OperationState: &v1alpha1.OperationState{ + StartedAt: metav1.NewTime(now.Add(time.Minute * 1)), + }, + }}, + }, + enableProgressiveSyncs: true, + }, want: true}, + {name: "SameApplicationGeneration", args: args{e: event.UpdateEvent{ + ObjectOld: 
&v1alpha1.Application{ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }}, + ObjectNew: &v1alpha1.Application{ObjectMeta: metav1.ObjectMeta{ + Generation: 2, + }}, + }}, want: false}, + {name: "DifferentApplicationSpec", args: args{e: event.UpdateEvent{ + ObjectOld: &v1alpha1.Application{Spec: v1alpha1.ApplicationSpec{Project: "default"}}, + ObjectNew: &v1alpha1.Application{Spec: v1alpha1.ApplicationSpec{Project: "not-default"}}, + }}, want: true}, + {name: "DifferentApplicationLabels", args: args{e: event.UpdateEvent{ + ObjectOld: &v1alpha1.Application{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}}, + ObjectNew: &v1alpha1.Application{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"bar": "foo"}}}, + }}, want: true}, + {name: "DifferentApplicationAnnotations", args: args{e: event.UpdateEvent{ + ObjectOld: &v1alpha1.Application{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{"foo": "bar"}}}, + ObjectNew: &v1alpha1.Application{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{"bar": "foo"}}}, + }}, want: true}, + {name: "DifferentApplicationFinalizers", args: args{e: event.UpdateEvent{ + ObjectOld: &v1alpha1.Application{ObjectMeta: metav1.ObjectMeta{Finalizers: []string{"argo"}}}, + ObjectNew: &v1alpha1.Application{ObjectMeta: metav1.ObjectMeta{Finalizers: []string{"none"}}}, + }}, want: true}, + {name: "NotAnAppOld", args: args{e: event.UpdateEvent{ + ObjectOld: &v1alpha1.AppProject{}, + ObjectNew: &v1alpha1.Application{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"bar": "foo"}}}, + }}, want: false}, + {name: "NotAnAppNew", args: args{e: event.UpdateEvent{ + ObjectOld: &v1alpha1.Application{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}}, + ObjectNew: &v1alpha1.AppProject{}, + }}, want: false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ownsHandler = getOwnsHandlerPredicates(tt.args.enableProgressiveSyncs) + assert.Equalf(t, tt.want, 
ownsHandler.UpdateFunc(tt.args.e), "UpdateFunc(%v)", tt.args.e) + }) + } +} + +func Test_applyIgnoreDifferences(t *testing.T) { + appMeta := metav1.TypeMeta{ + APIVersion: v1alpha1.ApplicationSchemaGroupVersionKind.GroupVersion().String(), + Kind: v1alpha1.ApplicationSchemaGroupVersionKind.Kind, + } + testCases := []struct { + name string + ignoreDifferences v1alpha1.ApplicationSetIgnoreDifferences + foundApp string + generatedApp string + expectedApp string + }{ + { + name: "empty ignoreDifferences", + foundApp: ` +spec: {}`, + generatedApp: ` +spec: {}`, + expectedApp: ` +spec: {}`, + }, + { + // For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1191138278 + name: "ignore target revision with jq", + ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{ + {JQPathExpressions: []string{".spec.source.targetRevision"}}, + }, + foundApp: ` +spec: + source: + targetRevision: foo`, + generatedApp: ` +spec: + source: + targetRevision: bar`, + expectedApp: ` +spec: + source: + targetRevision: foo`, + }, + { + // For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1103593714 + name: "ignore helm parameter with jq", + ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{ + {JQPathExpressions: []string{`.spec.source.helm.parameters | select(.name == "image.tag")`}}, + }, + foundApp: ` +spec: + source: + helm: + parameters: + - name: image.tag + value: test + - name: another + value: value`, + generatedApp: ` +spec: + source: + helm: + parameters: + - name: image.tag + value: v1.0.0 + - name: another + value: value`, + expectedApp: ` +spec: + source: + helm: + parameters: + - name: image.tag + value: test + - name: another + value: value`, + }, + { + // For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1191138278 + name: "ignore auto-sync with jq", + ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{ + {JQPathExpressions: []string{".spec.syncPolicy.automated"}}, + }, 
+ foundApp: ` +spec: + syncPolicy: + retry: + limit: 5`, + generatedApp: ` +spec: + syncPolicy: + automated: + selfHeal: true + retry: + limit: 5`, + expectedApp: ` +spec: + syncPolicy: + retry: + limit: 5`, + }, + { + // For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1420656537 + name: "ignore a one-off annotation with jq", + ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{ + {JQPathExpressions: []string{`.metadata.annotations | select(.["foo.bar"] == "baz")`}}, + }, + foundApp: ` +metadata: + annotations: + foo.bar: baz + some.other: annotation`, + generatedApp: ` +metadata: + annotations: + some.other: annotation`, + expectedApp: ` +metadata: + annotations: + foo.bar: baz + some.other: annotation`, + }, + { + // For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1515672638 + name: "ignore the source.plugin field with a json pointer", + ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{ + {JSONPointers: []string{"/spec/source/plugin"}}, + }, + foundApp: ` +spec: + source: + plugin: + parameters: + - name: url + string: https://example.com`, + generatedApp: ` +spec: + source: + plugin: + parameters: + - name: url + string: https://example.com/wrong`, + expectedApp: ` +spec: + source: + plugin: + parameters: + - name: url + string: https://example.com`, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + foundApp := v1alpha1.Application{TypeMeta: appMeta} + err := yaml.Unmarshal([]byte(tc.foundApp), &foundApp) + require.NoError(t, err, tc.foundApp) + generatedApp := v1alpha1.Application{TypeMeta: appMeta} + err = yaml.Unmarshal([]byte(tc.generatedApp), &generatedApp) + require.NoError(t, err, tc.generatedApp) + err = applyIgnoreDifferences(tc.ignoreDifferences, &foundApp, generatedApp) + require.NoError(t, err) + jsonFound, err := json.Marshal(tc.foundApp) + require.NoError(t, err) + jsonExpected, err := 
json.Marshal(tc.expectedApp) + require.NoError(t, err) + assert.Equal(t, string(jsonExpected), string(jsonFound)) + }) + } +} diff --git a/applicationset/controllers/clustereventhandler.go b/applicationset/controllers/clustereventhandler.go new file mode 100644 index 0000000000000..951da0cb6bc44 --- /dev/null +++ b/applicationset/controllers/clustereventhandler.go @@ -0,0 +1,165 @@ +package controllers + +import ( + "context" + "fmt" + + log "github.com/sirupsen/logrus" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + + "github.com/argoproj/argo-cd/v2/applicationset/generators" + argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +// clusterSecretEventHandler is used when watching Secrets to check if they are ArgoCD Cluster Secrets, and if so +// requeue any related ApplicationSets. +type clusterSecretEventHandler struct { + //handler.EnqueueRequestForOwner + Log log.FieldLogger + Client client.Client +} + +func (h *clusterSecretEventHandler) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) { + h.queueRelatedAppGenerators(q, e.Object) +} + +func (h *clusterSecretEventHandler) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) { + h.queueRelatedAppGenerators(q, e.ObjectNew) +} + +func (h *clusterSecretEventHandler) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) { + h.queueRelatedAppGenerators(q, e.Object) +} + +func (h *clusterSecretEventHandler) Generic(e event.GenericEvent, q workqueue.RateLimitingInterface) { + h.queueRelatedAppGenerators(q, e.Object) +} + +// addRateLimitingInterface defines the Add method of workqueue.RateLimitingInterface, allow us to easily mock +// it for testing purposes. 
type addRateLimitingInterface interface {
	Add(item interface{})
}

// queueRelatedAppGenerators enqueues a reconcile request for every
// ApplicationSet that uses a cluster generator (directly, or nested inside a
// matrix/merge generator), in response to an event on an Argo CD cluster
// secret. Secrets without the cluster secret-type label are ignored.
// Errors from listing or from inspecting nested generators are logged and the
// scan continues — a failed check never drops other candidates.
func (h *clusterSecretEventHandler) queueRelatedAppGenerators(q addRateLimitingInterface, object client.Object) {
	// Check for label, lookup all ApplicationSets that might match the cluster, queue them all
	if object.GetLabels()[generators.ArgoCDSecretTypeLabel] != generators.ArgoCDSecretTypeCluster {
		return
	}

	h.Log.WithFields(log.Fields{
		"namespace": object.GetNamespace(),
		"name":      object.GetName(),
	}).Info("processing event for cluster secret")

	// NOTE(review): lists ApplicationSets across all namespaces the client
	// can see — no namespace filter is applied here.
	appSetList := &argoprojiov1alpha1.ApplicationSetList{}
	err := h.Client.List(context.Background(), appSetList)
	if err != nil {
		h.Log.WithError(err).Error("unable to list ApplicationSets")
		return
	}

	h.Log.WithField("count", len(appSetList.Items)).Info("listed ApplicationSets")
	for _, appSet := range appSetList.Items {

		foundClusterGenerator := false
		for _, generator := range appSet.Spec.Generators {
			// Direct cluster generator: done for this ApplicationSet.
			if generator.Clusters != nil {
				foundClusterGenerator = true
				break
			}

			// Matrix generator: recurse into its nested generators.
			if generator.Matrix != nil {
				ok, err := nestedGeneratorsHaveClusterGenerator(generator.Matrix.Generators)
				if err != nil {
					h.Log.
						WithFields(log.Fields{
							"namespace": appSet.GetNamespace(),
							"name":      appSet.GetName(),
						}).
						WithError(err).
						Error("Unable to check if ApplicationSet matrix generators have cluster generator")
				}
				if ok {
					foundClusterGenerator = true
					break
				}
			}

			// Merge generator: recurse into its nested generators.
			if generator.Merge != nil {
				ok, err := nestedGeneratorsHaveClusterGenerator(generator.Merge.Generators)
				if err != nil {
					h.Log.
						WithFields(log.Fields{
							"namespace": appSet.GetNamespace(),
							"name":      appSet.GetName(),
						}).
						WithError(err).
						Error("Unable to check if ApplicationSet merge generators have cluster generator")
				}
				if ok {
					foundClusterGenerator = true
					break
				}
			}
		}
		if foundClusterGenerator {

			// TODO: only queue the AppGenerator if the labels match this cluster
			req := ctrl.Request{NamespacedName: types.NamespacedName{Namespace: appSet.Namespace, Name: appSet.Name}}
			q.Add(req)
		}
	}
}

// nestedGeneratorsHaveClusterGenerator iterate over provided nested generators to check if they have a cluster generator.
// Returns on the first generator that either matches or fails to parse.
func nestedGeneratorsHaveClusterGenerator(generators []argoprojiov1alpha1.ApplicationSetNestedGenerator) (bool, error) {
	for _, generator := range generators {
		if ok, err := nestedGeneratorHasClusterGenerator(generator); ok || err != nil {
			return ok, err
		}
	}
	return false, nil
}

// nestedGeneratorHasClusterGenerator checks if the provided generator has a cluster generator.
// Matrix and Merge nested generators are stored as raw JSON and must be
// converted before their own nested generators can be inspected recursively;
// conversion failures are surfaced as wrapped errors.
func nestedGeneratorHasClusterGenerator(nested argoprojiov1alpha1.ApplicationSetNestedGenerator) (bool, error) {
	if nested.Clusters != nil {
		return true, nil
	}

	if nested.Matrix != nil {
		nestedMatrix, err := argoprojiov1alpha1.ToNestedMatrixGenerator(nested.Matrix)
		if err != nil {
			return false, fmt.Errorf("unable to get nested matrix generator: %w", err)
		}
		if nestedMatrix != nil {
			hasClusterGenerator, err := nestedGeneratorsHaveClusterGenerator(nestedMatrix.ToMatrixGenerator().Generators)
			if err != nil {
				return false, fmt.Errorf("error evaluating nested matrix generator: %w", err)
			}
			return hasClusterGenerator, nil
		}
	}

	if nested.Merge != nil {
		nestedMerge, err := argoprojiov1alpha1.ToNestedMergeGenerator(nested.Merge)
		if err != nil {
			return false, fmt.Errorf("unable to get nested merge generator: %w", err)
		}
		if nestedMerge != nil {
			hasClusterGenerator, err := nestedGeneratorsHaveClusterGenerator(nestedMerge.ToMergeGenerator().Generators)
			if err != nil {
				return false, fmt.Errorf("error evaluating nested merge generator: %w",
err) + } + return hasClusterGenerator, nil + } + } + + return false, nil +} diff --git a/applicationset/controllers/clustereventhandler_test.go b/applicationset/controllers/clustereventhandler_test.go new file mode 100644 index 0000000000000..7e850fc44c66d --- /dev/null +++ b/applicationset/controllers/clustereventhandler_test.go @@ -0,0 +1,640 @@ +package controllers + +import ( + "testing" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/argoproj/argo-cd/v2/applicationset/generators" + argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +func TestClusterEventHandler(t *testing.T) { + + scheme := runtime.NewScheme() + err := argov1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + err = argov1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + + tests := []struct { + name string + items []argov1alpha1.ApplicationSet + secret corev1.Secret + expectedRequests []ctrl.Request + }{ + { + name: "no application sets should mean no requests", + items: []argov1alpha1.ApplicationSet{}, + secret: corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "argocd", + Name: "my-secret", + Labels: map[string]string{ + generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster, + }, + }, + }, + expectedRequests: []reconcile.Request{}, + }, + { + name: "a cluster generator should produce a request", + items: []argov1alpha1.ApplicationSet{ + { + ObjectMeta: v1.ObjectMeta{ + Name: "my-app-set", + Namespace: "argocd", + }, + Spec: argov1alpha1.ApplicationSetSpec{ + Generators: []argov1alpha1.ApplicationSetGenerator{ + { + Clusters: 
&argov1alpha1.ClusterGenerator{}, + }, + }, + }, + }, + }, + secret: corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "argocd", + Name: "my-secret", + Labels: map[string]string{ + generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster, + }, + }, + }, + expectedRequests: []reconcile.Request{{ + NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set"}, + }}, + }, + { + name: "multiple cluster generators should produce multiple requests", + items: []argov1alpha1.ApplicationSet{ + { + ObjectMeta: v1.ObjectMeta{ + Name: "my-app-set", + Namespace: "argocd", + }, + Spec: argov1alpha1.ApplicationSetSpec{ + Generators: []argov1alpha1.ApplicationSetGenerator{ + { + Clusters: &argov1alpha1.ClusterGenerator{}, + }, + }, + }, + }, + { + ObjectMeta: v1.ObjectMeta{ + Name: "my-app-set2", + Namespace: "argocd", + }, + Spec: argov1alpha1.ApplicationSetSpec{ + Generators: []argov1alpha1.ApplicationSetGenerator{ + { + Clusters: &argov1alpha1.ClusterGenerator{}, + }, + }, + }, + }, + }, + secret: corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "argocd", + Name: "my-secret", + Labels: map[string]string{ + generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster, + }, + }, + }, + expectedRequests: []reconcile.Request{ + {NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set"}}, + {NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set2"}}, + }, + }, + { + name: "non-cluster generator should not match", + items: []argov1alpha1.ApplicationSet{ + { + ObjectMeta: v1.ObjectMeta{ + Name: "my-app-set", + Namespace: "another-namespace", + }, + Spec: argov1alpha1.ApplicationSetSpec{ + Generators: []argov1alpha1.ApplicationSetGenerator{ + { + Clusters: &argov1alpha1.ClusterGenerator{}, + }, + }, + }, + }, + { + ObjectMeta: v1.ObjectMeta{ + Name: "app-set-non-cluster", + Namespace: "argocd", + }, + Spec: argov1alpha1.ApplicationSetSpec{ + Generators: []argov1alpha1.ApplicationSetGenerator{ + 
{ + List: &argov1alpha1.ListGenerator{}, + }, + }, + }, + }, + }, + secret: corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "argocd", + Name: "my-secret", + Labels: map[string]string{ + generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster, + }, + }, + }, + expectedRequests: []reconcile.Request{ + {NamespacedName: types.NamespacedName{Namespace: "another-namespace", Name: "my-app-set"}}, + }, + }, + { + name: "non-argo cd secret should not match", + items: []argov1alpha1.ApplicationSet{ + { + ObjectMeta: v1.ObjectMeta{ + Name: "my-app-set", + Namespace: "another-namespace", + }, + Spec: argov1alpha1.ApplicationSetSpec{ + Generators: []argov1alpha1.ApplicationSetGenerator{ + { + Clusters: &argov1alpha1.ClusterGenerator{}, + }, + }, + }, + }, + }, + secret: corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "argocd", + Name: "my-non-argocd-secret", + }, + }, + expectedRequests: []reconcile.Request{}, + }, + { + name: "a matrix generator with a cluster generator should produce a request", + items: []argov1alpha1.ApplicationSet{ + { + ObjectMeta: v1.ObjectMeta{ + Name: "my-app-set", + Namespace: "argocd", + }, + Spec: argov1alpha1.ApplicationSetSpec{ + Generators: []argov1alpha1.ApplicationSetGenerator{ + { + Matrix: &argov1alpha1.MatrixGenerator{ + Generators: []argov1alpha1.ApplicationSetNestedGenerator{ + { + Clusters: &argov1alpha1.ClusterGenerator{}, + }, + }, + }, + }, + }, + }, + }, + }, + secret: corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "argocd", + Name: "my-secret", + Labels: map[string]string{ + generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster, + }, + }, + }, + expectedRequests: []reconcile.Request{{ + NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set"}, + }}, + }, + { + name: "a matrix generator with non cluster generator should not match", + items: []argov1alpha1.ApplicationSet{ + { + ObjectMeta: v1.ObjectMeta{ + Name: "my-app-set", + Namespace: "argocd", + }, + 
Spec: argov1alpha1.ApplicationSetSpec{ + Generators: []argov1alpha1.ApplicationSetGenerator{ + { + Matrix: &argov1alpha1.MatrixGenerator{ + Generators: []argov1alpha1.ApplicationSetNestedGenerator{ + { + List: &argov1alpha1.ListGenerator{}, + }, + }, + }, + }, + }, + }, + }, + }, + secret: corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "argocd", + Name: "my-secret", + Labels: map[string]string{ + generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster, + }, + }, + }, + expectedRequests: []reconcile.Request{}, + }, + { + name: "a matrix generator with a nested matrix generator containing a cluster generator should produce a request", + items: []argov1alpha1.ApplicationSet{ + { + ObjectMeta: v1.ObjectMeta{ + Name: "my-app-set", + Namespace: "argocd", + }, + Spec: argov1alpha1.ApplicationSetSpec{ + Generators: []argov1alpha1.ApplicationSetGenerator{ + { + Matrix: &argov1alpha1.MatrixGenerator{ + Generators: []argov1alpha1.ApplicationSetNestedGenerator{ + { + Matrix: &apiextensionsv1.JSON{ + Raw: []byte( + `{ + "generators": [ + { + "clusters": { + "selector": { + "matchLabels": { + "argocd.argoproj.io/secret-type": "cluster" + } + } + } + } + ] + }`, + ), + }, + }, + }, + }, + }, + }, + }, + }, + }, + secret: corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "argocd", + Name: "my-secret", + Labels: map[string]string{ + generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster, + }, + }, + }, + expectedRequests: []reconcile.Request{{ + NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set"}, + }}, + }, + { + name: "a matrix generator with a nested matrix generator containing non cluster generator should not match", + items: []argov1alpha1.ApplicationSet{ + { + ObjectMeta: v1.ObjectMeta{ + Name: "my-app-set", + Namespace: "argocd", + }, + Spec: argov1alpha1.ApplicationSetSpec{ + Generators: []argov1alpha1.ApplicationSetGenerator{ + { + Matrix: &argov1alpha1.MatrixGenerator{ + Generators: 
[]argov1alpha1.ApplicationSetNestedGenerator{ + { + Matrix: &apiextensionsv1.JSON{ + Raw: []byte( + `{ + "generators": [ + { + "list": { + "elements": [ + "a", + "b" + ] + } + } + ] + }`, + ), + }, + }, + }, + }, + }, + }, + }, + }, + }, + secret: corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "argocd", + Name: "my-secret", + Labels: map[string]string{ + generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster, + }, + }, + }, + expectedRequests: []reconcile.Request{}, + }, + { + name: "a merge generator with a cluster generator should produce a request", + items: []argov1alpha1.ApplicationSet{ + { + ObjectMeta: v1.ObjectMeta{ + Name: "my-app-set", + Namespace: "argocd", + }, + Spec: argov1alpha1.ApplicationSetSpec{ + Generators: []argov1alpha1.ApplicationSetGenerator{ + { + Merge: &argov1alpha1.MergeGenerator{ + Generators: []argov1alpha1.ApplicationSetNestedGenerator{ + { + Clusters: &argov1alpha1.ClusterGenerator{}, + }, + }, + }, + }, + }, + }, + }, + }, + secret: corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "argocd", + Name: "my-secret", + Labels: map[string]string{ + generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster, + }, + }, + }, + expectedRequests: []reconcile.Request{{ + NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set"}, + }}, + }, + { + name: "a matrix generator with non cluster generator should not match", + items: []argov1alpha1.ApplicationSet{ + { + ObjectMeta: v1.ObjectMeta{ + Name: "my-app-set", + Namespace: "argocd", + }, + Spec: argov1alpha1.ApplicationSetSpec{ + Generators: []argov1alpha1.ApplicationSetGenerator{ + { + Merge: &argov1alpha1.MergeGenerator{ + Generators: []argov1alpha1.ApplicationSetNestedGenerator{ + { + List: &argov1alpha1.ListGenerator{}, + }, + }, + }, + }, + }, + }, + }, + }, + secret: corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "argocd", + Name: "my-secret", + Labels: map[string]string{ + generators.ArgoCDSecretTypeLabel: 
generators.ArgoCDSecretTypeCluster, + }, + }, + }, + expectedRequests: []reconcile.Request{}, + }, + { + name: "a merge generator with a nested merge generator containing a cluster generator should produce a request", + items: []argov1alpha1.ApplicationSet{ + { + ObjectMeta: v1.ObjectMeta{ + Name: "my-app-set", + Namespace: "argocd", + }, + Spec: argov1alpha1.ApplicationSetSpec{ + Generators: []argov1alpha1.ApplicationSetGenerator{ + { + Merge: &argov1alpha1.MergeGenerator{ + Generators: []argov1alpha1.ApplicationSetNestedGenerator{ + { + Merge: &apiextensionsv1.JSON{ + Raw: []byte( + `{ + "generators": [ + { + "clusters": { + "selector": { + "matchLabels": { + "argocd.argoproj.io/secret-type": "cluster" + } + } + } + } + ] + }`, + ), + }, + }, + }, + }, + }, + }, + }, + }, + }, + secret: corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "argocd", + Name: "my-secret", + Labels: map[string]string{ + generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster, + }, + }, + }, + expectedRequests: []reconcile.Request{{ + NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set"}, + }}, + }, + { + name: "a merge generator with a nested merge generator containing non cluster generator should not match", + items: []argov1alpha1.ApplicationSet{ + { + ObjectMeta: v1.ObjectMeta{ + Name: "my-app-set", + Namespace: "argocd", + }, + Spec: argov1alpha1.ApplicationSetSpec{ + Generators: []argov1alpha1.ApplicationSetGenerator{ + { + Merge: &argov1alpha1.MergeGenerator{ + Generators: []argov1alpha1.ApplicationSetNestedGenerator{ + { + Merge: &apiextensionsv1.JSON{ + Raw: []byte( + `{ + "generators": [ + { + "list": { + "elements": [ + "a", + "b" + ] + } + } + ] + }`, + ), + }, + }, + }, + }, + }, + }, + }, + }, + }, + secret: corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "argocd", + Name: "my-secret", + Labels: map[string]string{ + generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster, + }, + }, + }, + expectedRequests: 
[]reconcile.Request{}, + }, + } + + for _, test := range tests { + + t.Run(test.name, func(t *testing.T) { + + appSetList := argov1alpha1.ApplicationSetList{ + Items: test.items, + } + + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithLists(&appSetList).Build() + + handler := &clusterSecretEventHandler{ + Client: fakeClient, + Log: log.WithField("type", "createSecretEventHandler"), + } + + mockAddRateLimitingInterface := mockAddRateLimitingInterface{} + + handler.queueRelatedAppGenerators(&mockAddRateLimitingInterface, &test.secret) + + assert.False(t, mockAddRateLimitingInterface.errorOccurred) + assert.ElementsMatch(t, mockAddRateLimitingInterface.addedItems, test.expectedRequests) + + }) + } + +} + +// Add checks the type, and adds it to the internal list of received additions +func (obj *mockAddRateLimitingInterface) Add(item interface{}) { + if req, ok := item.(ctrl.Request); ok { + obj.addedItems = append(obj.addedItems, req) + } else { + obj.errorOccurred = true + } +} + +type mockAddRateLimitingInterface struct { + errorOccurred bool + addedItems []ctrl.Request +} + +func TestNestedGeneratorHasClusterGenerator_NestedClusterGenerator(t *testing.T) { + nested := argov1alpha1.ApplicationSetNestedGenerator{ + Clusters: &argov1alpha1.ClusterGenerator{}, + } + + hasClusterGenerator, err := nestedGeneratorHasClusterGenerator(nested) + + assert.Nil(t, err) + assert.True(t, hasClusterGenerator) +} + +func TestNestedGeneratorHasClusterGenerator_NestedMergeGenerator(t *testing.T) { + nested := argov1alpha1.ApplicationSetNestedGenerator{ + Merge: &apiextensionsv1.JSON{ + Raw: []byte( + `{ + "generators": [ + { + "clusters": { + "selector": { + "matchLabels": { + "argocd.argoproj.io/secret-type": "cluster" + } + } + } + } + ] + }`, + ), + }, + } + + hasClusterGenerator, err := nestedGeneratorHasClusterGenerator(nested) + + assert.Nil(t, err) + assert.True(t, hasClusterGenerator) +} + +func 
TestNestedGeneratorHasClusterGenerator_NestedMergeGeneratorWithInvalidJSON(t *testing.T) { + nested := argov1alpha1.ApplicationSetNestedGenerator{ + Merge: &apiextensionsv1.JSON{ + Raw: []byte( + `{ + "generators": [ + { + "clusters": { + "selector": { + "matchLabels": { + "argocd.argoproj.io/secret-type": "cluster" + } + } + } + } + ] + `, + ), + }, + } + + hasClusterGenerator, err := nestedGeneratorHasClusterGenerator(nested) + + assert.NotNil(t, err) + assert.False(t, hasClusterGenerator) +} diff --git a/applicationset/controllers/requeue_after_test.go b/applicationset/controllers/requeue_after_test.go new file mode 100644 index 0000000000000..6db6145af5348 --- /dev/null +++ b/applicationset/controllers/requeue_after_test.go @@ -0,0 +1,153 @@ +package controllers + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + dynfake "k8s.io/client-go/dynamic/fake" + kubefake "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/argoproj/argo-cd/v2/applicationset/generators" + "github.com/argoproj/argo-cd/v2/applicationset/services/mocks" + argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +func TestRequeueAfter(t *testing.T) { + mockServer := &mocks.Repos{} + ctx := context.Background() + scheme := runtime.NewScheme() + err := argov1alpha1.AddToScheme(scheme) + assert.Nil(t, err) + gvrToListKind := map[schema.GroupVersionResource]string{{ + Group: "mallard.io", + Version: "v1", + Resource: "ducks", + }: "DuckList"} + appClientset := kubefake.NewSimpleClientset() + k8sClient := fake.NewClientBuilder().Build() + duckType := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v2quack", + "kind": "Duck", + "metadata": map[string]interface{}{ + 
"name": "mightyduck", + "namespace": "namespace", + "labels": map[string]interface{}{"duck": "all-species"}, + }, + "status": map[string]interface{}{ + "decisions": []interface{}{ + map[string]interface{}{ + "clusterName": "staging-01", + }, + map[string]interface{}{ + "clusterName": "production-01", + }, + }, + }, + }, + } + fakeDynClient := dynfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, duckType) + + terminalGenerators := map[string]generators.Generator{ + "List": generators.NewListGenerator(), + "Clusters": generators.NewClusterGenerator(k8sClient, ctx, appClientset, "argocd"), + "Git": generators.NewGitGenerator(mockServer), + "SCMProvider": generators.NewSCMProviderGenerator(fake.NewClientBuilder().WithObjects(&corev1.Secret{}).Build(), generators.SCMAuthProviders{}, "", []string{""}, true), + "ClusterDecisionResource": generators.NewDuckTypeGenerator(ctx, fakeDynClient, appClientset, "argocd"), + "PullRequest": generators.NewPullRequestGenerator(k8sClient, generators.SCMAuthProviders{}, "", []string{""}, true), + } + + nestedGenerators := map[string]generators.Generator{ + "List": terminalGenerators["List"], + "Clusters": terminalGenerators["Clusters"], + "Git": terminalGenerators["Git"], + "SCMProvider": terminalGenerators["SCMProvider"], + "ClusterDecisionResource": terminalGenerators["ClusterDecisionResource"], + "PullRequest": terminalGenerators["PullRequest"], + "Matrix": generators.NewMatrixGenerator(terminalGenerators), + "Merge": generators.NewMergeGenerator(terminalGenerators), + } + + topLevelGenerators := map[string]generators.Generator{ + "List": terminalGenerators["List"], + "Clusters": terminalGenerators["Clusters"], + "Git": terminalGenerators["Git"], + "SCMProvider": terminalGenerators["SCMProvider"], + "ClusterDecisionResource": terminalGenerators["ClusterDecisionResource"], + "PullRequest": terminalGenerators["PullRequest"], + "Matrix": generators.NewMatrixGenerator(nestedGenerators), + "Merge": 
generators.NewMergeGenerator(nestedGenerators), + } + + client := fake.NewClientBuilder().WithScheme(scheme).Build() + r := ApplicationSetReconciler{ + Client: client, + Scheme: scheme, + Recorder: record.NewFakeRecorder(0), + Generators: topLevelGenerators, + } + + type args struct { + appset *argov1alpha1.ApplicationSet + } + tests := []struct { + name string + args args + want time.Duration + wantErr assert.ErrorAssertionFunc + }{ + {name: "Cluster", args: args{appset: &argov1alpha1.ApplicationSet{ + Spec: argov1alpha1.ApplicationSetSpec{ + Generators: []argov1alpha1.ApplicationSetGenerator{{Clusters: &argov1alpha1.ClusterGenerator{}}}, + }, + }}, want: generators.NoRequeueAfter, wantErr: assert.NoError}, + {name: "ClusterMergeNested", args: args{&argov1alpha1.ApplicationSet{ + Spec: argov1alpha1.ApplicationSetSpec{ + Generators: []argov1alpha1.ApplicationSetGenerator{ + {Clusters: &argov1alpha1.ClusterGenerator{}}, + {Merge: &argov1alpha1.MergeGenerator{ + Generators: []argov1alpha1.ApplicationSetNestedGenerator{ + { + Clusters: &argov1alpha1.ClusterGenerator{}, + Git: &argov1alpha1.GitGenerator{}, + }, + }, + }}, + }, + }, + }}, want: generators.DefaultRequeueAfterSeconds, wantErr: assert.NoError}, + {name: "ClusterMatrixNested", args: args{&argov1alpha1.ApplicationSet{ + Spec: argov1alpha1.ApplicationSetSpec{ + Generators: []argov1alpha1.ApplicationSetGenerator{ + {Clusters: &argov1alpha1.ClusterGenerator{}}, + {Matrix: &argov1alpha1.MatrixGenerator{ + Generators: []argov1alpha1.ApplicationSetNestedGenerator{ + { + Clusters: &argov1alpha1.ClusterGenerator{}, + Git: &argov1alpha1.GitGenerator{}, + }, + }, + }}, + }, + }, + }}, want: generators.DefaultRequeueAfterSeconds, wantErr: assert.NoError}, + {name: "ListGenerator", args: args{appset: &argov1alpha1.ApplicationSet{ + Spec: argov1alpha1.ApplicationSetSpec{ + Generators: []argov1alpha1.ApplicationSetGenerator{{List: &argov1alpha1.ListGenerator{}}}, + }, + }}, want: generators.NoRequeueAfter, wantErr: 
assert.NoError}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, r.getMinRequeueAfter(tt.args.appset), "getMinRequeueAfter(%v)", tt.args.appset) + }) + } +} diff --git a/applicationset/examples/applications-sync-policies/create-only.yaml b/applicationset/examples/applications-sync-policies/create-only.yaml new file mode 100644 index 0000000000000..7758a70b45765 --- /dev/null +++ b/applicationset/examples/applications-sync-policies/create-only.yaml @@ -0,0 +1,35 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + goTemplate: true + generators: + - list: + elements: + - cluster: engineering-dev + url: https://kubernetes.default.svc + foo: bar + # Update foo value with foo: bar + # Application engineering-prod-guestbook labels will still be baz + # Delete this element + # Application engineering-prod-guestbook will be kept + - cluster: engineering-prod + url: https://kubernetes.default.svc + foo: baz + template: + metadata: + name: '{{.cluster}}-guestbook' + labels: + foo: '{{.foo}}' + spec: + project: default + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: applicationset/examples/list-generator/guestbook/{{.cluster}} + destination: + server: '{{.url}}' + namespace: guestbook + syncPolicy: + applicationsSync: create-only diff --git a/applicationset/examples/applications-sync-policies/create-update.yaml b/applicationset/examples/applications-sync-policies/create-update.yaml new file mode 100644 index 0000000000000..277e8d6e18884 --- /dev/null +++ b/applicationset/examples/applications-sync-policies/create-update.yaml @@ -0,0 +1,35 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + goTemplate: true + generators: + - list: + elements: + - cluster: engineering-dev + url: https://kubernetes.default.svc + foo: bar + # Update foo value with foo: bar + # Application engineering-prod-guestbook 
labels will change to foo: bar + # Delete this element + # Application engineering-prod-guestbook will be kept + - cluster: engineering-prod + url: https://kubernetes.default.svc + foo: baz + template: + metadata: + name: '{{.cluster}}-guestbook' + labels: + foo: '{{.foo}}' + spec: + project: default + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: applicationset/examples/list-generator/guestbook/{{.cluster}} + destination: + server: '{{.url}}' + namespace: guestbook + syncPolicy: + applicationsSync: create-update diff --git a/applicationset/examples/applications-sync-policies/guestbook/engineering-dev/guestbook-ui-deployment.yaml b/applicationset/examples/applications-sync-policies/guestbook/engineering-dev/guestbook-ui-deployment.yaml new file mode 100644 index 0000000000000..8a0975e363539 --- /dev/null +++ b/applicationset/examples/applications-sync-policies/guestbook/engineering-dev/guestbook-ui-deployment.yaml @@ -0,0 +1,20 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: guestbook-ui +spec: + replicas: 1 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: guestbook-ui + template: + metadata: + labels: + app: guestbook-ui + spec: + containers: + - image: gcr.io/heptio-images/ks-guestbook-demo:0.2 + name: guestbook-ui + ports: + - containerPort: 80 diff --git a/applicationset/examples/applications-sync-policies/guestbook/engineering-dev/guestbook-ui-svc.yaml b/applicationset/examples/applications-sync-policies/guestbook/engineering-dev/guestbook-ui-svc.yaml new file mode 100644 index 0000000000000..e8a4a27fbae40 --- /dev/null +++ b/applicationset/examples/applications-sync-policies/guestbook/engineering-dev/guestbook-ui-svc.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Service +metadata: + name: guestbook-ui +spec: + ports: + - port: 80 + targetPort: 80 + selector: + app: guestbook-ui diff --git 
a/applicationset/examples/applications-sync-policies/guestbook/engineering-prod/guestbook-ui-deployment.yaml b/applicationset/examples/applications-sync-policies/guestbook/engineering-prod/guestbook-ui-deployment.yaml new file mode 100644 index 0000000000000..8a0975e363539 --- /dev/null +++ b/applicationset/examples/applications-sync-policies/guestbook/engineering-prod/guestbook-ui-deployment.yaml @@ -0,0 +1,20 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: guestbook-ui +spec: + replicas: 1 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: guestbook-ui + template: + metadata: + labels: + app: guestbook-ui + spec: + containers: + - image: gcr.io/heptio-images/ks-guestbook-demo:0.2 + name: guestbook-ui + ports: + - containerPort: 80 diff --git a/applicationset/examples/applications-sync-policies/guestbook/engineering-prod/guestbook-ui-svc.yaml b/applicationset/examples/applications-sync-policies/guestbook/engineering-prod/guestbook-ui-svc.yaml new file mode 100644 index 0000000000000..e8a4a27fbae40 --- /dev/null +++ b/applicationset/examples/applications-sync-policies/guestbook/engineering-prod/guestbook-ui-svc.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Service +metadata: + name: guestbook-ui +spec: + ports: + - port: 80 + targetPort: 80 + selector: + app: guestbook-ui diff --git a/applicationset/examples/cluster/cluster-example-fasttemplate.yaml b/applicationset/examples/cluster/cluster-example-fasttemplate.yaml new file mode 100644 index 0000000000000..497e7657de68c --- /dev/null +++ b/applicationset/examples/cluster/cluster-example-fasttemplate.yaml @@ -0,0 +1,19 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + generators: + - clusters: {} + template: + metadata: + name: '{{name}}-guestbook' + spec: + project: "default" + source: + repoURL: https://github.com/argoproj/argocd-example-apps/ + targetRevision: HEAD + path: guestbook + destination: + server: '{{server}}' + namespace: guestbook 
diff --git a/applicationset/examples/cluster/cluster-example.yaml b/applicationset/examples/cluster/cluster-example.yaml new file mode 100644 index 0000000000000..a8e54212595e8 --- /dev/null +++ b/applicationset/examples/cluster/cluster-example.yaml @@ -0,0 +1,21 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - clusters: {} + template: + metadata: + name: '{{.name}}-guestbook' + spec: + project: "default" + source: + repoURL: https://github.com/argoproj/argocd-example-apps/ + targetRevision: HEAD + path: guestbook + destination: + server: '{{.server}}' + namespace: guestbook diff --git a/applicationset/examples/clusterDecisionResource/README.md b/applicationset/examples/clusterDecisionResource/README.md new file mode 100644 index 0000000000000..045cf8202e5ec --- /dev/null +++ b/applicationset/examples/clusterDecisionResource/README.md @@ -0,0 +1,57 @@ +# How the Cluster Decision Resource generator works for clusterDecisionResource +1. The Cluster Decision Resource generator reads a configurable status format: +```yaml +status: + clusters: + - name: cluster-01 + - name: cluster-02 +``` +This is a common status format. Another format that could be read looks like this: +```yaml +status: + decisions: + - clusterName: cluster-01 + namespace: cluster-01 + - clusterName: cluster-02 + namespace: cluster-02 +``` +2. Any resource that has a list of key / value pairs, where the value matches ArgoCD cluster names can be used. +3. The key / value pairs found in each element of the list will be available to the template. As well, `name` and `server` will still be available to the template. +4. The Service Account used by the ApplicationSet controller must have access to `Get` the resource you want to retrieve the duck type definition from +5. A configMap is used to identify the resource to read status of generated ArgoCD clusters from. 
You can use multiple resources by creating a ConfigMap for each one in the ArgoCD namespace. +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: my-configmap +data: + apiVersion: group.io/v1 + kind: mykinds + statusListKey: clusters + matchKey: name +``` + * `apiVersion` - This is the apiVersion of your resource + * `kind` - This is the plural kind of your resource + * `statusListKey` - Default is 'clusters', this is the key found in your resource's status that is a list of ArgoCD clusters. + * `matchKey` - Is the key name found in the cluster list, `name` and `clusterName` are the keys in the examples above. + +# Applying the example +1. Connect to a cluster with the ApplicationSet controller running +2. Edit the Role for the ApplicationSet service account, and grant it permission to `list` the `placementdecisions` resources, from apiGroups `cluster.open-cluster-management.io/v1alpha1` +```yaml +- apiGroups: + - "cluster.open-cluster-management.io/v1alpha1" + resources: + - placementdecisions + verbs: + - list +``` +3. Apply the following controller and associated ManagedCluster CRD's: +https://github.com/open-cluster-management/placement +4. Now apply the PlacementDecision and an ApplicationSet: +```bash +kubectl apply -f ./placementdecision.yaml +kubectl apply -f ./configMap.yaml +kubectl apply -f ./ducktype-example.yaml +``` +5. For now this won't do anything until you create a controller that populates the `Status.Decisions` array. 
\ No newline at end of file diff --git a/applicationset/examples/clusterDecisionResource/configMap.yaml b/applicationset/examples/clusterDecisionResource/configMap.yaml new file mode 100644 index 0000000000000..69ddc81fc3fd9 --- /dev/null +++ b/applicationset/examples/clusterDecisionResource/configMap.yaml @@ -0,0 +1,11 @@ +# To generate a Status.Decisions from this CRD, requires https://github.com/open-cluster-management/multicloud-operators-placementrule be deployed +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: ocm-placement +data: + apiVersion: apps.open-cluster-management.io/v1 + kind: placementrules + statusListKey: decisions + matchKey: clusterName diff --git a/applicationset/examples/clusterDecisionResource/ducktype-example-fasttemplate.yaml b/applicationset/examples/clusterDecisionResource/ducktype-example-fasttemplate.yaml new file mode 100644 index 0000000000000..1663bbb06e483 --- /dev/null +++ b/applicationset/examples/clusterDecisionResource/ducktype-example-fasttemplate.yaml @@ -0,0 +1,27 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: book-import +spec: + generators: + - clusterDecisionResource: + configMapRef: ocm-placement + name: test-placement + requeueAfterSeconds: 30 + template: + metadata: + name: '{{clusterName}}-book-import' + spec: + project: "default" + source: + repoURL: https://github.com/open-cluster-management/application-samples.git + targetRevision: HEAD + path: book-import + destination: + name: '{{clusterName}}' + namespace: bookimport + syncPolicy: + automated: + prune: true + syncOptions: + - CreateNamespace=true diff --git a/applicationset/examples/clusterDecisionResource/ducktype-example.yaml b/applicationset/examples/clusterDecisionResource/ducktype-example.yaml new file mode 100644 index 0000000000000..cf633483a8f68 --- /dev/null +++ b/applicationset/examples/clusterDecisionResource/ducktype-example.yaml @@ -0,0 +1,29 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: 
+ name: book-import +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - clusterDecisionResource: + configMapRef: ocm-placement + name: test-placement + requeueAfterSeconds: 30 + template: + metadata: + name: '{{.clusterName}}-book-import' + spec: + project: "default" + source: + repoURL: https://github.com/open-cluster-management/application-samples.git + targetRevision: HEAD + path: book-import + destination: + name: '{{.clusterName}}' + namespace: bookimport + syncPolicy: + automated: + prune: true + syncOptions: + - CreateNamespace=true diff --git a/applicationset/examples/clusterDecisionResource/placementdecision.yaml b/applicationset/examples/clusterDecisionResource/placementdecision.yaml new file mode 100644 index 0000000000000..666c2acabeaa8 --- /dev/null +++ b/applicationset/examples/clusterDecisionResource/placementdecision.yaml @@ -0,0 +1,18 @@ +--- +apiVersion: apps.open-cluster-management.io/v1 +kind: PlacementRule +metadata: + name: test-placement +spec: + clusterReplicas: 1 # Availability choice, maximum number of clusters to provision at once + clusterSelector: + matchLabels: + 'usage': 'development' + clusterConditions: + - type: ManagedClusterConditionAvailable + status: "True" +# Below is sample output the generator can consume. +status: + decisions: + - clusterName: cluster-01 + - clusterName: cluster-02 \ No newline at end of file diff --git a/applicationset/examples/design-doc/applicationset-fasttemplate.yaml b/applicationset/examples/design-doc/applicationset-fasttemplate.yaml new file mode 100644 index 0000000000000..8249b727d2dc9 --- /dev/null +++ b/applicationset/examples/design-doc/applicationset-fasttemplate.yaml @@ -0,0 +1,22 @@ +# This is an example of a typical ApplicationSet which uses the cluster generator. 
+# An ApplicationSet is comprised with two stanzas: +# - spec.generator - producer of a list of values supplied as arguments to an app template +# - spec.template - an application template, which has been parameterized +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + generators: + - clusters: {} + template: + metadata: + name: '{{name}}-guestbook' + spec: + source: + repoURL: https://github.com/infra-team/cluster-deployments.git + targetRevision: HEAD + chart: guestbook + destination: + server: '{{server}}' + namespace: guestbook diff --git a/applicationset/examples/design-doc/applicationset.yaml b/applicationset/examples/design-doc/applicationset.yaml new file mode 100644 index 0000000000000..7ab4e824596a3 --- /dev/null +++ b/applicationset/examples/design-doc/applicationset.yaml @@ -0,0 +1,24 @@ +# This is an example of a typical ApplicationSet which uses the cluster generator. +# An ApplicationSet is comprised with two stanzas: +# - spec.generator - producer of a list of values supplied as arguments to an app template +# - spec.template - an application template, which has been parameterized +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - clusters: {} + template: + metadata: + name: '{{.name}}-guestbook' + spec: + source: + repoURL: https://github.com/infra-team/cluster-deployments.git + targetRevision: HEAD + chart: guestbook + destination: + server: '{{.server}}' + namespace: guestbook diff --git a/applicationset/examples/design-doc/clusters.yaml b/applicationset/examples/design-doc/clusters.yaml new file mode 100644 index 0000000000000..474d3cc7cdad5 --- /dev/null +++ b/applicationset/examples/design-doc/clusters.yaml @@ -0,0 +1,33 @@ +# The cluster generator produces an items list from all clusters registered to Argo CD. 
+# It automatically provides the following fields as values to the app template: +# - name +# - server +# - metadata.labels. +# - metadata.annotations. +# - values. +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + generators: + - clusters: + selector: + matchLabels: + argocd.argoproj.io/secret-type: cluster + values: + project: default + template: + metadata: + name: '{{.name}}-guestbook' + labels: + environment: '{{.metadata.labels.environment}}' + spec: + project: '{{.values.project}}' + source: + repoURL: https://github.com/infra-team/cluster-deployments.git + targetRevision: HEAD + chart: guestbook + destination: + server: '{{.server}}' + namespace: guestbook diff --git a/applicationset/examples/design-doc/git-directory-discovery-fasttemplate.yaml b/applicationset/examples/design-doc/git-directory-discovery-fasttemplate.yaml new file mode 100644 index 0000000000000..7ff9bb3c053e5 --- /dev/null +++ b/applicationset/examples/design-doc/git-directory-discovery-fasttemplate.yaml @@ -0,0 +1,44 @@ +# This example demonstrates the git directory generator, which produces an items list +# based on discovery of directories in a git repo matching a specified pattern. +# Git generators automatically provide {{path}} and {{path.basename}} as available +# variables to the app template. +# +# Suppose the following git directory structure (note the use of different config tools): +# +# cluster-deployments +# └── add-ons +# ├── argo-rollouts +# │   ├── all.yaml +# │   └── kustomization.yaml +# ├── argo-workflows +# │   └── install.yaml +# ├── grafana +# │   ├── Chart.yaml +# │   └── values.yaml +# └── prometheus-operator +# ├── Chart.yaml +# └── values.yaml +# +# The following ApplicationSet would produce four applications (in different namespaces), +# using the directory basename as both the namespace and application name. 
+apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: cluster-addons +spec: + generators: + - git: + repoURL: https://github.com/infra-team/cluster-deployments.git + directories: + - path: add-ons/* + template: + metadata: + name: '{{path.basename}}' + spec: + source: + repoURL: https://github.com/infra-team/cluster-deployments.git + targetRevision: HEAD + path: '{{path}}' + destination: + server: http://kubernetes.default.svc + namespace: '{{path.basename}}' diff --git a/applicationset/examples/design-doc/git-directory-discovery.yaml b/applicationset/examples/design-doc/git-directory-discovery.yaml new file mode 100644 index 0000000000000..a158d034d9043 --- /dev/null +++ b/applicationset/examples/design-doc/git-directory-discovery.yaml @@ -0,0 +1,46 @@ +# This example demonstrates the git directory generator, which produces an items list +# based on discovery of directories in a git repo matching a specified pattern. +# Git generators automatically provide {{path}} and {{path.basename}} as available +# variables to the app template. +# +# Suppose the following git directory structure (note the use of different config tools): +# +# cluster-deployments +# └── add-ons +# ├── argo-rollouts +# │   ├── all.yaml +# │   └── kustomization.yaml +# ├── argo-workflows +# │   └── install.yaml +# ├── grafana +# │   ├── Chart.yaml +# │   └── values.yaml +# └── prometheus-operator +# ├── Chart.yaml +# └── values.yaml +# +# The following ApplicationSet would produce four applications (in different namespaces), +# using the directory basename as both the namespace and application name. 
+apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: cluster-addons +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - git: + repoURL: https://github.com/infra-team/cluster-deployments.git + directories: + - path: add-ons/* + template: + metadata: + name: '{{.path.basename}}' + spec: + source: + repoURL: https://github.com/infra-team/cluster-deployments.git + targetRevision: HEAD + path: '{{.path.path}}' + destination: + server: http://kubernetes.default.svc + namespace: '{{.path.basename}}' diff --git a/applicationset/examples/design-doc/git-files-discovery-fasttemplate.yaml b/applicationset/examples/design-doc/git-files-discovery-fasttemplate.yaml new file mode 100644 index 0000000000000..7cb717d31d49a --- /dev/null +++ b/applicationset/examples/design-doc/git-files-discovery-fasttemplate.yaml @@ -0,0 +1,55 @@ +# This example demonstrates a git file generator which traverses the directory structure of a git +# repository to discover items based on a filename convention. For each file discovered, the +# contents of the discovered files themselves, act as the set of inputs to the app template. +# +# Suppose the following git directory structure: +# +# cluster-deployments +# ├── apps +# │ └── guestbook +# │ └── install.yaml +# └── cluster-config +# ├── engineering +# │ ├── dev +# │ │ └── config.json +# │ └── prod +# │ └── config.json +# └── finance +# ├── dev +# │ └── config.json +# └── prod +# └── config.json +# +# The discovered files (e.g. config.json) files can be any structured data supplied to the +# generated application. 
e.g.: +# { +# "aws_account": "123456", +# "asset_id": "11223344" +# "cluster": { +# "owner": "Jesse_Suen@intuit.com", +# "name": "engineering-dev", +# "address": "http://1.2.3.4" +# } +# } +# +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + generators: + - git: + repoURL: https://github.com/infra-team/cluster-deployments.git + files: + - path: "**/config.json" + template: + metadata: + name: '{{cluster.name}}-guestbook' + spec: + source: + repoURL: https://github.com/infra-team/cluster-deployments.git + targetRevision: HEAD + path: apps/guestbook + destination: + server: '{{cluster.address}}' + namespace: guestbook diff --git a/applicationset/examples/design-doc/git-files-discovery.yaml b/applicationset/examples/design-doc/git-files-discovery.yaml new file mode 100644 index 0000000000000..367e318ac2d5a --- /dev/null +++ b/applicationset/examples/design-doc/git-files-discovery.yaml @@ -0,0 +1,57 @@ +# This example demonstrates a git file generator which traverses the directory structure of a git +# repository to discover items based on a filename convention. For each file discovered, the +# contents of the discovered files themselves, act as the set of inputs to the app template. +# +# Suppose the following git directory structure: +# +# cluster-deployments +# ├── apps +# │ └── guestbook +# │ └── install.yaml +# └── cluster-config +# ├── engineering +# │ ├── dev +# │ │ └── config.json +# │ └── prod +# │ └── config.json +# └── finance +# ├── dev +# │ └── config.json +# └── prod +# └── config.json +# +# The discovered files (e.g. config.json) files can be any structured data supplied to the +# generated application. 
e.g.: +# { +# "aws_account": "123456", +# "asset_id": "11223344" +# "cluster": { +# "owner": "Jesse_Suen@intuit.com", +# "name": "engineering-dev", +# "address": "http://1.2.3.4" +# } +# } +# +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - git: + repoURL: https://github.com/infra-team/cluster-deployments.git + files: + - path: "**/config.json" + template: + metadata: + name: '{{.cluster.name}}-guestbook' + spec: + source: + repoURL: https://github.com/infra-team/cluster-deployments.git + targetRevision: HEAD + path: apps/guestbook + destination: + server: '{{.cluster.address}}' + namespace: guestbook diff --git a/applicationset/examples/design-doc/git-files-literal-fasttemplate.yaml b/applicationset/examples/design-doc/git-files-literal-fasttemplate.yaml new file mode 100644 index 0000000000000..f2c52b0c220f7 --- /dev/null +++ b/applicationset/examples/design-doc/git-files-literal-fasttemplate.yaml @@ -0,0 +1,68 @@ +# This example demonstrates a git file generator which produces its items based on one or +# more files referenced in a git repo. The referenced files would contain a json/yaml list of +# arbitrary structured objects. Each item of the list would become a set of parameters to a +# generated application. 
+# +# Suppose the following git directory structure: +# +# cluster-deployments +# ├── apps +# │ └── guestbook +# │ ├── v1.0 +# │ │ └── install.yaml +# │ └── v2.0 +# │ └── install.yaml +# └── config +# └── clusters.json +# +# In this example, the `clusters.json` file is json list of structured data: +# [ +# { +# "account": "123456", +# "asset_id": "11223344", +# "cluster": { +# "owner": "Jesse_Suen@intuit.com", +# "name": "engineering-dev", +# "address": "http://1.2.3.4" +# }, +# "appVersions": { +# "prometheus-operator": "v0.38", +# "guestbook": "v2.0" +# } +# }, +# { +# "account": "456789", +# "asset_id": "55667788", +# "cluster": { +# "owner": "Alexander_Matyushentsev@intuit.com", +# "name": "engineering-prod", +# "address": "http://2.4.6.8" +# }, +# "appVersions": { +# "prometheus-operator": "v0.38", +# "guestbook": "v1.0" +# } +# } +# ] +# +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + generators: + - git: + repoURL: https://github.com/infra-team/cluster-deployments.git + files: + - path: config/clusters.json + template: + metadata: + name: '{{cluster.name}}-guestbook' + spec: + source: + repoURL: https://github.com/infra-team/cluster-deployments.git + targetRevision: HEAD + path: apps/guestbook/{{appVersions.guestbook}} + destination: + server: http://kubernetes.default.svc + namespace: guestbook diff --git a/applicationset/examples/design-doc/git-files-literal.yaml b/applicationset/examples/design-doc/git-files-literal.yaml new file mode 100644 index 0000000000000..9dbace36e4c56 --- /dev/null +++ b/applicationset/examples/design-doc/git-files-literal.yaml @@ -0,0 +1,70 @@ +# This example demonstrates a git file generator which produces its items based on one or +# more files referenced in a git repo. The referenced files would contain a json/yaml list of +# arbitrary structured objects. Each item of the list would become a set of parameters to a +# generated application. 
+# +# Suppose the following git directory structure: +# +# cluster-deployments +# ├── apps +# │ └── guestbook +# │ ├── v1.0 +# │ │ └── install.yaml +# │ └── v2.0 +# │ └── install.yaml +# └── config +# └── clusters.json +# +# In this example, the `clusters.json` file is json list of structured data: +# [ +# { +# "account": "123456", +# "asset_id": "11223344", +# "cluster": { +# "owner": "Jesse_Suen@intuit.com", +# "name": "engineering-dev", +# "address": "http://1.2.3.4" +# }, +# "appVersions": { +# "prometheus-operator": "v0.38", +# "guestbook": "v2.0" +# } +# }, +# { +# "account": "456789", +# "asset_id": "55667788", +# "cluster": { +# "owner": "Alexander_Matyushentsev@intuit.com", +# "name": "engineering-prod", +# "address": "http://2.4.6.8" +# }, +# "appVersions": { +# "prometheus-operator": "v0.38", +# "guestbook": "v1.0" +# } +# } +# ] +# +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - git: + repoURL: https://github.com/infra-team/cluster-deployments.git + files: + - path: config/clusters.json + template: + metadata: + name: '{{.cluster.name}}-guestbook' + spec: + source: + repoURL: https://github.com/infra-team/cluster-deployments.git + targetRevision: HEAD + path: apps/guestbook/{{.appVersions.guestbook}} + destination: + server: http://kubernetes.default.svc + namespace: guestbook diff --git a/applicationset/examples/design-doc/list-fasttemplate.yaml b/applicationset/examples/design-doc/list-fasttemplate.yaml new file mode 100644 index 0000000000000..7cdbc5552442a --- /dev/null +++ b/applicationset/examples/design-doc/list-fasttemplate.yaml @@ -0,0 +1,33 @@ +# The list generator specifies a literal list of argument values to the app spec template. 
+apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + generators: + - list: + elements: + - cluster: engineering-dev + url: https://1.2.3.4 + values: + project: dev + - cluster: engineering-prod + url: https://2.4.6.8 + values: + project: prod + - cluster: finance-preprod + url: https://9.8.7.6 + values: + project: preprod + template: + metadata: + name: '{{cluster}}-guestbook' + spec: + project: '{{values.project}}' + source: + repoURL: https://github.com/infra-team/cluster-deployments.git + targetRevision: HEAD + path: guestbook/{{cluster}} + destination: + server: '{{url}}' + namespace: guestbook diff --git a/applicationset/examples/design-doc/list.yaml b/applicationset/examples/design-doc/list.yaml new file mode 100644 index 0000000000000..b1bcd593eac7f --- /dev/null +++ b/applicationset/examples/design-doc/list.yaml @@ -0,0 +1,35 @@ +# The list generator specifies a literal list of argument values to the app spec template. +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - list: + elements: + - cluster: engineering-dev + url: https://1.2.3.4 + values: + project: dev + - cluster: engineering-prod + url: https://2.4.6.8 + values: + project: prod + - cluster: finance-preprod + url: https://9.8.7.6 + values: + project: preprod + template: + metadata: + name: '{{.cluster}}-guestbook' + spec: + project: '{{.values.project}}' + source: + repoURL: https://github.com/infra-team/cluster-deployments.git + targetRevision: HEAD + path: guestbook/{{.cluster}} + destination: + server: '{{.url}}' + namespace: guestbook diff --git a/applicationset/examples/design-doc/proposal/README.md b/applicationset/examples/design-doc/proposal/README.md new file mode 100644 index 0000000000000..5839139f928f8 --- /dev/null +++ b/applicationset/examples/design-doc/proposal/README.md @@ -0,0 +1,3 @@ +# Proposal Examples +This directory 
contains examples that are not yet implemented. +They are part of the project to indicate future progress, and we welcome any contribution that will add an implementation. diff --git a/applicationset/examples/design-doc/proposal/filters.yaml b/applicationset/examples/design-doc/proposal/filters.yaml new file mode 100644 index 0000000000000..295d2dce975da --- /dev/null +++ b/applicationset/examples/design-doc/proposal/filters.yaml @@ -0,0 +1,48 @@ +# For all generators, filters can be applied to reduce the generated items to a smaller subset. +# A powerful set of filter expressions is supported using syntax provided by the +# https://github.com/antonmedv/expr library. Example expressions are demonstrated below. +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + generators: + # Match all clusters that meet ALL of the following conditions: + # 1. name matches the regex `sales-.*` + # 2. environment label is either 'staging' or 'prod' + - clusters: + filters: + - expr: '{{name}} matches "sales-.*"' + - expr: '{{metadata.labels.environment}} in [staging, prod]' + values: + version: '2.0.0' + # Filter items from `config/clusters.json` in the `cluster-deployments` git repo, + # to only those having the `cluster.enabled == true` property. e.g.: + # { + # ... + # "cluster": { + # "enabled": true, + # ... 
+ # } + # } + - git: + repoURL: https://github.com/infra-team/cluster-deployments.git + files: + - path: config/clusters.json + filters: + - expr: '{{cluster.enabled}} == true' + template: + metadata: + name: '{{name}}-guestbook' + spec: + source: + repoURL: https://github.com/infra-team/cluster-deployments.git + targetRevision: "{{values.version}}" + chart: guestbook + helm: + parameters: + - name: foo + value: "{{metadata.annotations.foo}}" + destination: + server: '{{server}}' + namespace: guestbook diff --git a/applicationset/examples/design-doc/template-override-fasttemplate.yaml b/applicationset/examples/design-doc/template-override-fasttemplate.yaml new file mode 100644 index 0000000000000..9eade199f9762 --- /dev/null +++ b/applicationset/examples/design-doc/template-override-fasttemplate.yaml @@ -0,0 +1,48 @@ +# App templates can also be defined as part of the generator's template stanza. Sometimes it is +# useful to do this in order to override the spec.template stanza, and when simple string +# parameterization are insufficient. In the below examples, the generators[].XXX.template is +# a partial definition, which overrides/patch the default template. 
+apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + generators: + - list: + elements: + - cluster: engineering-dev + url: https://1.2.3.4 + template: + metadata: {} + spec: + project: "project" + source: + repoURL: https://github.com/infra-team/cluster-deployments.git + path: '{{cluster}}-override' + destination: {} + + - list: + elements: + - cluster: engineering-prod + url: https://1.2.3.4 + template: + metadata: {} + spec: + project: "project2" + source: + repoURL: https://github.com/infra-team/cluster-deployments.git + path: '{{cluster}}-override2' + destination: {} + + template: + metadata: + name: '{{cluster}}-guestbook' + spec: + project: "project" + source: + repoURL: https://github.com/infra-team/cluster-deployments.git + targetRevision: HEAD + path: guestbook/{{cluster}} + destination: + server: '{{url}}' + namespace: guestbook diff --git a/applicationset/examples/design-doc/template-override.yaml b/applicationset/examples/design-doc/template-override.yaml new file mode 100644 index 0000000000000..970c7c395a820 --- /dev/null +++ b/applicationset/examples/design-doc/template-override.yaml @@ -0,0 +1,50 @@ +# App templates can also be defined as part of the generator's template stanza. Sometimes it is +# useful to do this in order to override the spec.template stanza, and when simple string +# parameterization are insufficient. In the below examples, the generators[].XXX.template is +# a partial definition, which overrides/patch the default template. 
+apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - list: + elements: + - cluster: engineering-dev + url: https://1.2.3.4 + template: + metadata: {} + spec: + project: "project" + source: + repoURL: https://github.com/infra-team/cluster-deployments.git + path: '{{.cluster}}-override' + destination: {} + + - list: + elements: + - cluster: engineering-prod + url: https://1.2.3.4 + template: + metadata: {} + spec: + project: "project2" + source: + repoURL: https://github.com/infra-team/cluster-deployments.git + path: '{{.cluster}}-override2' + destination: {} + + template: + metadata: + name: '{{.cluster}}-guestbook' + spec: + project: "project" + source: + repoURL: https://github.com/infra-team/cluster-deployments.git + targetRevision: HEAD + path: guestbook/{{.cluster}} + destination: + server: '{{.url}}' + namespace: guestbook diff --git a/applicationset/examples/git-generator-directory/cluster-addons/argo-workflows/kustomization.yaml b/applicationset/examples/git-generator-directory/cluster-addons/argo-workflows/kustomization.yaml new file mode 100644 index 0000000000000..6d0249813affd --- /dev/null +++ b/applicationset/examples/git-generator-directory/cluster-addons/argo-workflows/kustomization.yaml @@ -0,0 +1,6 @@ +#namePrefix: kustomize- + +resources: +- https://github.com/argoproj/argo-workflows/releases/download/v3.4.0/namespace-install.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization diff --git a/applicationset/examples/git-generator-directory/cluster-addons/prometheus-operator/Chart.yaml b/applicationset/examples/git-generator-directory/cluster-addons/prometheus-operator/Chart.yaml new file mode 100644 index 0000000000000..34004adfbca07 --- /dev/null +++ b/applicationset/examples/git-generator-directory/cluster-addons/prometheus-operator/Chart.yaml @@ -0,0 +1,14 @@ +apiVersion: v2 +name: helm-prometheus-operator + 
+type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +appVersion: "1.0" diff --git a/applicationset/examples/git-generator-directory/cluster-addons/prometheus-operator/requirements.yaml b/applicationset/examples/git-generator-directory/cluster-addons/prometheus-operator/requirements.yaml new file mode 100644 index 0000000000000..f498af068a5dd --- /dev/null +++ b/applicationset/examples/git-generator-directory/cluster-addons/prometheus-operator/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: +- name: kube-prometheus-stack + version: 40.5.0 + repository: https://prometheus-community.github.io/helm-charts diff --git a/applicationset/examples/git-generator-directory/cluster-addons/prometheus-operator/values.yaml b/applicationset/examples/git-generator-directory/cluster-addons/prometheus-operator/values.yaml new file mode 100644 index 0000000000000..f09e9043f5db6 --- /dev/null +++ b/applicationset/examples/git-generator-directory/cluster-addons/prometheus-operator/values.yaml @@ -0,0 +1 @@ +# Blank values.yaml diff --git a/applicationset/examples/git-generator-directory/excludes/cluster-addons/argo-workflows/kustomization.yaml b/applicationset/examples/git-generator-directory/excludes/cluster-addons/argo-workflows/kustomization.yaml new file mode 100644 index 0000000000000..6d0249813affd --- /dev/null +++ b/applicationset/examples/git-generator-directory/excludes/cluster-addons/argo-workflows/kustomization.yaml @@ -0,0 +1,6 @@ +#namePrefix: kustomize- + +resources: +- 
https://github.com/argoproj/argo-workflows/releases/download/v3.4.0/namespace-install.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization diff --git a/applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook/Chart.yaml b/applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook/Chart.yaml new file mode 100644 index 0000000000000..6fac831761d30 --- /dev/null +++ b/applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook/Chart.yaml @@ -0,0 +1,23 @@ +apiVersion: v2 +name: helm-guestbook +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. 
+appVersion: "1.0" diff --git a/applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook/templates/NOTES.txt b/applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook/templates/NOTES.txt new file mode 100644 index 0000000000000..37a1485f894ea --- /dev/null +++ b/applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook/templates/NOTES.txt @@ -0,0 +1,19 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range .Values.ingress.hosts }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "helm-guestbook.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "helm-guestbook.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "helm-guestbook.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "helm-guestbook.name" . 
}},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook/templates/_helpers.tpl b/applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook/templates/_helpers.tpl new file mode 100644 index 0000000000000..20f5d8e4ecd96 --- /dev/null +++ b/applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "helm-guestbook.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "helm-guestbook.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "helm-guestbook.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook/templates/deployment.yaml b/applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook/templates/deployment.yaml new file mode 100644 index 0000000000000..980df256ebcb4 --- /dev/null +++ b/applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook/templates/deployment.yaml @@ -0,0 +1,52 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "helm-guestbook.fullname" . }} + labels: + app: {{ template "helm-guestbook.name" . }} + chart: {{ template "helm-guestbook.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.replicaCount }} + revisionHistoryLimit: 3 + selector: + matchLabels: + app: {{ template "helm-guestbook.name" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "helm-guestbook.name" . }} + release: {{ .Release.Name }} + spec: + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: 80 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: http + readinessProbe: + httpGet: + path: / + port: http + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . 
| indent 8 }} + {{- end }} diff --git a/applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook/templates/service.yaml b/applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook/templates/service.yaml new file mode 100644 index 0000000000000..b7aab0ba3c919 --- /dev/null +++ b/applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook/templates/service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "helm-guestbook.fullname" . }} + labels: + app: {{ template "helm-guestbook.name" . }} + chart: {{ template "helm-guestbook.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + app: {{ template "helm-guestbook.name" . }} + release: {{ .Release.Name }} diff --git a/applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook/values-production.yaml b/applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook/values-production.yaml new file mode 100644 index 0000000000000..42838b76e43d0 --- /dev/null +++ b/applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook/values-production.yaml @@ -0,0 +1,2 @@ +service: + type: LoadBalancer diff --git a/applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook/values.yaml b/applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook/values.yaml new file mode 100644 index 0000000000000..3666712aec29b --- /dev/null +++ b/applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook/values.yaml @@ -0,0 +1,45 @@ +# Default values for helm-guestbook. +# This is a YAML-formatted file. 
+# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: gcr.io/heptio-images/ks-guestbook-demo + tag: 0.1 + pullPolicy: IfNotPresent + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + path: / + hosts: + - chart-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/applicationset/examples/git-generator-directory/excludes/cluster-addons/prometheus-operator/Chart.yaml b/applicationset/examples/git-generator-directory/excludes/cluster-addons/prometheus-operator/Chart.yaml new file mode 100644 index 0000000000000..34004adfbca07 --- /dev/null +++ b/applicationset/examples/git-generator-directory/excludes/cluster-addons/prometheus-operator/Chart.yaml @@ -0,0 +1,14 @@ +apiVersion: v2 +name: helm-prometheus-operator + +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. 
They should reflect the version the application is using. +appVersion: "1.0" diff --git a/applicationset/examples/git-generator-directory/excludes/cluster-addons/prometheus-operator/requirements.yaml b/applicationset/examples/git-generator-directory/excludes/cluster-addons/prometheus-operator/requirements.yaml new file mode 100644 index 0000000000000..f498af068a5dd --- /dev/null +++ b/applicationset/examples/git-generator-directory/excludes/cluster-addons/prometheus-operator/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: +- name: kube-prometheus-stack + version: 40.5.0 + repository: https://prometheus-community.github.io/helm-charts diff --git a/applicationset/examples/git-generator-directory/excludes/cluster-addons/prometheus-operator/values.yaml b/applicationset/examples/git-generator-directory/excludes/cluster-addons/prometheus-operator/values.yaml new file mode 100644 index 0000000000000..f09e9043f5db6 --- /dev/null +++ b/applicationset/examples/git-generator-directory/excludes/cluster-addons/prometheus-operator/values.yaml @@ -0,0 +1 @@ +# Blank values.yaml diff --git a/applicationset/examples/git-generator-directory/excludes/git-directories-exclude-example-fasttemplate.yaml b/applicationset/examples/git-generator-directory/excludes/git-directories-exclude-example-fasttemplate.yaml new file mode 100644 index 0000000000000..fdd2c42af04f5 --- /dev/null +++ b/applicationset/examples/git-generator-directory/excludes/git-directories-exclude-example-fasttemplate.yaml @@ -0,0 +1,29 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: cluster-addons + namespace: argocd +spec: + generators: + - git: + repoURL: https://github.com/argoproj/argo-cd.git + revision: HEAD + directories: + - path: applicationset/examples/git-generator-directory/excludes/cluster-addons/* + - exclude: true + path: applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook + template: + metadata: + name: '{{path.basename}}' + spec: + 
project: "my-project" + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: '{{path}}' + destination: + server: https://kubernetes.default.svc + namespace: '{{path.basename}}' + syncPolicy: + syncOptions: + - CreateNamespace=true diff --git a/applicationset/examples/git-generator-directory/excludes/git-directories-exclude-example.yaml b/applicationset/examples/git-generator-directory/excludes/git-directories-exclude-example.yaml new file mode 100644 index 0000000000000..a021a3d0c66d3 --- /dev/null +++ b/applicationset/examples/git-generator-directory/excludes/git-directories-exclude-example.yaml @@ -0,0 +1,31 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: cluster-addons + namespace: argocd +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - git: + repoURL: https://github.com/argoproj/argo-cd.git + revision: HEAD + directories: + - path: applicationset/examples/git-generator-directory/excludes/cluster-addons/* + - exclude: true + path: applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook + template: + metadata: + name: '{{.path.basename}}' + spec: + project: "my-project" + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: '{{.path}}' + destination: + server: https://kubernetes.default.svc + namespace: '{{.path.basename}}' + syncPolicy: + syncOptions: + - CreateNamespace=true diff --git a/applicationset/examples/git-generator-directory/git-directories-example-fasttemplate.yaml b/applicationset/examples/git-generator-directory/git-directories-example-fasttemplate.yaml new file mode 100644 index 0000000000000..fdca6a53ae496 --- /dev/null +++ b/applicationset/examples/git-generator-directory/git-directories-example-fasttemplate.yaml @@ -0,0 +1,27 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: cluster-addons + namespace: argocd +spec: + generators: + - git: + 
repoURL: https://github.com/argoproj/argo-cd.git + revision: HEAD + directories: + - path: applicationset/examples/git-generator-directory/cluster-addons/* + template: + metadata: + name: '{{path.basename}}' + spec: + project: "my-project" + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: '{{path}}' + destination: + server: https://kubernetes.default.svc + namespace: '{{path.basename}}' + syncPolicy: + syncOptions: + - CreateNamespace=true diff --git a/applicationset/examples/git-generator-directory/git-directories-example.yaml b/applicationset/examples/git-generator-directory/git-directories-example.yaml new file mode 100644 index 0000000000000..6fc16b4d39384 --- /dev/null +++ b/applicationset/examples/git-generator-directory/git-directories-example.yaml @@ -0,0 +1,29 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: cluster-addons + namespace: argocd +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - git: + repoURL: https://github.com/argoproj/argo-cd.git + revision: HEAD + directories: + - path: applicationset/examples/git-generator-directory/cluster-addons/* + template: + metadata: + name: '{{.path.basename}}' + spec: + project: "my-project" + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: '{{.path.path}}' + destination: + server: https://kubernetes.default.svc + namespace: '{{.path.basename}}' + syncPolicy: + syncOptions: + - CreateNamespace=true diff --git a/applicationset/examples/git-generator-files-discovery/apps/guestbook/guestbook-ui-deployment.yaml b/applicationset/examples/git-generator-files-discovery/apps/guestbook/guestbook-ui-deployment.yaml new file mode 100644 index 0000000000000..8a0975e363539 --- /dev/null +++ b/applicationset/examples/git-generator-files-discovery/apps/guestbook/guestbook-ui-deployment.yaml @@ -0,0 +1,20 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: guestbook-ui 
+spec: + replicas: 1 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: guestbook-ui + template: + metadata: + labels: + app: guestbook-ui + spec: + containers: + - image: gcr.io/heptio-images/ks-guestbook-demo:0.2 + name: guestbook-ui + ports: + - containerPort: 80 diff --git a/applicationset/examples/git-generator-files-discovery/apps/guestbook/guestbook-ui-svc.yaml b/applicationset/examples/git-generator-files-discovery/apps/guestbook/guestbook-ui-svc.yaml new file mode 100644 index 0000000000000..e619b5cd39f7b --- /dev/null +++ b/applicationset/examples/git-generator-files-discovery/apps/guestbook/guestbook-ui-svc.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Service +metadata: + name: guestbook-ui +spec: + ports: + - port: 80 + targetPort: 80 + selector: + app: guestbook-ui diff --git a/applicationset/examples/git-generator-files-discovery/apps/guestbook/kustomization.yaml b/applicationset/examples/git-generator-files-discovery/apps/guestbook/kustomization.yaml new file mode 100644 index 0000000000000..cbaba9021cb23 --- /dev/null +++ b/applicationset/examples/git-generator-files-discovery/apps/guestbook/kustomization.yaml @@ -0,0 +1,7 @@ +namePrefix: kustomize- + +resources: +- guestbook-ui-deployment.yaml +- guestbook-ui-svc.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization diff --git a/applicationset/examples/git-generator-files-discovery/cluster-config/engineering/dev/config.json b/applicationset/examples/git-generator-files-discovery/cluster-config/engineering/dev/config.json new file mode 100644 index 0000000000000..51b23ca1bbe4f --- /dev/null +++ b/applicationset/examples/git-generator-files-discovery/cluster-config/engineering/dev/config.json @@ -0,0 +1,9 @@ +{ + "aws_account": "123456", + "asset_id": "11223344", + "cluster": { + "owner": "cluster-admin@company.com", + "name": "engineering-dev", + "address": "http://1.2.3.4" + } +} diff --git 
a/applicationset/examples/git-generator-files-discovery/cluster-config/engineering/prod/config.json b/applicationset/examples/git-generator-files-discovery/cluster-config/engineering/prod/config.json new file mode 100644 index 0000000000000..4a48be12ff3da --- /dev/null +++ b/applicationset/examples/git-generator-files-discovery/cluster-config/engineering/prod/config.json @@ -0,0 +1,9 @@ +{ + "aws_account": "123456", + "asset_id": "11223344", + "cluster": { + "owner": "cluster-admin@company.com", + "name": "engineering-prod", + "address": "http://1.2.3.4" + } +} diff --git a/applicationset/examples/git-generator-files-discovery/git-generator-files-fasttemplate.yaml b/applicationset/examples/git-generator-files-discovery/git-generator-files-fasttemplate.yaml new file mode 100644 index 0000000000000..99923b72c565a --- /dev/null +++ b/applicationset/examples/git-generator-files-discovery/git-generator-files-fasttemplate.yaml @@ -0,0 +1,24 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + generators: + - git: + repoURL: https://github.com/argoproj/argo-cd.git + revision: HEAD + files: + - path: "applicationset/examples/git-generator-files-discovery/cluster-config/**/config.json" + template: + metadata: + name: '{{cluster.name}}-guestbook' + spec: + project: default + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: "applicationset/examples/git-generator-files-discovery/apps/guestbook" + destination: + server: https://kubernetes.default.svc + #server: '{{cluster.address}}' + namespace: guestbook diff --git a/applicationset/examples/git-generator-files-discovery/git-generator-files.yaml b/applicationset/examples/git-generator-files-discovery/git-generator-files.yaml new file mode 100644 index 0000000000000..78a0136655498 --- /dev/null +++ b/applicationset/examples/git-generator-files-discovery/git-generator-files.yaml @@ -0,0 +1,26 @@ +apiVersion: argoproj.io/v1alpha1 +kind: 
ApplicationSet +metadata: + name: guestbook +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - git: + repoURL: https://github.com/argoproj/argo-cd.git + revision: HEAD + files: + - path: "applicationset/examples/git-generator-files-discovery/cluster-config/**/config.json" + template: + metadata: + name: '{{.cluster.name}}-guestbook' + spec: + project: default + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: "applicationset/examples/git-generator-files-discovery/apps/guestbook" + destination: + server: https://kubernetes.default.svc + #server: '{{.cluster.address}}' + namespace: guestbook diff --git a/applicationset/examples/list-generator/guestbook/engineering-dev/guestbook-ui-deployment.yaml b/applicationset/examples/list-generator/guestbook/engineering-dev/guestbook-ui-deployment.yaml new file mode 100644 index 0000000000000..8a0975e363539 --- /dev/null +++ b/applicationset/examples/list-generator/guestbook/engineering-dev/guestbook-ui-deployment.yaml @@ -0,0 +1,20 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: guestbook-ui +spec: + replicas: 1 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: guestbook-ui + template: + metadata: + labels: + app: guestbook-ui + spec: + containers: + - image: gcr.io/heptio-images/ks-guestbook-demo:0.2 + name: guestbook-ui + ports: + - containerPort: 80 diff --git a/applicationset/examples/list-generator/guestbook/engineering-dev/guestbook-ui-svc.yaml b/applicationset/examples/list-generator/guestbook/engineering-dev/guestbook-ui-svc.yaml new file mode 100644 index 0000000000000..e8a4a27fbae40 --- /dev/null +++ b/applicationset/examples/list-generator/guestbook/engineering-dev/guestbook-ui-svc.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Service +metadata: + name: guestbook-ui +spec: + ports: + - port: 80 + targetPort: 80 + selector: + app: guestbook-ui diff --git 
a/applicationset/examples/list-generator/guestbook/engineering-prod/guestbook-ui-deployment.yaml b/applicationset/examples/list-generator/guestbook/engineering-prod/guestbook-ui-deployment.yaml new file mode 100644 index 0000000000000..8a0975e363539 --- /dev/null +++ b/applicationset/examples/list-generator/guestbook/engineering-prod/guestbook-ui-deployment.yaml @@ -0,0 +1,20 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: guestbook-ui +spec: + replicas: 1 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: guestbook-ui + template: + metadata: + labels: + app: guestbook-ui + spec: + containers: + - image: gcr.io/heptio-images/ks-guestbook-demo:0.2 + name: guestbook-ui + ports: + - containerPort: 80 diff --git a/applicationset/examples/list-generator/guestbook/engineering-prod/guestbook-ui-svc.yaml b/applicationset/examples/list-generator/guestbook/engineering-prod/guestbook-ui-svc.yaml new file mode 100644 index 0000000000000..e8a4a27fbae40 --- /dev/null +++ b/applicationset/examples/list-generator/guestbook/engineering-prod/guestbook-ui-svc.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Service +metadata: + name: guestbook-ui +spec: + ports: + - port: 80 + targetPort: 80 + selector: + app: guestbook-ui diff --git a/applicationset/examples/list-generator/list-elementsYaml-example.yaml b/applicationset/examples/list-generator/list-elementsYaml-example.yaml new file mode 100644 index 0000000000000..f3aa3f34dd57d --- /dev/null +++ b/applicationset/examples/list-generator/list-elementsYaml-example.yaml @@ -0,0 +1,14 @@ +key: + components: + - name: component1 + chart: podinfo + version: "6.3.2" + releaseName: component1 + repoUrl: "https://stefanprodan.github.io/podinfo" + namespace: component1 + - name: component2 + chart: podinfo + version: "6.3.3" + releaseName: component2 + repoUrl: "ghcr.io/stefanprodan/charts" + namespace: component2 diff --git a/applicationset/examples/list-generator/list-example-fasttemplate.yaml 
b/applicationset/examples/list-generator/list-example-fasttemplate.yaml new file mode 100644 index 0000000000000..a671ee45fab02 --- /dev/null +++ b/applicationset/examples/list-generator/list-example-fasttemplate.yaml @@ -0,0 +1,24 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + generators: + - list: + elements: + - cluster: engineering-dev + url: https://kubernetes.default.svc + - cluster: engineering-prod + url: https://kubernetes.default.svc + template: + metadata: + name: '{{cluster}}-guestbook' + spec: + project: default + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: applicationset/examples/list-generator/guestbook/{{cluster}} + destination: + server: '{{url}}' + namespace: guestbook diff --git a/applicationset/examples/list-generator/list-example.yaml b/applicationset/examples/list-generator/list-example.yaml new file mode 100644 index 0000000000000..03e33130bad84 --- /dev/null +++ b/applicationset/examples/list-generator/list-example.yaml @@ -0,0 +1,26 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - list: + elements: + - cluster: engineering-dev + url: https://kubernetes.default.svc + - cluster: engineering-prod + url: https://kubernetes.default.svc + template: + metadata: + name: '{{.cluster}}-guestbook' + spec: + project: default + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: applicationset/examples/list-generator/guestbook/{{.cluster}} + destination: + server: '{{.url}}' + namespace: guestbook diff --git a/applicationset/examples/matrix/cluster-addons/argo-workflows/kustomization.yaml b/applicationset/examples/matrix/cluster-addons/argo-workflows/kustomization.yaml new file mode 100644 index 0000000000000..68cd552162014 --- /dev/null +++ 
b/applicationset/examples/matrix/cluster-addons/argo-workflows/kustomization.yaml @@ -0,0 +1,6 @@ +#namePrefix: kustomize- + +resources: +- namespace-install.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization diff --git a/applicationset/examples/matrix/cluster-addons/argo-workflows/namespace-install.yaml b/applicationset/examples/matrix/cluster-addons/argo-workflows/namespace-install.yaml new file mode 100644 index 0000000000000..c140cd6297f98 --- /dev/null +++ b/applicationset/examples/matrix/cluster-addons/argo-workflows/namespace-install.yaml @@ -0,0 +1,417 @@ +# This is an auto-generated file. DO NOT EDIT +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: clusterworkflowtemplates.argoproj.io +spec: + group: argoproj.io + names: + kind: ClusterWorkflowTemplate + listKind: ClusterWorkflowTemplateList + plural: clusterworkflowtemplates + shortNames: + - clusterwftmpl + - cwft + singular: clusterworkflowtemplate + scope: Cluster + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: cronworkflows.argoproj.io +spec: + group: argoproj.io + names: + kind: CronWorkflow + listKind: CronWorkflowList + plural: cronworkflows + shortNames: + - cwf + - cronwf + singular: cronworkflow + scope: Namespaced + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: workfloweventbindings.argoproj.io +spec: + group: argoproj.io + names: + kind: WorkflowEventBinding + listKind: WorkflowEventBindingList + plural: workfloweventbindings + shortNames: + - wfeb + singular: workfloweventbinding + scope: Namespaced + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + 
name: workflows.argoproj.io +spec: + additionalPrinterColumns: + - JSONPath: .status.phase + description: Status of the workflow + name: Status + type: string + - JSONPath: .status.startedAt + description: When the workflow was started + format: date-time + name: Age + type: date + group: argoproj.io + names: + kind: Workflow + listKind: WorkflowList + plural: workflows + shortNames: + - wf + singular: workflow + scope: Namespaced + subresources: {} + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: workflowtemplates.argoproj.io +spec: + group: argoproj.io + names: + kind: WorkflowTemplate + listKind: WorkflowTemplateList + plural: workflowtemplates + shortNames: + - wftmpl + singular: workflowtemplate + scope: Namespaced + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: argo +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: argo-server +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: argo-role +rules: +- apiGroups: + - "" + resources: + - pods + - pods/exec + verbs: + - create + - get + - list + - watch + - update + - patch + - delete +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - watch + - list +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - create + - delete + - get +- apiGroups: + - argoproj.io + resources: + - workflows + - workflows/finalizers + verbs: + - get + - list + - watch + - update + - patch + - delete + - create +- apiGroups: + - argoproj.io + resources: + - workflowtemplates + - workflowtemplates/finalizers + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - get + - list +- apiGroups: + - "" + resources: + - secrets + verbs: + - get +- apiGroups: + - argoproj.io + resources: + - cronworkflows 
+ - cronworkflows/finalizers + verbs: + - get + - list + - watch + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - get + - delete +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: argo-server-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - watch + - list +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +- apiGroups: + - "" + resources: + - pods + - pods/exec + - pods/log + verbs: + - get + - list + - watch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - watch + - create + - patch +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - get + - list +- apiGroups: + - argoproj.io + resources: + - workflows + - workfloweventbindings + - workflowtemplates + - cronworkflows + - cronworkflows/finalizers + verbs: + - create + - get + - list + - watch + - update + - patch + - delete +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: argo-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: argo-role +subjects: +- kind: ServiceAccount + name: argo +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: argo-server-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: argo-server-role +subjects: +- kind: ServiceAccount + name: argo-server +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: workflow-controller-configmap +--- +apiVersion: v1 +kind: Service +metadata: + name: argo-server +spec: + ports: + - name: web + port: 2746 + targetPort: 2746 + selector: + app: argo-server +--- +apiVersion: v1 +kind: Service +metadata: + name: workflow-controller-metrics +spec: + ports: + - name: metrics + port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: workflow-controller +--- +apiVersion: apps/v1 
+kind: Deployment +metadata: + name: argo-server +spec: + selector: + matchLabels: + app: argo-server + template: + metadata: + labels: + app: argo-server + spec: + containers: + - args: + - server + - --namespaced + image: argoproj/argocli:v2.12.5 + name: argo-server + ports: + - containerPort: 2746 + name: web + readinessProbe: + httpGet: + path: / + port: 2746 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 20 + volumeMounts: + - mountPath: /tmp + name: tmp + nodeSelector: + kubernetes.io/os: linux + securityContext: + runAsNonRoot: true + serviceAccountName: argo-server + volumes: + - emptyDir: {} + name: tmp +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: workflow-controller +spec: + selector: + matchLabels: + app: workflow-controller + template: + metadata: + labels: + app: workflow-controller + spec: + containers: + - args: + - --configmap + - workflow-controller-configmap + - --executor-image + - argoproj/argoexec:v2.12.5 + - --namespaced + command: + - workflow-controller + image: argoproj/workflow-controller:v2.12.5 + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: 30 + periodSeconds: 30 + name: workflow-controller + ports: + - containerPort: 9090 + name: metrics + nodeSelector: + kubernetes.io/os: linux + securityContext: + runAsNonRoot: true + serviceAccountName: argo diff --git a/applicationset/examples/matrix/cluster-addons/prometheus-operator/Chart.yaml b/applicationset/examples/matrix/cluster-addons/prometheus-operator/Chart.yaml new file mode 100644 index 0000000000000..8247e4204049a --- /dev/null +++ b/applicationset/examples/matrix/cluster-addons/prometheus-operator/Chart.yaml @@ -0,0 +1,14 @@ +apiVersion: v2 +name: helm-prometheus-operator + +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. 
+# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +appVersion: "1.0" \ No newline at end of file diff --git a/applicationset/examples/matrix/cluster-addons/prometheus-operator/requirements.yaml b/applicationset/examples/matrix/cluster-addons/prometheus-operator/requirements.yaml new file mode 100644 index 0000000000000..2e34cfccee057 --- /dev/null +++ b/applicationset/examples/matrix/cluster-addons/prometheus-operator/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: +- name: kube-prometheus-stack + version: 9.4.10 + repository: https://prometheus-community.github.io/helm-charts diff --git a/applicationset/examples/matrix/cluster-addons/prometheus-operator/values.yaml b/applicationset/examples/matrix/cluster-addons/prometheus-operator/values.yaml new file mode 100644 index 0000000000000..f09e9043f5db6 --- /dev/null +++ b/applicationset/examples/matrix/cluster-addons/prometheus-operator/values.yaml @@ -0,0 +1 @@ +# Blank values.yaml diff --git a/applicationset/examples/matrix/cluster-and-git-fasttemplate.yaml b/applicationset/examples/matrix/cluster-and-git-fasttemplate.yaml new file mode 100644 index 0000000000000..4c7497f71242e --- /dev/null +++ b/applicationset/examples/matrix/cluster-and-git-fasttemplate.yaml @@ -0,0 +1,33 @@ +# This example demonstrates the combining of the git generator with a cluster generator +# The expected output would be an application per git directory and a cluster (application_count = git directory * clusters) +# +# +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: cluster-git +spec: + generators: + - matrix: + generators: + - git: + repoURL: https://github.com/argoproj/argo-cd.git + revision: HEAD 
+ directories: + - path: applicationset/examples/matrix/cluster-addons/* + - clusters: + selector: + matchLabels: + argocd.argoproj.io/secret-type: cluster + template: + metadata: + name: '{{path.basename}}-{{name}}' + spec: + project: '{{metadata.labels.environment}}' + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: '{{path}}' + destination: + server: '{{server}}' + namespace: '{{path.basename}}' diff --git a/applicationset/examples/matrix/cluster-and-git.yaml b/applicationset/examples/matrix/cluster-and-git.yaml new file mode 100644 index 0000000000000..d58d2fa5f83f6 --- /dev/null +++ b/applicationset/examples/matrix/cluster-and-git.yaml @@ -0,0 +1,35 @@ +# This example demonstrates the combining of the git generator with a cluster generator +# The expected output would be an application per git directory and a cluster (application_count = git directory * clusters) +# +# +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: cluster-git +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - matrix: + generators: + - git: + repoURL: https://github.com/argoproj/argo-cd.git + revision: HEAD + directories: + - path: applicationset/examples/matrix/cluster-addons/* + - clusters: + selector: + matchLabels: + argocd.argoproj.io/secret-type: cluster + template: + metadata: + name: '{{.path.basename}}-{{.name}}' + spec: + project: '{{.metadata.labels.environment}}' + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: '{{.path.path}}' + destination: + server: '{{.server}}' + namespace: '{{.path.basename}}' diff --git a/applicationset/examples/matrix/list-and-git-fasttemplate.yaml b/applicationset/examples/matrix/list-and-git-fasttemplate.yaml new file mode 100644 index 0000000000000..33f5511902777 --- /dev/null +++ b/applicationset/examples/matrix/list-and-git-fasttemplate.yaml @@ -0,0 +1,39 @@ +# This example demonstrates the combining of the 
git generator with a list generator +# The expected output would be an application per git directory and a list entry (application_count = git directory * list entries) +# +# +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: list-git +spec: + generators: + - matrix: + generators: + - git: + repoURL: https://github.com/argoproj/argo-cd.git + revision: HEAD + directories: + - path: applicationset/examples/matrix/cluster-addons/* + - list: + elements: + - cluster: engineering-dev + url: https://1.2.3.4 + values: + project: dev + - cluster: engineering-prod + url: https://2.4.6.8 + values: + project: prod + template: + metadata: + name: '{{path.basename}}-{{cluster}}' + spec: + project: '{{values.project}}' + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: '{{path}}' + destination: + server: '{{url}}' + namespace: '{{path.basename}}' diff --git a/applicationset/examples/matrix/list-and-git.yaml b/applicationset/examples/matrix/list-and-git.yaml new file mode 100644 index 0000000000000..9ba04345476b4 --- /dev/null +++ b/applicationset/examples/matrix/list-and-git.yaml @@ -0,0 +1,41 @@ +# This example demonstrates the combining of the git generator with a list generator +# The expected output would be an application per git directory and a list entry (application_count = git directory * list entries) +# +# +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: list-git +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - matrix: + generators: + - git: + repoURL: https://github.com/argoproj/argo-cd.git + revision: HEAD + directories: + - path: applicationset/examples/matrix/cluster-addons/* + - list: + elements: + - cluster: engineering-dev + url: https://1.2.3.4 + values: + project: dev + - cluster: engineering-prod + url: https://2.4.6.8 + values: + project: prod + template: + metadata: + name: '{{.path.basename}}-{{.cluster}}' + spec: + project: 
'{{.values.project}}' + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: '{{.path.path}}' + destination: + server: '{{.url}}' + namespace: '{{.path.basename}}' diff --git a/applicationset/examples/matrix/list-and-list-fasttemplate.yaml b/applicationset/examples/matrix/list-and-list-fasttemplate.yaml new file mode 100644 index 0000000000000..7e1ac1237ad29 --- /dev/null +++ b/applicationset/examples/matrix/list-and-list-fasttemplate.yaml @@ -0,0 +1,37 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: list-and-list + namespace: argocd +spec: + generators: + - matrix: + generators: + - list: + elements: + - cluster: engineering-dev + url: https://kubernetes.default.svc + values: + project: default + - cluster: engineering-prod + url: https://kubernetes.default.svc + values: + project: default + - list: + elements: + - values: + suffix: '1' + - values: + suffix: '2' + template: + metadata: + name: '{{cluster}}-{{values.suffix}}' + spec: + project: '{{values.project}}' + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: '{{path}}' + destination: + server: '{{url}}' + namespace: '{{path.basename}}' diff --git a/applicationset/examples/matrix/list-and-list.yaml b/applicationset/examples/matrix/list-and-list.yaml new file mode 100644 index 0000000000000..f88189ba5ec01 --- /dev/null +++ b/applicationset/examples/matrix/list-and-list.yaml @@ -0,0 +1,39 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: list-and-list + namespace: argocd +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - matrix: + generators: + - list: + elements: + - cluster: engineering-dev + url: https://kubernetes.default.svc + values: + project: default + - cluster: engineering-prod + url: https://kubernetes.default.svc + values: + project: default + - list: + elements: + - values: + suffix: '1' + - values: + suffix: '2' + template: + 
metadata: + name: '{{.cluster}}-{{.values.suffix}}' + spec: + project: '{{.values.project}}' + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: '{{.path.path}}' + destination: + server: '{{.url}}' + namespace: '{{.path.basename}}' diff --git a/applicationset/examples/matrix/matrix-and-union-in-matrix-fasttemplate.yaml b/applicationset/examples/matrix/matrix-and-union-in-matrix-fasttemplate.yaml new file mode 100644 index 0000000000000..eb5938382942d --- /dev/null +++ b/applicationset/examples/matrix/matrix-and-union-in-matrix-fasttemplate.yaml @@ -0,0 +1,67 @@ +# The matrix generator can contain other combination-type generators (matrix and union). But nested matrix and union +# generators cannot contain further-nested matrix or union generators. +# +# The generators are evaluated from most-nested to least-nested. In this case: +# 1. The union generator joins two lists to make 3 parameter sets. +# 2. The inner matrix generator takes the cartesian product of the two lists to make 4 parameters sets. +# 3. The outer matrix generator takes the cartesian product of the 3 union and the 4 inner matrix parameter sets to +# make 3*4=12 final parameter sets. +# 4. The 12 final parameter sets are evaluated against the top-level template to generate 12 Applications. 
+apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: matrix-and-union-in-matrix +spec: + generators: + - matrix: + generators: + - union: + mergeKeys: + - cluster + generators: + - list: + elements: + - cluster: engineering-dev + url: https://kubernetes.default.svc + values: + project: default + - cluster: engineering-prod + url: https://kubernetes.default.svc + values: + project: default + - list: + elements: + - cluster: engineering-dev + url: https://kubernetes.default.svc + values: + project: default + - cluster: engineering-test + url: https://kubernetes.default.svc + values: + project: default + - matrix: + generators: + - list: + elements: + - values: + suffix: '1' + - values: + suffix: '2' + - list: + elements: + - values: + prefix: 'first' + - values: + prefix: 'second' + template: + metadata: + name: '{{values.prefix}}-{{cluster}}-{{values.suffix}}' + spec: + project: '{{values.project}}' + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: '{{path}}' + destination: + server: '{{url}}' + namespace: '{{path.basename}}' diff --git a/applicationset/examples/matrix/matrix-and-union-in-matrix.yaml b/applicationset/examples/matrix/matrix-and-union-in-matrix.yaml new file mode 100644 index 0000000000000..e4fed589764a8 --- /dev/null +++ b/applicationset/examples/matrix/matrix-and-union-in-matrix.yaml @@ -0,0 +1,69 @@ +# The matrix generator can contain other combination-type generators (matrix and union). But nested matrix and union +# generators cannot contain further-nested matrix or union generators. +# +# The generators are evaluated from most-nested to least-nested. In this case: +# 1. The union generator joins two lists to make 3 parameter sets. +# 2. The inner matrix generator takes the cartesian product of the two lists to make 4 parameters sets. +# 3. 
The outer matrix generator takes the cartesian product of the 3 union and the 4 inner matrix parameter sets to +# make 3*4=12 final parameter sets. +# 4. The 12 final parameter sets are evaluated against the top-level template to generate 12 Applications. +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: matrix-and-union-in-matrix +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - matrix: + generators: + - union: + mergeKeys: + - cluster + generators: + - list: + elements: + - cluster: engineering-dev + url: https://kubernetes.default.svc + values: + project: default + - cluster: engineering-prod + url: https://kubernetes.default.svc + values: + project: default + - list: + elements: + - cluster: engineering-dev + url: https://kubernetes.default.svc + values: + project: default + - cluster: engineering-test + url: https://kubernetes.default.svc + values: + project: default + - matrix: + generators: + - list: + elements: + - values: + suffix: '1' + - values: + suffix: '2' + - list: + elements: + - values: + prefix: 'first' + - values: + prefix: 'second' + template: + metadata: + name: '{{.values.prefix}}-{{.cluster}}-{{.values.suffix}}' + spec: + project: '{{.values.project}}' + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: '{{.path.path}}' + destination: + server: '{{.url}}' + namespace: '{{.path.basename}}' diff --git a/applicationset/examples/merge/merge-clusters-and-list-fasttemplate.yaml b/applicationset/examples/merge/merge-clusters-and-list-fasttemplate.yaml new file mode 100644 index 0000000000000..5b6971238edd3 --- /dev/null +++ b/applicationset/examples/merge/merge-clusters-and-list-fasttemplate.yaml @@ -0,0 +1,44 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: merge-clusters-and-list +spec: + generators: + - merge: + mergeKeys: + - server + generators: + - clusters: + values: + kafka: 'true' + redis: 'false' + # For 
clusters with a specific label, enable Kafka. + - clusters: + selector: + matchLabels: + use-kafka: 'false' + values: + kafka: 'false' + # For a specific cluster, enable Redis. + - list: + elements: + - server: https://some-specific-cluster + values.redis: 'true' + template: + metadata: + name: '{{name}}' + spec: + project: default + source: + repoURL: https://github.com/argoproj/argocd-example-apps/ + targetRevision: HEAD + path: helm-guestbook + helm: + parameters: + - name: kafka + value: '{{values.kafka}}' + - name: redis + value: '{{values.redis}}' + destination: + server: '{{server}}' + namespace: default diff --git a/applicationset/examples/merge/merge-clusters-and-list.yaml b/applicationset/examples/merge/merge-clusters-and-list.yaml new file mode 100644 index 0000000000000..c91f4fea47d7b --- /dev/null +++ b/applicationset/examples/merge/merge-clusters-and-list.yaml @@ -0,0 +1,46 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: merge-clusters-and-list +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - merge: + mergeKeys: + - server + generators: + - clusters: + values: + kafka: 'true' + redis: 'false' + # For clusters with a specific label, enable Kafka. + - clusters: + selector: + matchLabels: + use-kafka: 'false' + values: + kafka: 'false' + # For a specific cluster, enable Redis. 
+ - list: + elements: + - server: https://some-specific-cluster + values.redis: 'true' + template: + metadata: + name: '{{.name}}' + spec: + project: default + source: + repoURL: https://github.com/argoproj/argocd-example-apps/ + targetRevision: HEAD + path: helm-guestbook + helm: + parameters: + - name: kafka + value: '{{.values.kafka}}' + - name: redis + value: '{{.values.redis}}' + destination: + server: '{{.server}}' + namespace: default diff --git a/applicationset/examples/merge/merge-two-matrixes-fasttemplate.yaml b/applicationset/examples/merge/merge-two-matrixes-fasttemplate.yaml new file mode 100644 index 0000000000000..f47463d7293c5 --- /dev/null +++ b/applicationset/examples/merge/merge-two-matrixes-fasttemplate.yaml @@ -0,0 +1,43 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: merge-two-matrixes +spec: + generators: + - merge: + mergeKeys: + - server + - environment + generators: + - matrix: + generators: + - clusters: + values: + replicaCount: '2' + - list: + elements: + - environment: staging + namespace: guestbook-non-prod + - environment: prod + namespace: guestbook + - list: + elements: + - server: https://kubernetes.default.svc + environment: staging + values.replicaCount: '1' + template: + metadata: + name: '{{name}}-guestbook-{{environment}}' + spec: + project: default + source: + repoURL: https://github.com/argoproj/argocd-example-apps/ + targetRevision: HEAD + path: helm-guestbook + helm: + parameters: + - name: replicaCount + value: '{{values.replicaCount}}' + destination: + server: '{{server}}' + namespace: '{{namespace}}' diff --git a/applicationset/examples/merge/merge-two-matrixes.yaml b/applicationset/examples/merge/merge-two-matrixes.yaml new file mode 100644 index 0000000000000..f864ac6948b2d --- /dev/null +++ b/applicationset/examples/merge/merge-two-matrixes.yaml @@ -0,0 +1,45 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: merge-two-matrixes +spec: + goTemplate: true + 
goTemplateOptions: ["missingkey=error"] + generators: + - merge: + mergeKeys: + - server + - environment + generators: + - matrix: + generators: + - clusters: + values: + replicaCount: '2' + - list: + elements: + - environment: staging + namespace: guestbook-non-prod + - environment: prod + namespace: guestbook + - list: + elements: + - server: https://kubernetes.default.svc + environment: staging + values.replicaCount: '1' + template: + metadata: + name: '{{.name}}-guestbook-{{.environment}}' + spec: + project: default + source: + repoURL: https://github.com/argoproj/argocd-example-apps/ + targetRevision: HEAD + path: helm-guestbook + helm: + parameters: + - name: replicaCount + value: '{{.values.replicaCount}}' + destination: + server: '{{.server}}' + namespace: '{{.namespace}}' diff --git a/applicationset/examples/pull-request-generator/pull-request-example-fasttemplate.yaml b/applicationset/examples/pull-request-generator/pull-request-example-fasttemplate.yaml new file mode 100644 index 0000000000000..e5d2d5adc0ad8 --- /dev/null +++ b/applicationset/examples/pull-request-generator/pull-request-example-fasttemplate.yaml @@ -0,0 +1,40 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: myapp +spec: + generators: + - pullRequest: + github: + # The GitHub organization or user. + owner: myorg + # The Github repository + repo: myrepo + # For GitHub Enterprise. (optional) + api: https://git.example.com/ + # Reference to a Secret containing an access token. (optional) + tokenRef: + secretName: github-token + key: token + # Labels is used to filter the PRs that you want to target. 
(optional) + labels: + - preview + template: + metadata: + name: 'myapp-{{ branch }}-{{ number }}' + spec: + source: + repoURL: 'https://github.com/myorg/myrepo.git' + targetRevision: '{{ head_sha }}' + path: helm-guestbook + helm: + parameters: + - name: "image.tag" + value: "pull-{{ head_sha }}" + project: default + destination: + server: https://kubernetes.default.svc + namespace: "{{ branch }}-{{ number }}" + syncPolicy: + syncOptions: + - CreateNamespace=true diff --git a/applicationset/examples/pull-request-generator/pull-request-example.yaml b/applicationset/examples/pull-request-generator/pull-request-example.yaml new file mode 100644 index 0000000000000..d8ad8502b9b13 --- /dev/null +++ b/applicationset/examples/pull-request-generator/pull-request-example.yaml @@ -0,0 +1,44 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: myapp +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - pullRequest: + github: + # The GitHub organization or user. + owner: myorg + # The Github repository + repo: myrepo + # For GitHub Enterprise. (optional) + api: https://git.example.com/ + # Reference to a Secret containing an access token. (optional) + tokenRef: + secretName: github-token + key: token + # Labels is used to filter the PRs that you want to target. 
(optional) + labels: + - preview + template: + metadata: + name: 'myapp-{{ .branch }}-{{ .number }}' + labels: + key1: '{{ index .labels 0 }}' + spec: + source: + repoURL: 'https://github.com/myorg/myrepo.git' + targetRevision: '{{ .head_sha }}' + path: helm-guestbook + helm: + parameters: + - name: "image.tag" + value: "pull-{{ .head_sha }}" + project: default + destination: + server: https://kubernetes.default.svc + namespace: "{{ .branch }}-{{ .number }}" + syncPolicy: + syncOptions: + - CreateNamespace=true diff --git a/applicationset/examples/scm-provider-generator/scm-provider-example-fasttemplate-gitlab.yaml b/applicationset/examples/scm-provider-generator/scm-provider-example-fasttemplate-gitlab.yaml new file mode 100644 index 0000000000000..c62c151122d1f --- /dev/null +++ b/applicationset/examples/scm-provider-generator/scm-provider-example-fasttemplate-gitlab.yaml @@ -0,0 +1,26 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + generators: + - scmProvider: + gitlab: + api: https://gitlab.com + group: test-argocd-proton + includeSubgroups: true + cloneProtocol: https + filters: + - repositoryMatch: test-app + template: + metadata: + name: '{{ repository }}-guestbook' + spec: + project: "default" + source: + repoURL: '{{ url }}' + targetRevision: '{{ branch }}' + path: guestbook + destination: + server: https://kubernetes.default.svc + namespace: guestbook diff --git a/applicationset/examples/scm-provider-generator/scm-provider-example-fasttemplate.yaml b/applicationset/examples/scm-provider-generator/scm-provider-example-fasttemplate.yaml new file mode 100644 index 0000000000000..24d8ba41c2aed --- /dev/null +++ b/applicationset/examples/scm-provider-generator/scm-provider-example-fasttemplate.yaml @@ -0,0 +1,24 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + generators: + - scmProvider: + github: + organization: argoproj + cloneProtocol: https + filters: + - 
repositoryMatch: example-apps + template: + metadata: + name: '{{ repository }}-guestbook' + spec: + project: "default" + source: + repoURL: '{{ url }}' + targetRevision: '{{ branch }}' + path: guestbook + destination: + server: https://kubernetes.default.svc + namespace: guestbook diff --git a/applicationset/examples/scm-provider-generator/scm-provider-example.yaml b/applicationset/examples/scm-provider-generator/scm-provider-example.yaml new file mode 100644 index 0000000000000..c3ca2e5b3e5a9 --- /dev/null +++ b/applicationset/examples/scm-provider-generator/scm-provider-example.yaml @@ -0,0 +1,26 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - scmProvider: + github: + organization: argoproj + cloneProtocol: https + filters: + - repositoryMatch: example-apps + template: + metadata: + name: '{{ .repository }}-guestbook' + spec: + project: "default" + source: + repoURL: '{{ .url }}' + targetRevision: '{{ .branch }}' + path: guestbook + destination: + server: https://kubernetes.default.svc + namespace: guestbook diff --git a/applicationset/examples/template-override/default/guestbook-ui-deployment.yaml b/applicationset/examples/template-override/default/guestbook-ui-deployment.yaml new file mode 100644 index 0000000000000..8a0975e363539 --- /dev/null +++ b/applicationset/examples/template-override/default/guestbook-ui-deployment.yaml @@ -0,0 +1,20 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: guestbook-ui +spec: + replicas: 1 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: guestbook-ui + template: + metadata: + labels: + app: guestbook-ui + spec: + containers: + - image: gcr.io/heptio-images/ks-guestbook-demo:0.2 + name: guestbook-ui + ports: + - containerPort: 80 diff --git a/applicationset/examples/template-override/default/guestbook-ui-svc.yaml 
b/applicationset/examples/template-override/default/guestbook-ui-svc.yaml new file mode 100644 index 0000000000000..e619b5cd39f7b --- /dev/null +++ b/applicationset/examples/template-override/default/guestbook-ui-svc.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Service +metadata: + name: guestbook-ui +spec: + ports: + - port: 80 + targetPort: 80 + selector: + app: guestbook-ui diff --git a/applicationset/examples/template-override/default/kustomization.yaml b/applicationset/examples/template-override/default/kustomization.yaml new file mode 100644 index 0000000000000..cbaba9021cb23 --- /dev/null +++ b/applicationset/examples/template-override/default/kustomization.yaml @@ -0,0 +1,7 @@ +namePrefix: kustomize- + +resources: +- guestbook-ui-deployment.yaml +- guestbook-ui-svc.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization diff --git a/applicationset/examples/template-override/engineering-dev-override/guestbook-ui-deployment.yaml b/applicationset/examples/template-override/engineering-dev-override/guestbook-ui-deployment.yaml new file mode 100644 index 0000000000000..8a0975e363539 --- /dev/null +++ b/applicationset/examples/template-override/engineering-dev-override/guestbook-ui-deployment.yaml @@ -0,0 +1,20 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: guestbook-ui +spec: + replicas: 1 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: guestbook-ui + template: + metadata: + labels: + app: guestbook-ui + spec: + containers: + - image: gcr.io/heptio-images/ks-guestbook-demo:0.2 + name: guestbook-ui + ports: + - containerPort: 80 diff --git a/applicationset/examples/template-override/engineering-dev-override/guestbook-ui-svc.yaml b/applicationset/examples/template-override/engineering-dev-override/guestbook-ui-svc.yaml new file mode 100644 index 0000000000000..e619b5cd39f7b --- /dev/null +++ b/applicationset/examples/template-override/engineering-dev-override/guestbook-ui-svc.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: 
Service +metadata: + name: guestbook-ui +spec: + ports: + - port: 80 + targetPort: 80 + selector: + app: guestbook-ui diff --git a/applicationset/examples/template-override/engineering-dev-override/kustomization.yaml b/applicationset/examples/template-override/engineering-dev-override/kustomization.yaml new file mode 100644 index 0000000000000..cbaba9021cb23 --- /dev/null +++ b/applicationset/examples/template-override/engineering-dev-override/kustomization.yaml @@ -0,0 +1,7 @@ +namePrefix: kustomize- + +resources: +- guestbook-ui-deployment.yaml +- guestbook-ui-svc.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization diff --git a/applicationset/examples/template-override/template-overrides-example-fasttemplate.yaml b/applicationset/examples/template-override/template-overrides-example-fasttemplate.yaml new file mode 100644 index 0000000000000..a8fe9916b94d6 --- /dev/null +++ b/applicationset/examples/template-override/template-overrides-example-fasttemplate.yaml @@ -0,0 +1,36 @@ +# App templates can also be defined as part of the generator's template stanza. Sometimes it is +# useful to do this in order to override the spec.template stanza, and when simple string +# parameterization are insufficient. In the below examples, the generators[].XXX.template is +# a partial definition, which overrides/patch the default template. 
+apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + generators: + - list: + elements: + - cluster: engineering-dev + url: https://kubernetes.default.svc + template: + metadata: {} + spec: + project: "default" + source: + targetRevision: HEAD + repoURL: https://github.com/argoproj/argo-cd.git + path: 'applicationset/examples/template-override/{{cluster}}-override' + destination: {} + + template: + metadata: + name: '{{cluster}}-guestbook' + spec: + project: "default" + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: applicationset/examples/template-override/default + destination: + server: '{{url}}' + namespace: guestbook diff --git a/applicationset/examples/template-override/template-overrides-example.yaml b/applicationset/examples/template-override/template-overrides-example.yaml new file mode 100644 index 0000000000000..48cbf703fcd70 --- /dev/null +++ b/applicationset/examples/template-override/template-overrides-example.yaml @@ -0,0 +1,38 @@ +# App templates can also be defined as part of the generator's template stanza. Sometimes it is +# useful to do this in order to override the spec.template stanza, and when simple string +# parameterization are insufficient. In the below examples, the generators[].XXX.template is +# a partial definition, which overrides/patch the default template. 
+apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - list: + elements: + - cluster: engineering-dev + url: https://kubernetes.default.svc + template: + metadata: {} + spec: + project: "default" + source: + targetRevision: HEAD + repoURL: https://github.com/argoproj/argo-cd.git + path: 'applicationset/examples/template-override/{{.cluster}}-override' + destination: {} + + template: + metadata: + name: '{{.cluster}}-guestbook' + spec: + project: "default" + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: applicationset/examples/template-override/default + destination: + server: '{{.url}}' + namespace: guestbook diff --git a/applicationset/generators/cluster.go b/applicationset/generators/cluster.go new file mode 100644 index 0000000000000..d8647d78d3a5c --- /dev/null +++ b/applicationset/generators/cluster.go @@ -0,0 +1,189 @@ +package generators + +import ( + "context" + "fmt" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/argoproj/argo-cd/v2/util/settings" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/argoproj/argo-cd/v2/applicationset/utils" + argoappsetv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +const ( + ArgoCDSecretTypeLabel = "argocd.argoproj.io/secret-type" + ArgoCDSecretTypeCluster = "cluster" +) + +var _ Generator = (*ClusterGenerator)(nil) + +// ClusterGenerator generates Applications for some or all clusters registered with ArgoCD. 
+type ClusterGenerator struct { + client.Client + ctx context.Context + clientset kubernetes.Interface + // namespace is the Argo CD namespace + namespace string + settingsManager *settings.SettingsManager +} + +var render = &utils.Render{} + +func NewClusterGenerator(c client.Client, ctx context.Context, clientset kubernetes.Interface, namespace string) Generator { + + settingsManager := settings.NewSettingsManager(ctx, clientset, namespace) + + g := &ClusterGenerator{ + Client: c, + ctx: ctx, + clientset: clientset, + namespace: namespace, + settingsManager: settingsManager, + } + return g +} + +// GetRequeueAfter never requeue the cluster generator because the `clusterSecretEventHandler` will requeue the appsets +// when the cluster secrets change +func (g *ClusterGenerator) GetRequeueAfter(appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator) time.Duration { + return NoRequeueAfter +} + +func (g *ClusterGenerator) GetTemplate(appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator) *argoappsetv1alpha1.ApplicationSetTemplate { + return &appSetGenerator.Clusters.Template +} + +func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator, appSet *argoappsetv1alpha1.ApplicationSet) ([]map[string]interface{}, error) { + + if appSetGenerator == nil { + return nil, EmptyAppSetGeneratorError + } + + if appSetGenerator.Clusters == nil { + return nil, EmptyAppSetGeneratorError + } + + // Do not include the local cluster in the cluster parameters IF there is a non-empty selector + // - Since local clusters do not have secrets, they do not have labels to match against + ignoreLocalClusters := len(appSetGenerator.Clusters.Selector.MatchExpressions) > 0 || len(appSetGenerator.Clusters.Selector.MatchLabels) > 0 + + // ListCluster from Argo CD's util/db package will include the local cluster in the list of clusters + clustersFromArgoCD, err := utils.ListClusters(g.ctx, g.clientset, g.namespace) + if err != nil { + return nil, 
fmt.Errorf("error listing clusters: %w", err) + } + + if clustersFromArgoCD == nil { + return nil, nil + } + + clusterSecrets, err := g.getSecretsByClusterName(appSetGenerator) + if err != nil { + return nil, err + } + + res := []map[string]interface{}{} + + secretsFound := []corev1.Secret{} + + for _, cluster := range clustersFromArgoCD.Items { + + // If there is a secret for this cluster, then it's a non-local cluster, so it will be + // handled by the next step. + if secretForCluster, exists := clusterSecrets[cluster.Name]; exists { + secretsFound = append(secretsFound, secretForCluster) + + } else if !ignoreLocalClusters { + // If there is no secret for the cluster, it's the local cluster, so handle it here. + params := map[string]interface{}{} + params["name"] = cluster.Name + params["nameNormalized"] = cluster.Name + params["server"] = cluster.Server + + err = appendTemplatedValues(appSetGenerator.Clusters.Values, params, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions) + if err != nil { + return nil, err + } + + res = append(res, params) + + log.WithField("cluster", "local cluster").Info("matched local cluster") + } + } + + // For each matching cluster secret (non-local clusters only) + for _, cluster := range secretsFound { + params := map[string]interface{}{} + + params["name"] = string(cluster.Data["name"]) + params["nameNormalized"] = utils.SanitizeName(string(cluster.Data["name"])) + params["server"] = string(cluster.Data["server"]) + + if appSet.Spec.GoTemplate { + meta := map[string]interface{}{} + + if len(cluster.ObjectMeta.Annotations) > 0 { + meta["annotations"] = cluster.ObjectMeta.Annotations + } + if len(cluster.ObjectMeta.Labels) > 0 { + meta["labels"] = cluster.ObjectMeta.Labels + } + + params["metadata"] = meta + } else { + for key, value := range cluster.ObjectMeta.Annotations { + params[fmt.Sprintf("metadata.annotations.%s", key)] = value + } + + for key, value := range cluster.ObjectMeta.Labels { + 
params[fmt.Sprintf("metadata.labels.%s", key)] = value + } + } + + err = appendTemplatedValues(appSetGenerator.Clusters.Values, params, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions) + if err != nil { + return nil, err + } + + res = append(res, params) + + log.WithField("cluster", cluster.Name).Info("matched cluster secret") + } + + return res, nil +} + +func (g *ClusterGenerator) getSecretsByClusterName(appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator) (map[string]corev1.Secret, error) { + // List all Clusters: + clusterSecretList := &corev1.SecretList{} + + selector := metav1.AddLabelToSelector(&appSetGenerator.Clusters.Selector, ArgoCDSecretTypeLabel, ArgoCDSecretTypeCluster) + secretSelector, err := metav1.LabelSelectorAsSelector(selector) + if err != nil { + return nil, err + } + + if err := g.Client.List(context.Background(), clusterSecretList, client.MatchingLabelsSelector{Selector: secretSelector}); err != nil { + return nil, err + } + log.Debug("clusters matching labels", "count", len(clusterSecretList.Items)) + + res := map[string]corev1.Secret{} + + for _, cluster := range clusterSecretList.Items { + clusterName := string(cluster.Data["name"]) + + res[clusterName] = cluster + } + + return res, nil + +} diff --git a/applicationset/generators/cluster_test.go b/applicationset/generators/cluster_test.go new file mode 100644 index 0000000000000..0abc9399149d2 --- /dev/null +++ b/applicationset/generators/cluster_test.go @@ -0,0 +1,645 @@ +package generators + +import ( + "context" + "fmt" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + kubefake "k8s.io/client-go/kubernetes/fake" + + "github.com/argoproj/argo-cd/v2/applicationset/utils" + argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + + "github.com/stretchr/testify/assert" +) + 
+type possiblyErroringFakeCtrlRuntimeClient struct { + client.Client + shouldError bool +} + +func (p *possiblyErroringFakeCtrlRuntimeClient) List(ctx context.Context, secretList client.ObjectList, opts ...client.ListOption) error { + if p.shouldError { + return fmt.Errorf("could not list Secrets") + } + return p.Client.List(ctx, secretList, opts...) +} + +func TestGenerateParams(t *testing.T) { + clusters := []client.Object{ + &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "staging-01", + Namespace: "namespace", + Labels: map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + "environment": "staging", + "org": "foo", + }, + Annotations: map[string]string{ + "foo.argoproj.io": "staging", + }, + }, + Data: map[string][]byte{ + "config": []byte("{}"), + "name": []byte("staging-01"), + "server": []byte("https://staging-01.example.com"), + }, + Type: corev1.SecretType("Opaque"), + }, + &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "production-01", + Namespace: "namespace", + Labels: map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + "environment": "production", + "org": "bar", + }, + Annotations: map[string]string{ + "foo.argoproj.io": "production", + }, + }, + Data: map[string][]byte{ + "config": []byte("{}"), + "name": []byte("production_01/west"), + "server": []byte("https://production-01.example.com"), + }, + Type: corev1.SecretType("Opaque"), + }, + } + testCases := []struct { + name string + selector metav1.LabelSelector + values map[string]string + expected []map[string]interface{} + // clientError is true if a k8s client error should be simulated + clientError bool + expectedError error + }{ + { + name: "no label selector", + selector: metav1.LabelSelector{}, + values: map[string]string{ + "lol1": "lol", + "lol2": "{{values.lol1}}{{values.lol1}}", + "lol3": 
"{{values.lol2}}{{values.lol2}}{{values.lol2}}", + "foo": "bar", + "bar": "{{ metadata.annotations.foo.argoproj.io }}", + "bat": "{{ metadata.labels.environment }}", + "aaa": "{{ server }}", + "no-op": "{{ this-does-not-exist }}", + }, expected: []map[string]interface{}{ + {"values.lol1": "lol", "values.lol2": "{{values.lol1}}{{values.lol1}}", "values.lol3": "{{values.lol2}}{{values.lol2}}{{values.lol2}}", "values.foo": "bar", "values.bar": "production", "values.no-op": "{{ this-does-not-exist }}", "values.bat": "production", "values.aaa": "https://production-01.example.com", "name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar", + "metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production"}, + + {"values.lol1": "lol", "values.lol2": "{{values.lol1}}{{values.lol1}}", "values.lol3": "{{values.lol2}}{{values.lol2}}{{values.lol2}}", "values.foo": "bar", "values.bar": "staging", "values.no-op": "{{ this-does-not-exist }}", "values.bat": "staging", "values.aaa": "https://staging-01.example.com", "name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo", + "metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging"}, + + {"values.lol1": "lol", "values.lol2": "{{values.lol1}}{{values.lol1}}", "values.lol3": "{{values.lol2}}{{values.lol2}}{{values.lol2}}", "values.foo": "bar", "values.bar": "{{ metadata.annotations.foo.argoproj.io }}", "values.no-op": "{{ this-does-not-exist }}", "values.bat": "{{ metadata.labels.environment }}", "values.aaa": "https://kubernetes.default.svc", "nameNormalized": "in-cluster", "name": "in-cluster", "server": "https://kubernetes.default.svc"}, + }, + clientError: false, + expectedError: nil, + }, 
+ { + name: "secret type label selector", + selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + }, + }, + values: nil, + expected: []map[string]interface{}{ + {"name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar", + "metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production"}, + + {"name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo", + "metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging"}, + }, + clientError: false, + expectedError: nil, + }, + { + name: "production-only", + selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "environment": "production", + }, + }, + values: map[string]string{ + "foo": "bar", + }, + expected: []map[string]interface{}{ + {"values.foo": "bar", "name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar", + "metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production"}, + }, + clientError: false, + expectedError: nil, + }, + { + name: "production or staging", + selector: metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "environment", + Operator: "In", + Values: []string{ + "production", + "staging", + }, + }, + }, + }, + values: map[string]string{ + "foo": "bar", + }, + expected: []map[string]interface{}{ + {"values.foo": "bar", "name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", 
"metadata.labels.org": "foo", + "metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging"}, + {"values.foo": "bar", "name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar", + "metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production"}, + }, + clientError: false, + expectedError: nil, + }, + { + name: "production or staging with match labels", + selector: metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "environment", + Operator: "In", + Values: []string{ + "production", + "staging", + }, + }, + }, + MatchLabels: map[string]string{ + "org": "foo", + }, + }, + values: map[string]string{ + "name": "baz", + }, + expected: []map[string]interface{}{ + {"values.name": "baz", "name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo", + "metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging"}, + }, + clientError: false, + expectedError: nil, + }, + { + name: "simulate client error", + selector: metav1.LabelSelector{}, + values: nil, + expected: nil, + clientError: true, + expectedError: fmt.Errorf("could not list Secrets"), + }, + } + + // convert []client.Object to []runtime.Object, for use by kubefake package + runtimeClusters := []runtime.Object{} + for _, clientCluster := range clusters { + runtimeClusters = append(runtimeClusters, clientCluster) + } + + for _, testCase := range testCases { + + t.Run(testCase.name, func(t *testing.T) { + + appClientset := kubefake.NewSimpleClientset(runtimeClusters...) 
+ + fakeClient := fake.NewClientBuilder().WithObjects(clusters...).Build() + cl := &possiblyErroringFakeCtrlRuntimeClient{ + fakeClient, + testCase.clientError, + } + + var clusterGenerator = NewClusterGenerator(cl, context.Background(), appClientset, "namespace") + + applicationSetInfo := argoprojiov1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "set", + }, + Spec: argoprojiov1alpha1.ApplicationSetSpec{}, + } + + got, err := clusterGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{ + Clusters: &argoprojiov1alpha1.ClusterGenerator{ + Selector: testCase.selector, + Values: testCase.values, + }, + }, &applicationSetInfo) + + if testCase.expectedError != nil { + assert.EqualError(t, err, testCase.expectedError.Error()) + } else { + assert.NoError(t, err) + assert.ElementsMatch(t, testCase.expected, got) + } + + }) + } +} + +func TestGenerateParamsGoTemplate(t *testing.T) { + clusters := []client.Object{ + &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "staging-01", + Namespace: "namespace", + Labels: map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + "environment": "staging", + "org": "foo", + }, + Annotations: map[string]string{ + "foo.argoproj.io": "staging", + }, + }, + Data: map[string][]byte{ + "config": []byte("{}"), + "name": []byte("staging-01"), + "server": []byte("https://staging-01.example.com"), + }, + Type: corev1.SecretType("Opaque"), + }, + &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "production-01", + Namespace: "namespace", + Labels: map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + "environment": "production", + "org": "bar", + }, + Annotations: map[string]string{ + "foo.argoproj.io": "production", + }, + }, + Data: map[string][]byte{ + "config": []byte("{}"), + "name": []byte("production_01/west"), + "server": 
[]byte("https://production-01.example.com"), + }, + Type: corev1.SecretType("Opaque"), + }, + } + testCases := []struct { + name string + selector metav1.LabelSelector + values map[string]string + expected []map[string]interface{} + // clientError is true if a k8s client error should be simulated + clientError bool + expectedError error + }{ + { + name: "no label selector", + selector: metav1.LabelSelector{}, + values: map[string]string{ + "lol1": "lol", + "lol2": "{{ .values.lol1 }}{{ .values.lol1 }}", + "lol3": "{{ .values.lol2 }}{{ .values.lol2 }}{{ .values.lol2 }}", + "foo": "bar", + "bar": "{{ if not (empty .metadata) }}{{index .metadata.annotations \"foo.argoproj.io\" }}{{ end }}", + "bat": "{{ if not (empty .metadata) }}{{.metadata.labels.environment}}{{ end }}", + "aaa": "{{ .server }}", + "no-op": "{{ .thisDoesNotExist }}", + }, expected: []map[string]interface{}{ + { + "name": "production_01/west", + "nameNormalized": "production-01-west", + "server": "https://production-01.example.com", + "metadata": map[string]interface{}{ + "labels": map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + "environment": "production", + "org": "bar", + }, + "annotations": map[string]string{ + "foo.argoproj.io": "production", + }, + }, + "values": map[string]string{ + "lol1": "lol", + "lol2": "", + "lol3": "", + "foo": "bar", + "bar": "production", + "bat": "production", + "aaa": "https://production-01.example.com", + "no-op": "", + }, + }, + { + "name": "staging-01", + "nameNormalized": "staging-01", + "server": "https://staging-01.example.com", + "metadata": map[string]interface{}{ + "labels": map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + "environment": "staging", + "org": "foo", + }, + "annotations": map[string]string{ + "foo.argoproj.io": "staging", + }, + }, + "values": map[string]string{ + "lol1": "lol", + "lol2": "", + "lol3": "", + "foo": "bar", + "bar": "staging", + "bat": "staging", + "aaa": "https://staging-01.example.com", + 
"no-op": "", + }, + }, + { + "nameNormalized": "in-cluster", + "name": "in-cluster", + "server": "https://kubernetes.default.svc", + "values": map[string]string{ + "lol1": "lol", + "lol2": "", + "lol3": "", + "foo": "bar", + "bar": "", + "bat": "", + "aaa": "https://kubernetes.default.svc", + "no-op": "", + }, + }, + }, + clientError: false, + expectedError: nil, + }, + { + name: "secret type label selector", + selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + }, + }, + values: nil, + expected: []map[string]interface{}{ + { + "name": "production_01/west", + "nameNormalized": "production-01-west", + "server": "https://production-01.example.com", + "metadata": map[string]interface{}{ + "labels": map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + "environment": "production", + "org": "bar", + }, + "annotations": map[string]string{ + "foo.argoproj.io": "production", + }, + }, + }, + { + "name": "staging-01", + "nameNormalized": "staging-01", + "server": "https://staging-01.example.com", + "metadata": map[string]interface{}{ + "labels": map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + "environment": "staging", + "org": "foo", + }, + "annotations": map[string]string{ + "foo.argoproj.io": "staging", + }, + }, + }, + }, + clientError: false, + expectedError: nil, + }, + { + name: "production-only", + selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "environment": "production", + }, + }, + values: map[string]string{ + "foo": "bar", + }, + expected: []map[string]interface{}{ + { + "name": "production_01/west", + "nameNormalized": "production-01-west", + "server": "https://production-01.example.com", + "metadata": map[string]interface{}{ + "labels": map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + "environment": "production", + "org": "bar", + }, + "annotations": map[string]string{ + "foo.argoproj.io": "production", + }, + }, + "values": 
map[string]string{ + "foo": "bar", + }, + }, + }, + clientError: false, + expectedError: nil, + }, + { + name: "production or staging", + selector: metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "environment", + Operator: "In", + Values: []string{ + "production", + "staging", + }, + }, + }, + }, + values: map[string]string{ + "foo": "bar", + }, + expected: []map[string]interface{}{ + { + "name": "production_01/west", + "nameNormalized": "production-01-west", + "server": "https://production-01.example.com", + "metadata": map[string]interface{}{ + "labels": map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + "environment": "production", + "org": "bar", + }, + "annotations": map[string]string{ + "foo.argoproj.io": "production", + }, + }, + "values": map[string]string{ + "foo": "bar", + }, + }, + { + "name": "staging-01", + "nameNormalized": "staging-01", + "server": "https://staging-01.example.com", + "metadata": map[string]interface{}{ + "labels": map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + "environment": "staging", + "org": "foo", + }, + "annotations": map[string]string{ + "foo.argoproj.io": "staging", + }, + }, + "values": map[string]string{ + "foo": "bar", + }, + }, + }, + clientError: false, + expectedError: nil, + }, + { + name: "production or staging with match labels", + selector: metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "environment", + Operator: "In", + Values: []string{ + "production", + "staging", + }, + }, + }, + MatchLabels: map[string]string{ + "org": "foo", + }, + }, + values: map[string]string{ + "name": "baz", + }, + expected: []map[string]interface{}{ + { + "name": "staging-01", + "nameNormalized": "staging-01", + "server": "https://staging-01.example.com", + "metadata": map[string]interface{}{ + "labels": map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + "environment": "staging", + "org": "foo", + }, + 
"annotations": map[string]string{ + "foo.argoproj.io": "staging", + }, + }, + "values": map[string]string{ + "name": "baz", + }, + }, + }, + clientError: false, + expectedError: nil, + }, + { + name: "simulate client error", + selector: metav1.LabelSelector{}, + values: nil, + expected: nil, + clientError: true, + expectedError: fmt.Errorf("could not list Secrets"), + }, + } + + // convert []client.Object to []runtime.Object, for use by kubefake package + runtimeClusters := []runtime.Object{} + for _, clientCluster := range clusters { + runtimeClusters = append(runtimeClusters, clientCluster) + } + + for _, testCase := range testCases { + + t.Run(testCase.name, func(t *testing.T) { + + appClientset := kubefake.NewSimpleClientset(runtimeClusters...) + + fakeClient := fake.NewClientBuilder().WithObjects(clusters...).Build() + cl := &possiblyErroringFakeCtrlRuntimeClient{ + fakeClient, + testCase.clientError, + } + + var clusterGenerator = NewClusterGenerator(cl, context.Background(), appClientset, "namespace") + + applicationSetInfo := argoprojiov1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "set", + }, + Spec: argoprojiov1alpha1.ApplicationSetSpec{ + GoTemplate: true, + }, + } + + got, err := clusterGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{ + Clusters: &argoprojiov1alpha1.ClusterGenerator{ + Selector: testCase.selector, + Values: testCase.values, + }, + }, &applicationSetInfo) + + if testCase.expectedError != nil { + assert.EqualError(t, err, testCase.expectedError.Error()) + } else { + assert.NoError(t, err) + assert.ElementsMatch(t, testCase.expected, got) + } + + }) + } +} + +func TestSanitizeClusterName(t *testing.T) { + t.Run("valid DNS-1123 subdomain name", func(t *testing.T) { + assert.Equal(t, "cluster-name", utils.SanitizeName("cluster-name")) + }) + t.Run("invalid DNS-1123 subdomain name", func(t *testing.T) { + invalidName := "-.--CLUSTER/name -./.-" + assert.Equal(t, "cluster-name", 
utils.SanitizeName(invalidName)) + }) +} diff --git a/applicationset/generators/duck_type.go b/applicationset/generators/duck_type.go new file mode 100644 index 0000000000000..f98afd0e01381 --- /dev/null +++ b/applicationset/generators/duck_type.go @@ -0,0 +1,236 @@ +package generators + +import ( + "context" + "fmt" + "strings" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/argoproj/argo-cd/v2/util/settings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + + "github.com/argoproj/argo-cd/v2/applicationset/utils" + argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +var _ Generator = (*DuckTypeGenerator)(nil) + +// DuckTypeGenerator generates Applications for some or all clusters registered with ArgoCD. +type DuckTypeGenerator struct { + ctx context.Context + dynClient dynamic.Interface + clientset kubernetes.Interface + namespace string // namespace is the Argo CD namespace + settingsManager *settings.SettingsManager +} + +func NewDuckTypeGenerator(ctx context.Context, dynClient dynamic.Interface, clientset kubernetes.Interface, namespace string) Generator { + + settingsManager := settings.NewSettingsManager(ctx, clientset, namespace) + + g := &DuckTypeGenerator{ + ctx: ctx, + dynClient: dynClient, + clientset: clientset, + namespace: namespace, + settingsManager: settingsManager, + } + return g +} + +func (g *DuckTypeGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration { + + // Return a requeue default of 3 minutes, if no override is specified. 
+ + if appSetGenerator.ClusterDecisionResource.RequeueAfterSeconds != nil { + return time.Duration(*appSetGenerator.ClusterDecisionResource.RequeueAfterSeconds) * time.Second + } + + return DefaultRequeueAfterSeconds +} + +func (g *DuckTypeGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate { + return &appSetGenerator.ClusterDecisionResource.Template +} + +func (g *DuckTypeGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, appSet *argoprojiov1alpha1.ApplicationSet) ([]map[string]interface{}, error) { + + if appSetGenerator == nil { + return nil, EmptyAppSetGeneratorError + } + + // Not likely to happen + if appSetGenerator.ClusterDecisionResource == nil { + return nil, EmptyAppSetGeneratorError + } + + // ListCluster from Argo CD's util/db package will include the local cluster in the list of clusters + clustersFromArgoCD, err := utils.ListClusters(g.ctx, g.clientset, g.namespace) + if err != nil { + return nil, fmt.Errorf("error listing clusters: %w", err) + } + + if clustersFromArgoCD == nil { + return nil, nil + } + + // Read the configMapRef + cm, err := g.clientset.CoreV1().ConfigMaps(g.namespace).Get(g.ctx, appSetGenerator.ClusterDecisionResource.ConfigMapRef, metav1.GetOptions{}) + + if err != nil { + return nil, fmt.Errorf("error reading configMapRef: %w", err) + } + + // Extract GVK data for the dynamic client to use + versionIdx := strings.Index(cm.Data["apiVersion"], "/") + kind := cm.Data["kind"] + resourceName := appSetGenerator.ClusterDecisionResource.Name + labelSelector := appSetGenerator.ClusterDecisionResource.LabelSelector + + log.WithField("kind.apiVersion", kind+"."+cm.Data["apiVersion"]).Info("Kind.Group/Version Reference") + + // Validate the fields + if kind == "" || versionIdx < 1 { + log.Warningf("kind=%v, resourceName=%v, versionIdx=%v", kind, resourceName, versionIdx) + return nil, fmt.Errorf("There is a problem with the 
apiVersion, kind or resourceName provided") + } + + if (resourceName == "" && labelSelector.MatchLabels == nil && labelSelector.MatchExpressions == nil) || + (resourceName != "" && (labelSelector.MatchExpressions != nil || labelSelector.MatchLabels != nil)) { + + log.Warningf("You must choose either resourceName=%v, labelSelector.matchLabels=%v or labelSelect.matchExpressions=%v", resourceName, labelSelector.MatchLabels, labelSelector.MatchExpressions) + return nil, fmt.Errorf("There is a problem with the definition of the ClusterDecisionResource generator") + } + + // Split up the apiVersion + group := cm.Data["apiVersion"][0:versionIdx] + version := cm.Data["apiVersion"][versionIdx+1:] + log.WithField("kind.group.version", kind+"."+group+"/"+version).Debug("decoded Ref") + + duckGVR := schema.GroupVersionResource{Group: group, Version: version, Resource: kind} + + listOptions := metav1.ListOptions{} + if resourceName == "" { + listOptions.LabelSelector = metav1.FormatLabelSelector(&labelSelector) + log.WithField("listOptions.LabelSelector", listOptions.LabelSelector).Info("selection type") + } else { + listOptions.FieldSelector = fields.OneTermEqualSelector("metadata.name", resourceName).String() + //metav1.Convert_fields_Selector_To_string(fields.).Sprintf("metadata.name=%s", resourceName) + log.WithField("listOptions.FieldSelector", listOptions.FieldSelector).Info("selection type") + } + + duckResources, err := g.dynClient.Resource(duckGVR).Namespace(g.namespace).List(g.ctx, listOptions) + + if err != nil { + log.WithField("GVK", duckGVR).Warning("resources were not found") + return nil, err + } + + if len(duckResources.Items) == 0 { + log.Warning("no resource found, make sure you clusterDecisionResource is defined correctly") + return nil, fmt.Errorf("no clusterDecisionResources found") + } + + // Override the duck type in the status of the resource + statusListKey := "clusters" + + matchKey := cm.Data["matchKey"] + + if cm.Data["statusListKey"] != "" { + 
statusListKey = cm.Data["statusListKey"] + } + if matchKey == "" { + log.WithField("matchKey", matchKey).Warning("matchKey not found in " + cm.Name) + return nil, nil + + } + + res := []map[string]interface{}{} + clusterDecisions := []interface{}{} + + // Build the decision slice + for _, duckResource := range duckResources.Items { + log.WithField("duckResourceName", duckResource.GetName()).Debug("found resource") + + if duckResource.Object["status"] == nil || len(duckResource.Object["status"].(map[string]interface{})) == 0 { + log.Warningf("clusterDecisionResource: %s, has no status", duckResource.GetName()) + continue + } + + log.WithField("duckResourceStatus", duckResource.Object["status"]).Debug("found resource") + + clusterDecisions = append(clusterDecisions, duckResource.Object["status"].(map[string]interface{})[statusListKey].([]interface{})...) + + } + log.Infof("Number of decisions found: %v", len(clusterDecisions)) + + // Read this outside the loop to improve performance + argoClusters := clustersFromArgoCD.Items + + if len(clusterDecisions) > 0 { + for _, cluster := range clusterDecisions { + + // generated instance of cluster params + params := map[string]interface{}{} + + log.Infof("cluster: %v", cluster) + matchValue := cluster.(map[string]interface{})[matchKey] + if matchValue == nil || matchValue.(string) == "" { + log.Warningf("matchKey=%v not found in \"%v\" list: %v\n", matchKey, statusListKey, cluster.(map[string]interface{})) + continue + } + + strMatchValue := matchValue.(string) + log.WithField(matchKey, strMatchValue).Debug("validate against ArgoCD") + + found := false + + for _, argoCluster := range argoClusters { + if argoCluster.Name == strMatchValue { + + log.WithField(matchKey, argoCluster.Name).Info("matched cluster in ArgoCD") + params["name"] = argoCluster.Name + params["server"] = argoCluster.Server + + found = true + break // Stop looking + } + + } + + if !found { + log.WithField(matchKey, strMatchValue).Warning("unmatched cluster 
in ArgoCD") + continue + } + + for key, value := range cluster.(map[string]interface{}) { + params[key] = value.(string) + } + + for key, value := range appSetGenerator.ClusterDecisionResource.Values { + if appSet.Spec.GoTemplate { + if params["values"] == nil { + params["values"] = map[string]string{} + } + params["values"].(map[string]string)[key] = value + } else { + params[fmt.Sprintf("values.%s", key)] = value + } + } + + res = append(res, params) + } + } else { + log.Warningf("clusterDecisionResource status." + statusListKey + " missing") + return nil, nil + } + + return res, nil +} diff --git a/applicationset/generators/duck_type_test.go b/applicationset/generators/duck_type_test.go new file mode 100644 index 0000000000000..788457b27559c --- /dev/null +++ b/applicationset/generators/duck_type_test.go @@ -0,0 +1,621 @@ +package generators + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + dynfake "k8s.io/client-go/dynamic/fake" + kubefake "k8s.io/client-go/kubernetes/fake" + "sigs.k8s.io/controller-runtime/pkg/client" + + argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +const resourceApiVersion = "mallard.io/v1" +const resourceKind = "ducks" +const resourceName = "quak" + +func TestGenerateParamsForDuckType(t *testing.T) { + clusters := []client.Object{ + &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "staging-01", + Namespace: "namespace", + Labels: map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + "environment": "staging", + "org": "foo", + }, + Annotations: map[string]string{ + "foo.argoproj.io": "staging", + }, + }, + Data: map[string][]byte{ + "config": []byte("{}"), + 
"name": []byte("staging-01"), + "server": []byte("https://staging-01.example.com"), + }, + Type: corev1.SecretType("Opaque"), + }, + &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "production-01", + Namespace: "namespace", + Labels: map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + "environment": "production", + "org": "bar", + }, + Annotations: map[string]string{ + "foo.argoproj.io": "production", + }, + }, + Data: map[string][]byte{ + "config": []byte("{}"), + "name": []byte("production-01"), + "server": []byte("https://production-01.example.com"), + }, + Type: corev1.SecretType("Opaque"), + }, + } + + duckType := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": resourceApiVersion, + "kind": "Duck", + "metadata": map[string]interface{}{ + "name": resourceName, + "namespace": "namespace", + "labels": map[string]interface{}{"duck": "all-species"}, + }, + "status": map[string]interface{}{ + "decisions": []interface{}{ + map[string]interface{}{ + "clusterName": "staging-01", + }, + map[string]interface{}{ + "clusterName": "production-01", + }, + }, + }, + }, + } + + duckTypeProdOnly := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": resourceApiVersion, + "kind": "Duck", + "metadata": map[string]interface{}{ + "name": resourceName, + "namespace": "namespace", + "labels": map[string]interface{}{"duck": "spotted"}, + }, + "status": map[string]interface{}{ + "decisions": []interface{}{ + map[string]interface{}{ + "clusterName": "production-01", + }, + }, + }, + }, + } + + duckTypeEmpty := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": resourceApiVersion, + "kind": "Duck", + "metadata": map[string]interface{}{ + "name": resourceName, + "namespace": "namespace", + "labels": map[string]interface{}{"duck": "canvasback"}, + }, + "status": map[string]interface{}{}, + }, + } + + configMap := 
&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-configmap", + Namespace: "namespace", + }, + Data: map[string]string{ + "apiVersion": resourceApiVersion, + "kind": resourceKind, + "statusListKey": "decisions", + "matchKey": "clusterName", + }, + } + + testCases := []struct { + name string + configMapRef string + resourceName string + labelSelector metav1.LabelSelector + resource *unstructured.Unstructured + values map[string]string + expected []map[string]interface{} + expectedError error + }{ + { + name: "no duck resource", + resourceName: "", + resource: duckType, + values: nil, + expected: []map[string]interface{}{}, + expectedError: fmt.Errorf("There is a problem with the definition of the ClusterDecisionResource generator"), + }, + /*** This does not work with the FAKE runtime client, fieldSelectors are broken. + { + name: "invalid name for duck resource", + resourceName: resourceName + "-different", + resource: duckType, + values: nil, + expected: []map[string]string{}, + expectedError: fmt.Errorf("duck.mallard.io \"quak\" not found"), + }, + ***/ + { + name: "duck type generator resourceName", + resourceName: resourceName, + resource: duckType, + values: nil, + expected: []map[string]interface{}{ + {"clusterName": "production-01", "name": "production-01", "server": "https://production-01.example.com"}, + + {"clusterName": "staging-01", "name": "staging-01", "server": "https://staging-01.example.com"}, + }, + expectedError: nil, + }, + { + name: "production-only", + resourceName: resourceName, + resource: duckTypeProdOnly, + values: map[string]string{ + "foo": "bar", + }, + expected: []map[string]interface{}{ + {"clusterName": "production-01", "values.foo": "bar", "name": "production-01", "server": "https://production-01.example.com"}, + }, + expectedError: nil, + }, + { + name: "duck type empty status", + resourceName: resourceName, + resource: duckTypeEmpty, + values: nil, + expected: nil, + expectedError: nil, + }, + { + name: "duck type 
empty status labelSelector.matchLabels", + resourceName: "", + labelSelector: metav1.LabelSelector{MatchLabels: map[string]string{"duck": "canvasback"}}, + resource: duckTypeEmpty, + values: nil, + expected: nil, + expectedError: nil, + }, + { + name: "duck type generator labelSelector.matchLabels", + resourceName: "", + labelSelector: metav1.LabelSelector{MatchLabels: map[string]string{"duck": "all-species"}}, + resource: duckType, + values: nil, + expected: []map[string]interface{}{ + {"clusterName": "production-01", "name": "production-01", "server": "https://production-01.example.com"}, + + {"clusterName": "staging-01", "name": "staging-01", "server": "https://staging-01.example.com"}, + }, + expectedError: nil, + }, + { + name: "production-only labelSelector.matchLabels", + resourceName: "", + resource: duckTypeProdOnly, + labelSelector: metav1.LabelSelector{MatchLabels: map[string]string{"duck": "spotted"}}, + values: map[string]string{ + "foo": "bar", + }, + expected: []map[string]interface{}{ + {"clusterName": "production-01", "values.foo": "bar", "name": "production-01", "server": "https://production-01.example.com"}, + }, + expectedError: nil, + }, + { + name: "duck type generator labelSelector.matchExpressions", + resourceName: "", + labelSelector: metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "duck", + Operator: "In", + Values: []string{"all-species", "marbled"}, + }, + }}, + resource: duckType, + values: nil, + expected: []map[string]interface{}{ + {"clusterName": "production-01", "name": "production-01", "server": "https://production-01.example.com"}, + + {"clusterName": "staging-01", "name": "staging-01", "server": "https://staging-01.example.com"}, + }, + expectedError: nil, + }, + { + name: "duck type generator resourceName and labelSelector.matchExpressions", + resourceName: resourceName, + labelSelector: metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "duck", + Operator: 
"In", + Values: []string{"all-species", "marbled"}, + }, + }}, + resource: duckType, + values: nil, + expected: nil, + expectedError: fmt.Errorf("There is a problem with the definition of the ClusterDecisionResource generator"), + }, + } + + // convert []client.Object to []runtime.Object, for use by kubefake package + runtimeClusters := []runtime.Object{} + for _, clientCluster := range clusters { + runtimeClusters = append(runtimeClusters, clientCluster) + } + + for _, testCase := range testCases { + + t.Run(testCase.name, func(t *testing.T) { + + appClientset := kubefake.NewSimpleClientset(append(runtimeClusters, configMap)...) + + gvrToListKind := map[schema.GroupVersionResource]string{{ + Group: "mallard.io", + Version: "v1", + Resource: "ducks", + }: "DuckList"} + + fakeDynClient := dynfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, testCase.resource) + + var duckTypeGenerator = NewDuckTypeGenerator(context.Background(), fakeDynClient, appClientset, "namespace") + + applicationSetInfo := argoprojiov1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "set", + }, + Spec: argoprojiov1alpha1.ApplicationSetSpec{}, + } + + got, err := duckTypeGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{ + ClusterDecisionResource: &argoprojiov1alpha1.DuckTypeGenerator{ + ConfigMapRef: "my-configmap", + Name: testCase.resourceName, + LabelSelector: testCase.labelSelector, + Values: testCase.values, + }, + }, &applicationSetInfo) + + if testCase.expectedError != nil { + assert.EqualError(t, err, testCase.expectedError.Error()) + } else { + assert.NoError(t, err) + assert.ElementsMatch(t, testCase.expected, got) + } + }) + } +} + +func TestGenerateParamsForDuckTypeGoTemplate(t *testing.T) { + clusters := []client.Object{ + &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "staging-01", + Namespace: "namespace", + Labels: 
map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + "environment": "staging", + "org": "foo", + }, + Annotations: map[string]string{ + "foo.argoproj.io": "staging", + }, + }, + Data: map[string][]byte{ + "config": []byte("{}"), + "name": []byte("staging-01"), + "server": []byte("https://staging-01.example.com"), + }, + Type: corev1.SecretType("Opaque"), + }, + &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "production-01", + Namespace: "namespace", + Labels: map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + "environment": "production", + "org": "bar", + }, + Annotations: map[string]string{ + "foo.argoproj.io": "production", + }, + }, + Data: map[string][]byte{ + "config": []byte("{}"), + "name": []byte("production-01"), + "server": []byte("https://production-01.example.com"), + }, + Type: corev1.SecretType("Opaque"), + }, + } + + duckType := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": resourceApiVersion, + "kind": "Duck", + "metadata": map[string]interface{}{ + "name": resourceName, + "namespace": "namespace", + "labels": map[string]interface{}{"duck": "all-species"}, + }, + "status": map[string]interface{}{ + "decisions": []interface{}{ + map[string]interface{}{ + "clusterName": "staging-01", + }, + map[string]interface{}{ + "clusterName": "production-01", + }, + }, + }, + }, + } + + duckTypeProdOnly := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": resourceApiVersion, + "kind": "Duck", + "metadata": map[string]interface{}{ + "name": resourceName, + "namespace": "namespace", + "labels": map[string]interface{}{"duck": "spotted"}, + }, + "status": map[string]interface{}{ + "decisions": []interface{}{ + map[string]interface{}{ + "clusterName": "production-01", + }, + }, + }, + }, + } + + duckTypeEmpty := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": 
resourceApiVersion, + "kind": "Duck", + "metadata": map[string]interface{}{ + "name": resourceName, + "namespace": "namespace", + "labels": map[string]interface{}{"duck": "canvasback"}, + }, + "status": map[string]interface{}{}, + }, + } + + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-configmap", + Namespace: "namespace", + }, + Data: map[string]string{ + "apiVersion": resourceApiVersion, + "kind": resourceKind, + "statusListKey": "decisions", + "matchKey": "clusterName", + }, + } + + testCases := []struct { + name string + configMapRef string + resourceName string + labelSelector metav1.LabelSelector + resource *unstructured.Unstructured + values map[string]string + expected []map[string]interface{} + expectedError error + }{ + { + name: "no duck resource", + resourceName: "", + resource: duckType, + values: nil, + expected: []map[string]interface{}{}, + expectedError: fmt.Errorf("There is a problem with the definition of the ClusterDecisionResource generator"), + }, + /*** This does not work with the FAKE runtime client, fieldSelectors are broken. 
+ { + name: "invalid name for duck resource", + resourceName: resourceName + "-different", + resource: duckType, + values: nil, + expected: []map[string]string{}, + expectedError: fmt.Errorf("duck.mallard.io \"quak\" not found"), + }, + ***/ + { + name: "duck type generator resourceName", + resourceName: resourceName, + resource: duckType, + values: nil, + expected: []map[string]interface{}{ + {"clusterName": "production-01", "name": "production-01", "server": "https://production-01.example.com"}, + + {"clusterName": "staging-01", "name": "staging-01", "server": "https://staging-01.example.com"}, + }, + expectedError: nil, + }, + { + name: "production-only", + resourceName: resourceName, + resource: duckTypeProdOnly, + values: map[string]string{ + "foo": "bar", + }, + expected: []map[string]interface{}{ + {"clusterName": "production-01", "values": map[string]string{"foo": "bar"}, "name": "production-01", "server": "https://production-01.example.com"}, + }, + expectedError: nil, + }, + { + name: "duck type empty status", + resourceName: resourceName, + resource: duckTypeEmpty, + values: nil, + expected: nil, + expectedError: nil, + }, + { + name: "duck type empty status labelSelector.matchLabels", + resourceName: "", + labelSelector: metav1.LabelSelector{MatchLabels: map[string]string{"duck": "canvasback"}}, + resource: duckTypeEmpty, + values: nil, + expected: nil, + expectedError: nil, + }, + { + name: "duck type generator labelSelector.matchLabels", + resourceName: "", + labelSelector: metav1.LabelSelector{MatchLabels: map[string]string{"duck": "all-species"}}, + resource: duckType, + values: nil, + expected: []map[string]interface{}{ + {"clusterName": "production-01", "name": "production-01", "server": "https://production-01.example.com"}, + + {"clusterName": "staging-01", "name": "staging-01", "server": "https://staging-01.example.com"}, + }, + expectedError: nil, + }, + { + name: "production-only labelSelector.matchLabels", + resourceName: "", + resource: 
duckTypeProdOnly, + labelSelector: metav1.LabelSelector{MatchLabels: map[string]string{"duck": "spotted"}}, + values: map[string]string{ + "foo": "bar", + }, + expected: []map[string]interface{}{ + {"clusterName": "production-01", "values": map[string]string{"foo": "bar"}, "name": "production-01", "server": "https://production-01.example.com"}, + }, + expectedError: nil, + }, + { + name: "duck type generator labelSelector.matchExpressions", + resourceName: "", + labelSelector: metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "duck", + Operator: "In", + Values: []string{"all-species", "marbled"}, + }, + }}, + resource: duckType, + values: nil, + expected: []map[string]interface{}{ + {"clusterName": "production-01", "name": "production-01", "server": "https://production-01.example.com"}, + + {"clusterName": "staging-01", "name": "staging-01", "server": "https://staging-01.example.com"}, + }, + expectedError: nil, + }, + { + name: "duck type generator resourceName and labelSelector.matchExpressions", + resourceName: resourceName, + labelSelector: metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "duck", + Operator: "In", + Values: []string{"all-species", "marbled"}, + }, + }}, + resource: duckType, + values: nil, + expected: nil, + expectedError: fmt.Errorf("There is a problem with the definition of the ClusterDecisionResource generator"), + }, + } + + // convert []client.Object to []runtime.Object, for use by kubefake package + runtimeClusters := []runtime.Object{} + for _, clientCluster := range clusters { + runtimeClusters = append(runtimeClusters, clientCluster) + } + + for _, testCase := range testCases { + + t.Run(testCase.name, func(t *testing.T) { + + appClientset := kubefake.NewSimpleClientset(append(runtimeClusters, configMap)...) 
+ + gvrToListKind := map[schema.GroupVersionResource]string{{ + Group: "mallard.io", + Version: "v1", + Resource: "ducks", + }: "DuckList"} + + fakeDynClient := dynfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, testCase.resource) + + var duckTypeGenerator = NewDuckTypeGenerator(context.Background(), fakeDynClient, appClientset, "namespace") + + applicationSetInfo := argoprojiov1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "set", + }, + Spec: argoprojiov1alpha1.ApplicationSetSpec{ + GoTemplate: true, + }, + } + + got, err := duckTypeGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{ + ClusterDecisionResource: &argoprojiov1alpha1.DuckTypeGenerator{ + ConfigMapRef: "my-configmap", + Name: testCase.resourceName, + LabelSelector: testCase.labelSelector, + Values: testCase.values, + }, + }, &applicationSetInfo) + + if testCase.expectedError != nil { + assert.EqualError(t, err, testCase.expectedError.Error()) + } else { + assert.NoError(t, err) + assert.ElementsMatch(t, testCase.expected, got) + } + }) + } +} diff --git a/applicationset/generators/generator_spec_processor.go b/applicationset/generators/generator_spec_processor.go new file mode 100644 index 0000000000000..494b2e8d9a37d --- /dev/null +++ b/applicationset/generators/generator_spec_processor.go @@ -0,0 +1,173 @@ +package generators + +import ( + "fmt" + "reflect" + + "github.com/jeremywohl/flatten" + + "github.com/argoproj/argo-cd/v2/applicationset/utils" + + "k8s.io/apimachinery/pkg/labels" + + argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + + "github.com/imdario/mergo" + log "github.com/sirupsen/logrus" +) + +const ( + selectorKey = "Selector" +) + +type TransformResult struct { + Params []map[string]interface{} + Template argoprojiov1alpha1.ApplicationSetTemplate +} + +// Transform a spec generator to list of paramSets and a template +func Transform(requestedGenerator 
argoprojiov1alpha1.ApplicationSetGenerator, allGenerators map[string]Generator, baseTemplate argoprojiov1alpha1.ApplicationSetTemplate, appSet *argoprojiov1alpha1.ApplicationSet, genParams map[string]interface{}) ([]TransformResult, error) { + // This is a custom version of the `LabelSelectorAsSelector` that is in k8s.io/apimachinery. This has been copied + // verbatim from that package, with the difference that we do not have any restrictions on label values. This is done + // so that, among other things, we can match on cluster urls. + selector, err := utils.LabelSelectorAsSelector(requestedGenerator.Selector) + if err != nil { + return nil, fmt.Errorf("error parsing label selector: %w", err) + } + + res := []TransformResult{} + var firstError error + interpolatedGenerator := requestedGenerator.DeepCopy() + + generators := GetRelevantGenerators(&requestedGenerator, allGenerators) + for _, g := range generators { + // we call mergeGeneratorTemplate first because GenerateParams might be more costly so we want to fail fast if there is an error + mergedTemplate, err := mergeGeneratorTemplate(g, &requestedGenerator, baseTemplate) + if err != nil { + log.WithError(err).WithField("generator", g). + Error("error generating params") + if firstError == nil { + firstError = err + } + continue + } + var params []map[string]interface{} + if len(genParams) != 0 { + tempInterpolatedGenerator, err := InterpolateGenerator(&requestedGenerator, genParams, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions) + interpolatedGenerator = &tempInterpolatedGenerator + if err != nil { + log.WithError(err).WithField("genParams", genParams). + Error("error interpolating params for generator") + if firstError == nil { + firstError = err + } + continue + } + } + params, err = g.GenerateParams(interpolatedGenerator, appSet) + if err != nil { + log.WithError(err).WithField("generator", g). 
+ Error("error generating params") + if firstError == nil { + firstError = err + } + continue + } + var filterParams []map[string]interface{} + for _, param := range params { + flatParam, err := flattenParameters(param) + if err != nil { + log.WithError(err).WithField("generator", g). + Error("error flattening params") + if firstError == nil { + firstError = err + } + continue + } + + if requestedGenerator.Selector != nil && !selector.Matches(labels.Set(flatParam)) { + continue + } + filterParams = append(filterParams, param) + } + + res = append(res, TransformResult{ + Params: filterParams, + Template: mergedTemplate, + }) + } + + return res, firstError +} + +func GetRelevantGenerators(requestedGenerator *argoprojiov1alpha1.ApplicationSetGenerator, generators map[string]Generator) []Generator { + var res []Generator + + v := reflect.Indirect(reflect.ValueOf(requestedGenerator)) + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + if !field.CanInterface() { + continue + } + name := v.Type().Field(i).Name + if name == selectorKey { + continue + } + + if !reflect.ValueOf(field.Interface()).IsNil() { + res = append(res, generators[name]) + } + } + + return res +} + +func flattenParameters(in map[string]interface{}) (map[string]string, error) { + flat, err := flatten.Flatten(in, "", flatten.DotStyle) + if err != nil { + return nil, fmt.Errorf("error flatenning parameters: %w", err) + } + + out := make(map[string]string, len(flat)) + for k, v := range flat { + out[k] = fmt.Sprintf("%v", v) + } + + return out, nil +} + +func mergeGeneratorTemplate(g Generator, requestedGenerator *argoprojiov1alpha1.ApplicationSetGenerator, applicationSetTemplate argoprojiov1alpha1.ApplicationSetTemplate) (argoprojiov1alpha1.ApplicationSetTemplate, error) { + // Make a copy of the value from `GetTemplate()` before merge, rather than copying directly into + // the provided parameter (which will touch the original resource object returned by client-go) + dest := 
g.GetTemplate(requestedGenerator).DeepCopy() + + err := mergo.Merge(dest, applicationSetTemplate) + + return *dest, err +} + +// InterpolateGenerator allows interpolating the matrix's 2nd child generator with values from the 1st child generator +// "params" parameter is an array, where each index corresponds to a generator. Each index contains a map w/ that generator's parameters. +func InterpolateGenerator(requestedGenerator *argoprojiov1alpha1.ApplicationSetGenerator, params map[string]interface{}, useGoTemplate bool, goTemplateOptions []string) (argoprojiov1alpha1.ApplicationSetGenerator, error) { + render := utils.Render{} + interpolatedGenerator, err := render.RenderGeneratorParams(requestedGenerator, params, useGoTemplate, goTemplateOptions) + if err != nil { + log.WithError(err).WithField("interpolatedGenerator", interpolatedGenerator).Error("error interpolating generator with other generator's parameter") + return argoprojiov1alpha1.ApplicationSetGenerator{}, err + } + + return *interpolatedGenerator, nil +} + +// Fixes https://github.com/argoproj/argo-cd/issues/11982 while ensuring backwards compatibility. +// This is only a short-term solution and should be removed in a future major version. 
+func dropDisabledNestedSelectors(generators []argoprojiov1alpha1.ApplicationSetNestedGenerator) bool { + var foundSelector bool + for i := range generators { + if generators[i].Selector != nil { + foundSelector = true + generators[i].Selector = nil + } + } + return foundSelector +} diff --git a/applicationset/generators/generator_spec_processor_test.go b/applicationset/generators/generator_spec_processor_test.go new file mode 100644 index 0000000000000..b5838e7af7cbe --- /dev/null +++ b/applicationset/generators/generator_spec_processor_test.go @@ -0,0 +1,560 @@ +package generators + +import ( + "context" + "testing" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/argoproj/argo-cd/v2/applicationset/services/mocks" + + argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + + "github.com/stretchr/testify/mock" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + kubefake "k8s.io/client-go/kubernetes/fake" + crtclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestMatchValues(t *testing.T) { + testCases := []struct { + name string + elements []apiextensionsv1.JSON + selector *metav1.LabelSelector + expected []map[string]interface{} + }{ + { + name: "no filter", + elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "cluster","url": "url"}`)}}, + selector: &metav1.LabelSelector{}, + expected: []map[string]interface{}{{"cluster": "cluster", "url": "url"}}, + }, + { + name: "nil", + elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "cluster","url": "url"}`)}}, + selector: nil, + expected: []map[string]interface{}{{"cluster": "cluster", "url": "url"}}, + }, + { + name: "values.foo should be foo but is ignore element", + elements: 
[]apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "cluster","url": "url","values":{"foo":"bar"}}`)}}, + selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "values.foo": "foo", + }, + }, + expected: []map[string]interface{}{}, + }, + { + name: "values.foo should be bar", + elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "cluster","url": "url","values":{"foo":"bar"}}`)}}, + selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "values.foo": "bar", + }, + }, + expected: []map[string]interface{}{{"cluster": "cluster", "url": "url", "values.foo": "bar"}}, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + var listGenerator = NewListGenerator() + var data = map[string]Generator{ + "List": listGenerator, + } + + applicationSetInfo := argov1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "set", + }, + Spec: argov1alpha1.ApplicationSetSpec{ + GoTemplate: false, + }, + } + + results, err := Transform(argov1alpha1.ApplicationSetGenerator{ + Selector: testCase.selector, + List: &argov1alpha1.ListGenerator{ + Elements: testCase.elements, + Template: emptyTemplate(), + }}, + data, + emptyTemplate(), + &applicationSetInfo, nil) + + assert.NoError(t, err) + assert.ElementsMatch(t, testCase.expected, results[0].Params) + }) + } +} + +func TestMatchValuesGoTemplate(t *testing.T) { + testCases := []struct { + name string + elements []apiextensionsv1.JSON + selector *metav1.LabelSelector + expected []map[string]interface{} + }{ + { + name: "no filter", + elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "cluster","url": "url"}`)}}, + selector: &metav1.LabelSelector{}, + expected: []map[string]interface{}{{"cluster": "cluster", "url": "url"}}, + }, + { + name: "nil", + elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "cluster","url": "url"}`)}}, + selector: nil, + expected: []map[string]interface{}{{"cluster": "cluster", "url": "url"}}, + }, + { + name: "values.foo 
should be foo but is ignore element", + elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "cluster","url": "url","values":{"foo":"bar"}}`)}}, + selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "values.foo": "foo", + }, + }, + expected: []map[string]interface{}{}, + }, + { + name: "values.foo should be bar", + elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "cluster","url": "url","values":{"foo":"bar"}}`)}}, + selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "values.foo": "bar", + }, + }, + expected: []map[string]interface{}{{"cluster": "cluster", "url": "url", "values": map[string]interface{}{"foo": "bar"}}}, + }, + { + name: "values.0 should be bar", + elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "cluster","url": "url","values":["bar"]}`)}}, + selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "values.0": "bar", + }, + }, + expected: []map[string]interface{}{{"cluster": "cluster", "url": "url", "values": []interface{}{"bar"}}}, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + var listGenerator = NewListGenerator() + var data = map[string]Generator{ + "List": listGenerator, + } + + applicationSetInfo := argov1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "set", + }, + Spec: argov1alpha1.ApplicationSetSpec{ + GoTemplate: true, + }, + } + + results, err := Transform(argov1alpha1.ApplicationSetGenerator{ + Selector: testCase.selector, + List: &argov1alpha1.ListGenerator{ + Elements: testCase.elements, + Template: emptyTemplate(), + }}, + data, + emptyTemplate(), + &applicationSetInfo, nil) + + assert.NoError(t, err) + assert.ElementsMatch(t, testCase.expected, results[0].Params) + }) + } +} + +func TestTransForm(t *testing.T) { + testCases := []struct { + name string + selector *metav1.LabelSelector + expected []map[string]interface{} + }{ + { + name: "server filter", + selector: &metav1.LabelSelector{ + 
MatchLabels: map[string]string{"server": "https://production-01.example.com"}, + }, + expected: []map[string]interface{}{{ + "metadata.annotations.foo.argoproj.io": "production", + "metadata.labels.argocd.argoproj.io/secret-type": "cluster", + "metadata.labels.environment": "production", + "metadata.labels.org": "bar", + "name": "production_01/west", + "nameNormalized": "production-01-west", + "server": "https://production-01.example.com", + }}, + }, + { + name: "server filter with long url", + selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"server": "https://some-really-long-url-that-will-exceed-63-characters.com"}, + }, + expected: []map[string]interface{}{{ + "metadata.annotations.foo.argoproj.io": "production", + "metadata.labels.argocd.argoproj.io/secret-type": "cluster", + "metadata.labels.environment": "production", + "metadata.labels.org": "bar", + "name": "some-really-long-server-url", + "nameNormalized": "some-really-long-server-url", + "server": "https://some-really-long-url-that-will-exceed-63-characters.com", + }}, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + testGenerators := map[string]Generator{ + "Clusters": getMockClusterGenerator(), + } + + applicationSetInfo := argov1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "set", + }, + Spec: argov1alpha1.ApplicationSetSpec{}, + } + + results, err := Transform( + argov1alpha1.ApplicationSetGenerator{ + Selector: testCase.selector, + Clusters: &argov1alpha1.ClusterGenerator{ + Selector: metav1.LabelSelector{}, + Template: argov1alpha1.ApplicationSetTemplate{}, + Values: nil, + }}, + testGenerators, + emptyTemplate(), + &applicationSetInfo, nil) + + assert.NoError(t, err) + assert.ElementsMatch(t, testCase.expected, results[0].Params) + }) + } +} + +func emptyTemplate() argov1alpha1.ApplicationSetTemplate { + return argov1alpha1.ApplicationSetTemplate{ + Spec: argov1alpha1.ApplicationSpec{ + Project: "project", + }, + } 
+} + +func getMockClusterGenerator() Generator { + clusters := []crtclient.Object{ + &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "staging-01", + Namespace: "namespace", + Labels: map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + "environment": "staging", + "org": "foo", + }, + Annotations: map[string]string{ + "foo.argoproj.io": "staging", + }, + }, + Data: map[string][]byte{ + "config": []byte("{}"), + "name": []byte("staging-01"), + "server": []byte("https://staging-01.example.com"), + }, + Type: corev1.SecretType("Opaque"), + }, + &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "production-01", + Namespace: "namespace", + Labels: map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + "environment": "production", + "org": "bar", + }, + Annotations: map[string]string{ + "foo.argoproj.io": "production", + }, + }, + Data: map[string][]byte{ + "config": []byte("{}"), + "name": []byte("production_01/west"), + "server": []byte("https://production-01.example.com"), + }, + Type: corev1.SecretType("Opaque"), + }, + &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "some-really-long-server-url", + Namespace: "namespace", + Labels: map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + "environment": "production", + "org": "bar", + }, + Annotations: map[string]string{ + "foo.argoproj.io": "production", + }, + }, + Data: map[string][]byte{ + "config": []byte("{}"), + "name": []byte("some-really-long-server-url"), + "server": []byte("https://some-really-long-url-that-will-exceed-63-characters.com"), + }, + Type: corev1.SecretType("Opaque"), + }, + } + runtimeClusters := []runtime.Object{} + for _, clientCluster := range clusters { + runtimeClusters = append(runtimeClusters, clientCluster) + } 
+ appClientset := kubefake.NewSimpleClientset(runtimeClusters...) + + fakeClient := fake.NewClientBuilder().WithObjects(clusters...).Build() + return NewClusterGenerator(fakeClient, context.Background(), appClientset, "namespace") +} + +func getMockGitGenerator() Generator { + argoCDServiceMock := mocks.Repos{} + argoCDServiceMock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return([]string{"app1", "app2", "app_3", "p1/app4"}, nil) + var gitGenerator = NewGitGenerator(&argoCDServiceMock) + return gitGenerator +} + +func TestGetRelevantGenerators(t *testing.T) { + + testGenerators := map[string]Generator{ + "Clusters": getMockClusterGenerator(), + "Git": getMockGitGenerator(), + } + + testGenerators["Matrix"] = NewMatrixGenerator(testGenerators) + testGenerators["Merge"] = NewMergeGenerator(testGenerators) + testGenerators["List"] = NewListGenerator() + + requestedGenerator := &argov1alpha1.ApplicationSetGenerator{ + List: &argov1alpha1.ListGenerator{ + Elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "cluster","url": "url","values":{"foo":"bar"}}`)}}, + }} + + relevantGenerators := GetRelevantGenerators(requestedGenerator, testGenerators) + assert.Len(t, relevantGenerators, 1) + assert.IsType(t, &ListGenerator{}, relevantGenerators[0]) + + requestedGenerator = &argov1alpha1.ApplicationSetGenerator{ + Clusters: &argov1alpha1.ClusterGenerator{ + Selector: metav1.LabelSelector{}, + Template: argov1alpha1.ApplicationSetTemplate{}, + Values: nil, + }, + } + + relevantGenerators = GetRelevantGenerators(requestedGenerator, testGenerators) + assert.Len(t, relevantGenerators, 1) + assert.IsType(t, &ClusterGenerator{}, relevantGenerators[0]) + + requestedGenerator = &argov1alpha1.ApplicationSetGenerator{ + Git: &argov1alpha1.GitGenerator{ + RepoURL: "", + Directories: nil, + Files: nil, + Revision: "", + RequeueAfterSeconds: nil, + Template: argov1alpha1.ApplicationSetTemplate{}, + }, + } + + relevantGenerators = 
GetRelevantGenerators(requestedGenerator, testGenerators) + assert.Len(t, relevantGenerators, 1) + assert.IsType(t, &GitGenerator{}, relevantGenerators[0]) +} + +func TestInterpolateGenerator(t *testing.T) { + requestedGenerator := &argov1alpha1.ApplicationSetGenerator{ + Clusters: &argov1alpha1.ClusterGenerator{ + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + "path-basename": "{{path.basename}}", + "path-zero": "{{path[0]}}", + "path-full": "{{path}}", + }}, + }, + } + gitGeneratorParams := map[string]interface{}{ + "path": "p1/p2/app3", + "path.basename": "app3", + "path[0]": "p1", + "path[1]": "p2", + "path.basenameNormalized": "app3", + } + interpolatedGenerator, err := InterpolateGenerator(requestedGenerator, gitGeneratorParams, false, nil) + if err != nil { + log.WithError(err).WithField("requestedGenerator", requestedGenerator).Error("error interpolating Generator") + return + } + assert.Equal(t, "app3", interpolatedGenerator.Clusters.Selector.MatchLabels["path-basename"]) + assert.Equal(t, "p1", interpolatedGenerator.Clusters.Selector.MatchLabels["path-zero"]) + assert.Equal(t, "p1/p2/app3", interpolatedGenerator.Clusters.Selector.MatchLabels["path-full"]) + + fileNamePath := argov1alpha1.GitFileGeneratorItem{ + Path: "{{name}}", + } + fileServerPath := argov1alpha1.GitFileGeneratorItem{ + Path: "{{server}}", + } + + requestedGenerator = &argov1alpha1.ApplicationSetGenerator{ + Git: &argov1alpha1.GitGenerator{ + Files: append([]argov1alpha1.GitFileGeneratorItem{}, fileNamePath, fileServerPath), + Template: argov1alpha1.ApplicationSetTemplate{}, + }, + } + clusterGeneratorParams := map[string]interface{}{ + "name": "production_01/west", "server": "https://production-01.example.com", + } + interpolatedGenerator, err = InterpolateGenerator(requestedGenerator, clusterGeneratorParams, false, nil) + if err != nil { + log.WithError(err).WithField("requestedGenerator", 
requestedGenerator).Error("error interpolating Generator") + return + } + assert.Equal(t, "production_01/west", interpolatedGenerator.Git.Files[0].Path) + assert.Equal(t, "https://production-01.example.com", interpolatedGenerator.Git.Files[1].Path) +} + +func TestInterpolateGenerator_go(t *testing.T) { + requestedGenerator := &argov1alpha1.ApplicationSetGenerator{ + Clusters: &argov1alpha1.ClusterGenerator{ + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + "path-basename": "{{base .path.path}}", + "path-zero": "{{index .path.segments 0}}", + "path-full": "{{.path.path}}", + "kubernetes.io/environment": `{{default "foo" .my_label}}`, + }}, + }, + } + gitGeneratorParams := map[string]interface{}{ + "path": map[string]interface{}{ + "path": "p1/p2/app3", + "segments": []string{"p1", "p2", "app3"}, + }, + } + interpolatedGenerator, err := InterpolateGenerator(requestedGenerator, gitGeneratorParams, true, nil) + require.NoError(t, err) + if err != nil { + log.WithError(err).WithField("requestedGenerator", requestedGenerator).Error("error interpolating Generator") + return + } + assert.Equal(t, "app3", interpolatedGenerator.Clusters.Selector.MatchLabels["path-basename"]) + assert.Equal(t, "p1", interpolatedGenerator.Clusters.Selector.MatchLabels["path-zero"]) + assert.Equal(t, "p1/p2/app3", interpolatedGenerator.Clusters.Selector.MatchLabels["path-full"]) + + fileNamePath := argov1alpha1.GitFileGeneratorItem{ + Path: "{{.name}}", + } + fileServerPath := argov1alpha1.GitFileGeneratorItem{ + Path: "{{.server}}", + } + + requestedGenerator = &argov1alpha1.ApplicationSetGenerator{ + Git: &argov1alpha1.GitGenerator{ + Files: append([]argov1alpha1.GitFileGeneratorItem{}, fileNamePath, fileServerPath), + Template: argov1alpha1.ApplicationSetTemplate{}, + }, + } + clusterGeneratorParams := map[string]interface{}{ + "name": "production_01/west", "server": "https://production-01.example.com", + } + 
interpolatedGenerator, err = InterpolateGenerator(requestedGenerator, clusterGeneratorParams, true, nil) + if err != nil { + log.WithError(err).WithField("requestedGenerator", requestedGenerator).Error("error interpolating Generator") + return + } + assert.Equal(t, "production_01/west", interpolatedGenerator.Git.Files[0].Path) + assert.Equal(t, "https://production-01.example.com", interpolatedGenerator.Git.Files[1].Path) +} + +func TestInterpolateGeneratorError(t *testing.T) { + type args struct { + requestedGenerator *argov1alpha1.ApplicationSetGenerator + params map[string]interface{} + useGoTemplate bool + goTemplateOptions []string + } + tests := []struct { + name string + args args + want argov1alpha1.ApplicationSetGenerator + expectedErrStr string + }{ + {name: "Empty Gen", args: args{ + requestedGenerator: nil, + params: nil, + useGoTemplate: false, + goTemplateOptions: nil, + }, want: argov1alpha1.ApplicationSetGenerator{}, expectedErrStr: "generator is empty"}, + {name: "No Params", args: args{ + requestedGenerator: &argov1alpha1.ApplicationSetGenerator{}, + params: map[string]interface{}{}, + useGoTemplate: false, + goTemplateOptions: nil, + }, want: argov1alpha1.ApplicationSetGenerator{}, expectedErrStr: ""}, + {name: "Error templating", args: args{ + requestedGenerator: &argov1alpha1.ApplicationSetGenerator{Git: &argov1alpha1.GitGenerator{ + RepoURL: "foo", + Files: []argov1alpha1.GitFileGeneratorItem{{Path: "bar/"}}, + Revision: "main", + Values: map[string]string{ + "git_test": "{{ toPrettyJson . 
}}", + "selection": "{{ default .override .test }}", + "resolved": "{{ index .rmap (default .override .test) }}", + }, + }}, + params: map[string]interface{}{ + "name": "in-cluster", + "override": "foo", + }, + useGoTemplate: true, + goTemplateOptions: []string{}, + }, want: argov1alpha1.ApplicationSetGenerator{}, expectedErrStr: "failed to replace parameters in generator: failed to execute go template {{ index .rmap (default .override .test) }}: template: :1:3: executing \"\" at : error calling index: index of untyped nil"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := InterpolateGenerator(tt.args.requestedGenerator, tt.args.params, tt.args.useGoTemplate, tt.args.goTemplateOptions) + if tt.expectedErrStr != "" { + assert.EqualError(t, err, tt.expectedErrStr) + } else { + require.NoError(t, err) + } + assert.Equalf(t, tt.want, got, "InterpolateGenerator(%v, %v, %v, %v)", tt.args.requestedGenerator, tt.args.params, tt.args.useGoTemplate, tt.args.goTemplateOptions) + }) + } +} diff --git a/applicationset/generators/git.go b/applicationset/generators/git.go new file mode 100644 index 0000000000000..07c1b11849cd0 --- /dev/null +++ b/applicationset/generators/git.go @@ -0,0 +1,284 @@ +package generators + +import ( + "context" + "fmt" + "path" + "sort" + "strconv" + "strings" + "time" + + "github.com/jeremywohl/flatten" + log "github.com/sirupsen/logrus" + "sigs.k8s.io/yaml" + + "github.com/argoproj/argo-cd/v2/applicationset/services" + "github.com/argoproj/argo-cd/v2/applicationset/utils" + argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +var _ Generator = (*GitGenerator)(nil) + +type GitGenerator struct { + repos services.Repos +} + +func NewGitGenerator(repos services.Repos) Generator { + g := &GitGenerator{ + repos: repos, + } + return g +} + +func (g *GitGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate { + 
return &appSetGenerator.Git.Template +} + +func (g *GitGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration { + + // Return a requeue default of 3 minutes, if no default is specified. + + if appSetGenerator.Git.RequeueAfterSeconds != nil { + return time.Duration(*appSetGenerator.Git.RequeueAfterSeconds) * time.Second + } + + return DefaultRequeueAfterSeconds +} + +func (g *GitGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, appSet *argoprojiov1alpha1.ApplicationSet) ([]map[string]interface{}, error) { + + if appSetGenerator == nil { + return nil, EmptyAppSetGeneratorError + } + + if appSetGenerator.Git == nil { + return nil, EmptyAppSetGeneratorError + } + + var err error + var res []map[string]interface{} + if len(appSetGenerator.Git.Directories) != 0 { + res, err = g.generateParamsForGitDirectories(appSetGenerator, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions) + } else if len(appSetGenerator.Git.Files) != 0 { + res, err = g.generateParamsForGitFiles(appSetGenerator, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions) + } else { + return nil, EmptyAppSetGeneratorError + } + if err != nil { + return nil, fmt.Errorf("error generating params from git: %w", err) + } + + return res, nil +} + +func (g *GitGenerator) generateParamsForGitDirectories(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, useGoTemplate bool, goTemplateOptions []string) ([]map[string]interface{}, error) { + + // Directories, not files + allPaths, err := g.repos.GetDirectories(context.TODO(), appSetGenerator.Git.RepoURL, appSetGenerator.Git.Revision) + if err != nil { + return nil, fmt.Errorf("error getting directories from repo: %w", err) + } + + log.WithFields(log.Fields{ + "allPaths": allPaths, + "total": len(allPaths), + "repoURL": appSetGenerator.Git.RepoURL, + "revision": appSetGenerator.Git.Revision, + "pathParamPrefix": appSetGenerator.Git.PathParamPrefix, + }).Info("applications 
result from the repo service") + + requestedApps := g.filterApps(appSetGenerator.Git.Directories, allPaths) + + res, err := g.generateParamsFromApps(requestedApps, appSetGenerator, useGoTemplate, goTemplateOptions) + if err != nil { + return nil, fmt.Errorf("error generating params from apps: %w", err) + } + + return res, nil +} + +func (g *GitGenerator) generateParamsForGitFiles(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, useGoTemplate bool, goTemplateOptions []string) ([]map[string]interface{}, error) { + + // Get all files that match the requested path string, removing duplicates + allFiles := make(map[string][]byte) + for _, requestedPath := range appSetGenerator.Git.Files { + files, err := g.repos.GetFiles(context.TODO(), appSetGenerator.Git.RepoURL, appSetGenerator.Git.Revision, requestedPath.Path) + if err != nil { + return nil, err + } + for filePath, content := range files { + allFiles[filePath] = content + } + } + + // Extract the unduplicated map into a list, and sort by path to ensure a deterministic + // processing order in the subsequent step + allPaths := []string{} + for path := range allFiles { + allPaths = append(allPaths, path) + } + sort.Strings(allPaths) + + // Generate params from each path, and return + res := []map[string]interface{}{} + for _, path := range allPaths { + + // A JSON / YAML file path can contain multiple sets of parameters (ie it is an array) + paramsArray, err := g.generateParamsFromGitFile(path, allFiles[path], appSetGenerator.Git.Values, useGoTemplate, goTemplateOptions, appSetGenerator.Git.PathParamPrefix) + if err != nil { + return nil, fmt.Errorf("unable to process file '%s': %v", path, err) + } + + res = append(res, paramsArray...) 
+ } + return res, nil +} + +func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []byte, values map[string]string, useGoTemplate bool, goTemplateOptions []string, pathParamPrefix string) ([]map[string]interface{}, error) { + objectsFound := []map[string]interface{}{} + + // First, we attempt to parse as an array + err := yaml.Unmarshal(fileContent, &objectsFound) + if err != nil { + // If unable to parse as an array, attempt to parse as a single object + singleObj := make(map[string]interface{}) + err = yaml.Unmarshal(fileContent, &singleObj) + if err != nil { + return nil, fmt.Errorf("unable to parse file: %v", err) + } + objectsFound = append(objectsFound, singleObj) + } else if len(objectsFound) == 0 { + // If file is valid but empty, add a default empty item + objectsFound = append(objectsFound, map[string]interface{}{}) + } + + res := []map[string]interface{}{} + + for _, objectFound := range objectsFound { + + params := map[string]interface{}{} + + if useGoTemplate { + for k, v := range objectFound { + params[k] = v + } + + paramPath := map[string]interface{}{} + + paramPath["path"] = path.Dir(filePath) + paramPath["basename"] = path.Base(paramPath["path"].(string)) + paramPath["filename"] = path.Base(filePath) + paramPath["basenameNormalized"] = utils.SanitizeName(path.Base(paramPath["path"].(string))) + paramPath["filenameNormalized"] = utils.SanitizeName(path.Base(paramPath["filename"].(string))) + paramPath["segments"] = strings.Split(paramPath["path"].(string), "/") + if pathParamPrefix != "" { + params[pathParamPrefix] = map[string]interface{}{"path": paramPath} + } else { + params["path"] = paramPath + } + } else { + flat, err := flatten.Flatten(objectFound, "", flatten.DotStyle) + if err != nil { + return nil, fmt.Errorf("error flattening object: %w", err) + } + for k, v := range flat { + params[k] = fmt.Sprintf("%v", v) + } + pathParamName := "path" + if pathParamPrefix != "" { + pathParamName = pathParamPrefix + "." 
+ pathParamName + } + params[pathParamName] = path.Dir(filePath) + params[pathParamName+".basename"] = path.Base(params[pathParamName].(string)) + params[pathParamName+".filename"] = path.Base(filePath) + params[pathParamName+".basenameNormalized"] = utils.SanitizeName(path.Base(params[pathParamName].(string))) + params[pathParamName+".filenameNormalized"] = utils.SanitizeName(path.Base(params[pathParamName+".filename"].(string))) + for k, v := range strings.Split(params[pathParamName].(string), "/") { + if len(v) > 0 { + params[pathParamName+"["+strconv.Itoa(k)+"]"] = v + } + } + } + + err := appendTemplatedValues(values, params, useGoTemplate, goTemplateOptions) + if err != nil { + return nil, fmt.Errorf("failed to append templated values: %w", err) + } + + res = append(res, params) + } + + return res, nil +} + +func (g *GitGenerator) filterApps(Directories []argoprojiov1alpha1.GitDirectoryGeneratorItem, allPaths []string) []string { + res := []string{} + for _, appPath := range allPaths { + appInclude := false + appExclude := false + // Iterating over each appPath and check whether directories object has requestedPath that matches the appPath + for _, requestedPath := range Directories { + match, err := path.Match(requestedPath.Path, appPath) + if err != nil { + log.WithError(err).WithField("requestedPath", requestedPath). 
+ WithField("appPath", appPath).Error("error while matching appPath to requestedPath") + continue + } + if match && !requestedPath.Exclude { + appInclude = true + } + if match && requestedPath.Exclude { + appExclude = true + } + } + // Whenever there is a path with exclude: true it wont be included, even if it is included in a different path pattern + if appInclude && !appExclude { + res = append(res, appPath) + } + } + return res +} + +func (g *GitGenerator) generateParamsFromApps(requestedApps []string, appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, useGoTemplate bool, goTemplateOptions []string) ([]map[string]interface{}, error) { + res := make([]map[string]interface{}, len(requestedApps)) + for i, a := range requestedApps { + + params := make(map[string]interface{}, 5) + + if useGoTemplate { + paramPath := map[string]interface{}{} + paramPath["path"] = a + paramPath["basename"] = path.Base(a) + paramPath["basenameNormalized"] = utils.SanitizeName(path.Base(a)) + paramPath["segments"] = strings.Split(paramPath["path"].(string), "/") + if appSetGenerator.Git.PathParamPrefix != "" { + params[appSetGenerator.Git.PathParamPrefix] = map[string]interface{}{"path": paramPath} + } else { + params["path"] = paramPath + } + } else { + pathParamName := "path" + if appSetGenerator.Git.PathParamPrefix != "" { + pathParamName = appSetGenerator.Git.PathParamPrefix + "." 
+ pathParamName + } + params[pathParamName] = a + params[pathParamName+".basename"] = path.Base(a) + params[pathParamName+".basenameNormalized"] = utils.SanitizeName(path.Base(a)) + for k, v := range strings.Split(params[pathParamName].(string), "/") { + if len(v) > 0 { + params[pathParamName+"["+strconv.Itoa(k)+"]"] = v + } + } + } + + err := appendTemplatedValues(appSetGenerator.Git.Values, params, useGoTemplate, goTemplateOptions) + if err != nil { + return nil, fmt.Errorf("failed to append templated values: %w", err) + } + + res[i] = params + } + + return res, nil +} diff --git a/applicationset/generators/git_test.go b/applicationset/generators/git_test.go new file mode 100644 index 0000000000000..f0d1d29bca6ec --- /dev/null +++ b/applicationset/generators/git_test.go @@ -0,0 +1,1358 @@ +package generators + +import ( + "fmt" + "testing" + + "github.com/argoproj/argo-cd/v2/applicationset/services/mocks" + argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func Test_generateParamsFromGitFile(t *testing.T) { + defaultContent := []byte(` +foo: + bar: baz +`) + type args struct { + filePath string + fileContent []byte + values map[string]string + useGoTemplate bool + goTemplateOptions []string + pathParamPrefix string + } + tests := []struct { + name string + args args + want []map[string]interface{} + wantErr bool + }{ + { + name: "empty file returns path parameters", + args: args{ + filePath: "path/dir/file_name.yaml", + fileContent: []byte(""), + values: map[string]string{}, + useGoTemplate: false, + }, + want: []map[string]interface{}{ + { + "path": "path/dir", + "path.basename": "dir", + "path.filename": "file_name.yaml", + "path.basenameNormalized": "dir", + "path.filenameNormalized": "file-name.yaml", + "path[0]": "path", + "path[1]": "dir", + }, + }, + }, + { + name: "invalid json/yaml file returns 
error", + args: args{ + filePath: "path/dir/file_name.yaml", + fileContent: []byte("this is not json or yaml"), + values: map[string]string{}, + useGoTemplate: false, + }, + wantErr: true, + }, + { + name: "file parameters are added to params", + args: args{ + filePath: "path/dir/file_name.yaml", + fileContent: defaultContent, + values: map[string]string{}, + useGoTemplate: false, + }, + want: []map[string]interface{}{ + { + "foo.bar": "baz", + "path": "path/dir", + "path.basename": "dir", + "path.filename": "file_name.yaml", + "path.basenameNormalized": "dir", + "path.filenameNormalized": "file-name.yaml", + "path[0]": "path", + "path[1]": "dir", + }, + }, + }, + { + name: "path parameter are prefixed", + args: args{ + filePath: "path/dir/file_name.yaml", + fileContent: defaultContent, + values: map[string]string{}, + useGoTemplate: false, + pathParamPrefix: "myRepo", + }, + want: []map[string]interface{}{ + { + "foo.bar": "baz", + "myRepo.path": "path/dir", + "myRepo.path.basename": "dir", + "myRepo.path.filename": "file_name.yaml", + "myRepo.path.basenameNormalized": "dir", + "myRepo.path.filenameNormalized": "file-name.yaml", + "myRepo.path[0]": "path", + "myRepo.path[1]": "dir", + }, + }, + }, + { + name: "file parameters are added to params with go template", + args: args{ + filePath: "path/dir/file_name.yaml", + fileContent: defaultContent, + values: map[string]string{}, + useGoTemplate: true, + }, + want: []map[string]interface{}{ + { + "foo": map[string]interface{}{ + "bar": "baz", + }, + "path": map[string]interface{}{ + "path": "path/dir", + "basename": "dir", + "filename": "file_name.yaml", + "basenameNormalized": "dir", + "filenameNormalized": "file-name.yaml", + "segments": []string{ + "path", + "dir", + }, + }, + }, + }, + }, + { + name: "path parameter are prefixed with go template", + args: args{ + filePath: "path/dir/file_name.yaml", + fileContent: defaultContent, + values: map[string]string{}, + useGoTemplate: true, + pathParamPrefix: "myRepo", + 
}, + want: []map[string]interface{}{ + { + "foo": map[string]interface{}{ + "bar": "baz", + }, + "myRepo": map[string]interface{}{ + "path": map[string]interface{}{ + "path": "path/dir", + "basename": "dir", + "filename": "file_name.yaml", + "basenameNormalized": "dir", + "filenameNormalized": "file-name.yaml", + "segments": []string{ + "path", + "dir", + }, + }, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + params, err := (*GitGenerator)(nil).generateParamsFromGitFile(tt.args.filePath, tt.args.fileContent, tt.args.values, tt.args.useGoTemplate, tt.args.goTemplateOptions, tt.args.pathParamPrefix) + if (err != nil) != tt.wantErr { + t.Errorf("GitGenerator.generateParamsFromGitFile() error = %v, wantErr %v", err, tt.wantErr) + return + } + assert.Equal(t, tt.want, params) + }) + } +} + +func TestGitGenerateParamsFromDirectories(t *testing.T) { + + cases := []struct { + name string + directories []argoprojiov1alpha1.GitDirectoryGeneratorItem + pathParamPrefix string + repoApps []string + repoError error + values map[string]string + expected []map[string]interface{} + expectedError error + }{ + { + name: "happy flow - created apps", + directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}}, + repoApps: []string{ + "app1", + "app2", + "app_3", + "p1/app4", + }, + repoError: nil, + expected: []map[string]interface{}{ + {"path": "app1", "path.basename": "app1", "path.basenameNormalized": "app1", "path[0]": "app1"}, + {"path": "app2", "path.basename": "app2", "path.basenameNormalized": "app2", "path[0]": "app2"}, + {"path": "app_3", "path.basename": "app_3", "path.basenameNormalized": "app-3", "path[0]": "app_3"}, + }, + expectedError: nil, + }, + { + name: "It prefixes path parameters with PathParamPrefix", + directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}}, + pathParamPrefix: "myRepo", + repoApps: []string{ + "app1", + "app2", + "app_3", + "p1/app4", + }, + repoError: nil, + expected: 
[]map[string]interface{}{ + {"myRepo.path": "app1", "myRepo.path.basename": "app1", "myRepo.path.basenameNormalized": "app1", "myRepo.path[0]": "app1"}, + {"myRepo.path": "app2", "myRepo.path.basename": "app2", "myRepo.path.basenameNormalized": "app2", "myRepo.path[0]": "app2"}, + {"myRepo.path": "app_3", "myRepo.path.basename": "app_3", "myRepo.path.basenameNormalized": "app-3", "myRepo.path[0]": "app_3"}, + }, + expectedError: nil, + }, + { + name: "It filters application according to the paths", + directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "p1/*"}, {Path: "p1/*/*"}}, + repoApps: []string{ + "app1", + "p1/app2", + "p1/p2/app3", + "p1/p2/p3/app4", + }, + repoError: nil, + expected: []map[string]interface{}{ + {"path": "p1/app2", "path.basename": "app2", "path[0]": "p1", "path[1]": "app2", "path.basenameNormalized": "app2"}, + {"path": "p1/p2/app3", "path.basename": "app3", "path[0]": "p1", "path[1]": "p2", "path[2]": "app3", "path.basenameNormalized": "app3"}, + }, + expectedError: nil, + }, + { + name: "It filters application according to the paths with Exclude", + directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "p1/*", Exclude: true}, {Path: "*"}, {Path: "*/*"}}, + repoApps: []string{ + "app1", + "app2", + "p1/app2", + "p1/app3", + "p2/app3", + }, + repoError: nil, + expected: []map[string]interface{}{ + {"path": "app1", "path.basename": "app1", "path[0]": "app1", "path.basenameNormalized": "app1"}, + {"path": "app2", "path.basename": "app2", "path[0]": "app2", "path.basenameNormalized": "app2"}, + {"path": "p2/app3", "path.basename": "app3", "path[0]": "p2", "path[1]": "app3", "path.basenameNormalized": "app3"}, + }, + expectedError: nil, + }, + { + name: "Expecting same exclude behavior with different order", + directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}, {Path: "*/*"}, {Path: "p1/*", Exclude: true}}, + repoApps: []string{ + "app1", + "app2", + "p1/app2", + "p1/app3", + "p2/app3", + }, + 
repoError: nil, + expected: []map[string]interface{}{ + {"path": "app1", "path.basename": "app1", "path[0]": "app1", "path.basenameNormalized": "app1"}, + {"path": "app2", "path.basename": "app2", "path[0]": "app2", "path.basenameNormalized": "app2"}, + {"path": "p2/app3", "path.basename": "app3", "path[0]": "p2", "path[1]": "app3", "path.basenameNormalized": "app3"}, + }, + expectedError: nil, + }, + { + name: "Value variable interpolation", + directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}, {Path: "*/*"}}, + repoApps: []string{ + "app1", + "p1/app2", + }, + repoError: nil, + values: map[string]string{ + "foo": "bar", + "aaa": "{{ path[0] }}", + "no-op": "{{ this-does-not-exist }}", + }, + expected: []map[string]interface{}{ + {"values.foo": "bar", "values.no-op": "{{ this-does-not-exist }}", "values.aaa": "app1", "path": "app1", "path.basename": "app1", "path[0]": "app1", "path.basenameNormalized": "app1"}, + {"values.foo": "bar", "values.no-op": "{{ this-does-not-exist }}", "values.aaa": "p1", "path": "p1/app2", "path.basename": "app2", "path[0]": "p1", "path[1]": "app2", "path.basenameNormalized": "app2"}, + }, + expectedError: nil, + }, + { + name: "handles empty response from repo server", + directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}}, + repoApps: []string{}, + repoError: nil, + expected: []map[string]interface{}{}, + expectedError: nil, + }, + { + name: "handles error from repo server", + directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}}, + repoApps: []string{}, + repoError: fmt.Errorf("error"), + expected: []map[string]interface{}{}, + expectedError: fmt.Errorf("error generating params from git: error getting directories from repo: error"), + }, + } + + for _, testCase := range cases { + testCaseCopy := testCase + + t.Run(testCaseCopy.name, func(t *testing.T) { + t.Parallel() + + argoCDServiceMock := mocks.Repos{} + + argoCDServiceMock.On("GetDirectories", mock.Anything, 
mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError) + + var gitGenerator = NewGitGenerator(&argoCDServiceMock) + applicationSetInfo := argoprojiov1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "set", + }, + Spec: argoprojiov1alpha1.ApplicationSetSpec{ + Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{ + Git: &argoprojiov1alpha1.GitGenerator{ + RepoURL: "RepoURL", + Revision: "Revision", + Directories: testCaseCopy.directories, + PathParamPrefix: testCaseCopy.pathParamPrefix, + Values: testCaseCopy.values, + }, + }}, + }, + } + + got, err := gitGenerator.GenerateParams(&applicationSetInfo.Spec.Generators[0], &applicationSetInfo) + + if testCaseCopy.expectedError != nil { + assert.EqualError(t, err, testCaseCopy.expectedError.Error()) + } else { + assert.NoError(t, err) + assert.Equal(t, testCaseCopy.expected, got) + } + + argoCDServiceMock.AssertExpectations(t) + }) + } +} + +func TestGitGenerateParamsFromDirectoriesGoTemplate(t *testing.T) { + + cases := []struct { + name string + directories []argoprojiov1alpha1.GitDirectoryGeneratorItem + pathParamPrefix string + repoApps []string + repoError error + expected []map[string]interface{} + expectedError error + }{ + { + name: "happy flow - created apps", + directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}}, + repoApps: []string{ + "app1", + "app2", + "app_3", + "p1/app4", + }, + repoError: nil, + expected: []map[string]interface{}{ + { + "path": map[string]interface{}{ + "path": "app1", + "basename": "app1", + "basenameNormalized": "app1", + "segments": []string{ + "app1", + }, + }, + }, + { + "path": map[string]interface{}{ + "path": "app2", + "basename": "app2", + "basenameNormalized": "app2", + "segments": []string{ + "app2", + }, + }, + }, + { + "path": map[string]interface{}{ + "path": "app_3", + "basename": "app_3", + "basenameNormalized": "app-3", + "segments": []string{ + "app_3", + }, + }, + }, + }, + expectedError: nil, + }, + 
{ + name: "It prefixes path parameters with PathParamPrefix", + directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}}, + pathParamPrefix: "myRepo", + repoApps: []string{ + "app1", + "app2", + "app_3", + "p1/app4", + }, + repoError: nil, + expected: []map[string]interface{}{ + { + "myRepo": map[string]interface{}{ + "path": map[string]interface{}{ + "path": "app1", + "basename": "app1", + "basenameNormalized": "app1", + "segments": []string{ + "app1", + }, + }, + }, + }, + { + "myRepo": map[string]interface{}{ + "path": map[string]interface{}{ + "path": "app2", + "basename": "app2", + "basenameNormalized": "app2", + "segments": []string{ + "app2", + }, + }, + }, + }, + { + "myRepo": map[string]interface{}{ + "path": map[string]interface{}{ + "path": "app_3", + "basename": "app_3", + "basenameNormalized": "app-3", + "segments": []string{ + "app_3", + }, + }, + }, + }, + }, + expectedError: nil, + }, + { + name: "It filters application according to the paths", + directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "p1/*"}, {Path: "p1/*/*"}}, + repoApps: []string{ + "app1", + "p1/app2", + "p1/p2/app3", + "p1/p2/p3/app4", + }, + repoError: nil, + expected: []map[string]interface{}{ + { + "path": map[string]interface{}{ + "path": "p1/app2", + "basename": "app2", + "basenameNormalized": "app2", + "segments": []string{ + "p1", + "app2", + }, + }, + }, + { + "path": map[string]interface{}{ + "path": "p1/p2/app3", + "basename": "app3", + "basenameNormalized": "app3", + "segments": []string{ + "p1", + "p2", + "app3", + }, + }, + }, + }, + expectedError: nil, + }, + { + name: "It filters application according to the paths with Exclude", + directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "p1/*", Exclude: true}, {Path: "*"}, {Path: "*/*"}}, + repoApps: []string{ + "app1", + "app2", + "p1/app2", + "p1/app3", + "p2/app3", + }, + repoError: nil, + expected: []map[string]interface{}{ + { + "path": map[string]interface{}{ + "path": 
"app1", + "basename": "app1", + "basenameNormalized": "app1", + "segments": []string{ + "app1", + }, + }, + }, + { + "path": map[string]interface{}{ + "path": "app2", + "basename": "app2", + "basenameNormalized": "app2", + "segments": []string{ + "app2", + }, + }, + }, + { + "path": map[string]interface{}{ + "path": "p2/app3", + "basename": "app3", + "basenameNormalized": "app3", + "segments": []string{ + "p2", + "app3", + }, + }, + }, + }, + expectedError: nil, + }, + { + name: "Expecting same exclude behavior with different order", + directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}, {Path: "*/*"}, {Path: "p1/*", Exclude: true}}, + repoApps: []string{ + "app1", + "app2", + "p1/app2", + "p1/app3", + "p2/app3", + }, + repoError: nil, + expected: []map[string]interface{}{ + + { + "path": map[string]interface{}{ + "path": "app1", + "basename": "app1", + "basenameNormalized": "app1", + "segments": []string{ + "app1", + }, + }, + }, + { + "path": map[string]interface{}{ + "path": "app2", + "basename": "app2", + "basenameNormalized": "app2", + "segments": []string{ + "app2", + }, + }, + }, + { + "path": map[string]interface{}{ + "path": "p2/app3", + "basename": "app3", + "basenameNormalized": "app3", + "segments": []string{ + "p2", + "app3", + }, + }, + }, + }, + expectedError: nil, + }, + { + name: "handles empty response from repo server", + directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}}, + repoApps: []string{}, + repoError: nil, + expected: []map[string]interface{}{}, + expectedError: nil, + }, + { + name: "handles error from repo server", + directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}}, + repoApps: []string{}, + repoError: fmt.Errorf("error"), + expected: []map[string]interface{}{}, + expectedError: fmt.Errorf("error generating params from git: error getting directories from repo: error"), + }, + } + + for _, testCase := range cases { + testCaseCopy := testCase + + t.Run(testCaseCopy.name, 
func(t *testing.T) { + t.Parallel() + + argoCDServiceMock := mocks.Repos{} + + argoCDServiceMock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError) + + var gitGenerator = NewGitGenerator(&argoCDServiceMock) + applicationSetInfo := argoprojiov1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "set", + }, + Spec: argoprojiov1alpha1.ApplicationSetSpec{ + GoTemplate: true, + Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{ + Git: &argoprojiov1alpha1.GitGenerator{ + RepoURL: "RepoURL", + Revision: "Revision", + Directories: testCaseCopy.directories, + PathParamPrefix: testCaseCopy.pathParamPrefix, + }, + }}, + }, + } + + got, err := gitGenerator.GenerateParams(&applicationSetInfo.Spec.Generators[0], &applicationSetInfo) + + if testCaseCopy.expectedError != nil { + assert.EqualError(t, err, testCaseCopy.expectedError.Error()) + } else { + assert.NoError(t, err) + assert.Equal(t, testCaseCopy.expected, got) + } + + argoCDServiceMock.AssertExpectations(t) + }) + } + +} + +func TestGitGenerateParamsFromFiles(t *testing.T) { + + cases := []struct { + name string + // files is the list of paths/globs to match + files []argoprojiov1alpha1.GitFileGeneratorItem + // repoFileContents maps repo path to the literal contents of that path + repoFileContents map[string][]byte + // if repoPathsError is non-nil, the call to GetPaths(...) 
will return this error value + repoPathsError error + values map[string]string + expected []map[string]interface{} + expectedError error + }{ + { + name: "happy flow: create params from git files", + files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.json"}}, + repoFileContents: map[string][]byte{ + "cluster-config/production/config.json": []byte(`{ + "cluster": { + "owner": "john.doe@example.com", + "name": "production", + "address": "https://kubernetes.default.svc" + }, + "key1": "val1", + "key2": { + "key2_1": "val2_1", + "key2_2": { + "key2_2_1": "val2_2_1" + } + }, + "key3": 123 +}`), + "cluster-config/staging/config.json": []byte(`{ + "cluster": { + "owner": "foo.bar@example.com", + "name": "staging", + "address": "https://kubernetes.default.svc" + } +}`), + }, + repoPathsError: nil, + expected: []map[string]interface{}{ + { + "cluster.owner": "john.doe@example.com", + "cluster.name": "production", + "cluster.address": "https://kubernetes.default.svc", + "key1": "val1", + "key2.key2_1": "val2_1", + "key2.key2_2.key2_2_1": "val2_2_1", + "key3": "123", + "path": "cluster-config/production", + "path.basename": "production", + "path[0]": "cluster-config", + "path[1]": "production", + "path.basenameNormalized": "production", + "path.filename": "config.json", + "path.filenameNormalized": "config.json", + }, + { + "cluster.owner": "foo.bar@example.com", + "cluster.name": "staging", + "cluster.address": "https://kubernetes.default.svc", + "path": "cluster-config/staging", + "path.basename": "staging", + "path[0]": "cluster-config", + "path[1]": "staging", + "path.basenameNormalized": "staging", + "path.filename": "config.json", + "path.filenameNormalized": "config.json", + }, + }, + expectedError: nil, + }, + { + name: "Value variable interpolation", + files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.json"}}, + repoFileContents: map[string][]byte{ + "cluster-config/production/config.json": []byte(`{ + "cluster": { + "owner": 
"john.doe@example.com", + "name": "production", + "address": "https://kubernetes.default.svc" + }, + "key1": "val1", + "key2": { + "key2_1": "val2_1", + "key2_2": { + "key2_2_1": "val2_2_1" + } + }, + "key3": 123 +}`), + "cluster-config/staging/config.json": []byte(`{ + "cluster": { + "owner": "foo.bar@example.com", + "name": "staging", + "address": "https://kubernetes.default.svc" + } +}`), + }, + repoPathsError: nil, + values: map[string]string{ + "aaa": "{{ cluster.owner }}", + "no-op": "{{ this-does-not-exist }}", + }, + expected: []map[string]interface{}{ + { + "cluster.owner": "john.doe@example.com", + "cluster.name": "production", + "cluster.address": "https://kubernetes.default.svc", + "key1": "val1", + "key2.key2_1": "val2_1", + "key2.key2_2.key2_2_1": "val2_2_1", + "key3": "123", + "path": "cluster-config/production", + "path.basename": "production", + "path[0]": "cluster-config", + "path[1]": "production", + "path.basenameNormalized": "production", + "path.filename": "config.json", + "path.filenameNormalized": "config.json", + "values.aaa": "john.doe@example.com", + "values.no-op": "{{ this-does-not-exist }}", + }, + { + "cluster.owner": "foo.bar@example.com", + "cluster.name": "staging", + "cluster.address": "https://kubernetes.default.svc", + "path": "cluster-config/staging", + "path.basename": "staging", + "path[0]": "cluster-config", + "path[1]": "staging", + "path.basenameNormalized": "staging", + "path.filename": "config.json", + "path.filenameNormalized": "config.json", + "values.aaa": "foo.bar@example.com", + "values.no-op": "{{ this-does-not-exist }}", + }, + }, + expectedError: nil, + }, + { + name: "handles error during getting repo paths", + files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.json"}}, + repoFileContents: map[string][]byte{}, + repoPathsError: fmt.Errorf("paths error"), + expected: []map[string]interface{}{}, + expectedError: fmt.Errorf("error generating params from git: paths error"), + }, + { + name: "test 
invalid JSON file returns error", + files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.json"}}, + repoFileContents: map[string][]byte{ + "cluster-config/production/config.json": []byte(`invalid json file`), + }, + repoPathsError: nil, + expected: []map[string]interface{}{}, + expectedError: fmt.Errorf("error generating params from git: unable to process file 'cluster-config/production/config.json': unable to parse file: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type map[string]interface {}"), + }, + { + name: "test JSON array", + files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.json"}}, + repoFileContents: map[string][]byte{ + "cluster-config/production/config.json": []byte(` +[ + { + "cluster": { + "owner": "john.doe@example.com", + "name": "production", + "address": "https://kubernetes.default.svc", + "inner": { + "one" : "two" + } + } + }, + { + "cluster": { + "owner": "john.doe@example.com", + "name": "staging", + "address": "https://kubernetes.default.svc" + } + } +]`), + }, + repoPathsError: nil, + expected: []map[string]interface{}{ + { + "cluster.owner": "john.doe@example.com", + "cluster.name": "production", + "cluster.address": "https://kubernetes.default.svc", + "cluster.inner.one": "two", + "path": "cluster-config/production", + "path.basename": "production", + "path[0]": "cluster-config", + "path[1]": "production", + "path.basenameNormalized": "production", + "path.filename": "config.json", + "path.filenameNormalized": "config.json", + }, + { + "cluster.owner": "john.doe@example.com", + "cluster.name": "staging", + "cluster.address": "https://kubernetes.default.svc", + "path": "cluster-config/production", + "path.basename": "production", + "path[0]": "cluster-config", + "path[1]": "production", + "path.basenameNormalized": "production", + "path.filename": "config.json", + "path.filenameNormalized": "config.json", + }, + }, + expectedError: nil, + }, + { + name: "Test 
YAML flow", + files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.yaml"}}, + repoFileContents: map[string][]byte{ + "cluster-config/production/config.yaml": []byte(` +cluster: + owner: john.doe@example.com + name: production + address: https://kubernetes.default.svc +key1: val1 +key2: + key2_1: val2_1 + key2_2: + key2_2_1: val2_2_1 +`), + "cluster-config/staging/config.yaml": []byte(` +cluster: + owner: foo.bar@example.com + name: staging + address: https://kubernetes.default.svc +`), + }, + repoPathsError: nil, + expected: []map[string]interface{}{ + { + "cluster.owner": "john.doe@example.com", + "cluster.name": "production", + "cluster.address": "https://kubernetes.default.svc", + "key1": "val1", + "key2.key2_1": "val2_1", + "key2.key2_2.key2_2_1": "val2_2_1", + "path": "cluster-config/production", + "path.basename": "production", + "path[0]": "cluster-config", + "path[1]": "production", + "path.basenameNormalized": "production", + "path.filename": "config.yaml", + "path.filenameNormalized": "config.yaml", + }, + { + "cluster.owner": "foo.bar@example.com", + "cluster.name": "staging", + "cluster.address": "https://kubernetes.default.svc", + "path": "cluster-config/staging", + "path.basename": "staging", + "path[0]": "cluster-config", + "path[1]": "staging", + "path.basenameNormalized": "staging", + "path.filename": "config.yaml", + "path.filenameNormalized": "config.yaml", + }, + }, + expectedError: nil, + }, + { + name: "test YAML array", + files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.yaml"}}, + repoFileContents: map[string][]byte{ + "cluster-config/production/config.yaml": []byte(` +- cluster: + owner: john.doe@example.com + name: production + address: https://kubernetes.default.svc + inner: + one: two +- cluster: + owner: john.doe@example.com + name: staging + address: https://kubernetes.default.svc`), + }, + repoPathsError: nil, + expected: []map[string]interface{}{ + { + "cluster.owner": "john.doe@example.com", + 
"cluster.name": "production", + "cluster.address": "https://kubernetes.default.svc", + "cluster.inner.one": "two", + "path": "cluster-config/production", + "path.basename": "production", + "path[0]": "cluster-config", + "path[1]": "production", + "path.basenameNormalized": "production", + "path.filename": "config.yaml", + "path.filenameNormalized": "config.yaml", + }, + { + "cluster.owner": "john.doe@example.com", + "cluster.name": "staging", + "cluster.address": "https://kubernetes.default.svc", + "path": "cluster-config/production", + "path.basename": "production", + "path[0]": "cluster-config", + "path[1]": "production", + "path.basenameNormalized": "production", + "path.filename": "config.yaml", + "path.filenameNormalized": "config.yaml", + }, + }, + expectedError: nil, + }, + } + + for _, testCase := range cases { + testCaseCopy := testCase + + t.Run(testCaseCopy.name, func(t *testing.T) { + t.Parallel() + + argoCDServiceMock := mocks.Repos{} + argoCDServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
+ Return(testCaseCopy.repoFileContents, testCaseCopy.repoPathsError) + + var gitGenerator = NewGitGenerator(&argoCDServiceMock) + applicationSetInfo := argoprojiov1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "set", + }, + Spec: argoprojiov1alpha1.ApplicationSetSpec{ + Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{ + Git: &argoprojiov1alpha1.GitGenerator{ + RepoURL: "RepoURL", + Revision: "Revision", + Files: testCaseCopy.files, + Values: testCaseCopy.values, + }, + }}, + }, + } + + got, err := gitGenerator.GenerateParams(&applicationSetInfo.Spec.Generators[0], &applicationSetInfo) + fmt.Println(got, err) + + if testCaseCopy.expectedError != nil { + assert.EqualError(t, err, testCaseCopy.expectedError.Error()) + } else { + assert.NoError(t, err) + assert.ElementsMatch(t, testCaseCopy.expected, got) + } + + argoCDServiceMock.AssertExpectations(t) + }) + } +} + +func TestGitGenerateParamsFromFilesGoTemplate(t *testing.T) { + + cases := []struct { + name string + // files is the list of paths/globs to match + files []argoprojiov1alpha1.GitFileGeneratorItem + // repoFileContents maps repo path to the literal contents of that path + repoFileContents map[string][]byte + // if repoPathsError is non-nil, the call to GetPaths(...) 
will return this error value + repoPathsError error + expected []map[string]interface{} + expectedError error + }{ + { + name: "happy flow: create params from git files", + files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.json"}}, + repoFileContents: map[string][]byte{ + "cluster-config/production/config.json": []byte(`{ + "cluster": { + "owner": "john.doe@example.com", + "name": "production", + "address": "https://kubernetes.default.svc" + }, + "key1": "val1", + "key2": { + "key2_1": "val2_1", + "key2_2": { + "key2_2_1": "val2_2_1" + } + }, + "key3": 123 +}`), + "cluster-config/staging/config.json": []byte(`{ + "cluster": { + "owner": "foo.bar@example.com", + "name": "staging", + "address": "https://kubernetes.default.svc" + } +}`), + }, + repoPathsError: nil, + expected: []map[string]interface{}{ + { + "cluster": map[string]interface{}{ + "owner": "john.doe@example.com", + "name": "production", + "address": "https://kubernetes.default.svc", + }, + "key1": "val1", + "key2": map[string]interface{}{ + "key2_1": "val2_1", + "key2_2": map[string]interface{}{ + "key2_2_1": "val2_2_1", + }, + }, + "key3": float64(123), + "path": map[string]interface{}{ + "path": "cluster-config/production", + "basename": "production", + "filename": "config.json", + "basenameNormalized": "production", + "filenameNormalized": "config.json", + "segments": []string{ + "cluster-config", + "production", + }, + }, + }, + { + "cluster": map[string]interface{}{ + "owner": "foo.bar@example.com", + "name": "staging", + "address": "https://kubernetes.default.svc", + }, + "path": map[string]interface{}{ + "path": "cluster-config/staging", + "basename": "staging", + "filename": "config.json", + "basenameNormalized": "staging", + "filenameNormalized": "config.json", + "segments": []string{ + "cluster-config", + "staging", + }, + }, + }, + }, + expectedError: nil, + }, + { + name: "handles error during getting repo paths", + files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: 
"**/config.json"}}, + repoFileContents: map[string][]byte{}, + repoPathsError: fmt.Errorf("paths error"), + expected: []map[string]interface{}{}, + expectedError: fmt.Errorf("error generating params from git: paths error"), + }, + { + name: "test invalid JSON file returns error", + files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.json"}}, + repoFileContents: map[string][]byte{ + "cluster-config/production/config.json": []byte(`invalid json file`), + }, + repoPathsError: nil, + expected: []map[string]interface{}{}, + expectedError: fmt.Errorf("error generating params from git: unable to process file 'cluster-config/production/config.json': unable to parse file: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type map[string]interface {}"), + }, + { + name: "test JSON array", + files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.json"}}, + repoFileContents: map[string][]byte{ + "cluster-config/production/config.json": []byte(` +[ + { + "cluster": { + "owner": "john.doe@example.com", + "name": "production", + "address": "https://kubernetes.default.svc", + "inner": { + "one" : "two" + } + } + }, + { + "cluster": { + "owner": "john.doe@example.com", + "name": "staging", + "address": "https://kubernetes.default.svc" + } + } +]`), + }, + repoPathsError: nil, + expected: []map[string]interface{}{ + { + "cluster": map[string]interface{}{ + "owner": "john.doe@example.com", + "name": "production", + "address": "https://kubernetes.default.svc", + "inner": map[string]interface{}{ + "one": "two", + }, + }, + "path": map[string]interface{}{ + "path": "cluster-config/production", + "basename": "production", + "filename": "config.json", + "basenameNormalized": "production", + "filenameNormalized": "config.json", + "segments": []string{ + "cluster-config", + "production", + }, + }, + }, + { + "cluster": map[string]interface{}{ + "owner": "john.doe@example.com", + "name": "staging", + "address": 
"https://kubernetes.default.svc", + }, + "path": map[string]interface{}{ + "path": "cluster-config/production", + "basename": "production", + "filename": "config.json", + "basenameNormalized": "production", + "filenameNormalized": "config.json", + "segments": []string{ + "cluster-config", + "production", + }, + }, + }, + }, + expectedError: nil, + }, + { + name: "Test YAML flow", + files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.yaml"}}, + repoFileContents: map[string][]byte{ + "cluster-config/production/config.yaml": []byte(` +cluster: + owner: john.doe@example.com + name: production + address: https://kubernetes.default.svc +key1: val1 +key2: + key2_1: val2_1 + key2_2: + key2_2_1: val2_2_1 +`), + "cluster-config/staging/config.yaml": []byte(` +cluster: + owner: foo.bar@example.com + name: staging + address: https://kubernetes.default.svc +`), + }, + repoPathsError: nil, + expected: []map[string]interface{}{ + { + "cluster": map[string]interface{}{ + "owner": "john.doe@example.com", + "name": "production", + "address": "https://kubernetes.default.svc", + }, + "key1": "val1", + "key2": map[string]interface{}{ + "key2_1": "val2_1", + "key2_2": map[string]interface{}{ + "key2_2_1": "val2_2_1", + }, + }, + "path": map[string]interface{}{ + "path": "cluster-config/production", + "basename": "production", + "filename": "config.yaml", + "basenameNormalized": "production", + "filenameNormalized": "config.yaml", + "segments": []string{ + "cluster-config", + "production", + }, + }, + }, + { + "cluster": map[string]interface{}{ + "owner": "foo.bar@example.com", + "name": "staging", + "address": "https://kubernetes.default.svc", + }, + "path": map[string]interface{}{ + "path": "cluster-config/staging", + "basename": "staging", + "filename": "config.yaml", + "basenameNormalized": "staging", + "filenameNormalized": "config.yaml", + "segments": []string{ + "cluster-config", + "staging", + }, + }, + }, + }, + expectedError: nil, + }, + { + name: "test YAML 
array", + files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.yaml"}}, + repoFileContents: map[string][]byte{ + "cluster-config/production/config.yaml": []byte(` +- cluster: + owner: john.doe@example.com + name: production + address: https://kubernetes.default.svc + inner: + one: two +- cluster: + owner: john.doe@example.com + name: staging + address: https://kubernetes.default.svc`), + }, + repoPathsError: nil, + expected: []map[string]interface{}{ + { + "cluster": map[string]interface{}{ + "owner": "john.doe@example.com", + "name": "production", + "address": "https://kubernetes.default.svc", + "inner": map[string]interface{}{ + "one": "two", + }, + }, + "path": map[string]interface{}{ + "path": "cluster-config/production", + "basename": "production", + "filename": "config.yaml", + "basenameNormalized": "production", + "filenameNormalized": "config.yaml", + "segments": []string{ + "cluster-config", + "production", + }, + }, + }, + { + "cluster": map[string]interface{}{ + "owner": "john.doe@example.com", + "name": "staging", + "address": "https://kubernetes.default.svc", + }, + "path": map[string]interface{}{ + "path": "cluster-config/production", + "basename": "production", + "filename": "config.yaml", + "basenameNormalized": "production", + "filenameNormalized": "config.yaml", + "segments": []string{ + "cluster-config", + "production", + }, + }, + }, + }, + expectedError: nil, + }, + } + + for _, testCase := range cases { + testCaseCopy := testCase + + t.Run(testCaseCopy.name, func(t *testing.T) { + t.Parallel() + + argoCDServiceMock := mocks.Repos{} + argoCDServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
+				Return(testCaseCopy.repoFileContents, testCaseCopy.repoPathsError)
+
+			var gitGenerator = NewGitGenerator(&argoCDServiceMock)
+			applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "set",
+				},
+				Spec: argoprojiov1alpha1.ApplicationSetSpec{
+					GoTemplate: true,
+					Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{
+						Git: &argoprojiov1alpha1.GitGenerator{
+							RepoURL:  "RepoURL",
+							Revision: "Revision",
+							Files:    testCaseCopy.files,
+						},
+					}},
+				},
+			}
+
+			got, err := gitGenerator.GenerateParams(&applicationSetInfo.Spec.Generators[0], &applicationSetInfo)
+			fmt.Println(got, err)
+
+			if testCaseCopy.expectedError != nil {
+				assert.EqualError(t, err, testCaseCopy.expectedError.Error())
+			} else {
+				assert.NoError(t, err)
+				assert.ElementsMatch(t, testCaseCopy.expected, got)
+			}
+
+			argoCDServiceMock.AssertExpectations(t)
+		})
+	}
+}
diff --git a/applicationset/generators/interface.go b/applicationset/generators/interface.go
new file mode 100644
index 0000000000000..abb4830cf3fbe
--- /dev/null
+++ b/applicationset/generators/interface.go
@@ -0,0 +1,32 @@
+package generators
+
+import (
+	"fmt"
+	"time"
+
+	argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
+)
+
+// Generator defines the interface implemented by all ApplicationSet generators.
+type Generator interface {
+	// GenerateParams interprets the ApplicationSet and generates all relevant parameters for the application template.
+	// The expected / desired list of parameters is returned; it will then be rendered and reconciled
+	// against the current state of the Applications in the cluster.
+	GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, applicationSetInfo *argoprojiov1alpha1.ApplicationSet) ([]map[string]interface{}, error)
+
+	// GetRequeueAfter lets the generator control when the next reconcile loop runs.
+	// In case there is more than one generator, the time will be the minimum of the times.
+ // In case NoRequeueAfter is empty, it will be ignored + GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration + + // GetTemplate returns the inline template from the spec if there is any, or an empty object otherwise + GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate +} + +var EmptyAppSetGeneratorError = fmt.Errorf("ApplicationSet is empty") +var NoRequeueAfter time.Duration + +// DefaultRequeueAfterSeconds is used when GetRequeueAfter is not specified, it is the default time to wait before the next reconcile loop +const ( + DefaultRequeueAfterSeconds = 3 * time.Minute +) diff --git a/applicationset/generators/list.go b/applicationset/generators/list.go new file mode 100644 index 0000000000000..b3afabe6dac7d --- /dev/null +++ b/applicationset/generators/list.go @@ -0,0 +1,90 @@ +package generators + +import ( + "encoding/json" + "fmt" + "time" + + "sigs.k8s.io/yaml" + + argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +var _ Generator = (*ListGenerator)(nil) + +type ListGenerator struct { +} + +func NewListGenerator() Generator { + g := &ListGenerator{} + return g +} + +func (g *ListGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration { + return NoRequeueAfter +} + +func (g *ListGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate { + return &appSetGenerator.List.Template +} + +func (g *ListGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, appSet *argoprojiov1alpha1.ApplicationSet) ([]map[string]interface{}, error) { + if appSetGenerator == nil { + return nil, EmptyAppSetGeneratorError + } + + if appSetGenerator.List == nil { + return nil, EmptyAppSetGeneratorError + } + + res := make([]map[string]interface{}, len(appSetGenerator.List.Elements)) + + for i, tmpItem 
:= range appSetGenerator.List.Elements { + params := map[string]interface{}{} + var element map[string]interface{} + err := json.Unmarshal(tmpItem.Raw, &element) + if err != nil { + return nil, fmt.Errorf("error unmarshling list element %v", err) + } + + if appSet.Spec.GoTemplate { + res[i] = element + } else { + for key, value := range element { + if key == "values" { + values, ok := (value).(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("error parsing values map") + } + for k, v := range values { + value, ok := v.(string) + if !ok { + return nil, fmt.Errorf("error parsing value as string %v", err) + } + params[fmt.Sprintf("values.%s", k)] = value + } + } else { + v, ok := value.(string) + if !ok { + return nil, fmt.Errorf("error parsing value as string %v", err) + } + params[key] = v + } + res[i] = params + } + } + } + + // Append elements from ElementsYaml to the response + if len(appSetGenerator.List.ElementsYaml) > 0 { + + var yamlElements []map[string]interface{} + err := yaml.Unmarshal([]byte(appSetGenerator.List.ElementsYaml), &yamlElements) + if err != nil { + return nil, fmt.Errorf("error unmarshling decoded ElementsYaml %v", err) + } + res = append(res, yamlElements...) 
+ } + + return res, nil +} diff --git a/applicationset/generators/list_test.go b/applicationset/generators/list_test.go new file mode 100644 index 0000000000000..39bdb06c06dd7 --- /dev/null +++ b/applicationset/generators/list_test.go @@ -0,0 +1,84 @@ +package generators + +import ( + "testing" + + "github.com/stretchr/testify/assert" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +func TestGenerateListParams(t *testing.T) { + testCases := []struct { + elements []apiextensionsv1.JSON + expected []map[string]interface{} + }{ + { + elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "cluster","url": "url"}`)}}, + expected: []map[string]interface{}{{"cluster": "cluster", "url": "url"}}, + }, { + elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "cluster","url": "url","values":{"foo":"bar"}}`)}}, + expected: []map[string]interface{}{{"cluster": "cluster", "url": "url", "values.foo": "bar"}}, + }, + } + + for _, testCase := range testCases { + + var listGenerator = NewListGenerator() + + applicationSetInfo := argoprojiov1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "set", + }, + Spec: argoprojiov1alpha1.ApplicationSetSpec{}, + } + + got, err := listGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{ + List: &argoprojiov1alpha1.ListGenerator{ + Elements: testCase.elements, + }}, &applicationSetInfo) + + assert.NoError(t, err) + assert.ElementsMatch(t, testCase.expected, got) + + } +} + +func TestGenerateListParamsGoTemplate(t *testing.T) { + testCases := []struct { + elements []apiextensionsv1.JSON + expected []map[string]interface{} + }{ + { + elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "cluster","url": "url"}`)}}, + expected: []map[string]interface{}{{"cluster": "cluster", "url": "url"}}, + }, { + elements: []apiextensionsv1.JSON{{Raw: 
[]byte(`{"cluster": "cluster","url": "url","values":{"foo":"bar"}}`)}}, + expected: []map[string]interface{}{{"cluster": "cluster", "url": "url", "values": map[string]interface{}{"foo": "bar"}}}, + }, + } + + for _, testCase := range testCases { + + var listGenerator = NewListGenerator() + + applicationSetInfo := argoprojiov1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "set", + }, + Spec: argoprojiov1alpha1.ApplicationSetSpec{ + GoTemplate: true, + }, + } + + got, err := listGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{ + List: &argoprojiov1alpha1.ListGenerator{ + Elements: testCase.elements, + }}, &applicationSetInfo) + + assert.NoError(t, err) + assert.ElementsMatch(t, testCase.expected, got) + } +} diff --git a/applicationset/generators/matrix.go b/applicationset/generators/matrix.go new file mode 100644 index 0000000000000..3edac086a4b3c --- /dev/null +++ b/applicationset/generators/matrix.go @@ -0,0 +1,191 @@ +package generators + +import ( + "fmt" + "time" + + "github.com/imdario/mergo" + + "github.com/argoproj/argo-cd/v2/applicationset/utils" + argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + + log "github.com/sirupsen/logrus" +) + +var _ Generator = (*MatrixGenerator)(nil) + +var ( + ErrMoreThanTwoGenerators = fmt.Errorf("found more than two generators, Matrix support only two") + ErrLessThanTwoGenerators = fmt.Errorf("found less than two generators, Matrix support only two") + ErrMoreThenOneInnerGenerators = fmt.Errorf("found more than one generator in matrix.Generators") +) + +type MatrixGenerator struct { + // The inner generators supported by the matrix generator (cluster, git, list...) 
+ supportedGenerators map[string]Generator +} + +func NewMatrixGenerator(supportedGenerators map[string]Generator) Generator { + m := &MatrixGenerator{ + supportedGenerators: supportedGenerators, + } + return m +} + +func (m *MatrixGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, appSet *argoprojiov1alpha1.ApplicationSet) ([]map[string]interface{}, error) { + + if appSetGenerator.Matrix == nil { + return nil, EmptyAppSetGeneratorError + } + + if len(appSetGenerator.Matrix.Generators) < 2 { + return nil, ErrLessThanTwoGenerators + } + + if len(appSetGenerator.Matrix.Generators) > 2 { + return nil, ErrMoreThanTwoGenerators + } + + res := []map[string]interface{}{} + + g0, err := m.getParams(appSetGenerator.Matrix.Generators[0], appSet, nil) + if err != nil { + return nil, fmt.Errorf("error failed to get params for first generator in matrix generator: %w", err) + } + for _, a := range g0 { + g1, err := m.getParams(appSetGenerator.Matrix.Generators[1], appSet, a) + if err != nil { + return nil, fmt.Errorf("failed to get params for second generator in the matrix generator: %w", err) + } + for _, b := range g1 { + + if appSet.Spec.GoTemplate { + tmp := map[string]interface{}{} + if err := mergo.Merge(&tmp, b, mergo.WithOverride); err != nil { + return nil, fmt.Errorf("failed to merge params from the second generator in the matrix generator with temp map: %w", err) + } + if err := mergo.Merge(&tmp, a, mergo.WithOverride); err != nil { + return nil, fmt.Errorf("failed to merge params from the second generator in the matrix generator with the first: %w", err) + } + res = append(res, tmp) + } else { + val, err := utils.CombineStringMaps(a, b) + if err != nil { + return nil, fmt.Errorf("failed to combine string maps with merging params for the matrix generator: %w", err) + } + res = append(res, utils.ConvertToMapStringInterface(val)) + } + } + } + + return res, nil +} + +func (m *MatrixGenerator) getParams(appSetBaseGenerator 
argoprojiov1alpha1.ApplicationSetNestedGenerator, appSet *argoprojiov1alpha1.ApplicationSet, params map[string]interface{}) ([]map[string]interface{}, error) { + matrixGen, err := getMatrixGenerator(appSetBaseGenerator) + if err != nil { + return nil, err + } + if matrixGen != nil && !appSet.Spec.ApplyNestedSelectors { + foundSelector := dropDisabledNestedSelectors(matrixGen.Generators) + if foundSelector { + log.Warnf("AppSet '%v' defines selector on nested matrix generator's generator without enabling them via 'spec.applyNestedSelectors', ignoring nested selectors", appSet.Name) + } + } + mergeGen, err := getMergeGenerator(appSetBaseGenerator) + if err != nil { + return nil, fmt.Errorf("error retrieving merge generator: %w", err) + } + if mergeGen != nil && !appSet.Spec.ApplyNestedSelectors { + foundSelector := dropDisabledNestedSelectors(mergeGen.Generators) + if foundSelector { + log.Warnf("AppSet '%v' defines selector on nested merge generator's generator without enabling them via 'spec.applyNestedSelectors', ignoring nested selectors", appSet.Name) + } + } + + t, err := Transform( + argoprojiov1alpha1.ApplicationSetGenerator{ + List: appSetBaseGenerator.List, + Clusters: appSetBaseGenerator.Clusters, + Git: appSetBaseGenerator.Git, + SCMProvider: appSetBaseGenerator.SCMProvider, + ClusterDecisionResource: appSetBaseGenerator.ClusterDecisionResource, + PullRequest: appSetBaseGenerator.PullRequest, + Plugin: appSetBaseGenerator.Plugin, + Matrix: matrixGen, + Merge: mergeGen, + Selector: appSetBaseGenerator.Selector, + }, + m.supportedGenerators, + argoprojiov1alpha1.ApplicationSetTemplate{}, + appSet, + params) + + if err != nil { + return nil, fmt.Errorf("child generator returned an error on parameter generation: %v", err) + } + + if len(t) == 0 { + return nil, fmt.Errorf("child generator generated no parameters") + } + + if len(t) > 1 { + return nil, ErrMoreThenOneInnerGenerators + } + + return t[0].Params, nil +} + +const maxDuration time.Duration = 1<<63 - 
1 + +func (m *MatrixGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration { + res := maxDuration + var found bool + + for _, r := range appSetGenerator.Matrix.Generators { + matrixGen, _ := getMatrixGenerator(r) + mergeGen, _ := getMergeGenerator(r) + base := &argoprojiov1alpha1.ApplicationSetGenerator{ + List: r.List, + Clusters: r.Clusters, + Git: r.Git, + PullRequest: r.PullRequest, + Plugin: r.Plugin, + SCMProvider: r.SCMProvider, + ClusterDecisionResource: r.ClusterDecisionResource, + Matrix: matrixGen, + Merge: mergeGen, + } + generators := GetRelevantGenerators(base, m.supportedGenerators) + + for _, g := range generators { + temp := g.GetRequeueAfter(base) + if temp < res && temp != NoRequeueAfter { + found = true + res = temp + } + } + } + + if found { + return res + } else { + return NoRequeueAfter + } + +} + +func getMatrixGenerator(r argoprojiov1alpha1.ApplicationSetNestedGenerator) (*argoprojiov1alpha1.MatrixGenerator, error) { + if r.Matrix == nil { + return nil, nil + } + matrix, err := argoprojiov1alpha1.ToNestedMatrixGenerator(r.Matrix) + if err != nil { + return nil, err + } + return matrix.ToMatrixGenerator(), nil +} + +func (m *MatrixGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate { + return &appSetGenerator.Matrix.Template +} diff --git a/applicationset/generators/matrix_test.go b/applicationset/generators/matrix_test.go new file mode 100644 index 0000000000000..35748b98bcf19 --- /dev/null +++ b/applicationset/generators/matrix_test.go @@ -0,0 +1,1151 @@ +package generators + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + kubefake "k8s.io/client-go/kubernetes/fake" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + 
"github.com/argoproj/argo-cd/v2/applicationset/services/mocks" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + + argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +func TestMatrixGenerate(t *testing.T) { + + gitGenerator := &argoprojiov1alpha1.GitGenerator{ + RepoURL: "RepoURL", + Revision: "Revision", + Directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}}, + } + + listGenerator := &argoprojiov1alpha1.ListGenerator{ + Elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "Cluster","url": "Url", "templated": "test-{{path.basenameNormalized}}"}`)}}, + } + + testCases := []struct { + name string + baseGenerators []argoprojiov1alpha1.ApplicationSetNestedGenerator + expectedErr error + expected []map[string]interface{} + }{ + { + name: "happy flow - generate params", + baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + { + Git: gitGenerator, + }, + { + List: listGenerator, + }, + }, + expected: []map[string]interface{}{ + {"path": "app1", "path.basename": "app1", "path.basenameNormalized": "app1", "cluster": "Cluster", "url": "Url", "templated": "test-app1"}, + {"path": "app2", "path.basename": "app2", "path.basenameNormalized": "app2", "cluster": "Cluster", "url": "Url", "templated": "test-app2"}, + }, + }, + { + name: "happy flow - generate params from two lists", + baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + { + List: &argoprojiov1alpha1.ListGenerator{ + Elements: []apiextensionsv1.JSON{ + {Raw: []byte(`{"a": "1"}`)}, + {Raw: []byte(`{"a": "2"}`)}, + }, + }, + }, + { + List: &argoprojiov1alpha1.ListGenerator{ + Elements: []apiextensionsv1.JSON{ + {Raw: []byte(`{"b": "1"}`)}, + {Raw: []byte(`{"b": "2"}`)}, + }, + }, + }, + }, + expected: []map[string]interface{}{ + {"a": "1", "b": "1"}, + {"a": "1", "b": "2"}, + {"a": "2", "b": "1"}, + {"a": "2", "b": 
"2"}, + }, + }, + { + name: "returns error if there is less than two base generators", + baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + { + Git: gitGenerator, + }, + }, + expectedErr: ErrLessThanTwoGenerators, + }, + { + name: "returns error if there is more than two base generators", + baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + { + List: listGenerator, + }, + { + List: listGenerator, + }, + { + List: listGenerator, + }, + }, + expectedErr: ErrMoreThanTwoGenerators, + }, + { + name: "returns error if there is more than one inner generator in the first base generator", + baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + { + Git: gitGenerator, + List: listGenerator, + }, + { + Git: gitGenerator, + }, + }, + expectedErr: ErrMoreThenOneInnerGenerators, + }, + { + name: "returns error if there is more than one inner generator in the second base generator", + baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + { + List: listGenerator, + }, + { + Git: gitGenerator, + List: listGenerator, + }, + }, + expectedErr: ErrMoreThenOneInnerGenerators, + }, + } + + for _, testCase := range testCases { + testCaseCopy := testCase // Since tests may run in parallel + + t.Run(testCaseCopy.name, func(t *testing.T) { + genMock := &generatorMock{} + appSet := &argoprojiov1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "set", + }, + Spec: argoprojiov1alpha1.ApplicationSetSpec{}, + } + + for _, g := range testCaseCopy.baseGenerators { + + gitGeneratorSpec := argoprojiov1alpha1.ApplicationSetGenerator{ + Git: g.Git, + List: g.List, + } + genMock.On("GenerateParams", mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator"), appSet).Return([]map[string]interface{}{ + { + "path": "app1", + "path.basename": "app1", + "path.basenameNormalized": "app1", + }, + { + "path": "app2", + "path.basename": "app2", + "path.basenameNormalized": "app2", + }, + }, nil) + + genMock.On("GetTemplate", 
&gitGeneratorSpec). + Return(&argoprojiov1alpha1.ApplicationSetTemplate{}) + } + + var matrixGenerator = NewMatrixGenerator( + map[string]Generator{ + "Git": genMock, + "List": &ListGenerator{}, + }, + ) + + got, err := matrixGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{ + Matrix: &argoprojiov1alpha1.MatrixGenerator{ + Generators: testCaseCopy.baseGenerators, + Template: argoprojiov1alpha1.ApplicationSetTemplate{}, + }, + }, appSet) + + if testCaseCopy.expectedErr != nil { + assert.ErrorIs(t, err, testCaseCopy.expectedErr) + } else { + assert.NoError(t, err) + assert.Equal(t, testCaseCopy.expected, got) + } + + }) + + } +} + +func TestMatrixGenerateGoTemplate(t *testing.T) { + + gitGenerator := &argoprojiov1alpha1.GitGenerator{ + RepoURL: "RepoURL", + Revision: "Revision", + Directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}}, + } + + listGenerator := &argoprojiov1alpha1.ListGenerator{ + Elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "Cluster","url": "Url"}`)}}, + } + + testCases := []struct { + name string + baseGenerators []argoprojiov1alpha1.ApplicationSetNestedGenerator + expectedErr error + expected []map[string]interface{} + }{ + { + name: "happy flow - generate params", + baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + { + Git: gitGenerator, + }, + { + List: listGenerator, + }, + }, + expected: []map[string]interface{}{ + { + "path": map[string]string{ + "path": "app1", + "basename": "app1", + "basenameNormalized": "app1", + }, + "cluster": "Cluster", + "url": "Url", + }, + { + "path": map[string]string{ + "path": "app2", + "basename": "app2", + "basenameNormalized": "app2", + }, + "cluster": "Cluster", + "url": "Url", + }, + }, + }, + { + name: "happy flow - generate params from two lists", + baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + { + List: &argoprojiov1alpha1.ListGenerator{ + Elements: []apiextensionsv1.JSON{ + {Raw: []byte(`{"a": "1"}`)}, + {Raw: 
[]byte(`{"a": "2"}`)}, + }, + }, + }, + { + List: &argoprojiov1alpha1.ListGenerator{ + Elements: []apiextensionsv1.JSON{ + {Raw: []byte(`{"b": "1"}`)}, + {Raw: []byte(`{"b": "2"}`)}, + }, + }, + }, + }, + expected: []map[string]interface{}{ + {"a": "1", "b": "1"}, + {"a": "1", "b": "2"}, + {"a": "2", "b": "1"}, + {"a": "2", "b": "2"}, + }, + }, + { + name: "parameter override: first list elements take precedence", + baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + { + List: &argoprojiov1alpha1.ListGenerator{ + Elements: []apiextensionsv1.JSON{ + {Raw: []byte(`{"booleanFalse": false, "booleanTrue": true, "stringFalse": "false", "stringTrue": "true"}`)}, + }, + }, + }, + { + List: &argoprojiov1alpha1.ListGenerator{ + Elements: []apiextensionsv1.JSON{ + {Raw: []byte(`{"booleanFalse": true, "booleanTrue": false, "stringFalse": "true", "stringTrue": "false"}`)}, + }, + }, + }, + }, + expected: []map[string]interface{}{ + {"booleanFalse": false, "booleanTrue": true, "stringFalse": "false", "stringTrue": "true"}, + }, + }, + { + name: "returns error if there is less than two base generators", + baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + { + Git: gitGenerator, + }, + }, + expectedErr: ErrLessThanTwoGenerators, + }, + { + name: "returns error if there is more than two base generators", + baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + { + List: listGenerator, + }, + { + List: listGenerator, + }, + { + List: listGenerator, + }, + }, + expectedErr: ErrMoreThanTwoGenerators, + }, + { + name: "returns error if there is more than one inner generator in the first base generator", + baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + { + Git: gitGenerator, + List: listGenerator, + }, + { + Git: gitGenerator, + }, + }, + expectedErr: ErrMoreThenOneInnerGenerators, + }, + { + name: "returns error if there is more than one inner generator in the second base generator", + baseGenerators: 
[]argoprojiov1alpha1.ApplicationSetNestedGenerator{ + { + List: listGenerator, + }, + { + Git: gitGenerator, + List: listGenerator, + }, + }, + expectedErr: ErrMoreThenOneInnerGenerators, + }, + } + + for _, testCase := range testCases { + testCaseCopy := testCase // Since tests may run in parallel + + t.Run(testCaseCopy.name, func(t *testing.T) { + genMock := &generatorMock{} + appSet := &argoprojiov1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "set", + }, + Spec: argoprojiov1alpha1.ApplicationSetSpec{ + GoTemplate: true, + }, + } + + for _, g := range testCaseCopy.baseGenerators { + + gitGeneratorSpec := argoprojiov1alpha1.ApplicationSetGenerator{ + Git: g.Git, + List: g.List, + } + genMock.On("GenerateParams", mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator"), appSet).Return([]map[string]interface{}{ + { + "path": map[string]string{ + "path": "app1", + "basename": "app1", + "basenameNormalized": "app1", + }, + }, + { + "path": map[string]string{ + "path": "app2", + "basename": "app2", + "basenameNormalized": "app2", + }, + }, + }, nil) + + genMock.On("GetTemplate", &gitGeneratorSpec). 
+ Return(&argoprojiov1alpha1.ApplicationSetTemplate{}) + } + + var matrixGenerator = NewMatrixGenerator( + map[string]Generator{ + "Git": genMock, + "List": &ListGenerator{}, + }, + ) + + got, err := matrixGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{ + Matrix: &argoprojiov1alpha1.MatrixGenerator{ + Generators: testCaseCopy.baseGenerators, + Template: argoprojiov1alpha1.ApplicationSetTemplate{}, + }, + }, appSet) + + if testCaseCopy.expectedErr != nil { + assert.ErrorIs(t, err, testCaseCopy.expectedErr) + } else { + assert.NoError(t, err) + assert.Equal(t, testCaseCopy.expected, got) + } + + }) + + } +} + +func TestMatrixGetRequeueAfter(t *testing.T) { + + gitGenerator := &argoprojiov1alpha1.GitGenerator{ + RepoURL: "RepoURL", + Revision: "Revision", + Directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}}, + } + + listGenerator := &argoprojiov1alpha1.ListGenerator{ + Elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "Cluster","url": "Url"}`)}}, + } + + pullRequestGenerator := &argoprojiov1alpha1.PullRequestGenerator{} + + scmGenerator := &argoprojiov1alpha1.SCMProviderGenerator{} + + duckTypeGenerator := &argoprojiov1alpha1.DuckTypeGenerator{} + + testCases := []struct { + name string + baseGenerators []argoprojiov1alpha1.ApplicationSetNestedGenerator + gitGetRequeueAfter time.Duration + expected time.Duration + }{ + { + name: "return NoRequeueAfter if all the inner baseGenerators returns it", + baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + { + Git: gitGenerator, + }, + { + List: listGenerator, + }, + }, + gitGetRequeueAfter: NoRequeueAfter, + expected: NoRequeueAfter, + }, + { + name: "returns the minimal time", + baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + { + Git: gitGenerator, + }, + { + List: listGenerator, + }, + }, + gitGetRequeueAfter: time.Duration(1), + expected: time.Duration(1), + }, + { + name: "returns the minimal time for pull request", + 
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + { + Git: gitGenerator, + }, + { + PullRequest: pullRequestGenerator, + }, + }, + gitGetRequeueAfter: time.Duration(15 * time.Second), + expected: time.Duration(15 * time.Second), + }, + { + name: "returns the default time if no requeueAfterSeconds is provided", + baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + { + Git: gitGenerator, + }, + { + PullRequest: pullRequestGenerator, + }, + }, + expected: time.Duration(30 * time.Minute), + }, + { + name: "returns the default time for duck type generator", + baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + { + Git: gitGenerator, + }, + { + ClusterDecisionResource: duckTypeGenerator, + }, + }, + expected: time.Duration(3 * time.Minute), + }, + { + name: "returns the default time for scm generator", + baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + { + Git: gitGenerator, + }, + { + SCMProvider: scmGenerator, + }, + }, + expected: time.Duration(30 * time.Minute), + }, + } + + for _, testCase := range testCases { + testCaseCopy := testCase // Since tests may run in parallel + + t.Run(testCaseCopy.name, func(t *testing.T) { + mock := &generatorMock{} + + for _, g := range testCaseCopy.baseGenerators { + gitGeneratorSpec := argoprojiov1alpha1.ApplicationSetGenerator{ + Git: g.Git, + List: g.List, + PullRequest: g.PullRequest, + SCMProvider: g.SCMProvider, + ClusterDecisionResource: g.ClusterDecisionResource, + } + mock.On("GetRequeueAfter", &gitGeneratorSpec).Return(testCaseCopy.gitGetRequeueAfter, nil) + } + + var matrixGenerator = NewMatrixGenerator( + map[string]Generator{ + "Git": mock, + "List": &ListGenerator{}, + "PullRequest": &PullRequestGenerator{}, + "SCMProvider": &SCMProviderGenerator{}, + "ClusterDecisionResource": &DuckTypeGenerator{}, + }, + ) + + got := matrixGenerator.GetRequeueAfter(&argoprojiov1alpha1.ApplicationSetGenerator{ + Matrix: &argoprojiov1alpha1.MatrixGenerator{ + 
Generators: testCaseCopy.baseGenerators, + Template: argoprojiov1alpha1.ApplicationSetTemplate{}, + }, + }) + + assert.Equal(t, testCaseCopy.expected, got) + + }) + + } +} + +func TestInterpolatedMatrixGenerate(t *testing.T) { + interpolatedGitGenerator := &argoprojiov1alpha1.GitGenerator{ + RepoURL: "RepoURL", + Revision: "Revision", + Files: []argoprojiov1alpha1.GitFileGeneratorItem{ + {Path: "examples/git-generator-files-discovery/cluster-config/dev/config.json"}, + {Path: "examples/git-generator-files-discovery/cluster-config/prod/config.json"}, + }, + } + + interpolatedClusterGenerator := &argoprojiov1alpha1.ClusterGenerator{ + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{"environment": "{{path.basename}}"}, + MatchExpressions: nil, + }, + } + testCases := []struct { + name string + baseGenerators []argoprojiov1alpha1.ApplicationSetNestedGenerator + expectedErr error + expected []map[string]interface{} + clientError bool + }{ + { + name: "happy flow - generate interpolated params", + baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + { + Git: interpolatedGitGenerator, + }, + { + Clusters: interpolatedClusterGenerator, + }, + }, + expected: []map[string]interface{}{ + {"path": "examples/git-generator-files-discovery/cluster-config/dev/config.json", "path.basename": "dev", "path.basenameNormalized": "dev", "name": "dev-01", "nameNormalized": "dev-01", "server": "https://dev-01.example.com", "metadata.labels.environment": "dev", "metadata.labels.argocd.argoproj.io/secret-type": "cluster"}, + {"path": "examples/git-generator-files-discovery/cluster-config/prod/config.json", "path.basename": "prod", "path.basenameNormalized": "prod", "name": "prod-01", "nameNormalized": "prod-01", "server": "https://prod-01.example.com", "metadata.labels.environment": "prod", "metadata.labels.argocd.argoproj.io/secret-type": "cluster"}, + }, + clientError: false, + }, + } + clusters := []client.Object{ + &corev1.Secret{ + TypeMeta: 
metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "dev-01", + Namespace: "namespace", + Labels: map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + "environment": "dev", + }, + }, + Data: map[string][]byte{ + "config": []byte("{}"), + "name": []byte("dev-01"), + "server": []byte("https://dev-01.example.com"), + }, + Type: corev1.SecretType("Opaque"), + }, + &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "prod-01", + Namespace: "namespace", + Labels: map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + "environment": "prod", + }, + }, + Data: map[string][]byte{ + "config": []byte("{}"), + "name": []byte("prod-01"), + "server": []byte("https://prod-01.example.com"), + }, + Type: corev1.SecretType("Opaque"), + }, + } + // convert []client.Object to []runtime.Object, for use by kubefake package + runtimeClusters := []runtime.Object{} + for _, clientCluster := range clusters { + runtimeClusters = append(runtimeClusters, clientCluster) + } + + for _, testCase := range testCases { + testCaseCopy := testCase // Since tests may run in parallel + + t.Run(testCaseCopy.name, func(t *testing.T) { + genMock := &generatorMock{} + appSet := &argoprojiov1alpha1.ApplicationSet{} + + appClientset := kubefake.NewSimpleClientset(runtimeClusters...) 
+ fakeClient := fake.NewClientBuilder().WithObjects(clusters...).Build() + cl := &possiblyErroringFakeCtrlRuntimeClient{ + fakeClient, + testCase.clientError, + } + var clusterGenerator = NewClusterGenerator(cl, context.Background(), appClientset, "namespace") + + for _, g := range testCaseCopy.baseGenerators { + + gitGeneratorSpec := argoprojiov1alpha1.ApplicationSetGenerator{ + Git: g.Git, + Clusters: g.Clusters, + } + genMock.On("GenerateParams", mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator"), appSet).Return([]map[string]interface{}{ + { + "path": "examples/git-generator-files-discovery/cluster-config/dev/config.json", + "path.basename": "dev", + "path.basenameNormalized": "dev", + }, + { + "path": "examples/git-generator-files-discovery/cluster-config/prod/config.json", + "path.basename": "prod", + "path.basenameNormalized": "prod", + }, + }, nil) + genMock.On("GetTemplate", &gitGeneratorSpec). + Return(&argoprojiov1alpha1.ApplicationSetTemplate{}) + } + var matrixGenerator = NewMatrixGenerator( + map[string]Generator{ + "Git": genMock, + "Clusters": clusterGenerator, + }, + ) + + got, err := matrixGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{ + Matrix: &argoprojiov1alpha1.MatrixGenerator{ + Generators: testCaseCopy.baseGenerators, + Template: argoprojiov1alpha1.ApplicationSetTemplate{}, + }, + }, appSet) + + if testCaseCopy.expectedErr != nil { + assert.ErrorIs(t, err, testCaseCopy.expectedErr) + } else { + assert.NoError(t, err) + assert.Equal(t, testCaseCopy.expected, got) + } + + }) + } +} + +func TestInterpolatedMatrixGenerateGoTemplate(t *testing.T) { + interpolatedGitGenerator := &argoprojiov1alpha1.GitGenerator{ + RepoURL: "RepoURL", + Revision: "Revision", + Files: []argoprojiov1alpha1.GitFileGeneratorItem{ + {Path: "examples/git-generator-files-discovery/cluster-config/dev/config.json"}, + {Path: "examples/git-generator-files-discovery/cluster-config/prod/config.json"}, + }, + } + + interpolatedClusterGenerator := 
&argoprojiov1alpha1.ClusterGenerator{ + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{"environment": "{{.path.basename}}"}, + MatchExpressions: nil, + }, + } + testCases := []struct { + name string + baseGenerators []argoprojiov1alpha1.ApplicationSetNestedGenerator + expectedErr error + expected []map[string]interface{} + clientError bool + }{ + { + name: "happy flow - generate interpolated params", + baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + { + Git: interpolatedGitGenerator, + }, + { + Clusters: interpolatedClusterGenerator, + }, + }, + expected: []map[string]interface{}{ + { + "path": map[string]string{ + "path": "examples/git-generator-files-discovery/cluster-config/dev/config.json", + "basename": "dev", + "basenameNormalized": "dev", + }, + "name": "dev-01", + "nameNormalized": "dev-01", + "server": "https://dev-01.example.com", + "metadata": map[string]interface{}{ + "labels": map[string]string{ + "environment": "dev", + "argocd.argoproj.io/secret-type": "cluster", + }, + }, + }, + { + "path": map[string]string{ + "path": "examples/git-generator-files-discovery/cluster-config/prod/config.json", + "basename": "prod", + "basenameNormalized": "prod", + }, + "name": "prod-01", + "nameNormalized": "prod-01", + "server": "https://prod-01.example.com", + "metadata": map[string]interface{}{ + "labels": map[string]string{ + "environment": "prod", + "argocd.argoproj.io/secret-type": "cluster", + }, + }, + }, + }, + clientError: false, + }, + } + clusters := []client.Object{ + &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "dev-01", + Namespace: "namespace", + Labels: map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + "environment": "dev", + }, + }, + Data: map[string][]byte{ + "config": []byte("{}"), + "name": []byte("dev-01"), + "server": []byte("https://dev-01.example.com"), + }, + Type: corev1.SecretType("Opaque"), + }, + 
&corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "prod-01", + Namespace: "namespace", + Labels: map[string]string{ + "argocd.argoproj.io/secret-type": "cluster", + "environment": "prod", + }, + }, + Data: map[string][]byte{ + "config": []byte("{}"), + "name": []byte("prod-01"), + "server": []byte("https://prod-01.example.com"), + }, + Type: corev1.SecretType("Opaque"), + }, + } + // convert []client.Object to []runtime.Object, for use by kubefake package + runtimeClusters := []runtime.Object{} + for _, clientCluster := range clusters { + runtimeClusters = append(runtimeClusters, clientCluster) + } + + for _, testCase := range testCases { + testCaseCopy := testCase // Since tests may run in parallel + + t.Run(testCaseCopy.name, func(t *testing.T) { + genMock := &generatorMock{} + appSet := &argoprojiov1alpha1.ApplicationSet{ + Spec: argoprojiov1alpha1.ApplicationSetSpec{ + GoTemplate: true, + }, + } + + appClientset := kubefake.NewSimpleClientset(runtimeClusters...) 
+ fakeClient := fake.NewClientBuilder().WithObjects(clusters...).Build() + cl := &possiblyErroringFakeCtrlRuntimeClient{ + fakeClient, + testCase.clientError, + } + var clusterGenerator = NewClusterGenerator(cl, context.Background(), appClientset, "namespace") + + for _, g := range testCaseCopy.baseGenerators { + + gitGeneratorSpec := argoprojiov1alpha1.ApplicationSetGenerator{ + Git: g.Git, + Clusters: g.Clusters, + } + genMock.On("GenerateParams", mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator"), appSet).Return([]map[string]interface{}{ + + { + "path": map[string]string{ + "path": "examples/git-generator-files-discovery/cluster-config/dev/config.json", + "basename": "dev", + "basenameNormalized": "dev", + }, + }, + { + "path": map[string]string{ + "path": "examples/git-generator-files-discovery/cluster-config/prod/config.json", + "basename": "prod", + "basenameNormalized": "prod", + }, + }, + }, nil) + genMock.On("GetTemplate", &gitGeneratorSpec). + Return(&argoprojiov1alpha1.ApplicationSetTemplate{}) + } + var matrixGenerator = NewMatrixGenerator( + map[string]Generator{ + "Git": genMock, + "Clusters": clusterGenerator, + }, + ) + + got, err := matrixGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{ + Matrix: &argoprojiov1alpha1.MatrixGenerator{ + Generators: testCaseCopy.baseGenerators, + Template: argoprojiov1alpha1.ApplicationSetTemplate{}, + }, + }, appSet) + + if testCaseCopy.expectedErr != nil { + assert.ErrorIs(t, err, testCaseCopy.expectedErr) + } else { + assert.NoError(t, err) + assert.Equal(t, testCaseCopy.expected, got) + } + + }) + + } +} + +func TestMatrixGenerateListElementsYaml(t *testing.T) { + + gitGenerator := &argoprojiov1alpha1.GitGenerator{ + RepoURL: "RepoURL", + Revision: "Revision", + Files: []argoprojiov1alpha1.GitFileGeneratorItem{ + {Path: "config.yaml"}, + }, + } + + listGenerator := &argoprojiov1alpha1.ListGenerator{ + Elements: []apiextensionsv1.JSON{}, + ElementsYaml: "{{ .foo.bar | toJson }}", + } + + 
testCases := []struct { + name string + baseGenerators []argoprojiov1alpha1.ApplicationSetNestedGenerator + expectedErr error + expected []map[string]interface{} + }{ + { + name: "happy flow - generate params", + baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + { + Git: gitGenerator, + }, + { + List: listGenerator, + }, + }, + expected: []map[string]interface{}{ + { + "chart": "a", + "version": "1", + "foo": map[string]interface{}{ + "bar": []interface{}{ + map[string]interface{}{ + "chart": "a", + "version": "1", + }, + map[string]interface{}{ + "chart": "b", + "version": "2", + }, + }, + }, + "path": map[string]interface{}{ + "basename": "dir", + "basenameNormalized": "dir", + "filename": "file_name.yaml", + "filenameNormalized": "file-name.yaml", + "path": "path/dir", + "segments": []string{ + "path", + "dir", + }, + }, + }, + { + "chart": "b", + "version": "2", + "foo": map[string]interface{}{ + "bar": []interface{}{ + map[string]interface{}{ + "chart": "a", + "version": "1", + }, + map[string]interface{}{ + "chart": "b", + "version": "2", + }, + }, + }, + "path": map[string]interface{}{ + "basename": "dir", + "basenameNormalized": "dir", + "filename": "file_name.yaml", + "filenameNormalized": "file-name.yaml", + "path": "path/dir", + "segments": []string{ + "path", + "dir", + }, + }, + }, + }, + }, + } + + for _, testCase := range testCases { + testCaseCopy := testCase // Since tests may run in parallel + + t.Run(testCaseCopy.name, func(t *testing.T) { + genMock := &generatorMock{} + appSet := &argoprojiov1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "set", + }, + Spec: argoprojiov1alpha1.ApplicationSetSpec{ + GoTemplate: true, + }, + } + + for _, g := range testCaseCopy.baseGenerators { + + gitGeneratorSpec := argoprojiov1alpha1.ApplicationSetGenerator{ + Git: g.Git, + List: g.List, + } + genMock.On("GenerateParams", mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator"), appSet).Return([]map[string]any{{ + "foo": 
map[string]interface{}{ + "bar": []interface{}{ + map[string]interface{}{ + "chart": "a", + "version": "1", + }, + map[string]interface{}{ + "chart": "b", + "version": "2", + }, + }, + }, + "path": map[string]interface{}{ + "basename": "dir", + "basenameNormalized": "dir", + "filename": "file_name.yaml", + "filenameNormalized": "file-name.yaml", + "path": "path/dir", + "segments": []string{ + "path", + "dir", + }, + }, + }}, nil) + genMock.On("GetTemplate", &gitGeneratorSpec). + Return(&argoprojiov1alpha1.ApplicationSetTemplate{}) + + } + + var matrixGenerator = NewMatrixGenerator( + map[string]Generator{ + "Git": genMock, + "List": &ListGenerator{}, + }, + ) + + got, err := matrixGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{ + Matrix: &argoprojiov1alpha1.MatrixGenerator{ + Generators: testCaseCopy.baseGenerators, + Template: argoprojiov1alpha1.ApplicationSetTemplate{}, + }, + }, appSet) + + if testCaseCopy.expectedErr != nil { + assert.ErrorIs(t, err, testCaseCopy.expectedErr) + } else { + assert.NoError(t, err) + assert.Equal(t, testCaseCopy.expected, got) + } + + }) + + } +} + +type generatorMock struct { + mock.Mock +} + +func (g *generatorMock) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate { + args := g.Called(appSetGenerator) + + return args.Get(0).(*argoprojiov1alpha1.ApplicationSetTemplate) +} + +func (g *generatorMock) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, appSet *argoprojiov1alpha1.ApplicationSet) ([]map[string]interface{}, error) { + args := g.Called(appSetGenerator, appSet) + + return args.Get(0).([]map[string]interface{}), args.Error(1) +} + +func (g *generatorMock) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration { + args := g.Called(appSetGenerator) + + return args.Get(0).(time.Duration) + +} + +func TestGitGenerator_GenerateParams_list_x_git_matrix_generator(t *testing.T) { + // 
Given a matrix generator over a list generator and a git files generator, the nested git files generator should + // be treated as a files generator, and it should produce parameters. + + // This tests for a specific bug where a nested git files generator was being treated as a directory generator. This + // happened because, when the matrix generator was being processed, the nested git files generator was being + // interpolated by the deeplyReplace function. That function cannot differentiate between a nil slice and an empty + // slice. So it was replacing the `Directories` field with an empty slice, which the ApplicationSet controller + // interpreted as meaning this was a directory generator, not a files generator. + + // Now instead of checking for nil, we check whether the field is a non-empty slice. This test prevents a regression + // of that bug. + + listGeneratorMock := &generatorMock{} + listGeneratorMock.On("GenerateParams", mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator"), mock.AnythingOfType("*v1alpha1.ApplicationSet")).Return([]map[string]interface{}{ + {"some": "value"}, + }, nil) + listGeneratorMock.On("GetTemplate", mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator")).Return(&argoprojiov1alpha1.ApplicationSetTemplate{}) + + gitGeneratorSpec := &argoprojiov1alpha1.GitGenerator{ + RepoURL: "https://git.example.com", + Files: []argoprojiov1alpha1.GitFileGeneratorItem{ + {Path: "some/path.json"}, + }, + } + + repoServiceMock := &mocks.Repos{} + repoServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(map[string][]byte{ + "some/path.json": []byte("test: content"), + }, nil) + gitGenerator := NewGitGenerator(repoServiceMock) + + matrixGenerator := NewMatrixGenerator(map[string]Generator{ + "List": listGeneratorMock, + "Git": gitGenerator, + }) + + matrixGeneratorSpec := &argoprojiov1alpha1.MatrixGenerator{ + Generators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + { + List: 
&argoprojiov1alpha1.ListGenerator{ + Elements: []apiextensionsv1.JSON{ + { + Raw: []byte(`{"some": "value"}`), + }, + }, + }, + }, + { + Git: gitGeneratorSpec, + }, + }, + } + params, err := matrixGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{ + Matrix: matrixGeneratorSpec, + }, &argoprojiov1alpha1.ApplicationSet{}) + require.NoError(t, err) + assert.Equal(t, []map[string]interface{}{{ + "path": "some", + "path.basename": "some", + "path.basenameNormalized": "some", + "path.filename": "path.json", + "path.filenameNormalized": "path.json", + "path[0]": "some", + "some": "value", + "test": "content", + }}, params) +} diff --git a/applicationset/generators/merge.go b/applicationset/generators/merge.go new file mode 100644 index 0000000000000..ebda7180df70f --- /dev/null +++ b/applicationset/generators/merge.go @@ -0,0 +1,247 @@ +package generators + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/imdario/mergo" + + "github.com/argoproj/argo-cd/v2/applicationset/utils" + argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + + log "github.com/sirupsen/logrus" +) + +var _ Generator = (*MergeGenerator)(nil) + +var ( + ErrLessThanTwoGeneratorsInMerge = fmt.Errorf("found less than two generators, Merge requires two or more") + ErrNoMergeKeys = fmt.Errorf("no merge keys were specified, Merge requires at least one") + ErrNonUniqueParamSets = fmt.Errorf("the parameters from a generator were not unique by the given mergeKeys, Merge requires all param sets to be unique") +) + +type MergeGenerator struct { + // The inner generators supported by the merge generator (cluster, git, list...) + supportedGenerators map[string]Generator +} + +// NewMergeGenerator returns a MergeGenerator which allows the given supportedGenerators as child generators. 
+func NewMergeGenerator(supportedGenerators map[string]Generator) Generator {
+	m := &MergeGenerator{
+		supportedGenerators: supportedGenerators,
+	}
+	return m
+}
+
+// getParamSetsForAllGenerators generates params for each child generator in a MergeGenerator. Param sets are returned
+// in slices ordered according to the order of the given generators.
+func (m *MergeGenerator) getParamSetsForAllGenerators(generators []argoprojiov1alpha1.ApplicationSetNestedGenerator, appSet *argoprojiov1alpha1.ApplicationSet) ([][]map[string]interface{}, error) {
+	var paramSets [][]map[string]interface{}
+	for i, generator := range generators {
+		generatorParamSets, err := m.getParams(generator, appSet)
+		if err != nil {
+			return nil, fmt.Errorf("error getting params from generator %d of %d: %w", i+1, len(generators), err)
+		}
+		// append this generator's param sets as one element; the outer slice preserves generator order
+		paramSets = append(paramSets, generatorParamSets)
+	}
+	return paramSets, nil
+}
+
+// GenerateParams gets the params produced by the MergeGenerator. 
+func (m *MergeGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, appSet *argoprojiov1alpha1.ApplicationSet) ([]map[string]interface{}, error) { + if appSetGenerator.Merge == nil { + return nil, EmptyAppSetGeneratorError + } + + if len(appSetGenerator.Merge.Generators) < 2 { + return nil, ErrLessThanTwoGeneratorsInMerge + } + + paramSetsFromGenerators, err := m.getParamSetsForAllGenerators(appSetGenerator.Merge.Generators, appSet) + if err != nil { + return nil, fmt.Errorf("error getting param sets from generators: %w", err) + } + + baseParamSetsByMergeKey, err := getParamSetsByMergeKey(appSetGenerator.Merge.MergeKeys, paramSetsFromGenerators[0]) + if err != nil { + return nil, fmt.Errorf("error getting param sets by merge key: %w", err) + } + + for _, paramSets := range paramSetsFromGenerators[1:] { + paramSetsByMergeKey, err := getParamSetsByMergeKey(appSetGenerator.Merge.MergeKeys, paramSets) + if err != nil { + return nil, fmt.Errorf("error getting param sets by merge key: %w", err) + } + + for mergeKeyValue, baseParamSet := range baseParamSetsByMergeKey { + if overrideParamSet, exists := paramSetsByMergeKey[mergeKeyValue]; exists { + + if appSet.Spec.GoTemplate { + if err := mergo.Merge(&baseParamSet, overrideParamSet, mergo.WithOverride); err != nil { + return nil, fmt.Errorf("error merging base param set with override param set: %w", err) + } + baseParamSetsByMergeKey[mergeKeyValue] = baseParamSet + } else { + overriddenParamSet, err := utils.CombineStringMapsAllowDuplicates(baseParamSet, overrideParamSet) + if err != nil { + return nil, fmt.Errorf("error combining string maps: %w", err) + } + baseParamSetsByMergeKey[mergeKeyValue] = utils.ConvertToMapStringInterface(overriddenParamSet) + } + } + } + } + + mergedParamSets := make([]map[string]interface{}, len(baseParamSetsByMergeKey)) + var i = 0 + for _, mergedParamSet := range baseParamSetsByMergeKey { + mergedParamSets[i] = mergedParamSet + i += 1 + } + + return 
mergedParamSets, nil
+}
+
+// getParamSetsByMergeKey converts the given list of parameter sets to a map of parameter sets where the key is the
+// unique key of the parameter set as determined by the given mergeKeys. If any two parameter sets share the same merge
+// key, getParamSetsByMergeKey returns ErrNonUniqueParamSets.
+func getParamSetsByMergeKey(mergeKeys []string, paramSets []map[string]interface{}) (map[string]map[string]interface{}, error) {
+	if len(mergeKeys) < 1 {
+		return nil, ErrNoMergeKeys
+	}
+
+	deDuplicatedMergeKeys := make(map[string]bool, len(mergeKeys))
+	for _, mergeKey := range mergeKeys {
+		deDuplicatedMergeKeys[mergeKey] = false
+	}
+
+	paramSetsByMergeKey := make(map[string]map[string]interface{}, len(paramSets))
+	for _, paramSet := range paramSets {
+		paramSetKey := make(map[string]interface{})
+		for mergeKey := range deDuplicatedMergeKeys {
+			paramSetKey[mergeKey] = paramSet[mergeKey]
+		}
+		paramSetKeyJson, err := json.Marshal(paramSetKey)
+		if err != nil {
+			return nil, fmt.Errorf("error marshalling param set key json: %w", err)
+		}
+		paramSetKeyString := string(paramSetKeyJson)
+		if _, exists := paramSetsByMergeKey[paramSetKeyString]; exists {
+			return nil, fmt.Errorf("%w. Duplicate key was %s", ErrNonUniqueParamSets, paramSetKeyString)
+		}
+		paramSetsByMergeKey[paramSetKeyString] = paramSet
+	}
+
+	return paramSetsByMergeKey, nil
+}
+
+// getParams gets the parameters generated by this generator. 
+func (m *MergeGenerator) getParams(appSetBaseGenerator argoprojiov1alpha1.ApplicationSetNestedGenerator, appSet *argoprojiov1alpha1.ApplicationSet) ([]map[string]interface{}, error) { + matrixGen, err := getMatrixGenerator(appSetBaseGenerator) + if err != nil { + return nil, err + } + if matrixGen != nil && !appSet.Spec.ApplyNestedSelectors { + foundSelector := dropDisabledNestedSelectors(matrixGen.Generators) + if foundSelector { + log.Warnf("AppSet '%v' defines selector on nested matrix generator's generator without enabling them via 'spec.applyNestedSelectors', ignoring nested selector", appSet.Name) + } + } + mergeGen, err := getMergeGenerator(appSetBaseGenerator) + if err != nil { + return nil, err + } + if mergeGen != nil && !appSet.Spec.ApplyNestedSelectors { + foundSelector := dropDisabledNestedSelectors(mergeGen.Generators) + if foundSelector { + log.Warnf("AppSet '%v' defines selector on nested merge generator's generator without enabling them via 'spec.applyNestedSelectors', ignoring nested selector", appSet.Name) + } + } + + t, err := Transform( + argoprojiov1alpha1.ApplicationSetGenerator{ + List: appSetBaseGenerator.List, + Clusters: appSetBaseGenerator.Clusters, + Git: appSetBaseGenerator.Git, + SCMProvider: appSetBaseGenerator.SCMProvider, + ClusterDecisionResource: appSetBaseGenerator.ClusterDecisionResource, + PullRequest: appSetBaseGenerator.PullRequest, + Plugin: appSetBaseGenerator.Plugin, + Matrix: matrixGen, + Merge: mergeGen, + Selector: appSetBaseGenerator.Selector, + }, + m.supportedGenerators, + argoprojiov1alpha1.ApplicationSetTemplate{}, + appSet, + map[string]interface{}{}) + + if err != nil { + return nil, fmt.Errorf("child generator returned an error on parameter generation: %v", err) + } + + if len(t) == 0 { + return nil, fmt.Errorf("child generator generated no parameters") + } + + if len(t) > 1 { + return nil, ErrMoreThenOneInnerGenerators + } + + return t[0].Params, nil +} + +func (m *MergeGenerator) 
GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration { + res := maxDuration + var found bool + + for _, r := range appSetGenerator.Merge.Generators { + matrixGen, _ := getMatrixGenerator(r) + mergeGen, _ := getMergeGenerator(r) + base := &argoprojiov1alpha1.ApplicationSetGenerator{ + List: r.List, + Clusters: r.Clusters, + Git: r.Git, + PullRequest: r.PullRequest, + Plugin: r.Plugin, + SCMProvider: r.SCMProvider, + ClusterDecisionResource: r.ClusterDecisionResource, + Matrix: matrixGen, + Merge: mergeGen, + } + generators := GetRelevantGenerators(base, m.supportedGenerators) + + for _, g := range generators { + temp := g.GetRequeueAfter(base) + if temp < res && temp != NoRequeueAfter { + found = true + res = temp + } + } + } + + if found { + return res + } else { + return NoRequeueAfter + } + +} + +func getMergeGenerator(r argoprojiov1alpha1.ApplicationSetNestedGenerator) (*argoprojiov1alpha1.MergeGenerator, error) { + if r.Merge == nil { + return nil, nil + } + merge, err := argoprojiov1alpha1.ToNestedMergeGenerator(r.Merge) + if err != nil { + return nil, fmt.Errorf("error converting to nested merge generator: %w", err) + } + return merge.ToMergeGenerator(), nil +} + +// GetTemplate gets the Template field for the MergeGenerator. 
+func (m *MergeGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate { + return &appSetGenerator.Merge.Template +} diff --git a/applicationset/generators/merge_test.go b/applicationset/generators/merge_test.go new file mode 100644 index 0000000000000..454b1884190a3 --- /dev/null +++ b/applicationset/generators/merge_test.go @@ -0,0 +1,351 @@ +package generators + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + + argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +func getNestedListGenerator(json string) *argoprojiov1alpha1.ApplicationSetNestedGenerator { + return &argoprojiov1alpha1.ApplicationSetNestedGenerator{ + List: &argoprojiov1alpha1.ListGenerator{ + Elements: []apiextensionsv1.JSON{{Raw: []byte(json)}}, + }, + } +} + +func getTerminalListGeneratorMultiple(jsons []string) argoprojiov1alpha1.ApplicationSetTerminalGenerator { + elements := make([]apiextensionsv1.JSON, len(jsons)) + + for i, json := range jsons { + elements[i] = apiextensionsv1.JSON{Raw: []byte(json)} + } + + generator := argoprojiov1alpha1.ApplicationSetTerminalGenerator{ + List: &argoprojiov1alpha1.ListGenerator{ + Elements: elements, + }, + } + + return generator +} + +func listOfMapsToSet(maps []map[string]interface{}) (map[string]bool, error) { + set := make(map[string]bool, len(maps)) + for _, paramMap := range maps { + paramMapAsJson, err := json.Marshal(paramMap) + if err != nil { + return nil, err + } + + set[string(paramMapAsJson)] = false + } + return set, nil +} + +func TestMergeGenerate(t *testing.T) { + + testCases := []struct { + name string + baseGenerators []argoprojiov1alpha1.ApplicationSetNestedGenerator + mergeKeys []string + expectedErr error + expected []map[string]interface{} + }{ + { + name: "no generators", + baseGenerators: 
[]argoprojiov1alpha1.ApplicationSetNestedGenerator{}, + mergeKeys: []string{"b"}, + expectedErr: ErrLessThanTwoGeneratorsInMerge, + }, + { + name: "one generator", + baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + *getNestedListGenerator(`{"a": "1_1","b": "same","c": "1_3"}`), + }, + mergeKeys: []string{"b"}, + expectedErr: ErrLessThanTwoGeneratorsInMerge, + }, + { + name: "happy flow - generate paramSets", + baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + *getNestedListGenerator(`{"a": "1_1","b": "same","c": "1_3"}`), + *getNestedListGenerator(`{"a": "2_1","b": "same"}`), + *getNestedListGenerator(`{"a": "3_1","b": "different","c": "3_3"}`), // gets ignored because its merge key value isn't in the base params set + }, + mergeKeys: []string{"b"}, + expected: []map[string]interface{}{ + {"a": "2_1", "b": "same", "c": "1_3"}, + }, + }, + { + name: "merge keys absent - do not merge", + baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + *getNestedListGenerator(`{"a": "a"}`), + *getNestedListGenerator(`{"a": "a"}`), + }, + mergeKeys: []string{"b"}, + expected: []map[string]interface{}{ + {"a": "a"}, + }, + }, + { + name: "merge key present in first set, absent in second - do not merge", + baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + *getNestedListGenerator(`{"a": "a"}`), + *getNestedListGenerator(`{"b": "b"}`), + }, + mergeKeys: []string{"b"}, + expected: []map[string]interface{}{ + {"a": "a"}, + }, + }, + { + name: "merge nested matrix with some lists", + baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + { + Matrix: toAPIExtensionsJSON(t, &argoprojiov1alpha1.NestedMatrixGenerator{ + Generators: []argoprojiov1alpha1.ApplicationSetTerminalGenerator{ + getTerminalListGeneratorMultiple([]string{`{"a": "1"}`, `{"a": "2"}`}), + getTerminalListGeneratorMultiple([]string{`{"b": "1"}`, `{"b": "2"}`}), + }, + }), + }, + *getNestedListGenerator(`{"a": "1", "b": "1", "c": 
"added"}`), + }, + mergeKeys: []string{"a", "b"}, + expected: []map[string]interface{}{ + {"a": "1", "b": "1", "c": "added"}, + {"a": "1", "b": "2"}, + {"a": "2", "b": "1"}, + {"a": "2", "b": "2"}, + }, + }, + { + name: "merge nested merge with some lists", + baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{ + { + Merge: toAPIExtensionsJSON(t, &argoprojiov1alpha1.NestedMergeGenerator{ + MergeKeys: []string{"a"}, + Generators: []argoprojiov1alpha1.ApplicationSetTerminalGenerator{ + getTerminalListGeneratorMultiple([]string{`{"a": "1", "b": "1"}`, `{"a": "2", "b": "2"}`}), + getTerminalListGeneratorMultiple([]string{`{"a": "1", "b": "3", "c": "added"}`, `{"a": "3", "b": "2"}`}), // First gets merged, second gets ignored + }, + }), + }, + *getNestedListGenerator(`{"a": "1", "b": "3", "d": "added"}`), + }, + mergeKeys: []string{"a", "b"}, + expected: []map[string]interface{}{ + {"a": "1", "b": "3", "c": "added", "d": "added"}, + {"a": "2", "b": "2"}, + }, + }, + } + + for _, testCase := range testCases { + testCaseCopy := testCase // since tests may run in parallel + + t.Run(testCaseCopy.name, func(t *testing.T) { + t.Parallel() + + appSet := &argoprojiov1alpha1.ApplicationSet{} + + var mergeGenerator = NewMergeGenerator( + map[string]Generator{ + "List": &ListGenerator{}, + "Matrix": &MatrixGenerator{ + supportedGenerators: map[string]Generator{ + "List": &ListGenerator{}, + }, + }, + "Merge": &MergeGenerator{ + supportedGenerators: map[string]Generator{ + "List": &ListGenerator{}, + }, + }, + }, + ) + + got, err := mergeGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{ + Merge: &argoprojiov1alpha1.MergeGenerator{ + Generators: testCaseCopy.baseGenerators, + MergeKeys: testCaseCopy.mergeKeys, + Template: argoprojiov1alpha1.ApplicationSetTemplate{}, + }, + }, appSet) + + if testCaseCopy.expectedErr != nil { + assert.EqualError(t, err, testCaseCopy.expectedErr.Error()) + } else { + expectedSet, err := 
listOfMapsToSet(testCaseCopy.expected) + assert.NoError(t, err) + + actualSet, err := listOfMapsToSet(got) + assert.NoError(t, err) + + assert.NoError(t, err) + assert.Equal(t, expectedSet, actualSet) + } + }) + } +} + +func toAPIExtensionsJSON(t *testing.T, g interface{}) *apiextensionsv1.JSON { + + resVal, err := json.Marshal(g) + if err != nil { + t.Error("unable to unmarshal json", g) + return nil + } + + res := &apiextensionsv1.JSON{Raw: resVal} + + return res +} + +func TestParamSetsAreUniqueByMergeKeys(t *testing.T) { + testCases := []struct { + name string + mergeKeys []string + paramSets []map[string]interface{} + expectedErr error + expected map[string]map[string]interface{} + }{ + { + name: "no merge keys", + mergeKeys: []string{}, + expectedErr: ErrNoMergeKeys, + }, + { + name: "no paramSets", + mergeKeys: []string{"key"}, + expected: make(map[string]map[string]interface{}), + }, + { + name: "simple key, unique paramSets", + mergeKeys: []string{"key"}, + paramSets: []map[string]interface{}{{"key": "a"}, {"key": "b"}}, + expected: map[string]map[string]interface{}{ + `{"key":"a"}`: {"key": "a"}, + `{"key":"b"}`: {"key": "b"}, + }, + }, + { + name: "simple key object, unique paramSets", + mergeKeys: []string{"key"}, + paramSets: []map[string]interface{}{{"key": map[string]interface{}{"hello": "world"}}, {"key": "b"}}, + expected: map[string]map[string]interface{}{ + `{"key":{"hello":"world"}}`: {"key": map[string]interface{}{"hello": "world"}}, + `{"key":"b"}`: {"key": "b"}, + }, + }, + { + name: "simple key, non-unique paramSets", + mergeKeys: []string{"key"}, + paramSets: []map[string]interface{}{{"key": "a"}, {"key": "b"}, {"key": "b"}}, + expectedErr: fmt.Errorf("%w. 
Duplicate key was %s", ErrNonUniqueParamSets, `{"key":"b"}`), + }, + { + name: "simple key, duplicated key name, unique paramSets", + mergeKeys: []string{"key", "key"}, + paramSets: []map[string]interface{}{{"key": "a"}, {"key": "b"}}, + expected: map[string]map[string]interface{}{ + `{"key":"a"}`: {"key": "a"}, + `{"key":"b"}`: {"key": "b"}, + }, + }, + { + name: "simple key, duplicated key name, non-unique paramSets", + mergeKeys: []string{"key", "key"}, + paramSets: []map[string]interface{}{{"key": "a"}, {"key": "b"}, {"key": "b"}}, + expectedErr: fmt.Errorf("%w. Duplicate key was %s", ErrNonUniqueParamSets, `{"key":"b"}`), + }, + { + name: "compound key, unique paramSets", + mergeKeys: []string{"key1", "key2"}, + paramSets: []map[string]interface{}{ + {"key1": "a", "key2": "a"}, + {"key1": "a", "key2": "b"}, + {"key1": "b", "key2": "a"}, + }, + expected: map[string]map[string]interface{}{ + `{"key1":"a","key2":"a"}`: {"key1": "a", "key2": "a"}, + `{"key1":"a","key2":"b"}`: {"key1": "a", "key2": "b"}, + `{"key1":"b","key2":"a"}`: {"key1": "b", "key2": "a"}, + }, + }, + { + name: "compound key object, unique paramSets", + mergeKeys: []string{"key1", "key2"}, + paramSets: []map[string]interface{}{ + {"key1": "a", "key2": map[string]interface{}{"hello": "world"}}, + {"key1": "a", "key2": "b"}, + {"key1": "b", "key2": "a"}, + }, + expected: map[string]map[string]interface{}{ + `{"key1":"a","key2":{"hello":"world"}}`: {"key1": "a", "key2": map[string]interface{}{"hello": "world"}}, + `{"key1":"a","key2":"b"}`: {"key1": "a", "key2": "b"}, + `{"key1":"b","key2":"a"}`: {"key1": "b", "key2": "a"}, + }, + }, + { + name: "compound key, duplicate key names, unique paramSets", + mergeKeys: []string{"key1", "key1", "key2"}, + paramSets: []map[string]interface{}{ + {"key1": "a", "key2": "a"}, + {"key1": "a", "key2": "b"}, + {"key1": "b", "key2": "a"}, + }, + expected: map[string]map[string]interface{}{ + `{"key1":"a","key2":"a"}`: {"key1": "a", "key2": "a"}, + 
`{"key1":"a","key2":"b"}`: {"key1": "a", "key2": "b"}, + `{"key1":"b","key2":"a"}`: {"key1": "b", "key2": "a"}, + }, + }, + { + name: "compound key, non-unique paramSets", + mergeKeys: []string{"key1", "key2"}, + paramSets: []map[string]interface{}{ + {"key1": "a", "key2": "a"}, + {"key1": "a", "key2": "a"}, + {"key1": "b", "key2": "a"}, + }, + expectedErr: fmt.Errorf("%w. Duplicate key was %s", ErrNonUniqueParamSets, `{"key1":"a","key2":"a"}`), + }, + { + name: "compound key, duplicate key names, non-unique paramSets", + mergeKeys: []string{"key1", "key1", "key2"}, + paramSets: []map[string]interface{}{ + {"key1": "a", "key2": "a"}, + {"key1": "a", "key2": "a"}, + {"key1": "b", "key2": "a"}, + }, + expectedErr: fmt.Errorf("%w. Duplicate key was %s", ErrNonUniqueParamSets, `{"key1":"a","key2":"a"}`), + }, + } + + for _, testCase := range testCases { + testCaseCopy := testCase // since tests may run in parallel + + t.Run(testCaseCopy.name, func(t *testing.T) { + t.Parallel() + + got, err := getParamSetsByMergeKey(testCaseCopy.mergeKeys, testCaseCopy.paramSets) + + if testCaseCopy.expectedErr != nil { + assert.EqualError(t, err, testCaseCopy.expectedErr.Error()) + } else { + assert.NoError(t, err) + assert.Equal(t, testCaseCopy.expected, got) + } + + }) + + } +} diff --git a/applicationset/generators/plugin.go b/applicationset/generators/plugin.go new file mode 100644 index 0000000000000..e0acca0622cdc --- /dev/null +++ b/applicationset/generators/plugin.go @@ -0,0 +1,211 @@ +package generators + +import ( + "context" + "fmt" + "strconv" + "strings" + "time" + + "github.com/jeremywohl/flatten" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client" + + argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/argoproj/argo-cd/v2/util/settings" + + "github.com/argoproj/argo-cd/v2/applicationset/services/plugin" +) + +const ( + DefaultPluginRequeueAfterSeconds = 30 * time.Minute 
+) + +var _ Generator = (*PluginGenerator)(nil) + +type PluginGenerator struct { + client client.Client + ctx context.Context + clientset kubernetes.Interface + namespace string +} + +func NewPluginGenerator(client client.Client, ctx context.Context, clientset kubernetes.Interface, namespace string) Generator { + g := &PluginGenerator{ + client: client, + ctx: ctx, + clientset: clientset, + namespace: namespace, + } + return g +} + +func (g *PluginGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration { + // Return a requeue default of 30 minutes, if no default is specified. + + if appSetGenerator.Plugin.RequeueAfterSeconds != nil { + return time.Duration(*appSetGenerator.Plugin.RequeueAfterSeconds) * time.Second + } + + return DefaultPluginRequeueAfterSeconds +} + +func (g *PluginGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate { + return &appSetGenerator.Plugin.Template +} + +func (g *PluginGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, applicationSetInfo *argoprojiov1alpha1.ApplicationSet) ([]map[string]interface{}, error) { + + if appSetGenerator == nil { + return nil, EmptyAppSetGeneratorError + } + + if appSetGenerator.Plugin == nil { + return nil, EmptyAppSetGeneratorError + } + + ctx := context.Background() + + providerConfig := appSetGenerator.Plugin + + pluginClient, err := g.getPluginFromGenerator(ctx, applicationSetInfo.Name, providerConfig) + if err != nil { + return nil, fmt.Errorf("error getting plugin from generator: %w", err) + } + + list, err := pluginClient.List(ctx, providerConfig.Input.Parameters) + if err != nil { + return nil, fmt.Errorf("error listing params: %w", err) + } + + res, err := g.generateParams(appSetGenerator, applicationSetInfo, list.Output.Parameters, appSetGenerator.Plugin.Input.Parameters, applicationSetInfo.Spec.GoTemplate) + if err != nil { + return nil, 
fmt.Errorf("error generating params: %w", err) + } + + return res, nil +} + +func (g *PluginGenerator) getPluginFromGenerator(ctx context.Context, appSetName string, generatorConfig *argoprojiov1alpha1.PluginGenerator) (*plugin.Service, error) { + cm, err := g.getConfigMap(ctx, generatorConfig.ConfigMapRef.Name) + if err != nil { + return nil, fmt.Errorf("error fetching ConfigMap: %w", err) + } + token, err := g.getToken(ctx, cm["token"]) + if err != nil { + return nil, fmt.Errorf("error fetching Secret token: %v", err) + } + + var requestTimeout int + requestTimeoutStr, ok := cm["requestTimeout"] + if ok { + requestTimeout, err = strconv.Atoi(requestTimeoutStr) + if err != nil { + return nil, fmt.Errorf("error set requestTimeout : %w", err) + } + } + + pluginClient, err := plugin.NewPluginService(ctx, appSetName, cm["baseUrl"], token, requestTimeout) + if err != nil { + return nil, fmt.Errorf("error initializing plugin client: %w", err) + } + return pluginClient, nil +} + +func (g *PluginGenerator) generateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, appSet *argoprojiov1alpha1.ApplicationSet, objectsFound []map[string]interface{}, pluginParams argoprojiov1alpha1.PluginParameters, useGoTemplate bool) ([]map[string]interface{}, error) { + res := []map[string]interface{}{} + + for _, objectFound := range objectsFound { + + params := map[string]interface{}{} + + if useGoTemplate { + for k, v := range objectFound { + params[k] = v + } + } else { + flat, err := flatten.Flatten(objectFound, "", flatten.DotStyle) + if err != nil { + return nil, err + } + for k, v := range flat { + params[k] = fmt.Sprintf("%v", v) + } + } + + params["generator"] = map[string]interface{}{ + "input": map[string]argoprojiov1alpha1.PluginParameters{ + "parameters": pluginParams, + }, + } + + err := appendTemplatedValues(appSetGenerator.Plugin.Values, params, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions) + if err != nil { + return nil, err + } + + res = 
append(res, params) + } + + return res, nil +} + +func (g *PluginGenerator) getToken(ctx context.Context, tokenRef string) (string, error) { + + if tokenRef == "" || !strings.HasPrefix(tokenRef, "$") { + return "", fmt.Errorf("token is empty, or does not reference a secret key starting with '$': %v", tokenRef) + } + + secretName, tokenKey := plugin.ParseSecretKey(tokenRef) + + secret := &corev1.Secret{} + err := g.client.Get( + ctx, + client.ObjectKey{ + Name: secretName, + Namespace: g.namespace, + }, + secret) + + if err != nil { + return "", fmt.Errorf("error fetching secret %s/%s: %v", g.namespace, secretName, err) + } + + secretValues := make(map[string]string, len(secret.Data)) + + for k, v := range secret.Data { + secretValues[k] = string(v) + } + + token := settings.ReplaceStringSecret(tokenKey, secretValues) + + return token, err +} + +func (g *PluginGenerator) getConfigMap(ctx context.Context, configMapRef string) (map[string]string, error) { + cm := &corev1.ConfigMap{} + err := g.client.Get( + ctx, + client.ObjectKey{ + Name: configMapRef, + Namespace: g.namespace, + }, + cm) + + if err != nil { + return nil, err + } + + baseUrl, ok := cm.Data["baseUrl"] + if !ok || baseUrl == "" { + return nil, fmt.Errorf("baseUrl not found in ConfigMap") + } + + token, ok := cm.Data["token"] + if !ok || token == "" { + return nil, fmt.Errorf("token not found in ConfigMap") + } + + return cm.Data, nil +} diff --git a/applicationset/generators/plugin_test.go b/applicationset/generators/plugin_test.go new file mode 100644 index 0000000000000..9611a2cbf14c1 --- /dev/null +++ b/applicationset/generators/plugin_test.go @@ -0,0 +1,705 @@ +package generators + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + v1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + kubefake "k8s.io/client-go/kubernetes/fake" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/argoproj/argo-cd/v2/applicationset/services/plugin" + argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +func TestPluginGenerateParams(t *testing.T) { + testCases := []struct { + name string + configmap *v1.ConfigMap + secret *v1.Secret + inputParameters map[string]apiextensionsv1.JSON + values map[string]string + gotemplate bool + expected []map[string]interface{} + content []byte + expectedError error + }{ + { + name: "simple case", + configmap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "first-plugin-cm", + Namespace: "default", + }, + Data: map[string]string{ + "baseUrl": "http://127.0.0.1", + "token": "$plugin.token", + }, + }, + secret: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "argocd-secret", + Namespace: "default", + }, + Data: map[string][]byte{ + "plugin.token": []byte("my-secret"), + }, + }, + inputParameters: map[string]apiextensionsv1.JSON{ + "pkey1": {Raw: []byte(`"val1"`)}, + "pkey2": {Raw: []byte(`"val2"`)}, + }, + gotemplate: false, + content: []byte(`{"output": { + "parameters": [{ + "key1": "val1", + "key2": { + "key2_1": "val2_1", + "key2_2": { + "key2_2_1": "val2_2_1" + } + }, + "key3": 123 + }] + }}`), + expected: []map[string]interface{}{ + { + "key1": "val1", + "key2.key2_1": "val2_1", + "key2.key2_2.key2_2_1": "val2_2_1", + "key3": "123", + "generator": map[string]interface{}{ + "input": argoprojiov1alpha1.PluginInput{ + Parameters: argoprojiov1alpha1.PluginParameters{ + "pkey1": {Raw: []byte(`"val1"`)}, + "pkey2": {Raw: []byte(`"val2"`)}, + }, + }, + }, + }, + }, + expectedError: nil, + }, + { + name: "simple case with values", + configmap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "first-plugin-cm", + Namespace: "default", + }, + 
Data: map[string]string{ + "baseUrl": "http://127.0.0.1", + "token": "$plugin.token", + }, + }, + secret: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "argocd-secret", + Namespace: "default", + }, + Data: map[string][]byte{ + "plugin.token": []byte("my-secret"), + }, + }, + inputParameters: map[string]apiextensionsv1.JSON{ + "pkey1": {Raw: []byte(`"val1"`)}, + "pkey2": {Raw: []byte(`"val2"`)}, + }, + values: map[string]string{ + "valuekey1": "valuevalue1", + "valuekey2": "templated-{{key1}}", + }, + gotemplate: false, + content: []byte(`{"output": { + "parameters": [{ + "key1": "val1", + "key2": { + "key2_1": "val2_1", + "key2_2": { + "key2_2_1": "val2_2_1" + } + }, + "key3": 123 + }] + }}`), + expected: []map[string]interface{}{ + { + "key1": "val1", + "key2.key2_1": "val2_1", + "key2.key2_2.key2_2_1": "val2_2_1", + "key3": "123", + "values.valuekey1": "valuevalue1", + "values.valuekey2": "templated-val1", + "generator": map[string]interface{}{ + "input": argoprojiov1alpha1.PluginInput{ + Parameters: argoprojiov1alpha1.PluginParameters{ + "pkey1": {Raw: []byte(`"val1"`)}, + "pkey2": {Raw: []byte(`"val2"`)}, + }, + }, + }, + }, + }, + expectedError: nil, + }, + { + name: "simple case with gotemplate", + configmap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "first-plugin-cm", + Namespace: "default", + }, + Data: map[string]string{ + "baseUrl": "http://127.0.0.1", + "token": "$plugin.token", + }, + }, + secret: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "argocd-secret", + Namespace: "default", + }, + Data: map[string][]byte{ + "plugin.token": []byte("my-secret"), + }, + }, + inputParameters: map[string]apiextensionsv1.JSON{ + "pkey1": {Raw: []byte(`"val1"`)}, + "pkey2": {Raw: []byte(`"val2"`)}, + }, + gotemplate: true, + content: []byte(`{"output": { + "parameters": [{ + "key1": "val1", + "key2": { + "key2_1": "val2_1", + "key2_2": { + "key2_2_1": "val2_2_1" + } + }, + "key3": 123 + }] + }}`), + expected: []map[string]interface{}{ + { + 
"key1": "val1", + "key2": map[string]interface{}{ + "key2_1": "val2_1", + "key2_2": map[string]interface{}{ + "key2_2_1": "val2_2_1", + }, + }, + "key3": float64(123), + "generator": map[string]interface{}{ + "input": argoprojiov1alpha1.PluginInput{ + Parameters: argoprojiov1alpha1.PluginParameters{ + "pkey1": {Raw: []byte(`"val1"`)}, + "pkey2": {Raw: []byte(`"val2"`)}, + }, + }, + }, + }, + }, + expectedError: nil, + }, + { + name: "simple case with appended params", + configmap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "first-plugin-cm", + Namespace: "default", + }, + Data: map[string]string{ + "baseUrl": "http://127.0.0.1", + "token": "$plugin.token", + }, + }, + secret: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "argocd-secret", + Namespace: "default", + }, + Data: map[string][]byte{ + "plugin.token": []byte("my-secret"), + }, + }, + inputParameters: map[string]apiextensionsv1.JSON{ + "pkey1": {Raw: []byte(`"val1"`)}, + "pkey2": {Raw: []byte(`"val2"`)}, + }, + gotemplate: false, + content: []byte(`{"output": {"parameters": [{ + "key1": "val1", + "key2": { + "key2_1": "val2_1", + "key2_2": { + "key2_2_1": "val2_2_1" + } + }, + "key3": 123, + "pkey2": "valplugin" + }]}}`), + expected: []map[string]interface{}{ + { + "key1": "val1", + "key2.key2_1": "val2_1", + "key2.key2_2.key2_2_1": "val2_2_1", + "key3": "123", + "pkey2": "valplugin", + "generator": map[string]interface{}{ + "input": argoprojiov1alpha1.PluginInput{ + Parameters: argoprojiov1alpha1.PluginParameters{ + "pkey1": {Raw: []byte(`"val1"`)}, + "pkey2": {Raw: []byte(`"val2"`)}, + }, + }, + }, + }, + }, + expectedError: nil, + }, + { + name: "no params", + configmap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "first-plugin-cm", + Namespace: "default", + }, + Data: map[string]string{ + "baseUrl": "http://127.0.0.1", + "token": "$plugin.token", + }, + }, + secret: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "argocd-secret", + Namespace: "default", + }, + Data: 
map[string][]byte{ + "plugin.token": []byte("my-secret"), + }, + }, + inputParameters: argoprojiov1alpha1.PluginParameters{}, + gotemplate: false, + content: []byte(`{"output": { + "parameters": [{ + "key1": "val1", + "key2": { + "key2_1": "val2_1", + "key2_2": { + "key2_2_1": "val2_2_1" + } + }, + "key3": 123 + }] + }}`), + expected: []map[string]interface{}{ + { + "key1": "val1", + "key2.key2_1": "val2_1", + "key2.key2_2.key2_2_1": "val2_2_1", + "key3": "123", + "generator": map[string]interface{}{ + "input": map[string]map[string]interface{}{ + "parameters": {}, + }, + }, + }, + }, + expectedError: nil, + }, + { + name: "empty return", + configmap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "first-plugin-cm", + Namespace: "default", + }, + Data: map[string]string{ + "baseUrl": "http://127.0.0.1", + "token": "$plugin.token", + }, + }, + secret: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "argocd-secret", + Namespace: "default", + }, + Data: map[string][]byte{ + "plugin.token": []byte("my-secret"), + }, + }, + inputParameters: map[string]apiextensionsv1.JSON{}, + gotemplate: false, + content: []byte(`{"input": {"parameters": []}}`), + expected: []map[string]interface{}{}, + expectedError: nil, + }, + { + name: "wrong return", + configmap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "first-plugin-cm", + Namespace: "default", + }, + Data: map[string]string{ + "baseUrl": "http://127.0.0.1", + "token": "$plugin.token", + }, + }, + secret: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "argocd-secret", + Namespace: "default", + }, + Data: map[string][]byte{ + "plugin.token": []byte("my-secret"), + }, + }, + inputParameters: map[string]apiextensionsv1.JSON{}, + gotemplate: false, + content: []byte(`wrong body ...`), + expected: []map[string]interface{}{}, + expectedError: fmt.Errorf("error listing params: error get api 'set': invalid character 'w' looking for beginning of value: wrong body ..."), + }, + { + name: "external 
secret", + configmap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "first-plugin-cm", + Namespace: "default", + }, + Data: map[string]string{ + "baseUrl": "http://127.0.0.1", + "token": "$plugin-secret:plugin.token", + }, + }, + secret: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "plugin-secret", + Namespace: "default", + }, + Data: map[string][]byte{ + "plugin.token": []byte("my-secret"), + }, + }, + inputParameters: map[string]apiextensionsv1.JSON{ + "pkey1": {Raw: []byte(`"val1"`)}, + "pkey2": {Raw: []byte(`"val2"`)}, + }, + gotemplate: false, + content: []byte(`{"output": {"parameters": [{ + "key1": "val1", + "key2": { + "key2_1": "val2_1", + "key2_2": { + "key2_2_1": "val2_2_1" + } + }, + "key3": 123, + "pkey2": "valplugin" + }]}}`), + expected: []map[string]interface{}{ + { + "key1": "val1", + "key2.key2_1": "val2_1", + "key2.key2_2.key2_2_1": "val2_2_1", + "key3": "123", + "pkey2": "valplugin", + "generator": map[string]interface{}{ + "input": argoprojiov1alpha1.PluginInput{ + Parameters: argoprojiov1alpha1.PluginParameters{ + "pkey1": {Raw: []byte(`"val1"`)}, + "pkey2": {Raw: []byte(`"val2"`)}, + }, + }, + }, + }, + }, + expectedError: nil, + }, + { + name: "no secret", + configmap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "first-plugin-cm", + Namespace: "default", + }, + Data: map[string]string{ + "baseUrl": "http://127.0.0.1", + "token": "$plugin.token", + }, + }, + secret: &v1.Secret{}, + inputParameters: map[string]apiextensionsv1.JSON{ + "pkey1": {Raw: []byte(`"val1"`)}, + "pkey2": {Raw: []byte(`"val2"`)}, + }, + gotemplate: false, + content: []byte(`{"output": { + "parameters": [{ + "key1": "val1", + "key2": { + "key2_1": "val2_1", + "key2_2": { + "key2_2_1": "val2_2_1" + } + }, + "key3": 123 + }] + }}`), + expected: []map[string]interface{}{ + { + "key1": "val1", + "key2.key2_1": "val2_1", + "key2.key2_2.key2_2_1": "val2_2_1", + "key3": "123", + "generator": map[string]interface{}{ + "input": 
argoprojiov1alpha1.PluginInput{ + Parameters: argoprojiov1alpha1.PluginParameters{ + "pkey1": {Raw: []byte(`"val1"`)}, + "pkey2": {Raw: []byte(`"val2"`)}, + }, + }, + }, + }, + }, + expectedError: fmt.Errorf("error getting plugin from generator: error fetching Secret token: error fetching secret default/argocd-secret: secrets \"argocd-secret\" not found"), + }, + { + name: "no configmap", + configmap: &v1.ConfigMap{}, + secret: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "argocd-secret", + Namespace: "default", + }, + Data: map[string][]byte{ + "plugin.token": []byte("my-secret"), + }, + }, + inputParameters: map[string]apiextensionsv1.JSON{ + "pkey1": {Raw: []byte(`"val1"`)}, + "pkey2": {Raw: []byte(`"val2"`)}, + }, + gotemplate: false, + content: []byte(`{"output": { + "parameters": [{ + "key1": "val1", + "key2": { + "key2_1": "val2_1", + "key2_2": { + "key2_2_1": "val2_2_1" + } + }, + "key3": 123 + }] + }}`), + expected: []map[string]interface{}{ + { + "key1": "val1", + "key2.key2_1": "val2_1", + "key2.key2_2.key2_2_1": "val2_2_1", + "key3": "123", + "generator": map[string]interface{}{ + "input": argoprojiov1alpha1.PluginInput{ + Parameters: argoprojiov1alpha1.PluginParameters{ + "pkey1": {Raw: []byte(`"val1"`)}, + "pkey2": {Raw: []byte(`"val2"`)}, + }, + }, + }, + }, + }, + expectedError: fmt.Errorf("error getting plugin from generator: error fetching ConfigMap: configmaps \"\" not found"), + }, + { + name: "no baseUrl", + configmap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "first-plugin-cm", + Namespace: "default", + }, + Data: map[string]string{ + "token": "$plugin.token", + }, + }, + secret: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "argocd-secret", + Namespace: "default", + }, + Data: map[string][]byte{ + "plugin.token": []byte("my-secret"), + }, + }, + inputParameters: map[string]apiextensionsv1.JSON{ + "pkey1": {Raw: []byte(`"val1"`)}, + "pkey2": {Raw: []byte(`"val2"`)}, + }, + gotemplate: false, + content: 
[]byte(`{"output": { + "parameters": [{ + "key1": "val1", + "key2": { + "key2_1": "val2_1", + "key2_2": { + "key2_2_1": "val2_2_1" + } + }, + "key3": 123 + }] + }}`), + expected: []map[string]interface{}{ + { + "key1": "val1", + "key2.key2_1": "val2_1", + "key2.key2_2.key2_2_1": "val2_2_1", + "key3": "123", + "generator": map[string]interface{}{ + "input": argoprojiov1alpha1.PluginInput{ + Parameters: argoprojiov1alpha1.PluginParameters{ + "pkey1": {Raw: []byte(`"val1"`)}, + "pkey2": {Raw: []byte(`"val2"`)}, + }, + }, + }, + }, + }, + expectedError: fmt.Errorf("error getting plugin from generator: error fetching ConfigMap: baseUrl not found in ConfigMap"), + }, + { + name: "no token", + configmap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "first-plugin-cm", + Namespace: "default", + }, + Data: map[string]string{ + "baseUrl": "http://127.0.0.1", + }, + }, + secret: &v1.Secret{}, + inputParameters: map[string]apiextensionsv1.JSON{ + "pkey1": {Raw: []byte(`"val1"`)}, + "pkey2": {Raw: []byte(`"val2"`)}, + }, + gotemplate: false, + content: []byte(`{"output": { + "parameters": [{ + "key1": "val1", + "key2": { + "key2_1": "val2_1", + "key2_2": { + "key2_2_1": "val2_2_1" + } + }, + "key3": 123 + }] + }}`), + expected: []map[string]interface{}{ + { + "key1": "val1", + "key2.key2_1": "val2_1", + "key2.key2_2.key2_2_1": "val2_2_1", + "key3": "123", + "generator": map[string]interface{}{ + "input": argoprojiov1alpha1.PluginInput{ + Parameters: argoprojiov1alpha1.PluginParameters{ + "pkey1": {Raw: []byte(`"val1"`)}, + "pkey2": {Raw: []byte(`"val2"`)}, + }, + }, + }, + }, + }, + expectedError: fmt.Errorf("error getting plugin from generator: error fetching ConfigMap: token not found in ConfigMap"), + }, + } + + ctx := context.Background() + + for _, testCase := range testCases { + + t.Run(testCase.name, func(t *testing.T) { + + generatorConfig := argoprojiov1alpha1.ApplicationSetGenerator{ + Plugin: &argoprojiov1alpha1.PluginGenerator{ + ConfigMapRef: 
argoprojiov1alpha1.PluginConfigMapRef{Name: testCase.configmap.Name}, + Input: argoprojiov1alpha1.PluginInput{ + Parameters: testCase.inputParameters, + }, + Values: testCase.values, + }, + } + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + + authHeader := r.Header.Get("Authorization") + _, tokenKey := plugin.ParseSecretKey(testCase.configmap.Data["token"]) + expectedToken := testCase.secret.Data[strings.Replace(tokenKey, "$", "", -1)] + if authHeader != "Bearer "+string(expectedToken) { + w.WriteHeader(http.StatusUnauthorized) + return + } + + w.Header().Set("Content-Type", "application/json") + _, err := w.Write(testCase.content) + if err != nil { + assert.NoError(t, fmt.Errorf("Error Write %v", err)) + } + }) + + fakeServer := httptest.NewServer(handler) + + defer fakeServer.Close() + + if _, ok := testCase.configmap.Data["baseUrl"]; ok { + testCase.configmap.Data["baseUrl"] = fakeServer.URL + } + + fakeClient := kubefake.NewSimpleClientset(append([]runtime.Object{}, testCase.configmap, testCase.secret)...) 
+ + fakeClientWithCache := fake.NewClientBuilder().WithObjects([]client.Object{testCase.configmap, testCase.secret}...).Build() + + var pluginGenerator = NewPluginGenerator(fakeClientWithCache, ctx, fakeClient, "default") + + applicationSetInfo := argoprojiov1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "set", + }, + Spec: argoprojiov1alpha1.ApplicationSetSpec{ + GoTemplate: testCase.gotemplate, + }, + } + + got, err := pluginGenerator.GenerateParams(&generatorConfig, &applicationSetInfo) + + if err != nil { + fmt.Println(err) + } + + if testCase.expectedError != nil { + assert.EqualError(t, err, testCase.expectedError.Error()) + } else { + assert.NoError(t, err) + expectedJson, err := json.Marshal(testCase.expected) + require.NoError(t, err) + gotJson, err := json.Marshal(got) + require.NoError(t, err) + assert.Equal(t, string(expectedJson), string(gotJson)) + } + }) + } +} diff --git a/applicationset/generators/pull_request.go b/applicationset/generators/pull_request.go new file mode 100644 index 0000000000000..c1dfd5ed978e9 --- /dev/null +++ b/applicationset/generators/pull_request.go @@ -0,0 +1,233 @@ +package generators + +import ( + "context" + "fmt" + "strconv" + "time" + + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/gosimple/slug" + + pullrequest "github.com/argoproj/argo-cd/v2/applicationset/services/pull_request" + argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +var _ Generator = (*PullRequestGenerator)(nil) + +const ( + DefaultPullRequestRequeueAfterSeconds = 30 * time.Minute +) + +type PullRequestGenerator struct { + client client.Client + selectServiceProviderFunc func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) + auth SCMAuthProviders + scmRootCAPath string + allowedSCMProviders []string + enableSCMProviders bool +} + +func NewPullRequestGenerator(client 
client.Client, auth SCMAuthProviders, scmRootCAPath string, allowedScmProviders []string, enableSCMProviders bool) Generator { + g := &PullRequestGenerator{ + client: client, + auth: auth, + scmRootCAPath: scmRootCAPath, + allowedSCMProviders: allowedScmProviders, + enableSCMProviders: enableSCMProviders, + } + g.selectServiceProviderFunc = g.selectServiceProvider + return g +} + +func (g *PullRequestGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration { + // Return a requeue default of 30 minutes, if no default is specified. + + if appSetGenerator.PullRequest.RequeueAfterSeconds != nil { + return time.Duration(*appSetGenerator.PullRequest.RequeueAfterSeconds) * time.Second + } + + return DefaultPullRequestRequeueAfterSeconds +} + +func (g *PullRequestGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate { + return &appSetGenerator.PullRequest.Template +} + +func (g *PullRequestGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, applicationSetInfo *argoprojiov1alpha1.ApplicationSet) ([]map[string]interface{}, error) { + if appSetGenerator == nil { + return nil, EmptyAppSetGeneratorError + } + + if appSetGenerator.PullRequest == nil { + return nil, EmptyAppSetGeneratorError + } + + ctx := context.Background() + svc, err := g.selectServiceProviderFunc(ctx, appSetGenerator.PullRequest, applicationSetInfo) + if err != nil { + return nil, fmt.Errorf("failed to select pull request service provider: %w", err) + } + + pulls, err := pullrequest.ListPullRequests(ctx, svc, appSetGenerator.PullRequest.Filters) + if err != nil { + return nil, fmt.Errorf("error listing repos: %v", err) + } + params := make([]map[string]interface{}, 0, len(pulls)) + + // In order to follow the DNS label standard as defined in RFC 1123, + // we need to limit the 'branch' to 50 to give room to append/suffix-ing it + // with 13 more characters. 
 Also, there is the need to clean it as recommended + // here https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names + slug.MaxLength = 50 + + // Converting underscores to dashes + slug.CustomSub = map[string]string{ + "_": "-", + } + + var shortSHALength int + var shortSHALength7 int + for _, pull := range pulls { + shortSHALength = 8 + if len(pull.HeadSHA) < 8 { + shortSHALength = len(pull.HeadSHA) + } + + shortSHALength7 = 7 + if len(pull.HeadSHA) < 7 { + shortSHALength7 = len(pull.HeadSHA) + } + + paramMap := map[string]interface{}{ + "number": strconv.Itoa(pull.Number), + "branch": pull.Branch, + "branch_slug": slug.Make(pull.Branch), + "target_branch": pull.TargetBranch, + "target_branch_slug": slug.Make(pull.TargetBranch), + "head_sha": pull.HeadSHA, + "head_short_sha": pull.HeadSHA[:shortSHALength], + "head_short_sha_7": pull.HeadSHA[:shortSHALength7], + } + + // PR labels will only be supported for Go Template appsets, since fasttemplate will be deprecated. 
+ if applicationSetInfo != nil && applicationSetInfo.Spec.GoTemplate { + paramMap["labels"] = pull.Labels + } + params = append(params, paramMap) + } + return params, nil +} + +// selectServiceProvider selects the provider to get pull requests from the configuration +func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, generatorConfig *argoprojiov1alpha1.PullRequestGenerator, applicationSetInfo *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) { + if !g.enableSCMProviders { + return nil, ErrSCMProvidersDisabled + } + if err := ScmProviderAllowed(applicationSetInfo, generatorConfig, g.allowedSCMProviders); err != nil { + return nil, fmt.Errorf("scm provider not allowed: %w", err) + } + + if generatorConfig.Github != nil { + return g.github(ctx, generatorConfig.Github, applicationSetInfo) + } + if generatorConfig.GitLab != nil { + providerConfig := generatorConfig.GitLab + token, err := g.getSecretRef(ctx, providerConfig.TokenRef, applicationSetInfo.Namespace) + if err != nil { + return nil, fmt.Errorf("error fetching Secret token: %v", err) + } + return pullrequest.NewGitLabService(ctx, token, providerConfig.API, providerConfig.Project, providerConfig.Labels, providerConfig.PullRequestState, g.scmRootCAPath, providerConfig.Insecure) + } + if generatorConfig.Gitea != nil { + providerConfig := generatorConfig.Gitea + token, err := g.getSecretRef(ctx, providerConfig.TokenRef, applicationSetInfo.Namespace) + if err != nil { + return nil, fmt.Errorf("error fetching Secret token: %v", err) + } + return pullrequest.NewGiteaService(ctx, token, providerConfig.API, providerConfig.Owner, providerConfig.Repo, providerConfig.Insecure) + } + if generatorConfig.BitbucketServer != nil { + providerConfig := generatorConfig.BitbucketServer + if providerConfig.BasicAuth != nil { + password, err := g.getSecretRef(ctx, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace) + if err != nil { + return nil, fmt.Errorf("error 
fetching Secret token: %v", err) + } + return pullrequest.NewBitbucketServiceBasicAuth(ctx, providerConfig.BasicAuth.Username, password, providerConfig.API, providerConfig.Project, providerConfig.Repo) + } else { + return pullrequest.NewBitbucketServiceNoAuth(ctx, providerConfig.API, providerConfig.Project, providerConfig.Repo) + } + } + if generatorConfig.Bitbucket != nil { + providerConfig := generatorConfig.Bitbucket + if providerConfig.BearerToken != nil { + appToken, err := g.getSecretRef(ctx, providerConfig.BearerToken.TokenRef, applicationSetInfo.Namespace) + if err != nil { + return nil, fmt.Errorf("error fetching Secret Bearer token: %v", err) + } + return pullrequest.NewBitbucketCloudServiceBearerToken(providerConfig.API, appToken, providerConfig.Owner, providerConfig.Repo) + } else if providerConfig.BasicAuth != nil { + password, err := g.getSecretRef(ctx, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace) + if err != nil { + return nil, fmt.Errorf("error fetching Secret token: %v", err) + } + return pullrequest.NewBitbucketCloudServiceBasicAuth(providerConfig.API, providerConfig.BasicAuth.Username, password, providerConfig.Owner, providerConfig.Repo) + } else { + return pullrequest.NewBitbucketCloudServiceNoAuth(providerConfig.API, providerConfig.Owner, providerConfig.Repo) + } + } + if generatorConfig.AzureDevOps != nil { + providerConfig := generatorConfig.AzureDevOps + token, err := g.getSecretRef(ctx, providerConfig.TokenRef, applicationSetInfo.Namespace) + if err != nil { + return nil, fmt.Errorf("error fetching Secret token: %v", err) + } + return pullrequest.NewAzureDevOpsService(ctx, token, providerConfig.API, providerConfig.Organization, providerConfig.Project, providerConfig.Repo, providerConfig.Labels) + } + return nil, fmt.Errorf("no Pull Request provider implementation configured") +} + +func (g *PullRequestGenerator) github(ctx context.Context, cfg *argoprojiov1alpha1.PullRequestGeneratorGithub, applicationSetInfo 
*argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) { + // use an app if it was configured + if cfg.AppSecretName != "" { + auth, err := g.auth.GitHubApps.GetAuthSecret(ctx, cfg.AppSecretName) + if err != nil { + return nil, fmt.Errorf("error getting GitHub App secret: %v", err) + } + return pullrequest.NewGithubAppService(*auth, cfg.API, cfg.Owner, cfg.Repo, cfg.Labels) + } + + // always default to token, even if not set (public access) + token, err := g.getSecretRef(ctx, cfg.TokenRef, applicationSetInfo.Namespace) + if err != nil { + return nil, fmt.Errorf("error fetching Secret token: %v", err) + } + return pullrequest.NewGithubService(ctx, token, cfg.API, cfg.Owner, cfg.Repo, cfg.Labels) +} + +// getSecretRef gets the value of the key for the specified Secret resource. +func (g *PullRequestGenerator) getSecretRef(ctx context.Context, ref *argoprojiov1alpha1.SecretRef, namespace string) (string, error) { + if ref == nil { + return "", nil + } + + secret := &corev1.Secret{} + err := g.client.Get( + ctx, + client.ObjectKey{ + Name: ref.SecretName, + Namespace: namespace, + }, + secret) + if err != nil { + return "", fmt.Errorf("error fetching secret %s/%s: %v", namespace, ref.SecretName, err) + } + tokenBytes, ok := secret.Data[ref.Key] + if !ok { + return "", fmt.Errorf("key %q in secret %s/%s not found", ref.Key, namespace, ref.SecretName) + } + return string(tokenBytes), nil +} diff --git a/applicationset/generators/pull_request_test.go b/applicationset/generators/pull_request_test.go new file mode 100644 index 0000000000000..9f4d3d0a9b693 --- /dev/null +++ b/applicationset/generators/pull_request_test.go @@ -0,0 +1,374 @@ +package generators + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + pullrequest "github.com/argoproj/argo-cd/v2/applicationset/services/pull_request" 
+ argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +func TestPullRequestGithubGenerateParams(t *testing.T) { + ctx := context.Background() + cases := []struct { + selectFunc func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) + expected []map[string]interface{} + expectedErr error + applicationSet argoprojiov1alpha1.ApplicationSet + }{ + { + selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) { + return pullrequest.NewFakeService( + ctx, + []*pullrequest.PullRequest{ + { + Number: 1, + Branch: "branch1", + TargetBranch: "master", + HeadSHA: "089d92cbf9ff857a39e6feccd32798ca700fb958", + }, + }, + nil, + ) + }, + expected: []map[string]interface{}{ + { + "number": "1", + "branch": "branch1", + "branch_slug": "branch1", + "target_branch": "master", + "target_branch_slug": "master", + "head_sha": "089d92cbf9ff857a39e6feccd32798ca700fb958", + "head_short_sha": "089d92cb", + "head_short_sha_7": "089d92c", + }, + }, + expectedErr: nil, + }, + { + selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) { + return pullrequest.NewFakeService( + ctx, + []*pullrequest.PullRequest{ + { + Number: 2, + Branch: "feat/areally+long_pull_request_name_to_test_argo_slugification_and_branch_name_shortening_feature", + TargetBranch: "feat/anotherreally+long_pull_request_name_to_test_argo_slugification_and_branch_name_shortening_feature", + HeadSHA: "9b34ff5bd418e57d58891eb0aa0728043ca1e8be", + }, + }, + nil, + ) + }, + expected: []map[string]interface{}{ + { + "number": "2", + "branch": "feat/areally+long_pull_request_name_to_test_argo_slugification_and_branch_name_shortening_feature", + "branch_slug": "feat-areally-long-pull-request-name-to-test-argo", + "target_branch": 
"feat/anotherreally+long_pull_request_name_to_test_argo_slugification_and_branch_name_shortening_feature", + "target_branch_slug": "feat-anotherreally-long-pull-request-name-to-test", + "head_sha": "9b34ff5bd418e57d58891eb0aa0728043ca1e8be", + "head_short_sha": "9b34ff5b", + "head_short_sha_7": "9b34ff5", + }, + }, + expectedErr: nil, + }, + { + selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) { + return pullrequest.NewFakeService( + ctx, + []*pullrequest.PullRequest{ + { + Number: 1, + Branch: "a-very-short-sha", + TargetBranch: "master", + HeadSHA: "abcd", + }, + }, + nil, + ) + }, + expected: []map[string]interface{}{ + { + "number": "1", + "branch": "a-very-short-sha", + "branch_slug": "a-very-short-sha", + "target_branch": "master", + "target_branch_slug": "master", + "head_sha": "abcd", + "head_short_sha": "abcd", + "head_short_sha_7": "abcd", + }, + }, + expectedErr: nil, + }, + { + selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) { + return pullrequest.NewFakeService( + ctx, + nil, + fmt.Errorf("fake error"), + ) + }, + expected: nil, + expectedErr: fmt.Errorf("error listing repos: fake error"), + }, + { + selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) { + return pullrequest.NewFakeService( + ctx, + []*pullrequest.PullRequest{ + { + Number: 1, + Branch: "branch1", + TargetBranch: "master", + HeadSHA: "089d92cbf9ff857a39e6feccd32798ca700fb958", + Labels: []string{"preview"}, + }, + }, + nil, + ) + }, + expected: []map[string]interface{}{ + { + "number": "1", + "branch": "branch1", + "branch_slug": "branch1", + "target_branch": "master", + "target_branch_slug": "master", + "head_sha": "089d92cbf9ff857a39e6feccd32798ca700fb958", + "head_short_sha": 
"089d92cb", + "head_short_sha_7": "089d92c", + "labels": []string{"preview"}, + }, + }, + expectedErr: nil, + applicationSet: argoprojiov1alpha1.ApplicationSet{ + Spec: argoprojiov1alpha1.ApplicationSetSpec{ + // Application set is using Go Template. + GoTemplate: true, + }, + }, + }, + { + selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) { + return pullrequest.NewFakeService( + ctx, + []*pullrequest.PullRequest{ + { + Number: 1, + Branch: "branch1", + TargetBranch: "master", + HeadSHA: "089d92cbf9ff857a39e6feccd32798ca700fb958", + Labels: []string{"preview"}, + }, + }, + nil, + ) + }, + expected: []map[string]interface{}{ + { + "number": "1", + "branch": "branch1", + "branch_slug": "branch1", + "target_branch": "master", + "target_branch_slug": "master", + "head_sha": "089d92cbf9ff857a39e6feccd32798ca700fb958", + "head_short_sha": "089d92cb", + "head_short_sha_7": "089d92c", + }, + }, + expectedErr: nil, + applicationSet: argoprojiov1alpha1.ApplicationSet{ + Spec: argoprojiov1alpha1.ApplicationSetSpec{ + // Application set is using fasttemplate. 
+ GoTemplate: false, + }, + }, + }, + } + + for _, c := range cases { + gen := PullRequestGenerator{ + selectServiceProviderFunc: c.selectFunc, + } + generatorConfig := argoprojiov1alpha1.ApplicationSetGenerator{ + PullRequest: &argoprojiov1alpha1.PullRequestGenerator{}, + } + + got, gotErr := gen.GenerateParams(&generatorConfig, &c.applicationSet) + assert.Equal(t, c.expectedErr, gotErr) + assert.ElementsMatch(t, c.expected, got) + } +} + +func TestPullRequestGetSecretRef(t *testing.T) { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "test-secret", Namespace: "test"}, + Data: map[string][]byte{ + "my-token": []byte("secret"), + }, + } + gen := &PullRequestGenerator{client: fake.NewClientBuilder().WithObjects(secret).Build()} + ctx := context.Background() + + cases := []struct { + name, namespace, token string + ref *argoprojiov1alpha1.SecretRef + hasError bool + }{ + { + name: "valid ref", + ref: &argoprojiov1alpha1.SecretRef{SecretName: "test-secret", Key: "my-token"}, + namespace: "test", + token: "secret", + hasError: false, + }, + { + name: "nil ref", + ref: nil, + namespace: "test", + token: "", + hasError: false, + }, + { + name: "wrong name", + ref: &argoprojiov1alpha1.SecretRef{SecretName: "other", Key: "my-token"}, + namespace: "test", + token: "", + hasError: true, + }, + { + name: "wrong key", + ref: &argoprojiov1alpha1.SecretRef{SecretName: "test-secret", Key: "other-token"}, + namespace: "test", + token: "", + hasError: true, + }, + { + name: "wrong namespace", + ref: &argoprojiov1alpha1.SecretRef{SecretName: "test-secret", Key: "my-token"}, + namespace: "other", + token: "", + hasError: true, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + token, err := gen.getSecretRef(ctx, c.ref, c.namespace) + if c.hasError { + assert.NotNil(t, err) + } else { + assert.Nil(t, err) + } + assert.Equal(t, c.token, token) + }) + } +} + +func TestAllowedSCMProviderPullRequest(t *testing.T) { + cases := []struct { + name 
string + providerConfig *argoprojiov1alpha1.PullRequestGenerator + expectedError error + }{ + { + name: "Error Github", + providerConfig: &argoprojiov1alpha1.PullRequestGenerator{ + Github: &argoprojiov1alpha1.PullRequestGeneratorGithub{ + API: "https://myservice.mynamespace.svc.cluster.local", + }, + }, + expectedError: &ErrDisallowedSCMProvider{}, + }, + { + name: "Error Gitlab", + providerConfig: &argoprojiov1alpha1.PullRequestGenerator{ + GitLab: &argoprojiov1alpha1.PullRequestGeneratorGitLab{ + API: "https://myservice.mynamespace.svc.cluster.local", + }, + }, + expectedError: &ErrDisallowedSCMProvider{}, + }, + { + name: "Error Gitea", + providerConfig: &argoprojiov1alpha1.PullRequestGenerator{ + Gitea: &argoprojiov1alpha1.PullRequestGeneratorGitea{ + API: "https://myservice.mynamespace.svc.cluster.local", + }, + }, + expectedError: &ErrDisallowedSCMProvider{}, + }, + { + name: "Error Bitbucket", + providerConfig: &argoprojiov1alpha1.PullRequestGenerator{ + BitbucketServer: &argoprojiov1alpha1.PullRequestGeneratorBitbucketServer{ + API: "https://myservice.mynamespace.svc.cluster.local", + }, + }, + expectedError: &ErrDisallowedSCMProvider{}, + }, + } + + for _, testCase := range cases { + testCaseCopy := testCase + + t.Run(testCaseCopy.name, func(t *testing.T) { + t.Parallel() + + pullRequestGenerator := NewPullRequestGenerator(nil, SCMAuthProviders{}, "", []string{ + "github.myorg.com", + "gitlab.myorg.com", + "gitea.myorg.com", + "bitbucket.myorg.com", + "azuredevops.myorg.com", + }, true) + + applicationSetInfo := argoprojiov1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "set", + }, + Spec: argoprojiov1alpha1.ApplicationSetSpec{ + Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{ + PullRequest: testCaseCopy.providerConfig, + }}, + }, + } + + _, err := pullRequestGenerator.GenerateParams(&applicationSetInfo.Spec.Generators[0], &applicationSetInfo) + + assert.Error(t, err, "Must return an error") + assert.ErrorAs(t, err, 
testCaseCopy.expectedError) + }) + } +} + +func TestSCMProviderDisabled_PRGenerator(t *testing.T) { + generator := NewPullRequestGenerator(nil, SCMAuthProviders{}, "", []string{}, false) + + applicationSetInfo := argoprojiov1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "set", + }, + Spec: argoprojiov1alpha1.ApplicationSetSpec{ + Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{ + PullRequest: &argoprojiov1alpha1.PullRequestGenerator{ + Github: &argoprojiov1alpha1.PullRequestGeneratorGithub{ + API: "https://myservice.mynamespace.svc.cluster.local", + }, + }, + }}, + }, + } + + _, err := generator.GenerateParams(&applicationSetInfo.Spec.Generators[0], &applicationSetInfo) + assert.ErrorIs(t, err, ErrSCMProvidersDisabled) +} diff --git a/applicationset/generators/scm_provider.go b/applicationset/generators/scm_provider.go new file mode 100644 index 0000000000000..42b7789be67f0 --- /dev/null +++ b/applicationset/generators/scm_provider.go @@ -0,0 +1,286 @@ +package generators + +import ( + "context" + "errors" + "fmt" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + log "github.com/sirupsen/logrus" + + "github.com/argoproj/argo-cd/v2/applicationset/services/github_app_auth" + "github.com/argoproj/argo-cd/v2/applicationset/services/scm_provider" + "github.com/argoproj/argo-cd/v2/applicationset/utils" + "github.com/argoproj/argo-cd/v2/common" + argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +var _ Generator = (*SCMProviderGenerator)(nil) + +const ( + DefaultSCMProviderRequeueAfterSeconds = 30 * time.Minute +) + +type SCMProviderGenerator struct { + client client.Client + // Testing hooks. 
+ overrideProvider scm_provider.SCMProviderService + SCMAuthProviders + scmRootCAPath string + allowedSCMProviders []string + enableSCMProviders bool +} + +type SCMAuthProviders struct { + GitHubApps github_app_auth.Credentials +} + +func NewSCMProviderGenerator(client client.Client, providers SCMAuthProviders, scmRootCAPath string, allowedSCMProviders []string, enableSCMProviders bool) Generator { + return &SCMProviderGenerator{ + client: client, + SCMAuthProviders: providers, + scmRootCAPath: scmRootCAPath, + allowedSCMProviders: allowedSCMProviders, + enableSCMProviders: enableSCMProviders, + } +} + +// Testing generator +func NewTestSCMProviderGenerator(overrideProvider scm_provider.SCMProviderService) Generator { + return &SCMProviderGenerator{overrideProvider: overrideProvider, enableSCMProviders: true} +} + +func (g *SCMProviderGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration { + // Return a requeue default of 30 minutes, if no default is specified. 
+ + if appSetGenerator.SCMProvider.RequeueAfterSeconds != nil { + return time.Duration(*appSetGenerator.SCMProvider.RequeueAfterSeconds) * time.Second + } + + return DefaultSCMProviderRequeueAfterSeconds +} + +func (g *SCMProviderGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate { + return &appSetGenerator.SCMProvider.Template +} + +var ErrSCMProvidersDisabled = errors.New("scm providers are disabled") + +type ErrDisallowedSCMProvider struct { + Provider string + Allowed []string +} + +func NewErrDisallowedSCMProvider(provider string, allowed []string) ErrDisallowedSCMProvider { + return ErrDisallowedSCMProvider{ + Provider: provider, + Allowed: allowed, + } +} + +func (e ErrDisallowedSCMProvider) Error() string { + return fmt.Sprintf("scm provider %q not allowed, must use one of the following: %s", e.Provider, strings.Join(e.Allowed, ", ")) +} + +func ScmProviderAllowed(applicationSetInfo *argoprojiov1alpha1.ApplicationSet, generator SCMGeneratorWithCustomApiUrl, allowedScmProviders []string) error { + url := generator.CustomApiUrl() + + if url == "" || len(allowedScmProviders) == 0 { + return nil + } + + for _, allowedScmProvider := range allowedScmProviders { + if url == allowedScmProvider { + return nil + } + } + + log.WithFields(log.Fields{ + common.SecurityField: common.SecurityMedium, + "applicationset": applicationSetInfo.Name, + "appSetNamespace": applicationSetInfo.Namespace, + }).Debugf("attempted to use disallowed SCM %q, must use one of the following: %s", url, strings.Join(allowedScmProviders, ", ")) + + return NewErrDisallowedSCMProvider(url, allowedScmProviders) +} + +func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, applicationSetInfo *argoprojiov1alpha1.ApplicationSet) ([]map[string]interface{}, error) { + if appSetGenerator == nil { + return nil, EmptyAppSetGeneratorError + } + + if appSetGenerator.SCMProvider == 
nil { + return nil, EmptyAppSetGeneratorError + } + + if !g.enableSCMProviders { + return nil, ErrSCMProvidersDisabled + } + + // Create the SCM provider helper. + providerConfig := appSetGenerator.SCMProvider + + if err := ScmProviderAllowed(applicationSetInfo, providerConfig, g.allowedSCMProviders); err != nil { + return nil, fmt.Errorf("scm provider not allowed: %w", err) + } + + ctx := context.Background() + var provider scm_provider.SCMProviderService + if g.overrideProvider != nil { + provider = g.overrideProvider + } else if providerConfig.Github != nil { + var err error + provider, err = g.githubProvider(ctx, providerConfig.Github, applicationSetInfo) + if err != nil { + return nil, fmt.Errorf("scm provider: %w", err) + } + } else if providerConfig.Gitlab != nil { + token, err := g.getSecretRef(ctx, providerConfig.Gitlab.TokenRef, applicationSetInfo.Namespace) + if err != nil { + return nil, fmt.Errorf("error fetching Gitlab token: %v", err) + } + provider, err = scm_provider.NewGitlabProvider(ctx, providerConfig.Gitlab.Group, token, providerConfig.Gitlab.API, providerConfig.Gitlab.AllBranches, providerConfig.Gitlab.IncludeSubgroups, providerConfig.Gitlab.WillIncludeSharedProjects(), providerConfig.Gitlab.Insecure, g.scmRootCAPath, providerConfig.Gitlab.Topic) + if err != nil { + return nil, fmt.Errorf("error initializing Gitlab service: %v", err) + } + } else if providerConfig.Gitea != nil { + token, err := g.getSecretRef(ctx, providerConfig.Gitea.TokenRef, applicationSetInfo.Namespace) + if err != nil { + return nil, fmt.Errorf("error fetching Gitea token: %v", err) + } + provider, err = scm_provider.NewGiteaProvider(ctx, providerConfig.Gitea.Owner, token, providerConfig.Gitea.API, providerConfig.Gitea.AllBranches, providerConfig.Gitea.Insecure) + if err != nil { + return nil, fmt.Errorf("error initializing Gitea service: %v", err) + } + } else if providerConfig.BitbucketServer != nil { + providerConfig := providerConfig.BitbucketServer + var scmError 
error + if providerConfig.BasicAuth != nil { + password, err := g.getSecretRef(ctx, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace) + if err != nil { + return nil, fmt.Errorf("error fetching Secret token: %v", err) + } + provider, scmError = scm_provider.NewBitbucketServerProviderBasicAuth(ctx, providerConfig.BasicAuth.Username, password, providerConfig.API, providerConfig.Project, providerConfig.AllBranches) + } else { + provider, scmError = scm_provider.NewBitbucketServerProviderNoAuth(ctx, providerConfig.API, providerConfig.Project, providerConfig.AllBranches) + } + if scmError != nil { + return nil, fmt.Errorf("error initializing Bitbucket Server service: %v", scmError) + } + } else if providerConfig.AzureDevOps != nil { + token, err := g.getSecretRef(ctx, providerConfig.AzureDevOps.AccessTokenRef, applicationSetInfo.Namespace) + if err != nil { + return nil, fmt.Errorf("error fetching Azure Devops access token: %v", err) + } + provider, err = scm_provider.NewAzureDevOpsProvider(ctx, token, providerConfig.AzureDevOps.Organization, providerConfig.AzureDevOps.API, providerConfig.AzureDevOps.TeamProject, providerConfig.AzureDevOps.AllBranches) + if err != nil { + return nil, fmt.Errorf("error initializing Azure Devops service: %v", err) + } + } else if providerConfig.Bitbucket != nil { + appPassword, err := g.getSecretRef(ctx, providerConfig.Bitbucket.AppPasswordRef, applicationSetInfo.Namespace) + if err != nil { + return nil, fmt.Errorf("error fetching Bitbucket cloud appPassword: %v", err) + } + provider, err = scm_provider.NewBitBucketCloudProvider(ctx, providerConfig.Bitbucket.Owner, providerConfig.Bitbucket.User, appPassword, providerConfig.Bitbucket.AllBranches) + if err != nil { + return nil, fmt.Errorf("error initializing Bitbucket cloud service: %v", err) + } + } else if providerConfig.AWSCodeCommit != nil { + var awsErr error + provider, awsErr = scm_provider.NewAWSCodeCommitProvider(ctx, providerConfig.AWSCodeCommit.TagFilters, 
providerConfig.AWSCodeCommit.Role, providerConfig.AWSCodeCommit.Region, providerConfig.AWSCodeCommit.AllBranches) + if awsErr != nil { + return nil, fmt.Errorf("error initializing AWS codecommit service: %v", awsErr) + } + } else { + return nil, fmt.Errorf("no SCM provider implementation configured") + } + + // Find all the available repos. + repos, err := scm_provider.ListRepos(ctx, provider, providerConfig.Filters, providerConfig.CloneProtocol) + if err != nil { + return nil, fmt.Errorf("error listing repos: %v", err) + } + paramsArray := make([]map[string]interface{}, 0, len(repos)) + var shortSHALength int + var shortSHALength7 int + for _, repo := range repos { + shortSHALength = 8 + if len(repo.SHA) < 8 { + shortSHALength = len(repo.SHA) + } + + shortSHALength7 = 7 + if len(repo.SHA) < 7 { + shortSHALength7 = len(repo.SHA) + } + + params := map[string]interface{}{ + "organization": repo.Organization, + "repository": repo.Repository, + "url": repo.URL, + "branch": repo.Branch, + "sha": repo.SHA, + "short_sha": repo.SHA[:shortSHALength], + "short_sha_7": repo.SHA[:shortSHALength7], + "labels": strings.Join(repo.Labels, ","), + "branchNormalized": utils.SanitizeName(repo.Branch), + } + + err := appendTemplatedValues(appSetGenerator.SCMProvider.Values, params, applicationSetInfo.Spec.GoTemplate, applicationSetInfo.Spec.GoTemplateOptions) + if err != nil { + return nil, fmt.Errorf("failed to append templated values: %w", err) + } + + paramsArray = append(paramsArray, params) + } + return paramsArray, nil +} + +func (g *SCMProviderGenerator) getSecretRef(ctx context.Context, ref *argoprojiov1alpha1.SecretRef, namespace string) (string, error) { + if ref == nil { + return "", nil + } + + secret := &corev1.Secret{} + err := g.client.Get( + ctx, + client.ObjectKey{ + Name: ref.SecretName, + Namespace: namespace, + }, + secret) + if err != nil { + return "", fmt.Errorf("error fetching secret %s/%s: %v", namespace, ref.SecretName, err) + } + tokenBytes, ok := 
secret.Data[ref.Key] + if !ok { + return "", fmt.Errorf("key %q in secret %s/%s not found", ref.Key, namespace, ref.SecretName) + } + return string(tokenBytes), nil +} + +func (g *SCMProviderGenerator) githubProvider(ctx context.Context, github *argoprojiov1alpha1.SCMProviderGeneratorGithub, applicationSetInfo *argoprojiov1alpha1.ApplicationSet) (scm_provider.SCMProviderService, error) { + if github.AppSecretName != "" { + auth, err := g.GitHubApps.GetAuthSecret(ctx, github.AppSecretName) + if err != nil { + return nil, fmt.Errorf("error fetching Github app secret: %v", err) + } + + return scm_provider.NewGithubAppProviderFor( + *auth, + github.Organization, + github.API, + github.AllBranches, + ) + } + + token, err := g.getSecretRef(ctx, github.TokenRef, applicationSetInfo.Namespace) + if err != nil { + return nil, fmt.Errorf("error fetching Github token: %v", err) + } + return scm_provider.NewGithubProvider(ctx, github.Organization, token, github.API, github.AllBranches) +} diff --git a/applicationset/generators/scm_provider_test.go b/applicationset/generators/scm_provider_test.go new file mode 100644 index 0000000000000..c438aa8f646fe --- /dev/null +++ b/applicationset/generators/scm_provider_test.go @@ -0,0 +1,313 @@ +package generators + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/argoproj/argo-cd/v2/applicationset/services/scm_provider" + argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +func TestSCMProviderGetSecretRef(t *testing.T) { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "test-secret", Namespace: "test"}, + Data: map[string][]byte{ + "my-token": []byte("secret"), + }, + } + gen := &SCMProviderGenerator{client: fake.NewClientBuilder().WithObjects(secret).Build()} + ctx := context.Background() + + cases := []struct { + 
name, namespace, token string + ref *argoprojiov1alpha1.SecretRef + hasError bool + }{ + { + name: "valid ref", + ref: &argoprojiov1alpha1.SecretRef{SecretName: "test-secret", Key: "my-token"}, + namespace: "test", + token: "secret", + hasError: false, + }, + { + name: "nil ref", + ref: nil, + namespace: "test", + token: "", + hasError: false, + }, + { + name: "wrong name", + ref: &argoprojiov1alpha1.SecretRef{SecretName: "other", Key: "my-token"}, + namespace: "test", + token: "", + hasError: true, + }, + { + name: "wrong key", + ref: &argoprojiov1alpha1.SecretRef{SecretName: "test-secret", Key: "other-token"}, + namespace: "test", + token: "", + hasError: true, + }, + { + name: "wrong namespace", + ref: &argoprojiov1alpha1.SecretRef{SecretName: "test-secret", Key: "my-token"}, + namespace: "other", + token: "", + hasError: true, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + token, err := gen.getSecretRef(ctx, c.ref, c.namespace) + if c.hasError { + assert.NotNil(t, err) + } else { + assert.Nil(t, err) + } + assert.Equal(t, c.token, token) + + }) + } +} + +func TestSCMProviderGenerateParams(t *testing.T) { + cases := []struct { + name string + repos []*scm_provider.Repository + values map[string]string + expected []map[string]interface{} + expectedError error + }{ + { + name: "Multiple repos with labels", + repos: []*scm_provider.Repository{ + { + Organization: "myorg", + Repository: "repo1", + URL: "git@github.com:myorg/repo1.git", + Branch: "main", + SHA: "0bc57212c3cbbec69d20b34c507284bd300def5b", + Labels: []string{"prod", "staging"}, + }, + { + Organization: "myorg", + Repository: "repo2", + URL: "git@github.com:myorg/repo2.git", + Branch: "main", + SHA: "59d0", + }, + }, + expected: []map[string]interface{}{ + { + "organization": "myorg", + "repository": "repo1", + "url": "git@github.com:myorg/repo1.git", + "branch": "main", + "branchNormalized": "main", + "sha": "0bc57212c3cbbec69d20b34c507284bd300def5b", + "short_sha": 
"0bc57212", + "short_sha_7": "0bc5721", + "labels": "prod,staging", + }, + { + "organization": "myorg", + "repository": "repo2", + "url": "git@github.com:myorg/repo2.git", + "branch": "main", + "branchNormalized": "main", + "sha": "59d0", + "short_sha": "59d0", + "short_sha_7": "59d0", + "labels": "", + }, + }, + }, + { + name: "Value interpolation", + repos: []*scm_provider.Repository{ + { + Organization: "myorg", + Repository: "repo3", + URL: "git@github.com:myorg/repo3.git", + Branch: "main", + SHA: "0bc57212c3cbbec69d20b34c507284bd300def5b", + Labels: []string{"prod", "staging"}, + }, + }, + values: map[string]string{ + "foo": "bar", + "should_i_force_push_to": "{{ branch }}?", + }, + expected: []map[string]interface{}{ + { + "organization": "myorg", + "repository": "repo3", + "url": "git@github.com:myorg/repo3.git", + "branch": "main", + "branchNormalized": "main", + "sha": "0bc57212c3cbbec69d20b34c507284bd300def5b", + "short_sha": "0bc57212", + "short_sha_7": "0bc5721", + "labels": "prod,staging", + "values.foo": "bar", + "values.should_i_force_push_to": "main?", + }, + }, + }, + } + + for _, testCase := range cases { + testCaseCopy := testCase + + t.Run(testCaseCopy.name, func(t *testing.T) { + t.Parallel() + + mockProvider := &scm_provider.MockProvider{ + Repos: testCaseCopy.repos, + } + scmGenerator := &SCMProviderGenerator{overrideProvider: mockProvider, enableSCMProviders: true} + applicationSetInfo := argoprojiov1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "set", + }, + Spec: argoprojiov1alpha1.ApplicationSetSpec{ + Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{ + SCMProvider: &argoprojiov1alpha1.SCMProviderGenerator{ + Values: testCaseCopy.values, + }, + }}, + }, + } + + got, err := scmGenerator.GenerateParams(&applicationSetInfo.Spec.Generators[0], &applicationSetInfo) + + if testCaseCopy.expectedError != nil { + assert.EqualError(t, err, testCaseCopy.expectedError.Error()) + } else { + assert.NoError(t, err) + 
assert.Equal(t, testCaseCopy.expected, got) + } + + }) + } +} + +func TestAllowedSCMProvider(t *testing.T) { + cases := []struct { + name string + providerConfig *argoprojiov1alpha1.SCMProviderGenerator + expectedError error + }{ + { + name: "Error Github", + providerConfig: &argoprojiov1alpha1.SCMProviderGenerator{ + Github: &argoprojiov1alpha1.SCMProviderGeneratorGithub{ + API: "https://myservice.mynamespace.svc.cluster.local", + }, + }, + expectedError: &ErrDisallowedSCMProvider{}, + }, + { + name: "Error Gitlab", + providerConfig: &argoprojiov1alpha1.SCMProviderGenerator{ + Gitlab: &argoprojiov1alpha1.SCMProviderGeneratorGitlab{ + API: "https://myservice.mynamespace.svc.cluster.local", + }, + }, + expectedError: &ErrDisallowedSCMProvider{}, + }, + { + name: "Error Gitea", + providerConfig: &argoprojiov1alpha1.SCMProviderGenerator{ + Gitea: &argoprojiov1alpha1.SCMProviderGeneratorGitea{ + API: "https://myservice.mynamespace.svc.cluster.local", + }, + }, + expectedError: &ErrDisallowedSCMProvider{}, + }, + { + name: "Error Bitbucket", + providerConfig: &argoprojiov1alpha1.SCMProviderGenerator{ + BitbucketServer: &argoprojiov1alpha1.SCMProviderGeneratorBitbucketServer{ + API: "https://myservice.mynamespace.svc.cluster.local", + }, + }, + expectedError: &ErrDisallowedSCMProvider{}, + }, + { + name: "Error AzureDevops", + providerConfig: &argoprojiov1alpha1.SCMProviderGenerator{ + AzureDevOps: &argoprojiov1alpha1.SCMProviderGeneratorAzureDevOps{ + API: "https://myservice.mynamespace.svc.cluster.local", + }, + }, + expectedError: &ErrDisallowedSCMProvider{}, + }, + } + + for _, testCase := range cases { + testCaseCopy := testCase + + t.Run(testCaseCopy.name, func(t *testing.T) { + t.Parallel() + + scmGenerator := &SCMProviderGenerator{ + allowedSCMProviders: []string{ + "github.myorg.com", + "gitlab.myorg.com", + "gitea.myorg.com", + "bitbucket.myorg.com", + "azuredevops.myorg.com", + }, + enableSCMProviders: true, + } + + applicationSetInfo := 
argoprojiov1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "set", + }, + Spec: argoprojiov1alpha1.ApplicationSetSpec{ + Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{ + SCMProvider: testCaseCopy.providerConfig, + }}, + }, + } + + _, err := scmGenerator.GenerateParams(&applicationSetInfo.Spec.Generators[0], &applicationSetInfo) + + assert.Error(t, err, "Must return an error") + assert.ErrorAs(t, err, testCaseCopy.expectedError) + }) + } +} + +func TestSCMProviderDisabled_SCMGenerator(t *testing.T) { + generator := &SCMProviderGenerator{enableSCMProviders: false} + + applicationSetInfo := argoprojiov1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "set", + }, + Spec: argoprojiov1alpha1.ApplicationSetSpec{ + Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{ + SCMProvider: &argoprojiov1alpha1.SCMProviderGenerator{ + Github: &argoprojiov1alpha1.SCMProviderGeneratorGithub{ + API: "https://myservice.mynamespace.svc.cluster.local", + }, + }, + }}, + }, + } + + _, err := generator.GenerateParams(&applicationSetInfo.Spec.Generators[0], &applicationSetInfo) + assert.ErrorIs(t, err, ErrSCMProvidersDisabled) +} diff --git a/applicationset/generators/scm_utils.go b/applicationset/generators/scm_utils.go new file mode 100644 index 0000000000000..51ac99d9b7e49 --- /dev/null +++ b/applicationset/generators/scm_utils.go @@ -0,0 +1,5 @@ +package generators + +type SCMGeneratorWithCustomApiUrl interface { + CustomApiUrl() string +} diff --git a/applicationset/generators/value_interpolation.go b/applicationset/generators/value_interpolation.go new file mode 100644 index 0000000000000..05a078d42f782 --- /dev/null +++ b/applicationset/generators/value_interpolation.go @@ -0,0 +1,43 @@ +package generators + +import ( + "fmt" +) + +func appendTemplatedValues(values map[string]string, params map[string]interface{}, useGoTemplate bool, goTemplateOptions []string) error { + // We create a local map to ensure that we do not fall victim to a 
billion-laughs attack. We iterate through the + // cluster values map and only replace values in said map if it has already been allowlisted in the params map. + // Once we iterate through all the cluster values we can then safely merge the `tmp` map into the main params map. + tmp := map[string]interface{}{} + + for key, value := range values { + result, err := replaceTemplatedString(value, params, useGoTemplate, goTemplateOptions) + + if err != nil { + return fmt.Errorf("failed to replace templated string: %w", err) + } + + if useGoTemplate { + if tmp["values"] == nil { + tmp["values"] = map[string]string{} + } + tmp["values"].(map[string]string)[key] = result + } else { + tmp[fmt.Sprintf("values.%s", key)] = result + } + } + + for key, value := range tmp { + params[key] = value + } + + return nil +} + +func replaceTemplatedString(value string, params map[string]interface{}, useGoTemplate bool, goTemplateOptions []string) (string, error) { + replacedTmplStr, err := render.Replace(value, params, useGoTemplate, goTemplateOptions) + if err != nil { + return "", fmt.Errorf("failed to replace templated string with rendered values: %w", err) + } + return replacedTmplStr, nil +} diff --git a/applicationset/generators/value_interpolation_test.go b/applicationset/generators/value_interpolation_test.go new file mode 100644 index 0000000000000..8aa57dc0c0e65 --- /dev/null +++ b/applicationset/generators/value_interpolation_test.go @@ -0,0 +1,125 @@ +package generators + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestValueInterpolation(t *testing.T) { + testCases := []struct { + name string + values map[string]string + params map[string]interface{} + expected map[string]interface{} + }{ + { + name: "Simple interpolation", + values: map[string]string{ + "hello": "{{ world }}", + }, + params: map[string]interface{}{ + "world": "world!", + }, + expected: map[string]interface{}{ + "world": "world!", + "values.hello": "world!", + }, + }, + { + name: 
"Non-existent", + values: map[string]string{ + "non-existent": "{{ non-existent }}", + }, + params: map[string]interface{}{}, + expected: map[string]interface{}{ + "values.non-existent": "{{ non-existent }}", + }, + }, + { + name: "Billion laughs", + values: map[string]string{ + "lol1": "lol", + "lol2": "{{values.lol1}}{{values.lol1}}", + "lol3": "{{values.lol2}}{{values.lol2}}{{values.lol2}}", + }, + params: map[string]interface{}{}, + expected: map[string]interface{}{ + "values.lol1": "lol", + "values.lol2": "{{values.lol1}}{{values.lol1}}", + "values.lol3": "{{values.lol2}}{{values.lol2}}{{values.lol2}}", + }, + }, + } + + for _, testCase := range testCases { + + t.Run(testCase.name, func(t *testing.T) { + err := appendTemplatedValues(testCase.values, testCase.params, false, nil) + assert.NoError(t, err) + assert.EqualValues(t, testCase.expected, testCase.params) + }) + } +} + +func TestValueInterpolationWithGoTemplating(t *testing.T) { + testCases := []struct { + name string + values map[string]string + params map[string]interface{} + expected map[string]interface{} + }{ + { + name: "Simple interpolation", + values: map[string]string{ + "hello": "{{ .world }}", + }, + params: map[string]interface{}{ + "world": "world!", + }, + expected: map[string]interface{}{ + "world": "world!", + "values": map[string]string{ + "hello": "world!", + }, + }, + }, + { + name: "Non-existent to default", + values: map[string]string{ + "non_existent": "{{ default \"bar\" .non_existent }}", + }, + params: map[string]interface{}{}, + expected: map[string]interface{}{ + "values": map[string]string{ + "non_existent": "bar", + }, + }, + }, + { + name: "Billion laughs", + values: map[string]string{ + "lol1": "lol", + "lol2": "{{.values.lol1}}{{.values.lol1}}", + "lol3": "{{.values.lol2}}{{.values.lol2}}{{.values.lol2}}", + }, + params: map[string]interface{}{}, + expected: map[string]interface{}{ + "values": map[string]string{ + "lol1": "lol", + "lol2": "", + "lol3": "", + }, + }, + }, + 
} + + for _, testCase := range testCases { + + t.Run(testCase.name, func(t *testing.T) { + err := appendTemplatedValues(testCase.values, testCase.params, true, nil) + assert.NoError(t, err) + assert.EqualValues(t, testCase.expected, testCase.params) + }) + } +} diff --git a/applicationset/services/github_app_auth/auth.go b/applicationset/services/github_app_auth/auth.go new file mode 100644 index 0000000000000..04676c61ec1ba --- /dev/null +++ b/applicationset/services/github_app_auth/auth.go @@ -0,0 +1,19 @@ +package github_app_auth + +import "context" + +// Authentication has the authentication information required to access the GitHub API and repositories. +type Authentication struct { + // Id specifies the ID of the GitHub app used to access the repo + Id int64 + // InstallationId specifies the installation ID of the GitHub App used to access the repo + InstallationId int64 + // EnterpriseBaseURL specifies the base URL of GitHub Enterprise installation. If empty will default to https://api.github.com + EnterpriseBaseURL string + // PrivateKey in PEM format. + PrivateKey string +} + +type Credentials interface { + GetAuthSecret(ctx context.Context, secretName string) (*Authentication, error) +} diff --git a/applicationset/services/internal/github_app/client.go b/applicationset/services/internal/github_app/client.go new file mode 100644 index 0000000000000..bad6e828aa5c6 --- /dev/null +++ b/applicationset/services/internal/github_app/client.go @@ -0,0 +1,35 @@ +package github_app + +import ( + "fmt" + "net/http" + + "github.com/bradleyfalzon/ghinstallation/v2" + "github.com/google/go-github/v35/github" + + "github.com/argoproj/argo-cd/v2/applicationset/services/github_app_auth" +) + +// Client builds a github client for the given app authentication. 
+func Client(g github_app_auth.Authentication, url string) (*github.Client, error) { + rt, err := ghinstallation.New(http.DefaultTransport, g.Id, g.InstallationId, []byte(g.PrivateKey)) + if err != nil { + return nil, fmt.Errorf("failed to create github app install: %w", err) + } + if url == "" { + url = g.EnterpriseBaseURL + } + var client *github.Client + if url == "" { + httpClient := http.Client{Transport: rt} + client = github.NewClient(&httpClient) + } else { + rt.BaseURL = url + httpClient := http.Client{Transport: rt} + client, err = github.NewEnterpriseClient(url, url, &httpClient) + if err != nil { + return nil, fmt.Errorf("failed to create github enterprise client: %w", err) + } + } + return client, nil +} diff --git a/applicationset/services/internal/http/client.go b/applicationset/services/internal/http/client.go new file mode 100644 index 0000000000000..00bcf32f3204f --- /dev/null +++ b/applicationset/services/internal/http/client.go @@ -0,0 +1,161 @@ +package http + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + "time" +) + +const ( + userAgent = "argocd-applicationset" + defaultTimeout = 30 +) + +type Client struct { + // URL is the URL used for API requests. + baseURL string + + // UserAgent is the user agent to include in HTTP requests. + UserAgent string + + // Token is used to make authenticated API calls. + token string + + // Client is an HTTP client used to communicate with the API. + client *http.Client +} + +type ErrorResponse struct { + Body []byte + Response *http.Response + Message string +} + +func NewClient(baseURL string, options ...ClientOptionFunc) (*Client, error) { + client, err := newClient(baseURL, options...) + if err != nil { + return nil, err + } + return client, nil +} + +func newClient(baseURL string, options ...ClientOptionFunc) (*Client, error) { + c := &Client{baseURL: baseURL, UserAgent: userAgent} + + // Configure the HTTP client. 
+ c.client = &http.Client{ + Timeout: time.Duration(defaultTimeout) * time.Second, + } + + // Apply any given client options. + for _, fn := range options { + if fn == nil { + continue + } + if err := fn(c); err != nil { + return nil, err + } + } + + return c, nil +} + +func (c *Client) NewRequest(method, path string, body interface{}, options []ClientOptionFunc) (*http.Request, error) { + + // Make sure the given URL end with a slash + if !strings.HasSuffix(c.baseURL, "/") { + c.baseURL += "/" + } + + var buf io.ReadWriter + if body != nil { + buf = &bytes.Buffer{} + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(body) + if err != nil { + return nil, err + } + } + + req, err := http.NewRequest(method, c.baseURL+path, buf) + if err != nil { + return nil, err + } + + if body != nil { + req.Header.Set("Content-Type", "application/json") + } + + if len(c.token) != 0 { + req.Header.Set("Authorization", "Bearer "+c.token) + } + + if c.UserAgent != "" { + req.Header.Set("User-Agent", c.UserAgent) + } + + return req, nil +} + +func (c *Client) Do(ctx context.Context, req *http.Request, v interface{}) (*http.Response, error) { + resp, err := c.client.Do(req) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + if err := CheckResponse(resp); err != nil { + return resp, err + } + + switch v := v.(type) { + case nil: + case io.Writer: + _, err = io.Copy(v, resp.Body) + default: + buf := new(bytes.Buffer) + teeReader := io.TeeReader(resp.Body, buf) + decErr := json.NewDecoder(teeReader).Decode(v) + if decErr == io.EOF { + decErr = nil // ignore EOF errors caused by empty response body + } + if decErr != nil { + err = fmt.Errorf("%s: %s", decErr.Error(), buf.String()) + } + } + return resp, err +} + +// CheckResponse checks the API response for errors, and returns them if present. 
+func CheckResponse(resp *http.Response) error { + + if c := resp.StatusCode; 200 <= c && c <= 299 { + return nil + } + + data, err := io.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("API error with status code %d: %v", resp.StatusCode, err) + } + + var raw map[string]interface{} + if err := json.Unmarshal(data, &raw); err != nil { + return fmt.Errorf("API error with status code %d: %s", resp.StatusCode, string(data)) + } + + message := "" + if value, ok := raw["message"].(string); ok { + message = value + } else if value, ok := raw["error"].(string); ok { + message = value + } + + return fmt.Errorf("API error with status code %d: %s", resp.StatusCode, message) +} diff --git a/applicationset/services/internal/http/client_options.go b/applicationset/services/internal/http/client_options.go new file mode 100644 index 0000000000000..ec388c9a80605 --- /dev/null +++ b/applicationset/services/internal/http/client_options.go @@ -0,0 +1,22 @@ +package http + +import "time" + +// ClientOptionFunc can be used to customize a new Restful API client. +type ClientOptionFunc func(*Client) error + +// WithToken is an option for NewClient to set token +func WithToken(token string) ClientOptionFunc { + return func(c *Client) error { + c.token = token + return nil + } +} + +// WithTimeout can be used to configure a custom timeout for requests. 
+func WithTimeout(timeout int) ClientOptionFunc { + return func(c *Client) error { + c.client.Timeout = time.Duration(timeout) * time.Second + return nil + } +} diff --git a/applicationset/services/internal/http/client_test.go b/applicationset/services/internal/http/client_test.go new file mode 100644 index 0000000000000..ca2c916177fee --- /dev/null +++ b/applicationset/services/internal/http/client_test.go @@ -0,0 +1,163 @@ +package http + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestClient(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, err := w.Write([]byte("Hello, World!")) + if err != nil { + assert.NoError(t, fmt.Errorf("Error Write %v", err)) + } + })) + defer server.Close() + + var clientOptionFns []ClientOptionFunc + _, err := NewClient(server.URL, clientOptionFns...) + + if err != nil { + t.Fatalf("Failed to create client: %v", err) + } +} + +func TestClientDo(t *testing.T) { + ctx := context.Background() + + for _, c := range []struct { + name string + params map[string]string + content []byte + fakeServer *httptest.Server + clientOptionFns []ClientOptionFunc + expected []map[string]interface{} + expectedCode int + expectedError error + }{ + { + name: "Simple", + params: map[string]string{ + "pkey1": "val1", + "pkey2": "val2", + }, + fakeServer: httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, err := w.Write([]byte(`[{ + "key1": "val1", + "key2": { + "key2_1": "val2_1", + "key2_2": { + "key2_2_1": "val2_2_1" + } + }, + "key3": 123 + }]`)) + if err != nil { + assert.NoError(t, fmt.Errorf("Error Write %v", err)) + } + })), + clientOptionFns: nil, + expected: []map[string]interface{}{ + { + "key1": "val1", + "key2": map[string]interface{}{ + "key2_1": "val2_1", + "key2_2": 
map[string]interface{}{ + "key2_2_1": "val2_2_1", + }, + }, + "key3": float64(123), + }, + }, + expectedCode: 200, + expectedError: nil, + }, + { + name: "With Token", + params: map[string]string{ + "pkey1": "val1", + "pkey2": "val2", + }, + fakeServer: httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + authHeader := r.Header.Get("Authorization") + if authHeader != "Bearer "+string("test-token") { + w.WriteHeader(http.StatusUnauthorized) + return + } + w.WriteHeader(http.StatusOK) + _, err := w.Write([]byte(`[{ + "key1": "val1", + "key2": { + "key2_1": "val2_1", + "key2_2": { + "key2_2_1": "val2_2_1" + } + }, + "key3": 123 + }]`)) + if err != nil { + assert.NoError(t, fmt.Errorf("Error Write %v", err)) + } + })), + clientOptionFns: nil, + expected: []map[string]interface{}(nil), + expectedCode: 401, + expectedError: fmt.Errorf("API error with status code 401: "), + }, + } { + cc := c + t.Run(cc.name, func(t *testing.T) { + defer cc.fakeServer.Close() + + client, err := NewClient(cc.fakeServer.URL, cc.clientOptionFns...) 
+ + if err != nil { + t.Fatalf("NewClient returned unexpected error: %v", err) + } + + req, err := client.NewRequest("POST", "", cc.params, nil) + + if err != nil { + t.Fatalf("NewRequest returned unexpected error: %v", err) + } + + var data []map[string]interface{} + + resp, err := client.Do(ctx, req, &data) + + if cc.expectedError != nil { + assert.EqualError(t, err, cc.expectedError.Error()) + } else { + assert.Equal(t, resp.StatusCode, cc.expectedCode) + assert.Equal(t, data, cc.expected) + assert.NoError(t, err) + } + }) + } +} + +func TestCheckResponse(t *testing.T) { + resp := &http.Response{ + StatusCode: http.StatusBadRequest, + Body: io.NopCloser(bytes.NewBufferString(`{"error":"invalid_request","description":"Invalid token"}`)), + } + + err := CheckResponse(resp) + if err == nil { + t.Error("Expected an error, got nil") + } + + expected := "API error with status code 400: invalid_request" + if err.Error() != expected { + t.Errorf("Expected error '%s', got '%s'", expected, err.Error()) + } +} diff --git a/applicationset/services/mocks/Repos.go b/applicationset/services/mocks/Repos.go new file mode 100644 index 0000000000000..776b104cae284 --- /dev/null +++ b/applicationset/services/mocks/Repos.go @@ -0,0 +1,81 @@ +// Code generated by mockery v2.25.1. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// Repos is an autogenerated mock type for the Repos type +type Repos struct { + mock.Mock +} + +// GetDirectories provides a mock function with given fields: ctx, repoURL, revision +func (_m *Repos) GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error) { + ret := _m.Called(ctx, repoURL, revision) + + var r0 []string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) ([]string, error)); ok { + return rf(ctx, repoURL, revision) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string) []string); ok { + r0 = rf(ctx, repoURL, revision) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, repoURL, revision) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetFiles provides a mock function with given fields: ctx, repoURL, revision, pattern +func (_m *Repos) GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error) { + ret := _m.Called(ctx, repoURL, revision, pattern) + + var r0 map[string][]byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string) (map[string][]byte, error)); ok { + return rf(ctx, repoURL, revision, pattern) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, string) map[string][]byte); ok { + r0 = rf(ctx, repoURL, revision, pattern) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string][]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok { + r1 = rf(ctx, repoURL, revision, pattern) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type mockConstructorTestingTNewRepos interface { + mock.TestingT + Cleanup(func()) +} + +// NewRepos creates a new instance of Repos. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewRepos(t mockConstructorTestingTNewRepos) *Repos { + mock := &Repos{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/applicationset/services/mocks/RepositoryDB.go b/applicationset/services/mocks/RepositoryDB.go new file mode 100644 index 0000000000000..9d6240d342776 --- /dev/null +++ b/applicationset/services/mocks/RepositoryDB.go @@ -0,0 +1,57 @@ +// Code generated by mockery v2.21.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + + v1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +// RepositoryDB is an autogenerated mock type for the RepositoryDB type +type RepositoryDB struct { + mock.Mock +} + +// GetRepository provides a mock function with given fields: ctx, url +func (_m *RepositoryDB) GetRepository(ctx context.Context, url string) (*v1alpha1.Repository, error) { + ret := _m.Called(ctx, url) + + var r0 *v1alpha1.Repository + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*v1alpha1.Repository, error)); ok { + return rf(ctx, url) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *v1alpha1.Repository); ok { + r0 = rf(ctx, url) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*v1alpha1.Repository) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, url) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type mockConstructorTestingTNewRepositoryDB interface { + mock.TestingT + Cleanup(func()) +} + +// NewRepositoryDB creates a new instance of RepositoryDB. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewRepositoryDB(t mockConstructorTestingTNewRepositoryDB) *RepositoryDB { + mock := &RepositoryDB{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/applicationset/services/plugin/plugin_service.go b/applicationset/services/plugin/plugin_service.go new file mode 100644 index 0000000000000..95573e0942407 --- /dev/null +++ b/applicationset/services/plugin/plugin_service.go @@ -0,0 +1,73 @@ +package plugin + +import ( + "context" + "fmt" + "net/http" + + internalhttp "github.com/argoproj/argo-cd/v2/applicationset/services/internal/http" + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +// ServiceRequest is the request object sent to the plugin service. +type ServiceRequest struct { + // ApplicationSetName is the appSetName of the ApplicationSet for which we're requesting parameters. Useful for logging in + // the plugin service. + ApplicationSetName string `json:"applicationSetName"` + // Input is the map of parameters set in the ApplicationSet spec for this generator. + Input v1alpha1.PluginInput `json:"input"` +} + +type Output struct { + // Parameters is the list of parameter sets returned by the plugin. + Parameters []map[string]interface{} `json:"parameters"` +} + +// ServiceResponse is the response object returned by the plugin service. +type ServiceResponse struct { + // Output is the map of outputs returned by the plugin. 
+ Output Output `json:"output"` +} + +type Service struct { + client *internalhttp.Client + appSetName string +} + +func NewPluginService(ctx context.Context, appSetName string, baseURL string, token string, requestTimeout int) (*Service, error) { + var clientOptionFns []internalhttp.ClientOptionFunc + + clientOptionFns = append(clientOptionFns, internalhttp.WithToken(token)) + + if requestTimeout != 0 { + clientOptionFns = append(clientOptionFns, internalhttp.WithTimeout(requestTimeout)) + } + + client, err := internalhttp.NewClient(baseURL, clientOptionFns...) + if err != nil { + return nil, fmt.Errorf("error creating plugin client: %v", err) + } + + return &Service{ + client: client, + appSetName: appSetName, + }, nil +} + +func (p *Service) List(ctx context.Context, parameters v1alpha1.PluginParameters) (*ServiceResponse, error) { + req, err := p.client.NewRequest(http.MethodPost, "api/v1/getparams.execute", ServiceRequest{ApplicationSetName: p.appSetName, Input: v1alpha1.PluginInput{Parameters: parameters}}, nil) + + if err != nil { + return nil, fmt.Errorf("NewRequest returned unexpected error: %v", err) + } + + var data ServiceResponse + + _, err = p.client.Do(ctx, req, &data) + + if err != nil { + return nil, fmt.Errorf("error get api '%s': %v", p.appSetName, err) + } + + return &data, err +} diff --git a/applicationset/services/plugin/plugin_service_test.go b/applicationset/services/plugin/plugin_service_test.go new file mode 100644 index 0000000000000..6dc81d33df71f --- /dev/null +++ b/applicationset/services/plugin/plugin_service_test.go @@ -0,0 +1,52 @@ +package plugin + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPlugin(t *testing.T) { + expectedJSON := `{"parameters": 
[{"number":123,"digest":"sha256:942ae2dfd73088b54d7151a3c3fd5af038a51c50029bfcfd21f1e650d9579967"},{"number":456,"digest":"sha256:224e68cc69566e5cbbb76034b3c42cd2ed57c1a66720396e1c257794cb7d68c1"}]}` + token := "0bc57212c3cbbec69d20b34c507284bd300def5b" + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + authHeader := r.Header.Get("Authorization") + if authHeader != "Bearer "+token { + w.WriteHeader(http.StatusUnauthorized) + return + } + _, err := w.Write([]byte(expectedJSON)) + + if err != nil { + assert.NoError(t, fmt.Errorf("Error Write %v", err)) + } + }) + ts := httptest.NewServer(handler) + defer ts.Close() + + client, err := NewPluginService(context.Background(), "plugin-test", ts.URL, token, 0) + + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + data, err := client.List(context.Background(), nil) + + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + var expectedData ServiceResponse + err = json.Unmarshal([]byte(expectedJSON), &expectedData) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, &expectedData, data) +} diff --git a/applicationset/services/plugin/utils.go b/applicationset/services/plugin/utils.go new file mode 100644 index 0000000000000..26e38e492200d --- /dev/null +++ b/applicationset/services/plugin/utils.go @@ -0,0 +1,21 @@ +package plugin + +import ( + "fmt" + "strings" + + "github.com/argoproj/argo-cd/v2/common" +) + +// ParseSecretKey retrieves secret appSetName if different from common ArgoCDSecretName. 
+func ParseSecretKey(key string) (secretName string, tokenKey string) { + if strings.Contains(key, ":") { + parts := strings.Split(key, ":") + secretName = parts[0][1:] + tokenKey = fmt.Sprintf("$%s", parts[1]) + } else { + secretName = common.ArgoCDSecretName + tokenKey = key + } + return secretName, tokenKey +} diff --git a/applicationset/services/plugin/utils_test.go b/applicationset/services/plugin/utils_test.go new file mode 100644 index 0000000000000..c364d606392e4 --- /dev/null +++ b/applicationset/services/plugin/utils_test.go @@ -0,0 +1,17 @@ +package plugin + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseSecretKey(t *testing.T) { + secretName, tokenKey := ParseSecretKey("#my-secret:my-token") + assert.Equal(t, "my-secret", secretName) + assert.Equal(t, "$my-token", tokenKey) + + secretName, tokenKey = ParseSecretKey("#my-secret") + assert.Equal(t, "argocd-secret", secretName) + assert.Equal(t, "#my-secret", tokenKey) +} diff --git a/applicationset/services/pull_request/azure_devops.go b/applicationset/services/pull_request/azure_devops.go new file mode 100644 index 0000000000000..9090b829ca0c2 --- /dev/null +++ b/applicationset/services/pull_request/azure_devops.go @@ -0,0 +1,145 @@ +package pull_request + +import ( + "context" + "fmt" + "strings" + + "github.com/microsoft/azure-devops-go-api/azuredevops" + core "github.com/microsoft/azure-devops-go-api/azuredevops/core" + git "github.com/microsoft/azure-devops-go-api/azuredevops/git" +) + +const AZURE_DEVOPS_DEFAULT_URL = "https://dev.azure.com" + +type AzureDevOpsClientFactory interface { + // Returns an Azure Devops Client interface. 
+ GetClient(ctx context.Context) (git.Client, error) +} + +type devopsFactoryImpl struct { + connection *azuredevops.Connection +} + +func (factory *devopsFactoryImpl) GetClient(ctx context.Context) (git.Client, error) { + gitClient, err := git.NewClient(ctx, factory.connection) + if err != nil { + return nil, fmt.Errorf("failed to get new Azure DevOps git client for pull request generator: %w", err) + } + return gitClient, nil +} + +type AzureDevOpsService struct { + clientFactory AzureDevOpsClientFactory + project string + repo string + labels []string +} + +var _ PullRequestService = (*AzureDevOpsService)(nil) +var _ AzureDevOpsClientFactory = &devopsFactoryImpl{} + +func NewAzureDevOpsService(ctx context.Context, token, url, organization, project, repo string, labels []string) (PullRequestService, error) { + organizationUrl := buildURL(url, organization) + + var connection *azuredevops.Connection + if token == "" { + connection = azuredevops.NewAnonymousConnection(organizationUrl) + } else { + connection = azuredevops.NewPatConnection(organizationUrl, token) + } + + return &AzureDevOpsService{ + clientFactory: &devopsFactoryImpl{connection: connection}, + project: project, + repo: repo, + labels: labels, + }, nil +} + +func (a *AzureDevOpsService) List(ctx context.Context) ([]*PullRequest, error) { + client, err := a.clientFactory.GetClient(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get Azure DevOps client: %w", err) + } + + args := git.GetPullRequestsByProjectArgs{ + Project: &a.project, + SearchCriteria: &git.GitPullRequestSearchCriteria{}, + } + + azurePullRequests, err := client.GetPullRequestsByProject(ctx, args) + if err != nil { + return nil, fmt.Errorf("failed to get pull requests by project: %w", err) + } + + pullRequests := []*PullRequest{} + + for _, pr := range *azurePullRequests { + if pr.Repository == nil || + pr.Repository.Name == nil || + pr.PullRequestId == nil || + pr.SourceRefName == nil || + pr.LastMergeSourceCommit == nil || 
+ pr.LastMergeSourceCommit.CommitId == nil { + continue + } + + azureDevOpsLabels := convertLabels(pr.Labels) + if !containAzureDevOpsLabels(a.labels, azureDevOpsLabels) { + continue + } + + if *pr.Repository.Name == a.repo { + pullRequests = append(pullRequests, &PullRequest{ + Number: *pr.PullRequestId, + Branch: strings.Replace(*pr.SourceRefName, "refs/heads/", "", 1), + HeadSHA: *pr.LastMergeSourceCommit.CommitId, + Labels: azureDevOpsLabels, + }) + } + } + + return pullRequests, nil +} + +// convertLabels converts WebApiTagDefinitions to strings +func convertLabels(tags *[]core.WebApiTagDefinition) []string { + if tags == nil { + return []string{} + } + labelStrings := make([]string, len(*tags)) + for i, label := range *tags { + labelStrings[i] = *label.Name + } + return labelStrings +} + +// containAzureDevOpsLabels returns true if gotLabels contains expectedLabels +func containAzureDevOpsLabels(expectedLabels []string, gotLabels []string) bool { + for _, expected := range expectedLabels { + found := false + for _, got := range gotLabels { + if expected == got { + found = true + break + } + } + if !found { + return false + } + } + return true +} + +func buildURL(url, organization string) string { + if url == "" { + url = AZURE_DEVOPS_DEFAULT_URL + } + separator := "" + if !strings.HasSuffix(url, "/") { + separator = "/" + } + devOpsURL := fmt.Sprintf("%s%s%s", url, separator, organization) + return devOpsURL +} diff --git a/applicationset/services/pull_request/azure_devops_test.go b/applicationset/services/pull_request/azure_devops_test.go new file mode 100644 index 0000000000000..15ac1c8233d89 --- /dev/null +++ b/applicationset/services/pull_request/azure_devops_test.go @@ -0,0 +1,221 @@ +package pull_request + +import ( + "context" + "testing" + + "github.com/microsoft/azure-devops-go-api/azuredevops/core" + git "github.com/microsoft/azure-devops-go-api/azuredevops/git" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + 
azureMock "github.com/argoproj/argo-cd/v2/applicationset/services/scm_provider/azure_devops/git/mocks" +) + +func createBoolPtr(x bool) *bool { + return &x +} + +func createStringPtr(x string) *string { + return &x +} + +func createIntPtr(x int) *int { + return &x +} + +func createLabelsPtr(x []core.WebApiTagDefinition) *[]core.WebApiTagDefinition { + return &x +} + +type AzureClientFactoryMock struct { + mock *mock.Mock +} + +func (m *AzureClientFactoryMock) GetClient(ctx context.Context) (git.Client, error) { + args := m.mock.Called(ctx) + + var client git.Client + c := args.Get(0) + if c != nil { + client = c.(git.Client) + } + + var err error + if len(args) > 1 { + if e, ok := args.Get(1).(error); ok { + err = e + } + } + + return client, err +} + +func TestListPullRequest(t *testing.T) { + teamProject := "myorg_project" + repoName := "myorg_project_repo" + pr_id := 123 + pr_head_sha := "cd4973d9d14a08ffe6b641a89a68891d6aac8056" + ctx := context.Background() + + pullRequestMock := []git.GitPullRequest{ + { + PullRequestId: createIntPtr(pr_id), + SourceRefName: createStringPtr("refs/heads/feature-branch"), + LastMergeSourceCommit: &git.GitCommitRef{ + CommitId: createStringPtr(pr_head_sha), + }, + Labels: &[]core.WebApiTagDefinition{}, + Repository: &git.GitRepository{ + Name: createStringPtr(repoName), + }, + }, + } + + args := git.GetPullRequestsByProjectArgs{ + Project: &teamProject, + SearchCriteria: &git.GitPullRequestSearchCriteria{}, + } + + gitClientMock := azureMock.Client{} + clientFactoryMock := &AzureClientFactoryMock{mock: &mock.Mock{}} + clientFactoryMock.mock.On("GetClient", mock.Anything).Return(&gitClientMock, nil) + gitClientMock.On("GetPullRequestsByProject", ctx, args).Return(&pullRequestMock, nil) + + provider := AzureDevOpsService{ + clientFactory: clientFactoryMock, + project: teamProject, + repo: repoName, + labels: nil, + } + + list, err := provider.List(ctx) + assert.NoError(t, err) + assert.Equal(t, 1, len(list)) + assert.Equal(t, 
"feature-branch", list[0].Branch) + assert.Equal(t, pr_head_sha, list[0].HeadSHA) + assert.Equal(t, pr_id, list[0].Number) +} + +func TestConvertLabes(t *testing.T) { + testCases := []struct { + name string + gotLabels *[]core.WebApiTagDefinition + expectedLabels []string + }{ + { + name: "empty labels", + gotLabels: createLabelsPtr([]core.WebApiTagDefinition{}), + expectedLabels: []string{}, + }, + { + name: "nil labels", + gotLabels: createLabelsPtr(nil), + expectedLabels: []string{}, + }, + { + name: "one label", + gotLabels: createLabelsPtr([]core.WebApiTagDefinition{ + {Name: createStringPtr("label1"), Active: createBoolPtr(true)}, + }), + expectedLabels: []string{"label1"}, + }, + { + name: "two label", + gotLabels: createLabelsPtr([]core.WebApiTagDefinition{ + {Name: createStringPtr("label1"), Active: createBoolPtr(true)}, + {Name: createStringPtr("label2"), Active: createBoolPtr(true)}, + }), + expectedLabels: []string{"label1", "label2"}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got := convertLabels(tc.gotLabels) + assert.Equal(t, tc.expectedLabels, got) + }) + } +} + +func TestContainAzureDevOpsLabels(t *testing.T) { + testCases := []struct { + name string + expectedLabels []string + gotLabels []string + expectedResult bool + }{ + { + name: "empty labels", + expectedLabels: []string{}, + gotLabels: []string{}, + expectedResult: true, + }, + { + name: "no matching labels", + expectedLabels: []string{"label1", "label2"}, + gotLabels: []string{"label3", "label4"}, + expectedResult: false, + }, + { + name: "some matching labels", + expectedLabels: []string{"label1", "label2"}, + gotLabels: []string{"label1", "label3"}, + expectedResult: false, + }, + { + name: "all matching labels", + expectedLabels: []string{"label1", "label2"}, + gotLabels: []string{"label1", "label2"}, + expectedResult: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got := 
containAzureDevOpsLabels(tc.expectedLabels, tc.gotLabels) + assert.Equal(t, tc.expectedResult, got) + }) + } +} + +func TestBuildURL(t *testing.T) { + testCases := []struct { + name string + url string + organization string + expected string + }{ + { + name: "Provided default URL and organization", + url: "https://dev.azure.com/", + organization: "myorganization", + expected: "https://dev.azure.com/myorganization", + }, + { + name: "Provided default URL and organization without trailing slash", + url: "https://dev.azure.com", + organization: "myorganization", + expected: "https://dev.azure.com/myorganization", + }, + { + name: "Provided no URL and organization", + url: "", + organization: "myorganization", + expected: "https://dev.azure.com/myorganization", + }, + { + name: "Provided custom URL and organization", + url: "https://azuredevops.mycompany.com/", + organization: "myorganization", + expected: "https://azuredevops.mycompany.com/myorganization", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := buildURL(tc.url, tc.organization) + assert.Equal(t, result, tc.expected) + }) + } +} diff --git a/applicationset/services/pull_request/bitbucket_cloud.go b/applicationset/services/pull_request/bitbucket_cloud.go new file mode 100644 index 0000000000000..5d5f8208f9b06 --- /dev/null +++ b/applicationset/services/pull_request/bitbucket_cloud.go @@ -0,0 +1,138 @@ +package pull_request + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/ktrysmt/go-bitbucket" +) + +type BitbucketCloudService struct { + client *bitbucket.Client + owner string + repositorySlug string +} + +type BitbucketCloudPullRequest struct { + ID int `json:"id"` + Source BitbucketCloudPullRequestSource `json:"source"` +} + +type BitbucketCloudPullRequestSource struct { + Branch BitbucketCloudPullRequestSourceBranch `json:"branch"` + Commit BitbucketCloudPullRequestSourceCommit `json:"commit"` +} + +type 
BitbucketCloudPullRequestSourceBranch struct { + Name string `json:"name"` +} + +type BitbucketCloudPullRequestSourceCommit struct { + Hash string `json:"hash"` +} + +type PullRequestResponse struct { + Page int32 `json:"page"` + Size int32 `json:"size"` + Pagelen int32 `json:"pagelen"` + Next string `json:"next"` + Previous string `json:"previous"` + Items []PullRequest `json:"values"` +} + +var _ PullRequestService = (*BitbucketCloudService)(nil) + +func parseUrl(uri string) (*url.URL, error) { + if uri == "" { + uri = "https://api.bitbucket.org/2.0" + } + + url, err := url.Parse(uri) + if err != nil { + return nil, err + } + + return url, nil +} + +func NewBitbucketCloudServiceBasicAuth(baseUrl, username, password, owner, repositorySlug string) (PullRequestService, error) { + url, err := parseUrl(baseUrl) + if err != nil { + return nil, fmt.Errorf("error parsing base url of %s for %s/%s: %v", baseUrl, owner, repositorySlug, err) + } + + bitbucketClient := bitbucket.NewBasicAuth(username, password) + bitbucketClient.SetApiBaseURL(*url) + + return &BitbucketCloudService{ + client: bitbucketClient, + owner: owner, + repositorySlug: repositorySlug, + }, nil +} + +func NewBitbucketCloudServiceBearerToken(baseUrl, bearerToken, owner, repositorySlug string) (PullRequestService, error) { + url, err := parseUrl(baseUrl) + if err != nil { + return nil, fmt.Errorf("error parsing base url of %s for %s/%s: %v", baseUrl, owner, repositorySlug, err) + } + + bitbucketClient := bitbucket.NewOAuthbearerToken(bearerToken) + bitbucketClient.SetApiBaseURL(*url) + + return &BitbucketCloudService{ + client: bitbucketClient, + owner: owner, + repositorySlug: repositorySlug, + }, nil +} + +func NewBitbucketCloudServiceNoAuth(baseUrl, owner, repositorySlug string) (PullRequestService, error) { + // There is currently no method to explicitly not require auth + return NewBitbucketCloudServiceBearerToken(baseUrl, "", owner, repositorySlug) +} + +func (b *BitbucketCloudService) List(_ 
context.Context) ([]*PullRequest, error) { + opts := &bitbucket.PullRequestsOptions{ + Owner: b.owner, + RepoSlug: b.repositorySlug, + } + + response, err := b.client.Repositories.PullRequests.Gets(opts) + if err != nil { + return nil, fmt.Errorf("error listing pull requests for %s/%s: %v", b.owner, b.repositorySlug, err) + } + + resp, ok := response.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("unknown type returned from bitbucket pull requests") + } + + repoArray, ok := resp["values"].([]interface{}) + if !ok { + return nil, fmt.Errorf("unknown type returned from response values") + } + + jsonStr, err := json.Marshal(repoArray) + if err != nil { + return nil, fmt.Errorf("error marshalling response body to json: %v", err) + } + + var pulls []BitbucketCloudPullRequest + if err := json.Unmarshal(jsonStr, &pulls); err != nil { + return nil, fmt.Errorf("error unmarshalling json to type '[]BitbucketCloudPullRequest': %v", err) + } + + pullRequests := []*PullRequest{} + for _, pull := range pulls { + pullRequests = append(pullRequests, &PullRequest{ + Number: pull.ID, + Branch: pull.Source.Branch.Name, + HeadSHA: pull.Source.Commit.Hash, + }) + } + + return pullRequests, nil +} diff --git a/applicationset/services/pull_request/bitbucket_cloud_test.go b/applicationset/services/pull_request/bitbucket_cloud_test.go new file mode 100644 index 0000000000000..2f604c1fa9ccf --- /dev/null +++ b/applicationset/services/pull_request/bitbucket_cloud_test.go @@ -0,0 +1,410 @@ +package pull_request + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +func defaultHandlerCloud(t *testing.T) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + var err error + switch r.RequestURI { + case 
"/repositories/OWNER/REPO/pullrequests/": + _, err = io.WriteString(w, `{ + "size": 1, + "pagelen": 10, + "page": 1, + "values": [ + { + "id": 101, + "source": { + "branch": { + "name": "feature/foo-bar" + }, + "commit": { + "type": "commit", + "hash": "1a8dd249c04a" + } + } + } + ] + }`) + default: + t.Fail() + } + if err != nil { + t.Fail() + } + } +} + +func TestParseUrlEmptyUrl(t *testing.T) { + url, err := parseUrl("") + bitbucketUrl, _ := url.Parse("https://api.bitbucket.org/2.0") + + assert.NoError(t, err) + assert.Equal(t, bitbucketUrl, url) +} + +func TestInvalidBaseUrlBasicAuthCloud(t *testing.T) { + _, err := NewBitbucketCloudServiceBasicAuth("http:// example.org", "user", "password", "OWNER", "REPO") + + assert.Error(t, err) +} + +func TestInvalidBaseUrlBearerTokenCloud(t *testing.T) { + _, err := NewBitbucketCloudServiceBearerToken("http:// example.org", "TOKEN", "OWNER", "REPO") + + assert.Error(t, err) +} + +func TestInvalidBaseUrlNoAuthCloud(t *testing.T) { + _, err := NewBitbucketCloudServiceNoAuth("http:// example.org", "OWNER", "REPO") + + assert.Error(t, err) +} + +func TestListPullRequestBearerTokenCloud(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "Bearer TOKEN", r.Header.Get("Authorization")) + defaultHandlerCloud(t)(w, r) + })) + defer ts.Close() + svc, err := NewBitbucketCloudServiceBearerToken(ts.URL, "TOKEN", "OWNER", "REPO") + assert.NoError(t, err) + pullRequests, err := ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{}) + assert.NoError(t, err) + assert.Equal(t, 1, len(pullRequests)) + assert.Equal(t, 101, pullRequests[0].Number) + assert.Equal(t, "feature/foo-bar", pullRequests[0].Branch) + assert.Equal(t, "1a8dd249c04a", pullRequests[0].HeadSHA) +} + +func TestListPullRequestNoAuthCloud(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Empty(t, 
r.Header.Get("Authorization")) + defaultHandlerCloud(t)(w, r) + })) + defer ts.Close() + svc, err := NewBitbucketCloudServiceNoAuth(ts.URL, "OWNER", "REPO") + assert.NoError(t, err) + pullRequests, err := ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{}) + assert.NoError(t, err) + assert.Equal(t, 1, len(pullRequests)) + assert.Equal(t, 101, pullRequests[0].Number) + assert.Equal(t, "feature/foo-bar", pullRequests[0].Branch) + assert.Equal(t, "1a8dd249c04a", pullRequests[0].HeadSHA) +} + +func TestListPullRequestBasicAuthCloud(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "Basic dXNlcjpwYXNzd29yZA==", r.Header.Get("Authorization")) + defaultHandlerCloud(t)(w, r) + })) + defer ts.Close() + svc, err := NewBitbucketCloudServiceBasicAuth(ts.URL, "user", "password", "OWNER", "REPO") + assert.NoError(t, err) + pullRequests, err := ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{}) + assert.NoError(t, err) + assert.Equal(t, 1, len(pullRequests)) + assert.Equal(t, 101, pullRequests[0].Number) + assert.Equal(t, "feature/foo-bar", pullRequests[0].Branch) + assert.Equal(t, "1a8dd249c04a", pullRequests[0].HeadSHA) +} + +func TestListPullRequestPaginationCloud(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + var err error + switch r.RequestURI { + case "/repositories/OWNER/REPO/pullrequests/": + _, err = io.WriteString(w, fmt.Sprintf(`{ + "size": 2, + "pagelen": 1, + "page": 1, + "next": "http://%s/repositories/OWNER/REPO/pullrequests/?pagelen=1&page=2", + "values": [ + { + "id": 101, + "source": { + "branch": { + "name": "feature-101" + }, + "commit": { + "type": "commit", + "hash": "1a8dd249c04a" + } + } + }, + { + "id": 102, + "source": { + "branch": { + "name": "feature-102" + }, + "commit": { + "type": "commit", + 
"hash": "4cf807e67a6d" + } + } + } + ] + }`, r.Host)) + case "/repositories/OWNER/REPO/pullrequests/?pagelen=1&page=2": + _, err = io.WriteString(w, fmt.Sprintf(`{ + "size": 2, + "pagelen": 1, + "page": 2, + "previous": "http://%s/repositories/OWNER/REPO/pullrequests/?pagelen=1&page=1", + "values": [ + { + "id": 103, + "source": { + "branch": { + "name": "feature-103" + }, + "commit": { + "type": "commit", + "hash": "6344d9623e3b" + } + } + } + ] + }`, r.Host)) + default: + t.Fail() + } + if err != nil { + t.Fail() + } + })) + defer ts.Close() + svc, err := NewBitbucketCloudServiceNoAuth(ts.URL, "OWNER", "REPO") + assert.NoError(t, err) + pullRequests, err := ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{}) + assert.NoError(t, err) + assert.Equal(t, 3, len(pullRequests)) + assert.Equal(t, PullRequest{ + Number: 101, + Branch: "feature-101", + HeadSHA: "1a8dd249c04a", + }, *pullRequests[0]) + assert.Equal(t, PullRequest{ + Number: 102, + Branch: "feature-102", + HeadSHA: "4cf807e67a6d", + }, *pullRequests[1]) + assert.Equal(t, PullRequest{ + Number: 103, + Branch: "feature-103", + HeadSHA: "6344d9623e3b", + }, *pullRequests[2]) +} + +func TestListResponseErrorCloud(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(500) + })) + defer ts.Close() + svc, _ := NewBitbucketCloudServiceNoAuth(ts.URL, "OWNER", "REPO") + _, err := ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{}) + assert.Error(t, err) +} + +func TestListResponseMalformedCloud(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + switch r.RequestURI { + case "/repositories/OWNER/REPO/pullrequests/": + _, err := io.WriteString(w, `[{ + "size": 1, + "pagelen": 10, + "page": 1, + "values": [{ "id": 101 }] + }]`) + if err != nil { + t.Fail() + } + default: + 
t.Fail() + } + })) + defer ts.Close() + svc, _ := NewBitbucketCloudServiceNoAuth(ts.URL, "OWNER", "REPO") + _, err := ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{}) + assert.Error(t, err) +} + +func TestListResponseMalformedValuesCloud(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + switch r.RequestURI { + case "/repositories/OWNER/REPO/pullrequests/": + _, err := io.WriteString(w, `{ + "size": 1, + "pagelen": 10, + "page": 1, + "values": { "id": 101 } + }`) + if err != nil { + t.Fail() + } + default: + t.Fail() + } + })) + defer ts.Close() + svc, _ := NewBitbucketCloudServiceNoAuth(ts.URL, "OWNER", "REPO") + _, err := ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{}) + assert.Error(t, err) +} + +func TestListResponseEmptyCloud(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + switch r.RequestURI { + case "/repositories/OWNER/REPO/pullrequests/": + _, err := io.WriteString(w, `{ + "size": 1, + "pagelen": 10, + "page": 1, + "values": [] + }`) + if err != nil { + t.Fail() + } + default: + t.Fail() + } + })) + defer ts.Close() + svc, err := NewBitbucketCloudServiceNoAuth(ts.URL, "OWNER", "REPO") + assert.NoError(t, err) + pullRequests, err := ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{}) + assert.NoError(t, err) + assert.Empty(t, pullRequests) +} + +func TestListPullRequestBranchMatchCloud(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + var err error + switch r.RequestURI { + case "/repositories/OWNER/REPO/pullrequests/": + _, err = io.WriteString(w, fmt.Sprintf(`{ + "size": 2, + "pagelen": 1, + "page": 1, + "next": 
"http://%s/repositories/OWNER/REPO/pullrequests/?pagelen=1&page=2", + "values": [ + { + "id": 101, + "source": { + "branch": { + "name": "feature-101" + }, + "commit": { + "type": "commit", + "hash": "1a8dd249c04a" + } + } + }, + { + "id": 200, + "source": { + "branch": { + "name": "feature-200" + }, + "commit": { + "type": "commit", + "hash": "4cf807e67a6d" + } + } + } + ] + }`, r.Host)) + case "/repositories/OWNER/REPO/pullrequests/?pagelen=1&page=2": + _, err = io.WriteString(w, fmt.Sprintf(`{ + "size": 2, + "pagelen": 1, + "page": 2, + "previous": "http://%s/repositories/OWNER/REPO/pullrequests/?pagelen=1&page=1", + "values": [ + { + "id": 102, + "source": { + "branch": { + "name": "feature-102" + }, + "commit": { + "type": "commit", + "hash": "6344d9623e3b" + } + } + } + ] + }`, r.Host)) + default: + t.Fail() + } + if err != nil { + t.Fail() + } + })) + defer ts.Close() + regexp := `feature-1[\d]{2}` + svc, err := NewBitbucketCloudServiceNoAuth(ts.URL, "OWNER", "REPO") + assert.NoError(t, err) + pullRequests, err := ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{ + { + BranchMatch: ®exp, + }, + }) + assert.NoError(t, err) + assert.Equal(t, 2, len(pullRequests)) + assert.Equal(t, PullRequest{ + Number: 101, + Branch: "feature-101", + HeadSHA: "1a8dd249c04a", + }, *pullRequests[0]) + assert.Equal(t, PullRequest{ + Number: 102, + Branch: "feature-102", + HeadSHA: "6344d9623e3b", + }, *pullRequests[1]) + + regexp = `.*2$` + svc, err = NewBitbucketCloudServiceNoAuth(ts.URL, "OWNER", "REPO") + assert.NoError(t, err) + pullRequests, err = ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{ + { + BranchMatch: ®exp, + }, + }) + assert.NoError(t, err) + assert.Equal(t, 1, len(pullRequests)) + assert.Equal(t, PullRequest{ + Number: 102, + Branch: "feature-102", + HeadSHA: "6344d9623e3b", + }, *pullRequests[0]) + + regexp = `[\d{2}` + svc, err = NewBitbucketCloudServiceNoAuth(ts.URL, "OWNER", "REPO") + 
assert.NoError(t, err) + _, err = ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{ + { + BranchMatch: ®exp, + }, + }) + assert.Error(t, err) +} diff --git a/applicationset/services/pull_request/bitbucket_server.go b/applicationset/services/pull_request/bitbucket_server.go new file mode 100644 index 0000000000000..99665d163e1bc --- /dev/null +++ b/applicationset/services/pull_request/bitbucket_server.go @@ -0,0 +1,84 @@ +package pull_request + +import ( + "context" + "fmt" + + "github.com/argoproj/argo-cd/v2/applicationset/utils" + bitbucketv1 "github.com/gfleury/go-bitbucket-v1" + log "github.com/sirupsen/logrus" +) + +type BitbucketService struct { + client *bitbucketv1.APIClient + projectKey string + repositorySlug string + // Not supported for PRs by Bitbucket Server + // labels []string +} + +var _ PullRequestService = (*BitbucketService)(nil) + +func NewBitbucketServiceBasicAuth(ctx context.Context, username, password, url, projectKey, repositorySlug string) (PullRequestService, error) { + bitbucketConfig := bitbucketv1.NewConfiguration(url) + // Avoid the XSRF check + bitbucketConfig.AddDefaultHeader("x-atlassian-token", "no-check") + bitbucketConfig.AddDefaultHeader("x-requested-with", "XMLHttpRequest") + + ctx = context.WithValue(ctx, bitbucketv1.ContextBasicAuth, bitbucketv1.BasicAuth{ + UserName: username, + Password: password, + }) + return newBitbucketService(ctx, bitbucketConfig, projectKey, repositorySlug) +} + +func NewBitbucketServiceNoAuth(ctx context.Context, url, projectKey, repositorySlug string) (PullRequestService, error) { + return newBitbucketService(ctx, bitbucketv1.NewConfiguration(url), projectKey, repositorySlug) +} + +func newBitbucketService(ctx context.Context, bitbucketConfig *bitbucketv1.Configuration, projectKey, repositorySlug string) (PullRequestService, error) { + bitbucketConfig.BasePath = utils.NormalizeBitbucketBasePath(bitbucketConfig.BasePath) + bitbucketClient := 
bitbucketv1.NewAPIClient(ctx, bitbucketConfig) + + return &BitbucketService{ + client: bitbucketClient, + projectKey: projectKey, + repositorySlug: repositorySlug, + }, nil +} + +func (b *BitbucketService) List(_ context.Context) ([]*PullRequest, error) { + paged := map[string]interface{}{ + "limit": 100, + } + + pullRequests := []*PullRequest{} + for { + response, err := b.client.DefaultApi.GetPullRequestsPage(b.projectKey, b.repositorySlug, paged) + if err != nil { + return nil, fmt.Errorf("error listing pull requests for %s/%s: %v", b.projectKey, b.repositorySlug, err) + } + pulls, err := bitbucketv1.GetPullRequestsResponse(response) + if err != nil { + log.Errorf("error parsing pull request response '%v'", response.Values) + return nil, fmt.Errorf("error parsing pull request response for %s/%s: %v", b.projectKey, b.repositorySlug, err) + } + + for _, pull := range pulls { + pullRequests = append(pullRequests, &PullRequest{ + Number: pull.ID, + Branch: pull.FromRef.DisplayID, // ID: refs/heads/main DisplayID: main + TargetBranch: pull.ToRef.DisplayID, + HeadSHA: pull.FromRef.LatestCommit, // This is not defined in the official docs, but works in practice + Labels: []string{}, // Not supported by library + }) + } + + hasNextPage, nextPageStart := bitbucketv1.HasNextPage(response) + if !hasNextPage { + break + } + paged["start"] = nextPageStart + } + return pullRequests, nil +} diff --git a/applicationset/services/pull_request/bitbucket_server_test.go b/applicationset/services/pull_request/bitbucket_server_test.go new file mode 100644 index 0000000000000..911e3e7e0ccd0 --- /dev/null +++ b/applicationset/services/pull_request/bitbucket_server_test.go @@ -0,0 +1,367 @@ +package pull_request + +import ( + "context" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/stretchr/testify/assert" +) + +func defaultHandler(t *testing.T) func(http.ResponseWriter, *http.Request) { + return 
func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + var err error + switch r.RequestURI { + case "/rest/api/1.0/projects/PROJECT/repos/REPO/pull-requests?limit=100": + _, err = io.WriteString(w, `{ + "size": 1, + "limit": 100, + "isLastPage": true, + "values": [ + { + "id": 101, + "toRef": { + "latestCommit": "5b766e3564a3453808f3cd3dd3f2e5fad8ef0e7a", + "displayId": "master", + "id": "refs/heads/master" + }, + "fromRef": { + "id": "refs/heads/feature-ABC-123", + "displayId": "feature-ABC-123", + "latestCommit": "cb3cf2e4d1517c83e720d2585b9402dbef71f992" + } + } + ], + "start": 0 + }`) + default: + t.Fail() + } + if err != nil { + t.Fail() + } + } +} + +func TestListPullRequestNoAuth(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Empty(t, r.Header.Get("Authorization")) + defaultHandler(t)(w, r) + })) + defer ts.Close() + svc, err := NewBitbucketServiceNoAuth(context.Background(), ts.URL, "PROJECT", "REPO") + assert.NoError(t, err) + pullRequests, err := ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{}) + assert.NoError(t, err) + assert.Equal(t, 1, len(pullRequests)) + assert.Equal(t, 101, pullRequests[0].Number) + assert.Equal(t, "feature-ABC-123", pullRequests[0].Branch) + assert.Equal(t, "master", pullRequests[0].TargetBranch) + assert.Equal(t, "cb3cf2e4d1517c83e720d2585b9402dbef71f992", pullRequests[0].HeadSHA) +} + +func TestListPullRequestPagination(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + var err error + switch r.RequestURI { + case "/rest/api/1.0/projects/PROJECT/repos/REPO/pull-requests?limit=100": + _, err = io.WriteString(w, `{ + "size": 2, + "limit": 2, + "isLastPage": false, + "values": [ + { + "id": 101, + "toRef": { + "latestCommit": "5b766e3564a3453808f3cd3dd3f2e5fad8ef0e7a", + 
"displayId": "master", + "id": "refs/heads/master" + }, + "fromRef": { + "id": "refs/heads/feature-101", + "displayId": "feature-101", + "latestCommit": "ab3cf2e4d1517c83e720d2585b9402dbef71f992" + } + }, + { + "id": 102, + "toRef": { + "latestCommit": "5b766e3564a3453808f3cd3dd3f2e5fad8ef0e7a", + "displayId": "branch", + "id": "refs/heads/branch" + }, + "fromRef": { + "id": "refs/heads/feature-102", + "displayId": "feature-102", + "latestCommit": "bb3cf2e4d1517c83e720d2585b9402dbef71f992" + } + } + ], + "nextPageStart": 200 + }`) + case "/rest/api/1.0/projects/PROJECT/repos/REPO/pull-requests?limit=100&start=200": + _, err = io.WriteString(w, `{ + "size": 1, + "limit": 2, + "isLastPage": true, + "values": [ + { + "id": 200, + "toRef": { + "latestCommit": "5b766e3564a3453808f3cd3dd3f2e5fad8ef0e7a", + "displayId": "master", + "id": "refs/heads/master" + }, + "fromRef": { + "id": "refs/heads/feature-200", + "displayId": "feature-200", + "latestCommit": "cb3cf2e4d1517c83e720d2585b9402dbef71f992" + } + } + ], + "start": 200 + }`) + default: + t.Fail() + } + if err != nil { + t.Fail() + } + })) + defer ts.Close() + svc, err := NewBitbucketServiceNoAuth(context.Background(), ts.URL, "PROJECT", "REPO") + assert.NoError(t, err) + pullRequests, err := ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{}) + assert.NoError(t, err) + assert.Equal(t, 3, len(pullRequests)) + assert.Equal(t, PullRequest{ + Number: 101, + Branch: "feature-101", + TargetBranch: "master", + HeadSHA: "ab3cf2e4d1517c83e720d2585b9402dbef71f992", + Labels: []string{}, + }, *pullRequests[0]) + assert.Equal(t, PullRequest{ + Number: 102, + Branch: "feature-102", + TargetBranch: "branch", + HeadSHA: "bb3cf2e4d1517c83e720d2585b9402dbef71f992", + Labels: []string{}, + }, *pullRequests[1]) + assert.Equal(t, PullRequest{ + Number: 200, + Branch: "feature-200", + TargetBranch: "master", + HeadSHA: "cb3cf2e4d1517c83e720d2585b9402dbef71f992", + Labels: []string{}, + }, 
*pullRequests[2]) +} + +func TestListPullRequestBasicAuth(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // base64(user:password) + assert.Equal(t, "Basic dXNlcjpwYXNzd29yZA==", r.Header.Get("Authorization")) + assert.Equal(t, "no-check", r.Header.Get("X-Atlassian-Token")) + defaultHandler(t)(w, r) + })) + defer ts.Close() + svc, err := NewBitbucketServiceBasicAuth(context.Background(), "user", "password", ts.URL, "PROJECT", "REPO") + assert.NoError(t, err) + pullRequests, err := ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{}) + assert.NoError(t, err) + assert.Equal(t, 1, len(pullRequests)) + assert.Equal(t, 101, pullRequests[0].Number) + assert.Equal(t, "feature-ABC-123", pullRequests[0].Branch) + assert.Equal(t, "cb3cf2e4d1517c83e720d2585b9402dbef71f992", pullRequests[0].HeadSHA) +} + +func TestListResponseError(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + })) + defer ts.Close() + svc, _ := NewBitbucketServiceNoAuth(context.Background(), ts.URL, "PROJECT", "REPO") + _, err := ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{}) + assert.Error(t, err) +} + +func TestListResponseMalformed(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + switch r.RequestURI { + case "/rest/api/1.0/projects/PROJECT/repos/REPO/pull-requests?limit=100": + _, err := io.WriteString(w, `{ + "size": 1, + "limit": 100, + "isLastPage": true, + "values": { "id": 101 }, + "start": 0 + }`) + if err != nil { + t.Fail() + } + default: + t.Fail() + } + })) + defer ts.Close() + svc, _ := NewBitbucketServiceNoAuth(context.Background(), ts.URL, "PROJECT", "REPO") + _, err := ListPullRequests(context.Background(), svc, 
[]v1alpha1.PullRequestGeneratorFilter{}) + assert.Error(t, err) +} + +func TestListResponseEmpty(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + switch r.RequestURI { + case "/rest/api/1.0/projects/PROJECT/repos/REPO/pull-requests?limit=100": + _, err := io.WriteString(w, `{ + "size": 0, + "limit": 100, + "isLastPage": true, + "values": [], + "start": 0 + }`) + if err != nil { + t.Fail() + } + default: + t.Fail() + } + })) + defer ts.Close() + svc, err := NewBitbucketServiceNoAuth(context.Background(), ts.URL, "PROJECT", "REPO") + assert.NoError(t, err) + pullRequests, err := ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{}) + assert.NoError(t, err) + assert.Empty(t, pullRequests) +} + +func TestListPullRequestBranchMatch(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + var err error + switch r.RequestURI { + case "/rest/api/1.0/projects/PROJECT/repos/REPO/pull-requests?limit=100": + _, err = io.WriteString(w, `{ + "size": 2, + "limit": 2, + "isLastPage": false, + "values": [ + { + "id": 101, + "toRef": { + "latestCommit": "5b766e3564a3453808f3cd3dd3f2e5fad8ef0e7a", + "displayId": "master", + "id": "refs/heads/master" + }, + "fromRef": { + "id": "refs/heads/feature-101", + "displayId": "feature-101", + "latestCommit": "ab3cf2e4d1517c83e720d2585b9402dbef71f992" + } + }, + { + "id": 102, + "toRef": { + "latestCommit": "5b766e3564a3453808f3cd3dd3f2e5fad8ef0e7a", + "displayId": "branch", + "id": "refs/heads/branch" + }, + "fromRef": { + "id": "refs/heads/feature-102", + "displayId": "feature-102", + "latestCommit": "bb3cf2e4d1517c83e720d2585b9402dbef71f992" + } + } + ], + "nextPageStart": 200 + }`) + case "/rest/api/1.0/projects/PROJECT/repos/REPO/pull-requests?limit=100&start=200": + _, err = io.WriteString(w, 
`{ + "size": 1, + "limit": 2, + "isLastPage": true, + "values": [ + { + "id": 200, + "toRef": { + "latestCommit": "5b766e3564a3453808f3cd3dd3f2e5fad8ef0e7a", + "displayId": "master", + "id": "refs/heads/master" + }, + "fromRef": { + "id": "refs/heads/feature-200", + "displayId": "feature-200", + "latestCommit": "cb3cf2e4d1517c83e720d2585b9402dbef71f992" + } + } + ], + "start": 200 + }`) + default: + t.Fail() + } + if err != nil { + t.Fail() + } + })) + defer ts.Close() + regexp := `feature-1[\d]{2}` + svc, err := NewBitbucketServiceNoAuth(context.Background(), ts.URL, "PROJECT", "REPO") + assert.NoError(t, err) + pullRequests, err := ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{ + { + BranchMatch: ®exp, + }, + }) + assert.NoError(t, err) + assert.Equal(t, 2, len(pullRequests)) + assert.Equal(t, PullRequest{ + Number: 101, + Branch: "feature-101", + TargetBranch: "master", + HeadSHA: "ab3cf2e4d1517c83e720d2585b9402dbef71f992", + Labels: []string{}, + }, *pullRequests[0]) + assert.Equal(t, PullRequest{ + Number: 102, + Branch: "feature-102", + TargetBranch: "branch", + HeadSHA: "bb3cf2e4d1517c83e720d2585b9402dbef71f992", + Labels: []string{}, + }, *pullRequests[1]) + + regexp = `.*2$` + svc, err = NewBitbucketServiceNoAuth(context.Background(), ts.URL, "PROJECT", "REPO") + assert.NoError(t, err) + pullRequests, err = ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{ + { + BranchMatch: ®exp, + }, + }) + assert.NoError(t, err) + assert.Equal(t, 1, len(pullRequests)) + assert.Equal(t, PullRequest{ + Number: 102, + Branch: "feature-102", + TargetBranch: "branch", + HeadSHA: "bb3cf2e4d1517c83e720d2585b9402dbef71f992", + Labels: []string{}, + }, *pullRequests[0]) + + regexp = `[\d{2}` + svc, err = NewBitbucketServiceNoAuth(context.Background(), ts.URL, "PROJECT", "REPO") + assert.NoError(t, err) + _, err = ListPullRequests(context.Background(), svc, []v1alpha1.PullRequestGeneratorFilter{ + { + 
BranchMatch: ®exp, + }, + }) + assert.Error(t, err) +} diff --git a/applicationset/services/pull_request/fake.go b/applicationset/services/pull_request/fake.go new file mode 100644 index 0000000000000..845df70d6675f --- /dev/null +++ b/applicationset/services/pull_request/fake.go @@ -0,0 +1,23 @@ +package pull_request + +import ( + "context" +) + +type FakeService struct { + listPullReuests []*PullRequest + listError error +} + +var _ PullRequestService = (*FakeService)(nil) + +func NewFakeService(_ context.Context, listPullReuests []*PullRequest, listError error) (PullRequestService, error) { + return &FakeService{ + listPullReuests: listPullReuests, + listError: listError, + }, nil +} + +func (g *FakeService) List(ctx context.Context) ([]*PullRequest, error) { + return g.listPullReuests, g.listError +} diff --git a/applicationset/services/pull_request/fixtures/gitlab_mr_list_response.json b/applicationset/services/pull_request/fixtures/gitlab_mr_list_response.json new file mode 100644 index 0000000000000..5df5b2db30f93 --- /dev/null +++ b/applicationset/services/pull_request/fixtures/gitlab_mr_list_response.json @@ -0,0 +1,89 @@ +[ + { + "id": 35385049, + "iid": 15442, + "project_id": 278964, + "title": "Draft: Use structured logging for DB load balancer", + "description": "", + "state": "opened", + "created_at": "2019-08-20T10:58:54.413Z", + "updated_at": "2019-08-20T12:01:49.849Z", + "merged_by": null, + "merged_at": null, + "closed_by": null, + "closed_at": null, + "target_branch": "master", + "source_branch": "use-structured-logging-for-db-load-balancer", + "user_notes_count": 1, + "upvotes": 0, + "downvotes": 0, + "assignee": { + "id": 4088036, + "name": "Hordur Freyr Yngvason", + "username": "hfyngvason", + "state": "active", + "avatar_url": "https://assets.gitlab-static.net/uploads/-/system/user/avatar/4088036/avatar.png", + "web_url": "https://gitlab.com/hfyngvason" + }, + "author": { + "id": 4088036, + "name": "Hordur Freyr Yngvason", + "username": 
"hfyngvason", + "state": "active", + "avatar_url": "https://assets.gitlab-static.net/uploads/-/system/user/avatar/4088036/avatar.png", + "web_url": "https://gitlab.com/hfyngvason" + }, + "assignees": [ + { + "id": 4088036, + "name": "Hordur Freyr Yngvason", + "username": "hfyngvason", + "state": "active", + "avatar_url": "https://assets.gitlab-static.net/uploads/-/system/user/avatar/4088036/avatar.png", + "web_url": "https://gitlab.com/hfyngvason" + } + ], + "reviewers": [ + { + "id": 2535118, + "name": "Thong Kuah", + "username": "tkuah", + "state": "active", + "avatar_url": "https://secure.gravatar.com/avatar/f7b51bdd49a4914d29504d7ff4c3f7b9?s=80&d=identicon", + "web_url": "https://gitlab.com/tkuah" + } + ], + "source_project_id": 278964, + "target_project_id": 278964, + "labels": [ + "backend", + "backstage", + "database", + "database::review pending", + "group::autodevops and kubernetes" + ], + "work_in_progress": true, + "milestone": null, + "merge_when_pipeline_succeeds": false, + "merge_status": "can_be_merged", + "sha": "2fc4e8b972ff3208ec63b6143e34ad67ff343ad7", + "merge_commit_sha": null, + "discussion_locked": null, + "should_remove_source_branch": null, + "force_remove_source_branch": true, + "reference": "!15442", + "web_url": "https://gitlab.com/gitlab-org/gitlab-ee/merge_requests/15442", + "time_stats": { + "time_estimate": 0, + "total_time_spent": 0, + "human_time_estimate": null, + "human_total_time_spent": null + }, + "squash": true, + "task_completion_status": { + "count": 12, + "completed_count": 0 + }, + "approvals_before_merge": 1 + } +] diff --git a/applicationset/services/pull_request/gitea.go b/applicationset/services/pull_request/gitea.go new file mode 100644 index 0000000000000..ff385ff281c6d --- /dev/null +++ b/applicationset/services/pull_request/gitea.go @@ -0,0 +1,76 @@ +package pull_request + +import ( + "context" + "crypto/tls" + "net/http" + "net/http/cookiejar" + "os" + + "code.gitea.io/sdk/gitea" +) + +type GiteaService struct { 
+ client *gitea.Client + owner string + repo string +} + +var _ PullRequestService = (*GiteaService)(nil) + +func NewGiteaService(ctx context.Context, token, url, owner, repo string, insecure bool) (PullRequestService, error) { + if token == "" { + token = os.Getenv("GITEA_TOKEN") + } + httpClient := &http.Client{} + if insecure { + cookieJar, _ := cookiejar.New(nil) + + tr := http.DefaultTransport.(*http.Transport).Clone() + tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} + + httpClient = &http.Client{ + Jar: cookieJar, + Transport: tr, + } + } + client, err := gitea.NewClient(url, gitea.SetToken(token), gitea.SetHTTPClient(httpClient)) + if err != nil { + return nil, err + } + return &GiteaService{ + client: client, + owner: owner, + repo: repo, + }, nil +} + +func (g *GiteaService) List(ctx context.Context) ([]*PullRequest, error) { + opts := gitea.ListPullRequestsOptions{ + State: gitea.StateOpen, + } + prs, _, err := g.client.ListRepoPullRequests(g.owner, g.repo, opts) + if err != nil { + return nil, err + } + list := []*PullRequest{} + for _, pr := range prs { + list = append(list, &PullRequest{ + Number: int(pr.Index), + Branch: pr.Head.Ref, + TargetBranch: pr.Base.Ref, + HeadSHA: pr.Head.Sha, + Labels: getGiteaPRLabelNames(pr.Labels), + }) + } + return list, nil +} + +// Get the Gitea pull request label names. 
+func getGiteaPRLabelNames(giteaLabels []*gitea.Label) []string { + var labelNames []string + for _, giteaLabel := range giteaLabels { + labelNames = append(labelNames, giteaLabel.Name) + } + return labelNames +} diff --git a/applicationset/services/pull_request/gitea_test.go b/applicationset/services/pull_request/gitea_test.go new file mode 100644 index 0000000000000..125c8ee481b3a --- /dev/null +++ b/applicationset/services/pull_request/gitea_test.go @@ -0,0 +1,290 @@ +package pull_request + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "testing" + + "code.gitea.io/sdk/gitea" + "github.com/stretchr/testify/assert" +) + +func giteaMockHandler(t *testing.T) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + fmt.Println(r.RequestURI) + switch r.RequestURI { + case "/api/v1/version": + _, err := io.WriteString(w, `{"version":"1.17.0+dev-452-g1f0541780"}`) + if err != nil { + t.Fail() + } + case "/api/v1/repos/test-argocd/pr-test/pulls?limit=0&page=1&state=open": + _, err := io.WriteString(w, `[{ + "id": 50721, + "url": "https://gitea.com/test-argocd/pr-test/pulls/1", + "number": 1, + "user": { + "id": 4476, + "login": "graytshirt", + "full_name": "Dan", + "email": "graytshirt@noreply.gitea.io", + "avatar_url": "https://secure.gravatar.com/avatar/2446c67bcd59d71f6ae3cf376ec2ae37?d=identicon", + "language": "", + "is_admin": false, + "last_login": "0001-01-01T00:00:00Z", + "created": "2020-04-07T01:14:36+08:00", + "restricted": false, + "active": false, + "prohibit_login": false, + "location": "", + "website": "", + "description": "", + "visibility": "public", + "followers_count": 0, + "following_count": 4, + "starred_repos_count": 1, + "username": "graytshirt" + }, + "title": "add an empty file", + "body": "", + "labels": [], + "milestone": null, + "assignee": null, + "assignees": null, + "state": "open", + "is_locked": false, + 
"comments": 0, + "html_url": "https://gitea.com/test-argocd/pr-test/pulls/1", + "diff_url": "https://gitea.com/test-argocd/pr-test/pulls/1.diff", + "patch_url": "https://gitea.com/test-argocd/pr-test/pulls/1.patch", + "mergeable": true, + "merged": false, + "merged_at": null, + "merge_commit_sha": null, + "merged_by": null, + "base": { + "label": "main", + "ref": "main", + "sha": "72687815ccba81ef014a96201cc2e846a68789d8", + "repo_id": 21618, + "repo": { + "id": 21618, + "owner": { + "id": 31480, + "login": "test-argocd", + "full_name": "", + "email": "", + "avatar_url": "https://gitea.com/avatars/22d1b1d3f61abf95951c4a958731d848", + "language": "", + "is_admin": false, + "last_login": "0001-01-01T00:00:00Z", + "created": "2022-04-06T02:28:06+08:00", + "restricted": false, + "active": false, + "prohibit_login": false, + "location": "", + "website": "", + "description": "", + "visibility": "public", + "followers_count": 0, + "following_count": 0, + "starred_repos_count": 0, + "username": "test-argocd" + }, + "name": "pr-test", + "full_name": "test-argocd/pr-test", + "description": "", + "empty": false, + "private": false, + "fork": false, + "template": false, + "parent": null, + "mirror": false, + "size": 28, + "language": "", + "languages_url": "https://gitea.com/api/v1/repos/test-argocd/pr-test/languages", + "html_url": "https://gitea.com/test-argocd/pr-test", + "ssh_url": "git@gitea.com:test-argocd/pr-test.git", + "clone_url": "https://gitea.com/test-argocd/pr-test.git", + "original_url": "", + "website": "", + "stars_count": 0, + "forks_count": 0, + "watchers_count": 1, + "open_issues_count": 0, + "open_pr_counter": 1, + "release_counter": 0, + "default_branch": "main", + "archived": false, + "created_at": "2022-04-06T02:32:09+08:00", + "updated_at": "2022-04-06T02:33:12+08:00", + "permissions": { + "admin": false, + "push": false, + "pull": true + }, + "has_issues": true, + "internal_tracker": { + "enable_time_tracker": true, + 
"allow_only_contributors_to_track_time": true, + "enable_issue_dependencies": true + }, + "has_wiki": true, + "has_pull_requests": true, + "has_projects": true, + "ignore_whitespace_conflicts": false, + "allow_merge_commits": true, + "allow_rebase": true, + "allow_rebase_explicit": true, + "allow_squash_merge": true, + "default_merge_style": "merge", + "avatar_url": "", + "internal": false, + "mirror_interval": "", + "mirror_updated": "0001-01-01T00:00:00Z", + "repo_transfer": null + } + }, + "head": { + "label": "test", + "ref": "test", + "sha": "7bbaf62d92ddfafd9cc8b340c619abaec32bc09f", + "repo_id": 21618, + "repo": { + "id": 21618, + "owner": { + "id": 31480, + "login": "test-argocd", + "full_name": "", + "email": "", + "avatar_url": "https://gitea.com/avatars/22d1b1d3f61abf95951c4a958731d848", + "language": "", + "is_admin": false, + "last_login": "0001-01-01T00:00:00Z", + "created": "2022-04-06T02:28:06+08:00", + "restricted": false, + "active": false, + "prohibit_login": false, + "location": "", + "website": "", + "description": "", + "visibility": "public", + "followers_count": 0, + "following_count": 0, + "starred_repos_count": 0, + "username": "test-argocd" + }, + "name": "pr-test", + "full_name": "test-argocd/pr-test", + "description": "", + "empty": false, + "private": false, + "fork": false, + "template": false, + "parent": null, + "mirror": false, + "size": 28, + "language": "", + "languages_url": "https://gitea.com/api/v1/repos/test-argocd/pr-test/languages", + "html_url": "https://gitea.com/test-argocd/pr-test", + "ssh_url": "git@gitea.com:test-argocd/pr-test.git", + "clone_url": "https://gitea.com/test-argocd/pr-test.git", + "original_url": "", + "website": "", + "stars_count": 0, + "forks_count": 0, + "watchers_count": 1, + "open_issues_count": 0, + "open_pr_counter": 1, + "release_counter": 0, + "default_branch": "main", + "archived": false, + "created_at": "2022-04-06T02:32:09+08:00", + "updated_at": "2022-04-06T02:33:12+08:00", + "permissions": 
{ + "admin": false, + "push": false, + "pull": true + }, + "has_issues": true, + "internal_tracker": { + "enable_time_tracker": true, + "allow_only_contributors_to_track_time": true, + "enable_issue_dependencies": true + }, + "has_wiki": true, + "has_pull_requests": true, + "has_projects": true, + "ignore_whitespace_conflicts": false, + "allow_merge_commits": true, + "allow_rebase": true, + "allow_rebase_explicit": true, + "allow_squash_merge": true, + "default_merge_style": "merge", + "avatar_url": "", + "internal": false, + "mirror_interval": "", + "mirror_updated": "0001-01-01T00:00:00Z", + "repo_transfer": null + } + }, + "merge_base": "72687815ccba81ef014a96201cc2e846a68789d8", + "due_date": null, + "created_at": "2022-04-06T02:34:24+08:00", + "updated_at": "2022-04-06T02:34:24+08:00", + "closed_at": null + }]`) + if err != nil { + t.Fail() + } + } + } +} + +func TestGiteaList(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + giteaMockHandler(t)(w, r) + })) + host, err := NewGiteaService(context.Background(), "", ts.URL, "test-argocd", "pr-test", false) + assert.Nil(t, err) + prs, err := host.List(context.Background()) + assert.Nil(t, err) + assert.Equal(t, len(prs), 1) + assert.Equal(t, prs[0].Number, 1) + assert.Equal(t, prs[0].Branch, "test") + assert.Equal(t, prs[0].TargetBranch, "main") + assert.Equal(t, prs[0].HeadSHA, "7bbaf62d92ddfafd9cc8b340c619abaec32bc09f") +} + +func TestGetGiteaPRLabelNames(t *testing.T) { + Tests := []struct { + Name string + PullLabels []*gitea.Label + ExpectedResult []string + }{ + { + Name: "PR has labels", + PullLabels: []*gitea.Label{ + {Name: "label1"}, + {Name: "label2"}, + {Name: "label3"}, + }, + ExpectedResult: []string{"label1", "label2", "label3"}, + }, + { + Name: "PR does not have labels", + PullLabels: []*gitea.Label{}, + ExpectedResult: nil, + }, + } + for _, test := range Tests { + t.Run(test.Name, func(t *testing.T) { + labels := 
getGiteaPRLabelNames(test.PullLabels) + assert.Equal(t, test.ExpectedResult, labels) + }) + } +} diff --git a/applicationset/services/pull_request/github.go b/applicationset/services/pull_request/github.go new file mode 100644 index 0000000000000..7c801e7370f53 --- /dev/null +++ b/applicationset/services/pull_request/github.go @@ -0,0 +1,110 @@ +package pull_request + +import ( + "context" + "fmt" + "os" + + "github.com/google/go-github/v35/github" + "golang.org/x/oauth2" +) + +type GithubService struct { + client *github.Client + owner string + repo string + labels []string +} + +var _ PullRequestService = (*GithubService)(nil) + +func NewGithubService(ctx context.Context, token, url, owner, repo string, labels []string) (PullRequestService, error) { + var ts oauth2.TokenSource + // Undocumented environment variable to set a default token, to be used in testing to dodge anonymous rate limits. + if token == "" { + token = os.Getenv("GITHUB_TOKEN") + } + if token != "" { + ts = oauth2.StaticTokenSource( + &oauth2.Token{AccessToken: token}, + ) + } + httpClient := oauth2.NewClient(ctx, ts) + var client *github.Client + if url == "" { + client = github.NewClient(httpClient) + } else { + var err error + client, err = github.NewEnterpriseClient(url, url, httpClient) + if err != nil { + return nil, err + } + } + return &GithubService{ + client: client, + owner: owner, + repo: repo, + labels: labels, + }, nil +} + +func (g *GithubService) List(ctx context.Context) ([]*PullRequest, error) { + opts := &github.PullRequestListOptions{ + ListOptions: github.ListOptions{ + PerPage: 100, + }, + } + pullRequests := []*PullRequest{} + for { + pulls, resp, err := g.client.PullRequests.List(ctx, g.owner, g.repo, opts) + if err != nil { + return nil, fmt.Errorf("error listing pull requests for %s/%s: %w", g.owner, g.repo, err) + } + for _, pull := range pulls { + if !containLabels(g.labels, pull.Labels) { + continue + } + pullRequests = append(pullRequests, &PullRequest{ + Number: 
*pull.Number, + Branch: *pull.Head.Ref, + TargetBranch: *pull.Base.Ref, + HeadSHA: *pull.Head.SHA, + Labels: getGithubPRLabelNames(pull.Labels), + }) + } + if resp.NextPage == 0 { + break + } + opts.Page = resp.NextPage + } + return pullRequests, nil +} + +// containLabels returns true if gotLabels contains expectedLabels +func containLabels(expectedLabels []string, gotLabels []*github.Label) bool { + for _, expected := range expectedLabels { + found := false + for _, got := range gotLabels { + if got.Name == nil { + continue + } + if expected == *got.Name { + found = true + break + } + } + if !found { + return false + } + } + return true +} + +// Get the Github pull request label names. +func getGithubPRLabelNames(gitHubLabels []*github.Label) []string { + var labelNames []string + for _, gitHubLabel := range gitHubLabels { + labelNames = append(labelNames, *gitHubLabel.Name) + } + return labelNames +} diff --git a/applicationset/services/pull_request/github_app.go b/applicationset/services/pull_request/github_app.go new file mode 100644 index 0000000000000..8879a777ad277 --- /dev/null +++ b/applicationset/services/pull_request/github_app.go @@ -0,0 +1,19 @@ +package pull_request + +import ( + "github.com/argoproj/argo-cd/v2/applicationset/services/github_app_auth" + "github.com/argoproj/argo-cd/v2/applicationset/services/internal/github_app" +) + +func NewGithubAppService(g github_app_auth.Authentication, url, owner, repo string, labels []string) (PullRequestService, error) { + client, err := github_app.Client(g, url) + if err != nil { + return nil, err + } + return &GithubService{ + client: client, + owner: owner, + repo: repo, + labels: labels, + }, nil +} diff --git a/applicationset/services/pull_request/github_test.go b/applicationset/services/pull_request/github_test.go new file mode 100644 index 0000000000000..c47031acb7e31 --- /dev/null +++ b/applicationset/services/pull_request/github_test.go @@ -0,0 +1,89 @@ +package pull_request + +import ( + "testing" 
+ + "github.com/google/go-github/v35/github" + "github.com/stretchr/testify/assert" +) + +func toPtr(s string) *string { + return &s +} + +func TestContainLabels(t *testing.T) { + cases := []struct { + Name string + Labels []string + PullLabels []*github.Label + Expect bool + }{ + { + Name: "Match labels", + Labels: []string{"label1", "label2"}, + PullLabels: []*github.Label{ + {Name: toPtr("label1")}, + {Name: toPtr("label2")}, + {Name: toPtr("label3")}, + }, + Expect: true, + }, + { + Name: "Not match labels", + Labels: []string{"label1", "label4"}, + PullLabels: []*github.Label{ + {Name: toPtr("label1")}, + {Name: toPtr("label2")}, + {Name: toPtr("label3")}, + }, + Expect: false, + }, + { + Name: "No specify", + Labels: []string{}, + PullLabels: []*github.Label{ + {Name: toPtr("label1")}, + {Name: toPtr("label2")}, + {Name: toPtr("label3")}, + }, + Expect: true, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + if got := containLabels(c.Labels, c.PullLabels); got != c.Expect { + t.Errorf("expect: %v, got: %v", c.Expect, got) + } + }) + } +} + +func TestGetGitHubPRLabelNames(t *testing.T) { + Tests := []struct { + Name string + PullLabels []*github.Label + ExpectedResult []string + }{ + { + Name: "PR has labels", + PullLabels: []*github.Label{ + {Name: toPtr("label1")}, + {Name: toPtr("label2")}, + {Name: toPtr("label3")}, + }, + ExpectedResult: []string{"label1", "label2", "label3"}, + }, + { + Name: "PR does not have labels", + PullLabels: []*github.Label{}, + ExpectedResult: nil, + }, + } + for _, test := range Tests { + t.Run(test.Name, func(t *testing.T) { + labels := getGithubPRLabelNames(test.PullLabels) + assert.Equal(t, test.ExpectedResult, labels) + }) + } +} diff --git a/applicationset/services/pull_request/gitlab.go b/applicationset/services/pull_request/gitlab.go new file mode 100644 index 0000000000000..04a4f3464f6f0 --- /dev/null +++ b/applicationset/services/pull_request/gitlab.go @@ -0,0 +1,96 @@ +package pull_request 
+ +import ( + "context" + "fmt" + "net/http" + "os" + + "github.com/argoproj/argo-cd/v2/applicationset/utils" + "github.com/hashicorp/go-retryablehttp" + gitlab "github.com/xanzy/go-gitlab" +) + +type GitLabService struct { + client *gitlab.Client + project string + labels []string + pullRequestState string +} + +var _ PullRequestService = (*GitLabService)(nil) + +func NewGitLabService(ctx context.Context, token, url, project string, labels []string, pullRequestState string, scmRootCAPath string, insecure bool) (PullRequestService, error) { + var clientOptionFns []gitlab.ClientOptionFunc + + // Set a custom Gitlab base URL if one is provided + if url != "" { + clientOptionFns = append(clientOptionFns, gitlab.WithBaseURL(url)) + } + + if token == "" { + token = os.Getenv("GITLAB_TOKEN") + } + + tr := http.DefaultTransport.(*http.Transport).Clone() + tr.TLSClientConfig = utils.GetTlsConfig(scmRootCAPath, insecure) + + retryClient := retryablehttp.NewClient() + retryClient.HTTPClient.Transport = tr + + clientOptionFns = append(clientOptionFns, gitlab.WithHTTPClient(retryClient.HTTPClient)) + + client, err := gitlab.NewClient(token, clientOptionFns...) + if err != nil { + return nil, fmt.Errorf("error creating Gitlab client: %v", err) + } + + return &GitLabService{ + client: client, + project: project, + labels: labels, + pullRequestState: pullRequestState, + }, nil +} + +func (g *GitLabService) List(ctx context.Context) ([]*PullRequest, error) { + + // Filter the merge requests on labels, if they are specified. 
+ var labels *gitlab.Labels + if len(g.labels) > 0 { + labels = (*gitlab.Labels)(&g.labels) + } + + opts := &gitlab.ListProjectMergeRequestsOptions{ + ListOptions: gitlab.ListOptions{ + PerPage: 100, + }, + Labels: labels, + } + + if g.pullRequestState != "" { + opts.State = &g.pullRequestState + } + + pullRequests := []*PullRequest{} + for { + mrs, resp, err := g.client.MergeRequests.ListProjectMergeRequests(g.project, opts) + if err != nil { + return nil, fmt.Errorf("error listing merge requests for project '%s': %v", g.project, err) + } + for _, mr := range mrs { + pullRequests = append(pullRequests, &PullRequest{ + Number: mr.IID, + Branch: mr.SourceBranch, + TargetBranch: mr.TargetBranch, + HeadSHA: mr.SHA, + Labels: mr.Labels, + }) + } + if resp.NextPage == 0 { + break + } + opts.Page = resp.NextPage + } + return pullRequests, nil +} diff --git a/applicationset/services/pull_request/gitlab_test.go b/applicationset/services/pull_request/gitlab_test.go new file mode 100644 index 0000000000000..59c476fcd713a --- /dev/null +++ b/applicationset/services/pull_request/gitlab_test.go @@ -0,0 +1,123 @@ +package pull_request + +import ( + "context" + "io" + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func writeMRListResponse(t *testing.T, w io.Writer) { + f, err := os.Open("fixtures/gitlab_mr_list_response.json") + if err != nil { + t.Fatalf("error opening fixture file: %v", err) + } + + if _, err = io.Copy(w, f); err != nil { + t.Fatalf("error writing response: %v", err) + } +} + +func TestGitLabServiceCustomBaseURL(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + defer server.Close() + + path := "/api/v4/projects/278964/merge_requests" + + mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, path+"?per_page=100", r.URL.RequestURI()) + writeMRListResponse(t, w) + }) + + svc, err := NewGitLabService(context.Background(), "", server.URL, "278964", 
nil, "", "", false) + assert.NoError(t, err) + + _, err = svc.List(context.Background()) + assert.NoError(t, err) +} + +func TestGitLabServiceToken(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + defer server.Close() + + path := "/api/v4/projects/278964/merge_requests" + + mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "token-123", r.Header.Get("Private-Token")) + writeMRListResponse(t, w) + }) + + svc, err := NewGitLabService(context.Background(), "token-123", server.URL, "278964", nil, "", "", false) + assert.NoError(t, err) + + _, err = svc.List(context.Background()) + assert.NoError(t, err) +} + +func TestList(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + defer server.Close() + + path := "/api/v4/projects/278964/merge_requests" + + mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, path+"?per_page=100", r.URL.RequestURI()) + writeMRListResponse(t, w) + }) + + svc, err := NewGitLabService(context.Background(), "", server.URL, "278964", []string{}, "", "", false) + assert.NoError(t, err) + + prs, err := svc.List(context.Background()) + assert.NoError(t, err) + assert.Len(t, prs, 1) + assert.Equal(t, prs[0].Number, 15442) + assert.Equal(t, prs[0].Branch, "use-structured-logging-for-db-load-balancer") + assert.Equal(t, prs[0].TargetBranch, "master") + assert.Equal(t, prs[0].HeadSHA, "2fc4e8b972ff3208ec63b6143e34ad67ff343ad7") +} + +func TestListWithLabels(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + defer server.Close() + + path := "/api/v4/projects/278964/merge_requests" + + mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, path+"?labels=feature%2Cready&per_page=100", r.URL.RequestURI()) + writeMRListResponse(t, w) + }) + + svc, err := NewGitLabService(context.Background(), "", server.URL, "278964", []string{"feature", "ready"}, "", "", false) + 
assert.NoError(t, err) + + _, err = svc.List(context.Background()) + assert.NoError(t, err) +} + +func TestListWithState(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + defer server.Close() + + path := "/api/v4/projects/278964/merge_requests" + + mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, path+"?per_page=100&state=opened", r.URL.RequestURI()) + writeMRListResponse(t, w) + }) + + svc, err := NewGitLabService(context.Background(), "", server.URL, "278964", []string{}, "opened", "", false) + assert.NoError(t, err) + + _, err = svc.List(context.Background()) + assert.NoError(t, err) +} diff --git a/applicationset/services/pull_request/interface.go b/applicationset/services/pull_request/interface.go new file mode 100644 index 0000000000000..0015cfe5eafa6 --- /dev/null +++ b/applicationset/services/pull_request/interface.go @@ -0,0 +1,29 @@ +package pull_request + +import ( + "context" + "regexp" +) + +type PullRequest struct { + // Number is a number that will be the ID of the pull request. + Number int + // Branch is the name of the branch from which the pull request originated. + Branch string + // TargetBranch is the name of the target branch of the pull request. + TargetBranch string + // HeadSHA is the SHA of the HEAD from which the pull request originated. + HeadSHA string + // Labels of the pull request. + Labels []string +} + +type PullRequestService interface { + // List gets a list of pull requests. 
+ List(ctx context.Context) ([]*PullRequest, error) +} + +type Filter struct { + BranchMatch *regexp.Regexp + TargetBranchMatch *regexp.Regexp +} diff --git a/applicationset/services/pull_request/utils.go b/applicationset/services/pull_request/utils.go new file mode 100644 index 0000000000000..50d4e5a3c0098 --- /dev/null +++ b/applicationset/services/pull_request/utils.go @@ -0,0 +1,71 @@ +package pull_request + +import ( + "context" + "fmt" + "regexp" + + argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +func compileFilters(filters []argoprojiov1alpha1.PullRequestGeneratorFilter) ([]*Filter, error) { + outFilters := make([]*Filter, 0, len(filters)) + for _, filter := range filters { + outFilter := &Filter{} + var err error + if filter.BranchMatch != nil { + outFilter.BranchMatch, err = regexp.Compile(*filter.BranchMatch) + if err != nil { + return nil, fmt.Errorf("error compiling BranchMatch regexp %q: %v", *filter.BranchMatch, err) + } + } + if filter.TargetBranchMatch != nil { + outFilter.TargetBranchMatch, err = regexp.Compile(*filter.TargetBranchMatch) + if err != nil { + return nil, fmt.Errorf("error compiling TargetBranchMatch regexp %q: %v", *filter.TargetBranchMatch, err) + } + } + outFilters = append(outFilters, outFilter) + } + return outFilters, nil +} + +func matchFilter(pullRequest *PullRequest, filter *Filter) bool { + if filter.BranchMatch != nil && !filter.BranchMatch.MatchString(pullRequest.Branch) { + return false + } + if filter.TargetBranchMatch != nil && !filter.TargetBranchMatch.MatchString(pullRequest.TargetBranch) { + return false + } + + return true +} + +func ListPullRequests(ctx context.Context, provider PullRequestService, filters []argoprojiov1alpha1.PullRequestGeneratorFilter) ([]*PullRequest, error) { + compiledFilters, err := compileFilters(filters) + if err != nil { + return nil, err + } + + pullRequests, err := provider.List(ctx) + if err != nil { + return nil, err + } + + if 
len(compiledFilters) == 0 { + return pullRequests, nil + } + + filteredPullRequests := make([]*PullRequest, 0, len(pullRequests)) + for _, pullRequest := range pullRequests { + for _, filter := range compiledFilters { + matches := matchFilter(pullRequest, filter) + if matches { + filteredPullRequests = append(filteredPullRequests, pullRequest) + break + } + } + } + + return filteredPullRequests, nil +} diff --git a/applicationset/services/pull_request/utils_test.go b/applicationset/services/pull_request/utils_test.go new file mode 100644 index 0000000000000..3f813127edab7 --- /dev/null +++ b/applicationset/services/pull_request/utils_test.go @@ -0,0 +1,240 @@ +package pull_request + +import ( + "context" + "testing" + + argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/stretchr/testify/assert" +) + +func strp(s string) *string { + return &s +} +func TestFilterBranchMatchBadRegexp(t *testing.T) { + provider, _ := NewFakeService( + context.Background(), + []*PullRequest{ + { + Number: 1, + Branch: "branch1", + TargetBranch: "master", + HeadSHA: "089d92cbf9ff857a39e6feccd32798ca700fb958", + }, + }, + nil, + ) + filters := []argoprojiov1alpha1.PullRequestGeneratorFilter{ + { + BranchMatch: strp("("), + }, + } + _, err := ListPullRequests(context.Background(), provider, filters) + assert.Error(t, err) +} + +func TestFilterBranchMatch(t *testing.T) { + provider, _ := NewFakeService( + context.Background(), + []*PullRequest{ + { + Number: 1, + Branch: "one", + TargetBranch: "master", + HeadSHA: "189d92cbf9ff857a39e6feccd32798ca700fb958", + }, + { + Number: 2, + Branch: "two", + TargetBranch: "master", + HeadSHA: "289d92cbf9ff857a39e6feccd32798ca700fb958", + }, + { + Number: 3, + Branch: "three", + TargetBranch: "master", + HeadSHA: "389d92cbf9ff857a39e6feccd32798ca700fb958", + }, + { + Number: 4, + Branch: "four", + TargetBranch: "master", + HeadSHA: "489d92cbf9ff857a39e6feccd32798ca700fb958", + }, + }, + nil, + ) + filters := 
[]argoprojiov1alpha1.PullRequestGeneratorFilter{ + { + BranchMatch: strp("w"), + }, + } + pullRequests, err := ListPullRequests(context.Background(), provider, filters) + assert.NoError(t, err) + assert.Len(t, pullRequests, 1) + assert.Equal(t, "two", pullRequests[0].Branch) +} + +func TestFilterTargetBranchMatch(t *testing.T) { + provider, _ := NewFakeService( + context.Background(), + []*PullRequest{ + { + Number: 1, + Branch: "one", + TargetBranch: "master", + HeadSHA: "189d92cbf9ff857a39e6feccd32798ca700fb958", + }, + { + Number: 2, + Branch: "two", + TargetBranch: "branch1", + HeadSHA: "289d92cbf9ff857a39e6feccd32798ca700fb958", + }, + { + Number: 3, + Branch: "three", + TargetBranch: "branch2", + HeadSHA: "389d92cbf9ff857a39e6feccd32798ca700fb958", + }, + { + Number: 4, + Branch: "four", + TargetBranch: "branch3", + HeadSHA: "489d92cbf9ff857a39e6feccd32798ca700fb958", + }, + }, + nil, + ) + filters := []argoprojiov1alpha1.PullRequestGeneratorFilter{ + { + TargetBranchMatch: strp("1"), + }, + } + pullRequests, err := ListPullRequests(context.Background(), provider, filters) + assert.NoError(t, err) + assert.Len(t, pullRequests, 1) + assert.Equal(t, "two", pullRequests[0].Branch) +} + +func TestMultiFilterOr(t *testing.T) { + provider, _ := NewFakeService( + context.Background(), + []*PullRequest{ + { + Number: 1, + Branch: "one", + TargetBranch: "master", + HeadSHA: "189d92cbf9ff857a39e6feccd32798ca700fb958", + }, + { + Number: 2, + Branch: "two", + TargetBranch: "master", + HeadSHA: "289d92cbf9ff857a39e6feccd32798ca700fb958", + }, + { + Number: 3, + Branch: "three", + TargetBranch: "master", + HeadSHA: "389d92cbf9ff857a39e6feccd32798ca700fb958", + }, + { + Number: 4, + Branch: "four", + TargetBranch: "master", + HeadSHA: "489d92cbf9ff857a39e6feccd32798ca700fb958", + }, + }, + nil, + ) + filters := []argoprojiov1alpha1.PullRequestGeneratorFilter{ + { + BranchMatch: strp("w"), + }, + { + BranchMatch: strp("r"), + }, + } + pullRequests, err := 
ListPullRequests(context.Background(), provider, filters) + assert.NoError(t, err) + assert.Len(t, pullRequests, 3) + assert.Equal(t, "two", pullRequests[0].Branch) + assert.Equal(t, "three", pullRequests[1].Branch) + assert.Equal(t, "four", pullRequests[2].Branch) +} + +func TestMultiFilterOrWithTargetBranchFilter(t *testing.T) { + provider, _ := NewFakeService( + context.Background(), + []*PullRequest{ + { + Number: 1, + Branch: "one", + TargetBranch: "master", + HeadSHA: "189d92cbf9ff857a39e6feccd32798ca700fb958", + }, + { + Number: 2, + Branch: "two", + TargetBranch: "branch1", + HeadSHA: "289d92cbf9ff857a39e6feccd32798ca700fb958", + }, + { + Number: 3, + Branch: "three", + TargetBranch: "branch2", + HeadSHA: "389d92cbf9ff857a39e6feccd32798ca700fb958", + }, + { + Number: 4, + Branch: "four", + TargetBranch: "branch3", + HeadSHA: "489d92cbf9ff857a39e6feccd32798ca700fb958", + }, + }, + nil, + ) + filters := []argoprojiov1alpha1.PullRequestGeneratorFilter{ + { + BranchMatch: strp("w"), + TargetBranchMatch: strp("1"), + }, + { + BranchMatch: strp("r"), + TargetBranchMatch: strp("3"), + }, + } + pullRequests, err := ListPullRequests(context.Background(), provider, filters) + assert.NoError(t, err) + assert.Len(t, pullRequests, 2) + assert.Equal(t, "two", pullRequests[0].Branch) + assert.Equal(t, "four", pullRequests[1].Branch) +} + +func TestNoFilters(t *testing.T) { + provider, _ := NewFakeService( + context.Background(), + []*PullRequest{ + { + Number: 1, + Branch: "one", + TargetBranch: "master", + HeadSHA: "189d92cbf9ff857a39e6feccd32798ca700fb958", + }, + { + Number: 2, + Branch: "two", + TargetBranch: "master", + HeadSHA: "289d92cbf9ff857a39e6feccd32798ca700fb958", + }, + }, + nil, + ) + filters := []argoprojiov1alpha1.PullRequestGeneratorFilter{} + repos, err := ListPullRequests(context.Background(), provider, filters) + assert.NoError(t, err) + assert.Len(t, repos, 2) + assert.Equal(t, "one", repos[0].Branch) + assert.Equal(t, "two", repos[1].Branch) +} diff 
--git a/applicationset/services/repo_service.go b/applicationset/services/repo_service.go new file mode 100644 index 0000000000000..8ad261fda11cd --- /dev/null +++ b/applicationset/services/repo_service.go @@ -0,0 +1,96 @@ +package services + +import ( + "context" + "fmt" + + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/argoproj/argo-cd/v2/reposerver/apiclient" + "github.com/argoproj/argo-cd/v2/util/db" + "github.com/argoproj/argo-cd/v2/util/git" + "github.com/argoproj/argo-cd/v2/util/io" +) + +// RepositoryDB Is a lean facade for ArgoDB, +// Using a lean interface makes it easier to test the functionality of the git generator +type RepositoryDB interface { + GetRepository(ctx context.Context, url string) (*v1alpha1.Repository, error) +} + +type argoCDService struct { + repositoriesDB RepositoryDB + storecreds git.CredsStore + submoduleEnabled bool + repoServerClientSet apiclient.Clientset + newFileGlobbingEnabled bool +} + +type Repos interface { + + // GetFiles returns content of files (not directories) within the target repo + GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error) + + // GetDirectories returns a list of directories (not files) within the target repo + GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error) +} + +func NewArgoCDService(db db.ArgoDB, submoduleEnabled bool, repoClientset apiclient.Clientset, newFileGlobbingEnabled bool) (Repos, error) { + return &argoCDService{ + repositoriesDB: db.(RepositoryDB), + submoduleEnabled: submoduleEnabled, + repoServerClientSet: repoClientset, + newFileGlobbingEnabled: newFileGlobbingEnabled, + }, nil +} + +func (a *argoCDService) GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error) { + repo, err := a.repositoriesDB.GetRepository(ctx, repoURL) + if err != nil { + return nil, fmt.Errorf("error in GetRepository: %w", err) + } + + fileRequest 
:= &apiclient.GitFilesRequest{ + Repo: repo, + SubmoduleEnabled: a.submoduleEnabled, + Revision: revision, + Path: pattern, + NewGitFileGlobbingEnabled: a.newFileGlobbingEnabled, + } + closer, client, err := a.repoServerClientSet.NewRepoServerClient() + if err != nil { + return nil, fmt.Errorf("error initialising new repo server client: %w", err) + } + defer io.Close(closer) + + fileResponse, err := client.GetGitFiles(ctx, fileRequest) + if err != nil { + return nil, fmt.Errorf("error retrieving Git files: %w", err) + } + return fileResponse.GetMap(), nil +} + +func (a *argoCDService) GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error) { + repo, err := a.repositoriesDB.GetRepository(ctx, repoURL) + if err != nil { + return nil, fmt.Errorf("error in GetRepository: %w", err) + } + + dirRequest := &apiclient.GitDirectoriesRequest{ + Repo: repo, + SubmoduleEnabled: a.submoduleEnabled, + Revision: revision, + } + + closer, client, err := a.repoServerClientSet.NewRepoServerClient() + if err != nil { + return nil, fmt.Errorf("error initialising new repo server client: %w", err) + } + defer io.Close(closer) + + dirResponse, err := client.GetGitDirectories(ctx, dirRequest) + if err != nil { + return nil, fmt.Errorf("error retrieving Git Directories: %w", err) + } + return dirResponse.GetPaths(), nil + +} diff --git a/applicationset/services/repo_service_test.go b/applicationset/services/repo_service_test.go new file mode 100644 index 0000000000000..62f8c11c172d0 --- /dev/null +++ b/applicationset/services/repo_service_test.go @@ -0,0 +1,191 @@ +package services + +import ( + "context" + "fmt" + "testing" + + "github.com/argoproj/argo-cd/v2/applicationset/services/mocks" + "github.com/argoproj/argo-cd/v2/reposerver/apiclient" + repo_mocks "github.com/argoproj/argo-cd/v2/reposerver/apiclient/mocks" + db_mocks "github.com/argoproj/argo-cd/v2/util/db/mocks" + "github.com/argoproj/argo-cd/v2/util/git" + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/mock" + + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +func TestGetDirectories(t *testing.T) { + + type fields struct { + repositoriesDBFuncs []func(*mocks.RepositoryDB) + storecreds git.CredsStore + submoduleEnabled bool + repoServerClientFuncs []func(*repo_mocks.RepoServerServiceClient) + } + type args struct { + ctx context.Context + repoURL string + revision string + } + tests := []struct { + name string + fields fields + args args + want []string + wantErr assert.ErrorAssertionFunc + }{ + {name: "ErrorGettingRepos", fields: fields{ + repositoriesDBFuncs: []func(*mocks.RepositoryDB){ + func(db *mocks.RepositoryDB) { + db.On("GetRepository", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("unable to get repos")) + }, + }, + }, args: args{}, want: nil, wantErr: assert.Error}, + {name: "ErrorGettingDirs", fields: fields{ + repositoriesDBFuncs: []func(*mocks.RepositoryDB){ + func(db *mocks.RepositoryDB) { + db.On("GetRepository", mock.Anything, mock.Anything).Return(&v1alpha1.Repository{}, nil) + }, + }, + repoServerClientFuncs: []func(*repo_mocks.RepoServerServiceClient){ + func(client *repo_mocks.RepoServerServiceClient) { + client.On("GetGitDirectories", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("unable to get dirs")) + }, + }, + }, args: args{}, want: nil, wantErr: assert.Error}, + {name: "HappyCase", fields: fields{ + repositoriesDBFuncs: []func(*mocks.RepositoryDB){ + func(db *mocks.RepositoryDB) { + db.On("GetRepository", mock.Anything, mock.Anything).Return(&v1alpha1.Repository{}, nil) + }, + }, + repoServerClientFuncs: []func(*repo_mocks.RepoServerServiceClient){ + func(client *repo_mocks.RepoServerServiceClient) { + client.On("GetGitDirectories", mock.Anything, mock.Anything).Return(&apiclient.GitDirectoriesResponse{ + Paths: []string{"foo", "foo/bar", "bar/foo"}, + }, nil) + }, + }, + }, args: args{}, want: []string{"foo", "foo/bar", "bar/foo"}, wantErr: assert.NoError}, + } + for _, tt 
:= range tests { + t.Run(tt.name, func(t *testing.T) { + mockDb := &mocks.RepositoryDB{} + mockRepoClient := &repo_mocks.RepoServerServiceClient{} + // decorate the mocks + for i := range tt.fields.repositoriesDBFuncs { + tt.fields.repositoriesDBFuncs[i](mockDb) + } + for i := range tt.fields.repoServerClientFuncs { + tt.fields.repoServerClientFuncs[i](mockRepoClient) + } + + a := &argoCDService{ + repositoriesDB: mockDb, + storecreds: tt.fields.storecreds, + submoduleEnabled: tt.fields.submoduleEnabled, + repoServerClientSet: &repo_mocks.Clientset{RepoServerServiceClient: mockRepoClient}, + } + got, err := a.GetDirectories(tt.args.ctx, tt.args.repoURL, tt.args.revision) + if !tt.wantErr(t, err, fmt.Sprintf("GetDirectories(%v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision)) { + return + } + assert.Equalf(t, tt.want, got, "GetDirectories(%v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision) + }) + } +} + +func TestGetFiles(t *testing.T) { + type fields struct { + repositoriesDBFuncs []func(*mocks.RepositoryDB) + storecreds git.CredsStore + submoduleEnabled bool + repoServerClientFuncs []func(*repo_mocks.RepoServerServiceClient) + } + type args struct { + ctx context.Context + repoURL string + revision string + pattern string + } + tests := []struct { + name string + fields fields + args args + want map[string][]byte + wantErr assert.ErrorAssertionFunc + }{ + {name: "ErrorGettingRepos", fields: fields{ + repositoriesDBFuncs: []func(*mocks.RepositoryDB){ + func(db *mocks.RepositoryDB) { + db.On("GetRepository", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("unable to get repos")) + }, + }, + }, args: args{}, want: nil, wantErr: assert.Error}, + {name: "ErrorGettingFiles", fields: fields{ + repositoriesDBFuncs: []func(*mocks.RepositoryDB){ + func(db *mocks.RepositoryDB) { + db.On("GetRepository", mock.Anything, mock.Anything).Return(&v1alpha1.Repository{}, nil) + }, + }, + repoServerClientFuncs: []func(*repo_mocks.RepoServerServiceClient){ + 
func(client *repo_mocks.RepoServerServiceClient) { + client.On("GetGitFiles", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("unable to get files")) + }, + }, + }, args: args{}, want: nil, wantErr: assert.Error}, + {name: "HappyCase", fields: fields{ + repositoriesDBFuncs: []func(*mocks.RepositoryDB){ + func(db *mocks.RepositoryDB) { + db.On("GetRepository", mock.Anything, mock.Anything).Return(&v1alpha1.Repository{}, nil) + }, + }, + repoServerClientFuncs: []func(*repo_mocks.RepoServerServiceClient){ + func(client *repo_mocks.RepoServerServiceClient) { + client.On("GetGitFiles", mock.Anything, mock.Anything).Return(&apiclient.GitFilesResponse{ + Map: map[string][]byte{ + "foo.json": []byte("hello: world!"), + "bar.yaml": []byte("yay: appsets"), + }, + }, nil) + }, + }, + }, args: args{}, want: map[string][]byte{ + "foo.json": []byte("hello: world!"), + "bar.yaml": []byte("yay: appsets"), + }, wantErr: assert.NoError}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockDb := &mocks.RepositoryDB{} + mockRepoClient := &repo_mocks.RepoServerServiceClient{} + // decorate the mocks + for i := range tt.fields.repositoriesDBFuncs { + tt.fields.repositoriesDBFuncs[i](mockDb) + } + for i := range tt.fields.repoServerClientFuncs { + tt.fields.repoServerClientFuncs[i](mockRepoClient) + } + + a := &argoCDService{ + repositoriesDB: mockDb, + storecreds: tt.fields.storecreds, + submoduleEnabled: tt.fields.submoduleEnabled, + repoServerClientSet: &repo_mocks.Clientset{RepoServerServiceClient: mockRepoClient}, + } + got, err := a.GetFiles(tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.pattern) + if !tt.wantErr(t, err, fmt.Sprintf("GetFiles(%v, %v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.pattern)) { + return + } + assert.Equalf(t, tt.want, got, "GetFiles(%v, %v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.pattern) + }) + } +} + +func TestNewArgoCDService(t *testing.T) { + service, err := 
NewArgoCDService(&db_mocks.ArgoDB{}, false, &repo_mocks.Clientset{}, false) + assert.NoError(t, err, err) + assert.NotNil(t, service) +} diff --git a/applicationset/services/scm_provider/aws_codecommit.go b/applicationset/services/scm_provider/aws_codecommit.go new file mode 100644 index 0000000000000..280711271cfb0 --- /dev/null +++ b/applicationset/services/scm_provider/aws_codecommit.go @@ -0,0 +1,376 @@ +package scm_provider + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go/aws/request" + pathpkg "path" + "path/filepath" + "strings" + + application "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/codecommit" + "github.com/aws/aws-sdk-go/service/resourcegroupstaggingapi" + log "github.com/sirupsen/logrus" + "golang.org/x/exp/maps" + "k8s.io/utils/strings/slices" +) + +const ( + resourceTypeCodeCommitRepository = "codecommit:repository" + prefixGitUrlHttps = "https://git-codecommit." + prefixGitUrlHttpsFIPS = "https://git-codecommit-fips." +) + +// AWSCodeCommitClient is a lean facade to the codecommitiface.CodeCommitAPI +// it helps to reduce the mockery generated code. 
+type AWSCodeCommitClient interface { + ListRepositoriesWithContext(aws.Context, *codecommit.ListRepositoriesInput, ...request.Option) (*codecommit.ListRepositoriesOutput, error) + GetRepositoryWithContext(aws.Context, *codecommit.GetRepositoryInput, ...request.Option) (*codecommit.GetRepositoryOutput, error) + ListBranchesWithContext(aws.Context, *codecommit.ListBranchesInput, ...request.Option) (*codecommit.ListBranchesOutput, error) + GetFolderWithContext(aws.Context, *codecommit.GetFolderInput, ...request.Option) (*codecommit.GetFolderOutput, error) +} + +// AWSTaggingClient is a lean facade to the resourcegroupstaggingapiiface.ResourceGroupsTaggingAPIAPI +// it helps to reduce the mockery generated code. +type AWSTaggingClient interface { + GetResourcesWithContext(aws.Context, *resourcegroupstaggingapi.GetResourcesInput, ...request.Option) (*resourcegroupstaggingapi.GetResourcesOutput, error) +} + +type AWSCodeCommitProvider struct { + codeCommitClient AWSCodeCommitClient + taggingClient AWSTaggingClient + tagFilters []*application.TagFilter + allBranches bool +} + +func NewAWSCodeCommitProvider(ctx context.Context, tagFilters []*application.TagFilter, role string, region string, allBranches bool) (*AWSCodeCommitProvider, error) { + taggingClient, codeCommitClient, err := createAWSDiscoveryClients(ctx, role, region) + if err != nil { + return nil, err + } + return &AWSCodeCommitProvider{ + codeCommitClient: codeCommitClient, + taggingClient: taggingClient, + tagFilters: tagFilters, + allBranches: allBranches, + }, nil +} + +func (p *AWSCodeCommitProvider) ListRepos(ctx context.Context, cloneProtocol string) ([]*Repository, error) { + repos := make([]*Repository, 0) + + repoNames, err := p.listRepoNames(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list codecommit repository: %w", err) + } + + for _, repoName := range repoNames { + repo, err := p.codeCommitClient.GetRepositoryWithContext(ctx, &codecommit.GetRepositoryInput{ + RepositoryName: 
aws.String(repoName), + }) + if err != nil { + // we don't want to skip at this point. It's a valid repo, we don't want to have flapping Application on an AWS outage. + return nil, fmt.Errorf("failed to get codecommit repository: %w", err) + } + if repo == nil || repo.RepositoryMetadata == nil { + // unlikely to happen, but just in case to protect nil pointer dereferences. + log.Warnf("codecommit returned invalid response for repository %s, skipped", repoName) + continue + } + if aws.StringValue(repo.RepositoryMetadata.DefaultBranch) == "" { + // if a codecommit repo doesn't have default branch, it's uninitialized. not going to bother with it. + log.Warnf("repository %s does not have default branch, skipped", repoName) + continue + } + var url string + switch cloneProtocol { + // default to SSH if unspecified (i.e. if ""). + case "", "ssh": + url = aws.StringValue(repo.RepositoryMetadata.CloneUrlSsh) + case "https": + url = aws.StringValue(repo.RepositoryMetadata.CloneUrlHttp) + case "https-fips": + url, err = getCodeCommitFIPSEndpoint(aws.StringValue(repo.RepositoryMetadata.CloneUrlHttp)) + if err != nil { + return nil, fmt.Errorf("https-fips is provided but repoUrl can't be transformed to FIPS endpoint: %w", err) + } + default: + return nil, fmt.Errorf("unknown clone protocol for codecommit %v", cloneProtocol) + } + repos = append(repos, &Repository{ + // there's no "organization" level at codecommit. + // we are just using AWS accountId for now. + Organization: aws.StringValue(repo.RepositoryMetadata.AccountId), + Repository: aws.StringValue(repo.RepositoryMetadata.RepositoryName), + URL: url, + Branch: aws.StringValue(repo.RepositoryMetadata.DefaultBranch), + // we could propagate repo tag keys, but without value not sure if it's any useful. 
+			Labels:       []string{},
+			RepositoryId: aws.StringValue(repo.RepositoryMetadata.RepositoryId),
+		})
+	}
+
+	return repos, nil
+}
+
+// RepoHasPath reports whether path exists in repo at repo.Branch. It fetches
+// the path's parent folder once and matches the base name against files,
+// subfolders, submodules and symlinks; a missing repository, commit or folder
+// yields (false, nil) rather than an error.
+func (p *AWSCodeCommitProvider) RepoHasPath(ctx context.Context, repo *Repository, path string) (bool, error) {
+	// we use GetFolder instead of GetFile here because GetFile always downloads the full blob which has scalability problem.
+	// GetFolder is slightly less concerning.
+
+	path = toAbsolutePath(path)
+	// shortcut: if it's root folder ('/'), we always return true.
+	if path == "/" {
+		return true, nil
+	}
+	// here we are sure it's not root folder, strip the suffix for easier comparison.
+	path = strings.TrimSuffix(path, "/")
+
+	// we always get the parent folder, so we could support both submodule, file, symlink and folder cases.
+	parentPath := pathpkg.Dir(path)
+	basePath := pathpkg.Base(path)
+
+	input := &codecommit.GetFolderInput{
+		CommitSpecifier: aws.String(repo.Branch),
+		FolderPath:      aws.String(parentPath),
+		RepositoryName:  aws.String(repo.Repository),
+	}
+	output, err := p.codeCommitClient.GetFolderWithContext(ctx, input)
+	if err != nil {
+		if hasAwsError(err,
+			codecommit.ErrCodeRepositoryDoesNotExistException,
+			codecommit.ErrCodeCommitDoesNotExistException,
+			codecommit.ErrCodeFolderDoesNotExistException,
+		) {
+			return false, nil
+		}
+		// unhandled exception, propagate out
+		return false, err
+	}
+
+	// anything that matches. 
+	for _, submodule := range output.SubModules {
+		if basePath == aws.StringValue(submodule.RelativePath) {
+			return true, nil
+		}
+	}
+	for _, subpath := range output.SubFolders {
+		if basePath == aws.StringValue(subpath.RelativePath) {
+			return true, nil
+		}
+	}
+	for _, subpath := range output.Files {
+		if basePath == aws.StringValue(subpath.RelativePath) {
+			return true, nil
+		}
+	}
+	for _, subpath := range output.SymbolicLinks {
+		if basePath == aws.StringValue(subpath.RelativePath) {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+// GetBranches returns one Repository entry per branch. With allBranches false,
+// only the repository's default branch is returned; otherwise ListBranches is
+// paginated through. SHA is never populated in either path (it would cost one
+// GetBranch call per branch).
+func (p *AWSCodeCommitProvider) GetBranches(ctx context.Context, repo *Repository) ([]*Repository, error) {
+	repos := make([]*Repository, 0)
+	if !p.allBranches {
+		output, err := p.codeCommitClient.GetRepositoryWithContext(ctx, &codecommit.GetRepositoryInput{
+			RepositoryName: aws.String(repo.Repository),
+		})
+		if err != nil {
+			return nil, err
+		}
+		// NOTE(review): output.RepositoryMetadata is dereferenced without a nil
+		// check here, unlike ListRepos which guards it — confirm GetRepository
+		// cannot return nil metadata on success.
+		repos = append(repos, &Repository{
+			Organization: repo.Organization,
+			Repository:   repo.Repository,
+			URL:          repo.URL,
+			Branch:       aws.StringValue(output.RepositoryMetadata.DefaultBranch),
+			RepositoryId: repo.RepositoryId,
+			Labels:       repo.Labels,
+			// getting SHA of the branch requires a separate GetBranch call.
+			// too expensive. for now, we just don't support it.
+			// SHA: "",
+		})
+	} else {
+		input := &codecommit.ListBranchesInput{
+			RepositoryName: aws.String(repo.Repository),
+		}
+		for {
+			output, err := p.codeCommitClient.ListBranchesWithContext(ctx, input)
+			if err != nil {
+				return nil, err
+			}
+			for _, branch := range output.Branches {
+				repos = append(repos, &Repository{
+					Organization: repo.Organization,
+					Repository:   repo.Repository,
+					URL:          repo.URL,
+					Branch:       aws.StringValue(branch),
+					RepositoryId: repo.RepositoryId,
+					Labels:       repo.Labels,
+					// getting SHA of the branch requires a separate GetBranch call.
+					// too expensive. for now, we just don't support it. 
+ // SHA: "", + }) + } + input.NextToken = output.NextToken + if aws.StringValue(output.NextToken) == "" { + break + } + } + } + + return repos, nil +} + +func (p *AWSCodeCommitProvider) listRepoNames(ctx context.Context) ([]string, error) { + tagFilters := p.getTagFilters() + repoNames := make([]string, 0) + var err error + + if len(tagFilters) < 1 { + log.Debugf("no tag filer, calling codecommit api to list repos") + listReposInput := &codecommit.ListRepositoriesInput{} + var output *codecommit.ListRepositoriesOutput + for { + output, err = p.codeCommitClient.ListRepositoriesWithContext(ctx, listReposInput) + if err != nil { + break + } + for _, repo := range output.Repositories { + repoNames = append(repoNames, aws.StringValue(repo.RepositoryName)) + } + listReposInput.NextToken = output.NextToken + if aws.StringValue(output.NextToken) == "" { + break + } + } + } else { + log.Debugf("tag filer is specified, calling tagging api to list repos") + discoveryInput := &resourcegroupstaggingapi.GetResourcesInput{ + ResourceTypeFilters: aws.StringSlice([]string{resourceTypeCodeCommitRepository}), + TagFilters: tagFilters, + } + var output *resourcegroupstaggingapi.GetResourcesOutput + for { + output, err = p.taggingClient.GetResourcesWithContext(ctx, discoveryInput) + if err != nil { + break + } + for _, resource := range output.ResourceTagMappingList { + repoArn := aws.StringValue(resource.ResourceARN) + log.Debugf("discovered codecommit repo with arn %s", repoArn) + repoName, extractErr := getCodeCommitRepoName(repoArn) + if extractErr != nil { + log.Warnf("discovered codecommit repoArn %s cannot be parsed due to %v", repoArn, err) + continue + } + repoNames = append(repoNames, repoName) + } + discoveryInput.PaginationToken = output.PaginationToken + if aws.StringValue(output.PaginationToken) == "" { + break + } + } + } + return repoNames, err +} + +func (p *AWSCodeCommitProvider) getTagFilters() []*resourcegroupstaggingapi.TagFilter { + filters := 
make(map[string]*resourcegroupstaggingapi.TagFilter)
+	for _, tagFilter := range p.tagFilters {
+		filter, hasKey := filters[tagFilter.Key]
+		if !hasKey {
+			filter = &resourcegroupstaggingapi.TagFilter{
+				Key: aws.String(tagFilter.Key),
+			}
+			filters[tagFilter.Key] = filter
+		}
+		// an empty Value yields a key-only filter (no Values at all).
+		if tagFilter.Value != "" {
+			filter.Values = append(filter.Values, aws.String(tagFilter.Value))
+		}
+	}
+	// NOTE(review): maps.Values iterates the map in nondeterministic order, so
+	// the filter slice ordering varies between calls — presumably harmless for
+	// the tagging API, but confirm nothing depends on a stable order.
+	return maps.Values(filters)
+}
+
+// getCodeCommitRepoName extracts the repository name from a CodeCommit ARN.
+func getCodeCommitRepoName(repoArn string) (string, error) {
+	parsedArn, err := arn.Parse(repoArn)
+	if err != nil {
+		return "", fmt.Errorf("failed to parse codecommit repository ARN: %w", err)
+	}
+	// see: https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-permissions-reference.html
+	// arn:aws:codecommit:region:account-id:repository-name
+	return parsedArn.Resource, nil
+}
+
+// getCodeCommitFIPSEndpoint transforms provided https:// codecommit URL to a FIPS-compliant endpoint.
+// note that the specified region must support FIPS, otherwise the returned URL won't be reachable
+// see: https://docs.aws.amazon.com/codecommit/latest/userguide/regions.html#regions-git
+func getCodeCommitFIPSEndpoint(repoUrl string) (string, error) {
+	if strings.HasPrefix(repoUrl, prefixGitUrlHttpsFIPS) {
+		log.Debugf("provided repoUrl %s is already a fips endpoint", repoUrl)
+		return repoUrl, nil
+	}
+	if !strings.HasPrefix(repoUrl, prefixGitUrlHttps) {
+		return "", fmt.Errorf("the provided https endpoint isn't recognized, cannot be transformed to FIPS endpoint: %s", repoUrl)
+	}
+	// we already have the prefix, so we guarantee to replace exactly the prefix only. 
+	return strings.Replace(repoUrl, prefixGitUrlHttps, prefixGitUrlHttpsFIPS, 1), nil
+}
+
+// hasAwsError reports whether err is an awserr.Error whose code is one of codes.
+// NOTE(review): this is a direct type assertion, so errors wrapped with %w will
+// not match — callers currently pass the raw SDK error, which is fine.
+func hasAwsError(err error, codes ...string) bool {
+	if awsErr, ok := err.(awserr.Error); ok {
+		return slices.Contains(codes, awsErr.Code())
+	}
+	return false
+}
+
+// toAbsolutePath transforms a path input to absolute path, as required by AWS CodeCommit
+// see https://docs.aws.amazon.com/codecommit/latest/APIReference/API_GetFolder.html
+func toAbsolutePath(path string) string {
+	if filepath.IsAbs(path) {
+		return path
+	}
+	return filepath.ToSlash(filepath.Join("/", path))
+}
+
+// createAWSDiscoveryClients builds the tagging and CodeCommit clients used for
+// discovery. A non-empty role is assumed via STS on top of the pod session
+// (cross-account discovery); a non-empty region overrides the pod region
+// (cross-region discovery).
+func createAWSDiscoveryClients(_ context.Context, role string, region string) (*resourcegroupstaggingapi.ResourceGroupsTaggingAPI, *codecommit.CodeCommit, error) {
+	podSession, err := session.NewSession()
+	if err != nil {
+		return nil, nil, fmt.Errorf("error creating new AWS pod session: %w", err)
+	}
+	discoverySession := podSession
+	// assume role if provided - this allows cross account CodeCommit repo discovery.
+	if role != "" {
+		log.Debugf("role %s is provided for AWS CodeCommit discovery", role)
+		assumeRoleCreds := stscreds.NewCredentials(podSession, role)
+		discoverySession, err = session.NewSession(&aws.Config{
+			Credentials: assumeRoleCreds,
+		})
+		if err != nil {
+			// wrap with %w (not %s) so callers can unwrap, consistent with the
+			// other errors in this file.
+			return nil, nil, fmt.Errorf("error creating new AWS discovery session: %w", err)
+		}
+	} else {
+		log.Debugf("role is not provided for AWS CodeCommit discovery, using pod role")
+	}
+	// use region explicitly if provided - this allows cross region CodeCommit repo discovery. 
+ if region != "" { + log.Debugf("region %s is provided for AWS CodeCommit discovery", region) + discoverySession = discoverySession.Copy(&aws.Config{ + Region: aws.String(region), + }) + } else { + log.Debugf("region is not provided for AWS CodeCommit discovery, using pod region") + } + + taggingClient := resourcegroupstaggingapi.New(discoverySession) + codeCommitClient := codecommit.New(discoverySession) + + return taggingClient, codeCommitClient, nil +} diff --git a/applicationset/services/scm_provider/aws_codecommit/mocks/AWSCodeCommitClient.go b/applicationset/services/scm_provider/aws_codecommit/mocks/AWSCodeCommitClient.go new file mode 100644 index 0000000000000..b9d6f6a5d5956 --- /dev/null +++ b/applicationset/services/scm_provider/aws_codecommit/mocks/AWSCodeCommitClient.go @@ -0,0 +1,321 @@ +// Code generated by mockery v2.26.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + codecommit "github.com/aws/aws-sdk-go/service/codecommit" + + mock "github.com/stretchr/testify/mock" + + request "github.com/aws/aws-sdk-go/aws/request" +) + +// AWSCodeCommitClient is an autogenerated mock type for the AWSCodeCommitClient type +type AWSCodeCommitClient struct { + mock.Mock +} + +type AWSCodeCommitClient_Expecter struct { + mock *mock.Mock +} + +func (_m *AWSCodeCommitClient) EXPECT() *AWSCodeCommitClient_Expecter { + return &AWSCodeCommitClient_Expecter{mock: &_m.Mock} +} + +// GetFolderWithContext provides a mock function with given fields: _a0, _a1, _a2 +func (_m *AWSCodeCommitClient) GetFolderWithContext(_a0 context.Context, _a1 *codecommit.GetFolderInput, _a2 ...request.Option) (*codecommit.GetFolderOutput, error) { + _va := make([]interface{}, len(_a2)) + for _i := range _a2 { + _va[_i] = _a2[_i] + } + var _ca []interface{} + _ca = append(_ca, _a0, _a1) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) 
+ + var r0 *codecommit.GetFolderOutput + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *codecommit.GetFolderInput, ...request.Option) (*codecommit.GetFolderOutput, error)); ok { + return rf(_a0, _a1, _a2...) + } + if rf, ok := ret.Get(0).(func(context.Context, *codecommit.GetFolderInput, ...request.Option) *codecommit.GetFolderOutput); ok { + r0 = rf(_a0, _a1, _a2...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*codecommit.GetFolderOutput) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *codecommit.GetFolderInput, ...request.Option) error); ok { + r1 = rf(_a0, _a1, _a2...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AWSCodeCommitClient_GetFolderWithContext_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFolderWithContext' +type AWSCodeCommitClient_GetFolderWithContext_Call struct { + *mock.Call +} + +// GetFolderWithContext is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *codecommit.GetFolderInput +// - _a2 ...request.Option +func (_e *AWSCodeCommitClient_Expecter) GetFolderWithContext(_a0 interface{}, _a1 interface{}, _a2 ...interface{}) *AWSCodeCommitClient_GetFolderWithContext_Call { + return &AWSCodeCommitClient_GetFolderWithContext_Call{Call: _e.mock.On("GetFolderWithContext", + append([]interface{}{_a0, _a1}, _a2...)...)} +} + +func (_c *AWSCodeCommitClient_GetFolderWithContext_Call) Run(run func(_a0 context.Context, _a1 *codecommit.GetFolderInput, _a2 ...request.Option)) *AWSCodeCommitClient_GetFolderWithContext_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]request.Option, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(request.Option) + } + } + run(args[0].(context.Context), args[1].(*codecommit.GetFolderInput), variadicArgs...) 
+ }) + return _c +} + +func (_c *AWSCodeCommitClient_GetFolderWithContext_Call) Return(_a0 *codecommit.GetFolderOutput, _a1 error) *AWSCodeCommitClient_GetFolderWithContext_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AWSCodeCommitClient_GetFolderWithContext_Call) RunAndReturn(run func(context.Context, *codecommit.GetFolderInput, ...request.Option) (*codecommit.GetFolderOutput, error)) *AWSCodeCommitClient_GetFolderWithContext_Call { + _c.Call.Return(run) + return _c +} + +// GetRepositoryWithContext provides a mock function with given fields: _a0, _a1, _a2 +func (_m *AWSCodeCommitClient) GetRepositoryWithContext(_a0 context.Context, _a1 *codecommit.GetRepositoryInput, _a2 ...request.Option) (*codecommit.GetRepositoryOutput, error) { + _va := make([]interface{}, len(_a2)) + for _i := range _a2 { + _va[_i] = _a2[_i] + } + var _ca []interface{} + _ca = append(_ca, _a0, _a1) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 *codecommit.GetRepositoryOutput + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *codecommit.GetRepositoryInput, ...request.Option) (*codecommit.GetRepositoryOutput, error)); ok { + return rf(_a0, _a1, _a2...) + } + if rf, ok := ret.Get(0).(func(context.Context, *codecommit.GetRepositoryInput, ...request.Option) *codecommit.GetRepositoryOutput); ok { + r0 = rf(_a0, _a1, _a2...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*codecommit.GetRepositoryOutput) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *codecommit.GetRepositoryInput, ...request.Option) error); ok { + r1 = rf(_a0, _a1, _a2...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AWSCodeCommitClient_GetRepositoryWithContext_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRepositoryWithContext' +type AWSCodeCommitClient_GetRepositoryWithContext_Call struct { + *mock.Call +} + +// GetRepositoryWithContext is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *codecommit.GetRepositoryInput +// - _a2 ...request.Option +func (_e *AWSCodeCommitClient_Expecter) GetRepositoryWithContext(_a0 interface{}, _a1 interface{}, _a2 ...interface{}) *AWSCodeCommitClient_GetRepositoryWithContext_Call { + return &AWSCodeCommitClient_GetRepositoryWithContext_Call{Call: _e.mock.On("GetRepositoryWithContext", + append([]interface{}{_a0, _a1}, _a2...)...)} +} + +func (_c *AWSCodeCommitClient_GetRepositoryWithContext_Call) Run(run func(_a0 context.Context, _a1 *codecommit.GetRepositoryInput, _a2 ...request.Option)) *AWSCodeCommitClient_GetRepositoryWithContext_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]request.Option, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(request.Option) + } + } + run(args[0].(context.Context), args[1].(*codecommit.GetRepositoryInput), variadicArgs...) 
+ }) + return _c +} + +func (_c *AWSCodeCommitClient_GetRepositoryWithContext_Call) Return(_a0 *codecommit.GetRepositoryOutput, _a1 error) *AWSCodeCommitClient_GetRepositoryWithContext_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AWSCodeCommitClient_GetRepositoryWithContext_Call) RunAndReturn(run func(context.Context, *codecommit.GetRepositoryInput, ...request.Option) (*codecommit.GetRepositoryOutput, error)) *AWSCodeCommitClient_GetRepositoryWithContext_Call { + _c.Call.Return(run) + return _c +} + +// ListBranchesWithContext provides a mock function with given fields: _a0, _a1, _a2 +func (_m *AWSCodeCommitClient) ListBranchesWithContext(_a0 context.Context, _a1 *codecommit.ListBranchesInput, _a2 ...request.Option) (*codecommit.ListBranchesOutput, error) { + _va := make([]interface{}, len(_a2)) + for _i := range _a2 { + _va[_i] = _a2[_i] + } + var _ca []interface{} + _ca = append(_ca, _a0, _a1) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 *codecommit.ListBranchesOutput + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *codecommit.ListBranchesInput, ...request.Option) (*codecommit.ListBranchesOutput, error)); ok { + return rf(_a0, _a1, _a2...) + } + if rf, ok := ret.Get(0).(func(context.Context, *codecommit.ListBranchesInput, ...request.Option) *codecommit.ListBranchesOutput); ok { + r0 = rf(_a0, _a1, _a2...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*codecommit.ListBranchesOutput) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *codecommit.ListBranchesInput, ...request.Option) error); ok { + r1 = rf(_a0, _a1, _a2...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AWSCodeCommitClient_ListBranchesWithContext_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListBranchesWithContext' +type AWSCodeCommitClient_ListBranchesWithContext_Call struct { + *mock.Call +} + +// ListBranchesWithContext is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *codecommit.ListBranchesInput +// - _a2 ...request.Option +func (_e *AWSCodeCommitClient_Expecter) ListBranchesWithContext(_a0 interface{}, _a1 interface{}, _a2 ...interface{}) *AWSCodeCommitClient_ListBranchesWithContext_Call { + return &AWSCodeCommitClient_ListBranchesWithContext_Call{Call: _e.mock.On("ListBranchesWithContext", + append([]interface{}{_a0, _a1}, _a2...)...)} +} + +func (_c *AWSCodeCommitClient_ListBranchesWithContext_Call) Run(run func(_a0 context.Context, _a1 *codecommit.ListBranchesInput, _a2 ...request.Option)) *AWSCodeCommitClient_ListBranchesWithContext_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]request.Option, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(request.Option) + } + } + run(args[0].(context.Context), args[1].(*codecommit.ListBranchesInput), variadicArgs...) 
+ }) + return _c +} + +func (_c *AWSCodeCommitClient_ListBranchesWithContext_Call) Return(_a0 *codecommit.ListBranchesOutput, _a1 error) *AWSCodeCommitClient_ListBranchesWithContext_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AWSCodeCommitClient_ListBranchesWithContext_Call) RunAndReturn(run func(context.Context, *codecommit.ListBranchesInput, ...request.Option) (*codecommit.ListBranchesOutput, error)) *AWSCodeCommitClient_ListBranchesWithContext_Call { + _c.Call.Return(run) + return _c +} + +// ListRepositoriesWithContext provides a mock function with given fields: _a0, _a1, _a2 +func (_m *AWSCodeCommitClient) ListRepositoriesWithContext(_a0 context.Context, _a1 *codecommit.ListRepositoriesInput, _a2 ...request.Option) (*codecommit.ListRepositoriesOutput, error) { + _va := make([]interface{}, len(_a2)) + for _i := range _a2 { + _va[_i] = _a2[_i] + } + var _ca []interface{} + _ca = append(_ca, _a0, _a1) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 *codecommit.ListRepositoriesOutput + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *codecommit.ListRepositoriesInput, ...request.Option) (*codecommit.ListRepositoriesOutput, error)); ok { + return rf(_a0, _a1, _a2...) + } + if rf, ok := ret.Get(0).(func(context.Context, *codecommit.ListRepositoriesInput, ...request.Option) *codecommit.ListRepositoriesOutput); ok { + r0 = rf(_a0, _a1, _a2...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*codecommit.ListRepositoriesOutput) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *codecommit.ListRepositoriesInput, ...request.Option) error); ok { + r1 = rf(_a0, _a1, _a2...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AWSCodeCommitClient_ListRepositoriesWithContext_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListRepositoriesWithContext' +type AWSCodeCommitClient_ListRepositoriesWithContext_Call struct { + *mock.Call +} + +// ListRepositoriesWithContext is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *codecommit.ListRepositoriesInput +// - _a2 ...request.Option +func (_e *AWSCodeCommitClient_Expecter) ListRepositoriesWithContext(_a0 interface{}, _a1 interface{}, _a2 ...interface{}) *AWSCodeCommitClient_ListRepositoriesWithContext_Call { + return &AWSCodeCommitClient_ListRepositoriesWithContext_Call{Call: _e.mock.On("ListRepositoriesWithContext", + append([]interface{}{_a0, _a1}, _a2...)...)} +} + +func (_c *AWSCodeCommitClient_ListRepositoriesWithContext_Call) Run(run func(_a0 context.Context, _a1 *codecommit.ListRepositoriesInput, _a2 ...request.Option)) *AWSCodeCommitClient_ListRepositoriesWithContext_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]request.Option, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(request.Option) + } + } + run(args[0].(context.Context), args[1].(*codecommit.ListRepositoriesInput), variadicArgs...) 
+ }) + return _c +} + +func (_c *AWSCodeCommitClient_ListRepositoriesWithContext_Call) Return(_a0 *codecommit.ListRepositoriesOutput, _a1 error) *AWSCodeCommitClient_ListRepositoriesWithContext_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AWSCodeCommitClient_ListRepositoriesWithContext_Call) RunAndReturn(run func(context.Context, *codecommit.ListRepositoriesInput, ...request.Option) (*codecommit.ListRepositoriesOutput, error)) *AWSCodeCommitClient_ListRepositoriesWithContext_Call { + _c.Call.Return(run) + return _c +} + +type mockConstructorTestingTNewAWSCodeCommitClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewAWSCodeCommitClient creates a new instance of AWSCodeCommitClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewAWSCodeCommitClient(t mockConstructorTestingTNewAWSCodeCommitClient) *AWSCodeCommitClient { + mock := &AWSCodeCommitClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/applicationset/services/scm_provider/aws_codecommit/mocks/AWSTaggingClient.go b/applicationset/services/scm_provider/aws_codecommit/mocks/AWSTaggingClient.go new file mode 100644 index 0000000000000..9acd8979b7818 --- /dev/null +++ b/applicationset/services/scm_provider/aws_codecommit/mocks/AWSTaggingClient.go @@ -0,0 +1,110 @@ +// Code generated by mockery v2.26.1. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + request "github.com/aws/aws-sdk-go/aws/request" + mock "github.com/stretchr/testify/mock" + + resourcegroupstaggingapi "github.com/aws/aws-sdk-go/service/resourcegroupstaggingapi" +) + +// AWSTaggingClient is an autogenerated mock type for the AWSTaggingClient type +type AWSTaggingClient struct { + mock.Mock +} + +type AWSTaggingClient_Expecter struct { + mock *mock.Mock +} + +func (_m *AWSTaggingClient) EXPECT() *AWSTaggingClient_Expecter { + return &AWSTaggingClient_Expecter{mock: &_m.Mock} +} + +// GetResourcesWithContext provides a mock function with given fields: _a0, _a1, _a2 +func (_m *AWSTaggingClient) GetResourcesWithContext(_a0 context.Context, _a1 *resourcegroupstaggingapi.GetResourcesInput, _a2 ...request.Option) (*resourcegroupstaggingapi.GetResourcesOutput, error) { + _va := make([]interface{}, len(_a2)) + for _i := range _a2 { + _va[_i] = _a2[_i] + } + var _ca []interface{} + _ca = append(_ca, _a0, _a1) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 *resourcegroupstaggingapi.GetResourcesOutput + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *resourcegroupstaggingapi.GetResourcesInput, ...request.Option) (*resourcegroupstaggingapi.GetResourcesOutput, error)); ok { + return rf(_a0, _a1, _a2...) + } + if rf, ok := ret.Get(0).(func(context.Context, *resourcegroupstaggingapi.GetResourcesInput, ...request.Option) *resourcegroupstaggingapi.GetResourcesOutput); ok { + r0 = rf(_a0, _a1, _a2...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*resourcegroupstaggingapi.GetResourcesOutput) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *resourcegroupstaggingapi.GetResourcesInput, ...request.Option) error); ok { + r1 = rf(_a0, _a1, _a2...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AWSTaggingClient_GetResourcesWithContext_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetResourcesWithContext' +type AWSTaggingClient_GetResourcesWithContext_Call struct { + *mock.Call +} + +// GetResourcesWithContext is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *resourcegroupstaggingapi.GetResourcesInput +// - _a2 ...request.Option +func (_e *AWSTaggingClient_Expecter) GetResourcesWithContext(_a0 interface{}, _a1 interface{}, _a2 ...interface{}) *AWSTaggingClient_GetResourcesWithContext_Call { + return &AWSTaggingClient_GetResourcesWithContext_Call{Call: _e.mock.On("GetResourcesWithContext", + append([]interface{}{_a0, _a1}, _a2...)...)} +} + +func (_c *AWSTaggingClient_GetResourcesWithContext_Call) Run(run func(_a0 context.Context, _a1 *resourcegroupstaggingapi.GetResourcesInput, _a2 ...request.Option)) *AWSTaggingClient_GetResourcesWithContext_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]request.Option, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(request.Option) + } + } + run(args[0].(context.Context), args[1].(*resourcegroupstaggingapi.GetResourcesInput), variadicArgs...) 
+ }) + return _c +} + +func (_c *AWSTaggingClient_GetResourcesWithContext_Call) Return(_a0 *resourcegroupstaggingapi.GetResourcesOutput, _a1 error) *AWSTaggingClient_GetResourcesWithContext_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AWSTaggingClient_GetResourcesWithContext_Call) RunAndReturn(run func(context.Context, *resourcegroupstaggingapi.GetResourcesInput, ...request.Option) (*resourcegroupstaggingapi.GetResourcesOutput, error)) *AWSTaggingClient_GetResourcesWithContext_Call { + _c.Call.Return(run) + return _c +} + +type mockConstructorTestingTNewAWSTaggingClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewAWSTaggingClient creates a new instance of AWSTaggingClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewAWSTaggingClient(t mockConstructorTestingTNewAWSTaggingClient) *AWSTaggingClient { + mock := &AWSTaggingClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/applicationset/services/scm_provider/aws_codecommit_test.go b/applicationset/services/scm_provider/aws_codecommit_test.go new file mode 100644 index 0000000000000..3a4f7c1a9a6a8 --- /dev/null +++ b/applicationset/services/scm_provider/aws_codecommit_test.go @@ -0,0 +1,483 @@ +package scm_provider + +import ( + "context" + "errors" + "sort" + "testing" + + "github.com/argoproj/argo-cd/v2/applicationset/services/scm_provider/aws_codecommit/mocks" + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/codecommit" + "github.com/aws/aws-sdk-go/service/resourcegroupstaggingapi" + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +type awsCodeCommitTestRepository struct { + name string + id string + arn string + accountId string + defaultBranch string + expectedCloneUrl string + getRepositoryError error + 
getRepositoryNilMetadata bool + valid bool +} + +func TestAWSCodeCommitListRepos(t *testing.T) { + testCases := []struct { + name string + repositories []*awsCodeCommitTestRepository + cloneProtocol string + tagFilters []*v1alpha1.TagFilter + expectTagFilters []*resourcegroupstaggingapi.TagFilter + listRepositoryError error + expectOverallError bool + expectListAtCodeCommit bool + }{ + { + name: "ListRepos by tag with https", + cloneProtocol: "https", + repositories: []*awsCodeCommitTestRepository{ + { + name: "repo1", + id: "8235624d-d248-4df9-a983-2558b01dbe83", + arn: "arn:aws:codecommit:us-east-1:111111111111:repo1", + defaultBranch: "main", + expectedCloneUrl: "https://git-codecommit.us-east-1.amazonaws.com/v1/repos/repo1", + valid: true, + }, + }, + tagFilters: []*v1alpha1.TagFilter{ + {Key: "key1", Value: "value1"}, + {Key: "key1", Value: "value2"}, + {Key: "key2"}, + }, + expectTagFilters: []*resourcegroupstaggingapi.TagFilter{ + {Key: aws.String("key1"), Values: aws.StringSlice([]string{"value1", "value2"})}, + {Key: aws.String("key2")}, + }, + expectOverallError: false, + expectListAtCodeCommit: false, + }, + { + name: "ListRepos by tag with https-fips", + cloneProtocol: "https-fips", + repositories: []*awsCodeCommitTestRepository{ + { + name: "repo1", + id: "8235624d-d248-4df9-a983-2558b01dbe83", + arn: "arn:aws:codecommit:us-east-1:111111111111:repo1", + defaultBranch: "main", + expectedCloneUrl: "https://git-codecommit-fips.us-east-1.amazonaws.com/v1/repos/repo1", + valid: true, + }, + }, + tagFilters: []*v1alpha1.TagFilter{ + {Key: "key1"}, + }, + expectTagFilters: []*resourcegroupstaggingapi.TagFilter{ + {Key: aws.String("key1")}, + }, + expectOverallError: false, + expectListAtCodeCommit: false, + }, + { + name: "ListRepos without tag with invalid repo", + cloneProtocol: "ssh", + repositories: []*awsCodeCommitTestRepository{ + { + name: "repo1", + id: "8235624d-d248-4df9-a983-2558b01dbe83", + arn: "arn:aws:codecommit:us-east-1:111111111111:repo1", + 
defaultBranch: "main", + expectedCloneUrl: "ssh://git-codecommit.us-east-1.amazonaws.com/v1/repos/repo1", + valid: true, + }, + { + name: "repo2", + id: "640d5859-d265-4e27-a9fa-e0731eb13ed7", + arn: "arn:aws:codecommit:us-east-1:111111111111:repo2", + valid: false, + }, + { + name: "repo3-nil-metadata", + id: "24a6ee96-d3a0-4be6-a595-c5e5b1ab1617", + arn: "arn:aws:codecommit:us-east-1:111111111111:repo3-nil-metadata", + getRepositoryNilMetadata: true, + valid: false, + }, + }, + expectOverallError: false, + expectListAtCodeCommit: true, + }, + { + name: "ListRepos with invalid protocol", + cloneProtocol: "invalid-protocol", + repositories: []*awsCodeCommitTestRepository{ + { + name: "repo1", + id: "8235624d-d248-4df9-a983-2558b01dbe83", + arn: "arn:aws:codecommit:us-east-1:111111111111:repo1", + defaultBranch: "main", + valid: true, + }, + }, + expectOverallError: true, + expectListAtCodeCommit: true, + }, + { + name: "ListRepos error on listRepos", + cloneProtocol: "https", + listRepositoryError: errors.New("list repo error"), + expectOverallError: true, + expectListAtCodeCommit: true, + }, + { + name: "ListRepos error on getRepo", + cloneProtocol: "https", + repositories: []*awsCodeCommitTestRepository{ + { + name: "repo1", + id: "8235624d-d248-4df9-a983-2558b01dbe83", + arn: "arn:aws:codecommit:us-east-1:111111111111:repo1", + defaultBranch: "main", + getRepositoryError: errors.New("get repo error"), + valid: true, + }, + }, + expectOverallError: true, + expectListAtCodeCommit: true, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + codeCommitClient := mocks.NewAWSCodeCommitClient(t) + taggingClient := mocks.NewAWSTaggingClient(t) + ctx := context.Background() + codecommitRepoNameIdPairs := make([]*codecommit.RepositoryNameIdPair, 0) + resourceTaggings := make([]*resourcegroupstaggingapi.ResourceTagMapping, 0) + validRepositories := make([]*awsCodeCommitTestRepository, 0) + + for _, repo := range 
testCase.repositories { + repoMetadata := &codecommit.RepositoryMetadata{ + AccountId: aws.String(repo.accountId), + Arn: aws.String(repo.arn), + CloneUrlHttp: aws.String("https://git-codecommit.us-east-1.amazonaws.com/v1/repos/" + repo.name), + CloneUrlSsh: aws.String("ssh://git-codecommit.us-east-1.amazonaws.com/v1/repos/" + repo.name), + DefaultBranch: aws.String(repo.defaultBranch), + RepositoryId: aws.String(repo.id), + RepositoryName: aws.String(repo.name), + } + if repo.getRepositoryNilMetadata { + repoMetadata = nil + } + codeCommitClient.EXPECT(). + GetRepositoryWithContext(ctx, &codecommit.GetRepositoryInput{RepositoryName: aws.String(repo.name)}). + Return(&codecommit.GetRepositoryOutput{RepositoryMetadata: repoMetadata}, repo.getRepositoryError) + codecommitRepoNameIdPairs = append(codecommitRepoNameIdPairs, &codecommit.RepositoryNameIdPair{ + RepositoryId: aws.String(repo.id), + RepositoryName: aws.String(repo.name), + }) + resourceTaggings = append(resourceTaggings, &resourcegroupstaggingapi.ResourceTagMapping{ + ResourceARN: aws.String(repo.arn), + }) + if repo.valid { + validRepositories = append(validRepositories, repo) + } + } + + if testCase.expectListAtCodeCommit { + codeCommitClient.EXPECT(). + ListRepositoriesWithContext(ctx, &codecommit.ListRepositoriesInput{}). + Return(&codecommit.ListRepositoriesOutput{ + Repositories: codecommitRepoNameIdPairs, + }, testCase.listRepositoryError) + } else { + taggingClient.EXPECT(). + GetResourcesWithContext(ctx, mock.MatchedBy(equalIgnoringTagFilterOrder(&resourcegroupstaggingapi.GetResourcesInput{ + TagFilters: testCase.expectTagFilters, + ResourceTypeFilters: aws.StringSlice([]string{resourceTypeCodeCommitRepository}), + }))). 
+ Return(&resourcegroupstaggingapi.GetResourcesOutput{ + ResourceTagMappingList: resourceTaggings, + }, testCase.listRepositoryError) + } + + provider := &AWSCodeCommitProvider{ + codeCommitClient: codeCommitClient, + taggingClient: taggingClient, + tagFilters: testCase.tagFilters, + } + repos, err := provider.ListRepos(ctx, testCase.cloneProtocol) + if testCase.expectOverallError { + assert.Error(t, err) + } else { + assert.Len(t, repos, len(validRepositories)) + for i, repo := range repos { + originRepo := validRepositories[i] + assert.Equal(t, originRepo.accountId, repo.Organization) + assert.Equal(t, originRepo.name, repo.Repository) + assert.Equal(t, originRepo.id, repo.RepositoryId) + assert.Equal(t, originRepo.defaultBranch, repo.Branch) + assert.Equal(t, originRepo.expectedCloneUrl, repo.URL) + assert.Empty(t, repo.SHA, "SHA is always empty") + } + } + }) + } +} + +func TestAWSCodeCommitRepoHasPath(t *testing.T) { + organization := "111111111111" + repoName := "repo1" + branch := "main" + + testCases := []struct { + name string + path string + expectedGetFolderPath string + getFolderOutput *codecommit.GetFolderOutput + getFolderError error + expectOverallError bool + expectedResult bool + }{ + { + name: "RepoHasPath on regular file", + path: "lib/config.yaml", + expectedGetFolderPath: "/lib", + getFolderOutput: &codecommit.GetFolderOutput{ + Files: []*codecommit.File{ + {RelativePath: aws.String("config.yaml")}, + }, + }, + expectOverallError: false, + expectedResult: true, + }, + { + name: "RepoHasPath on folder", + path: "lib/config", + expectedGetFolderPath: "/lib", + getFolderOutput: &codecommit.GetFolderOutput{ + SubFolders: []*codecommit.Folder{ + {RelativePath: aws.String("config")}, + }, + }, + expectOverallError: false, + expectedResult: true, + }, + { + name: "RepoHasPath on submodules", + path: "/lib/submodule/", + expectedGetFolderPath: "/lib", + getFolderOutput: &codecommit.GetFolderOutput{ + SubModules: []*codecommit.SubModule{ + 
{RelativePath: aws.String("submodule")}, + }, + }, + expectOverallError: false, + expectedResult: true, + }, + { + name: "RepoHasPath on symlink", + path: "./lib/service.json", + expectedGetFolderPath: "/lib", + getFolderOutput: &codecommit.GetFolderOutput{ + SymbolicLinks: []*codecommit.SymbolicLink{ + {RelativePath: aws.String("service.json")}, + }, + }, + expectOverallError: false, + expectedResult: true, + }, + { + name: "RepoHasPath when no match", + path: "no-match.json", + expectedGetFolderPath: "/", + getFolderOutput: &codecommit.GetFolderOutput{ + Files: []*codecommit.File{ + {RelativePath: aws.String("config.yaml")}, + }, + SubFolders: []*codecommit.Folder{ + {RelativePath: aws.String("config")}, + }, + SubModules: []*codecommit.SubModule{ + {RelativePath: aws.String("submodule")}, + }, + SymbolicLinks: []*codecommit.SymbolicLink{ + {RelativePath: aws.String("service.json")}, + }, + }, + expectOverallError: false, + expectedResult: false, + }, + { + name: "RepoHasPath when parent folder not found", + path: "lib/submodule", + expectedGetFolderPath: "/lib", + getFolderError: &codecommit.FolderDoesNotExistException{}, + expectOverallError: false, + }, + { + name: "RepoHasPath when unknown error", + path: "lib/submodule", + expectedGetFolderPath: "/lib", + getFolderError: errors.New("unknown error"), + expectOverallError: true, + }, + { + name: "RepoHasPath on root folder - './'", + path: "./", + expectOverallError: false, + expectedResult: true, + }, + { + name: "RepoHasPath on root folder - '/'", + path: "/", + expectOverallError: false, + expectedResult: true, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + codeCommitClient := mocks.NewAWSCodeCommitClient(t) + taggingClient := mocks.NewAWSTaggingClient(t) + ctx := context.Background() + if testCase.expectedGetFolderPath != "" { + codeCommitClient.EXPECT(). 
+ GetFolderWithContext(ctx, &codecommit.GetFolderInput{ + CommitSpecifier: aws.String(branch), + FolderPath: aws.String(testCase.expectedGetFolderPath), + RepositoryName: aws.String(repoName), + }). + Return(testCase.getFolderOutput, testCase.getFolderError) + } + provider := &AWSCodeCommitProvider{ + codeCommitClient: codeCommitClient, + taggingClient: taggingClient, + } + actual, err := provider.RepoHasPath(ctx, &Repository{ + Organization: organization, + Repository: repoName, + Branch: branch, + }, testCase.path) + if testCase.expectOverallError { + assert.Error(t, err) + } else { + assert.Equal(t, testCase.expectedResult, actual) + } + }) + } +} + +func TestAWSCodeCommitGetBranches(t *testing.T) { + name := "repo1" + id := "1a64adc4-2fb5-4abd-afe7-127984ba83c0" + defaultBranch := "main" + organization := "111111111111" + cloneUrl := "https://git-codecommit.us-east-1.amazonaws.com/v1/repos/repo1" + + testCases := []struct { + name string + branches []string + apiError error + expectOverallError bool + allBranches bool + }{ + { + name: "GetBranches all branches", + branches: []string{"main", "feature/codecommit", "chore/go-upgrade"}, + allBranches: true, + }, + { + name: "GetBranches default branch only", + allBranches: false, + }, + { + name: "GetBranches default branch only", + allBranches: false, + }, + { + name: "GetBranches all branches on api error", + apiError: errors.New("api error"), + expectOverallError: true, + allBranches: true, + }, + { + name: "GetBranches default branch on api error", + apiError: errors.New("api error"), + expectOverallError: true, + allBranches: false, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + codeCommitClient := mocks.NewAWSCodeCommitClient(t) + taggingClient := mocks.NewAWSTaggingClient(t) + ctx := context.Background() + if testCase.allBranches { + codeCommitClient.EXPECT(). 
+ ListBranchesWithContext(ctx, &codecommit.ListBranchesInput{ + RepositoryName: aws.String(name), + }). + Return(&codecommit.ListBranchesOutput{Branches: aws.StringSlice(testCase.branches)}, testCase.apiError) + } else { + codeCommitClient.EXPECT(). + GetRepositoryWithContext(ctx, &codecommit.GetRepositoryInput{RepositoryName: aws.String(name)}). + Return(&codecommit.GetRepositoryOutput{RepositoryMetadata: &codecommit.RepositoryMetadata{ + AccountId: aws.String(organization), + DefaultBranch: aws.String(defaultBranch), + }}, testCase.apiError) + } + provider := &AWSCodeCommitProvider{ + codeCommitClient: codeCommitClient, + taggingClient: taggingClient, + allBranches: testCase.allBranches, + } + actual, err := provider.GetBranches(ctx, &Repository{ + Organization: organization, + Repository: name, + URL: cloneUrl, + RepositoryId: id, + }) + if testCase.expectOverallError { + assert.Error(t, err) + } else { + assertCopiedProperties := func(repo *Repository) { + assert.Equal(t, id, repo.RepositoryId) + assert.Equal(t, name, repo.Repository) + assert.Equal(t, cloneUrl, repo.URL) + assert.Equal(t, organization, repo.Organization) + assert.Empty(t, repo.SHA) + } + actualBranches := make([]string, 0) + for _, repo := range actual { + assertCopiedProperties(repo) + actualBranches = append(actualBranches, repo.Branch) + } + if testCase.allBranches { + assert.ElementsMatch(t, testCase.branches, actualBranches) + } else { + assert.ElementsMatch(t, []string{defaultBranch}, actualBranches) + } + } + }) + } +} + +// equalIgnoringTagFilterOrder provides an argumentMatcher function that can be used to compare equality of GetResourcesInput ignoring the tagFilter ordering. 
+func equalIgnoringTagFilterOrder(expected *resourcegroupstaggingapi.GetResourcesInput) func(*resourcegroupstaggingapi.GetResourcesInput) bool { + return func(actual *resourcegroupstaggingapi.GetResourcesInput) bool { + sort.Slice(actual.TagFilters, func(i, j int) bool { + return *actual.TagFilters[i].Key < *actual.TagFilters[j].Key + }) + return cmp.Equal(expected, actual) + } +} diff --git a/applicationset/services/scm_provider/azure_devops.go b/applicationset/services/scm_provider/azure_devops.go new file mode 100644 index 0000000000000..c71dabd0509f1 --- /dev/null +++ b/applicationset/services/scm_provider/azure_devops.go @@ -0,0 +1,219 @@ +package scm_provider + +import ( + "context" + "fmt" + netUrl "net/url" + "strings" + + "github.com/google/uuid" + "github.com/microsoft/azure-devops-go-api/azuredevops" + azureGit "github.com/microsoft/azure-devops-go-api/azuredevops/git" +) + +const AZURE_DEVOPS_DEFAULT_URL = "https://dev.azure.com" + +type azureDevOpsErrorTypeKeyValuesType struct { + GitRepositoryNotFound string + GitItemNotFound string +} + +var AzureDevOpsErrorsTypeKeyValues = azureDevOpsErrorTypeKeyValuesType{ + GitRepositoryNotFound: "GitRepositoryNotFoundException", + GitItemNotFound: "GitItemNotFoundException", +} + +type AzureDevOpsClientFactory interface { + // Returns an Azure Devops Client interface. + GetClient(ctx context.Context) (azureGit.Client, error) +} + +type devopsFactoryImpl struct { + connection *azuredevops.Connection +} + +func (factory *devopsFactoryImpl) GetClient(ctx context.Context) (azureGit.Client, error) { + gitClient, err := azureGit.NewClient(ctx, factory.connection) + if err != nil { + return nil, fmt.Errorf("failed to get new Azure DevOps git client for SCM generator: %w", err) + } + return gitClient, nil +} + +// Contains Azure Devops REST API implementation of SCMProviderService. 
+// See https://docs.microsoft.com/en-us/rest/api/azure/devops + +type AzureDevOpsProvider struct { + organization string + teamProject string + accessToken string + clientFactory AzureDevOpsClientFactory + allBranches bool +} + +var _ SCMProviderService = &AzureDevOpsProvider{} +var _ AzureDevOpsClientFactory = &devopsFactoryImpl{} + +func NewAzureDevOpsProvider(ctx context.Context, accessToken string, org string, url string, project string, allBranches bool) (*AzureDevOpsProvider, error) { + if accessToken == "" { + return nil, fmt.Errorf("no access token provided") + } + + devOpsURL, err := getValidDevOpsURL(url, org) + + if err != nil { + return nil, err + } + + connection := azuredevops.NewPatConnection(devOpsURL, accessToken) + + return &AzureDevOpsProvider{organization: org, teamProject: project, accessToken: accessToken, clientFactory: &devopsFactoryImpl{connection: connection}, allBranches: allBranches}, nil +} + +func (g *AzureDevOpsProvider) ListRepos(ctx context.Context, cloneProtocol string) ([]*Repository, error) { + gitClient, err := g.clientFactory.GetClient(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get Azure DevOps client: %w", err) + } + getRepoArgs := azureGit.GetRepositoriesArgs{Project: &g.teamProject} + azureRepos, err := gitClient.GetRepositories(ctx, getRepoArgs) + + if err != nil { + return nil, err + } + repos := []*Repository{} + for _, azureRepo := range *azureRepos { + if azureRepo.Name == nil || azureRepo.DefaultBranch == nil || azureRepo.RemoteUrl == nil || azureRepo.Id == nil { + continue + } + repos = append(repos, &Repository{ + Organization: g.organization, + Repository: *azureRepo.Name, + URL: *azureRepo.RemoteUrl, + Branch: *azureRepo.DefaultBranch, + Labels: []string{}, + RepositoryId: *azureRepo.Id, + }) + } + + return repos, nil +} + +func (g *AzureDevOpsProvider) RepoHasPath(ctx context.Context, repo *Repository, path string) (bool, error) { + gitClient, err := g.clientFactory.GetClient(ctx) + if err != nil 
{ + return false, fmt.Errorf("failed to get Azure DevOps client: %w", err) + } + + var repoId string + if uuid, isUuid := repo.RepositoryId.(uuid.UUID); isUuid { //most likely an UUID, but do type-safe check anyway. Do %v fallback if not expected type. + repoId = uuid.String() + } else { + repoId = fmt.Sprintf("%v", repo.RepositoryId) + } + + branchName := repo.Branch + getItemArgs := azureGit.GetItemArgs{RepositoryId: &repoId, Project: &g.teamProject, Path: &path, VersionDescriptor: &azureGit.GitVersionDescriptor{Version: &branchName}} + _, err = gitClient.GetItem(ctx, getItemArgs) + + if err != nil { + if wrappedError, isWrappedError := err.(azuredevops.WrappedError); isWrappedError && wrappedError.TypeKey != nil { + if *wrappedError.TypeKey == AzureDevOpsErrorsTypeKeyValues.GitItemNotFound { + return false, nil + } + } + + return false, fmt.Errorf("failed to check for path existence in Azure DevOps: %w", err) + } + + return true, nil +} + +func (g *AzureDevOpsProvider) GetBranches(ctx context.Context, repo *Repository) ([]*Repository, error) { + gitClient, err := g.clientFactory.GetClient(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get Azure DevOps client: %w", err) + } + + repos := []*Repository{} + + if !g.allBranches { + defaultBranchName := strings.Replace(repo.Branch, "refs/heads/", "", 1) //Azure DevOps returns default branch info like 'refs/heads/main', but does not support branch lookup of this format. 
+ getBranchArgs := azureGit.GetBranchArgs{RepositoryId: &repo.Repository, Project: &g.teamProject, Name: &defaultBranchName} + branchResult, err := gitClient.GetBranch(ctx, getBranchArgs) + if err != nil { + if wrappedError, isWrappedError := err.(azuredevops.WrappedError); isWrappedError && wrappedError.TypeKey != nil { + if *wrappedError.TypeKey == AzureDevOpsErrorsTypeKeyValues.GitRepositoryNotFound { + return repos, nil + } + } + return nil, fmt.Errorf("could not get default branch %v (%v) from repository %v: %w", defaultBranchName, repo.Branch, repo.Repository, err) + } + + if branchResult.Name == nil || branchResult.Commit == nil { + return nil, fmt.Errorf("invalid branch result after requesting branch %v from repository %v", repo.Branch, repo.Repository) + } + + repos = append(repos, &Repository{ + Branch: *branchResult.Name, + SHA: *branchResult.Commit.CommitId, + Organization: repo.Organization, + Repository: repo.Repository, + URL: repo.URL, + Labels: []string{}, + RepositoryId: repo.RepositoryId, + }) + + return repos, nil + } + + getBranchesRequest := azureGit.GetBranchesArgs{RepositoryId: &repo.Repository, Project: &g.teamProject} + branches, err := gitClient.GetBranches(ctx, getBranchesRequest) + if err != nil { + if wrappedError, isWrappedError := err.(azuredevops.WrappedError); isWrappedError && wrappedError.TypeKey != nil { + if *wrappedError.TypeKey == AzureDevOpsErrorsTypeKeyValues.GitRepositoryNotFound { + return repos, nil + } + } + return nil, fmt.Errorf("failed getting branches from repository %v, project %v: %w", repo.Repository, g.teamProject, err) + } + + if branches == nil { + return nil, fmt.Errorf("got empty branch result from repository %v, project %v: %w", repo.Repository, g.teamProject, err) + } + + for _, azureBranch := range *branches { + repos = append(repos, &Repository{ + Branch: *azureBranch.Name, + SHA: *azureBranch.Commit.CommitId, + Organization: repo.Organization, + Repository: repo.Repository, + URL: repo.URL, + Labels: 
[]string{}, + RepositoryId: repo.RepositoryId, + }) + } + + return repos, nil +} + +func getValidDevOpsURL(url string, org string) (string, error) { + if url == "" { + url = AZURE_DEVOPS_DEFAULT_URL + } + separator := "" + if !strings.HasSuffix(url, "/") { + separator = "/" + } + + devOpsURL := fmt.Sprintf("%s%s%s", url, separator, org) + + urlCheck, err := netUrl.ParseRequestURI(devOpsURL) + + if err != nil { + return "", fmt.Errorf("got an invalid URL for the Azure SCM generator: %w", err) + } + + ret := urlCheck.String() + return ret, nil +} diff --git a/applicationset/services/scm_provider/azure_devops/git/mocks/Client.go b/applicationset/services/scm_provider/azure_devops/git/mocks/Client.go new file mode 100644 index 0000000000000..7843753c9df5b --- /dev/null +++ b/applicationset/services/scm_provider/azure_devops/git/mocks/Client.go @@ -0,0 +1,2554 @@ +// Code generated by mockery v2.10.4. DO NOT EDIT. + +package mocks + +import ( + context "context" + + core "github.com/microsoft/azure-devops-go-api/azuredevops/core" + git "github.com/microsoft/azure-devops-go-api/azuredevops/git" + + io "io" + + mock "github.com/stretchr/testify/mock" + + webapi "github.com/microsoft/azure-devops-go-api/azuredevops/webapi" +) + +// Client is an autogenerated mock type for the Client type +type Client struct { + mock.Mock +} + +// CreateAnnotatedTag provides a mock function with given fields: _a0, _a1 +func (_m *Client) CreateAnnotatedTag(_a0 context.Context, _a1 git.CreateAnnotatedTagArgs) (*git.GitAnnotatedTag, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitAnnotatedTag + if rf, ok := ret.Get(0).(func(context.Context, git.CreateAnnotatedTagArgs) *git.GitAnnotatedTag); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitAnnotatedTag) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.CreateAnnotatedTagArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// 
CreateAttachment provides a mock function with given fields: _a0, _a1 +func (_m *Client) CreateAttachment(_a0 context.Context, _a1 git.CreateAttachmentArgs) (*git.Attachment, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.Attachment + if rf, ok := ret.Get(0).(func(context.Context, git.CreateAttachmentArgs) *git.Attachment); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.Attachment) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.CreateAttachmentArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateCherryPick provides a mock function with given fields: _a0, _a1 +func (_m *Client) CreateCherryPick(_a0 context.Context, _a1 git.CreateCherryPickArgs) (*git.GitCherryPick, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitCherryPick + if rf, ok := ret.Get(0).(func(context.Context, git.CreateCherryPickArgs) *git.GitCherryPick); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitCherryPick) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.CreateCherryPickArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateComment provides a mock function with given fields: _a0, _a1 +func (_m *Client) CreateComment(_a0 context.Context, _a1 git.CreateCommentArgs) (*git.Comment, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.Comment + if rf, ok := ret.Get(0).(func(context.Context, git.CreateCommentArgs) *git.Comment); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.Comment) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.CreateCommentArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateCommitStatus provides a mock function with given fields: _a0, _a1 +func (_m *Client) CreateCommitStatus(_a0 context.Context, _a1 
git.CreateCommitStatusArgs) (*git.GitStatus, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitStatus + if rf, ok := ret.Get(0).(func(context.Context, git.CreateCommitStatusArgs) *git.GitStatus); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitStatus) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.CreateCommitStatusArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateFavorite provides a mock function with given fields: _a0, _a1 +func (_m *Client) CreateFavorite(_a0 context.Context, _a1 git.CreateFavoriteArgs) (*git.GitRefFavorite, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitRefFavorite + if rf, ok := ret.Get(0).(func(context.Context, git.CreateFavoriteArgs) *git.GitRefFavorite); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitRefFavorite) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.CreateFavoriteArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateForkSyncRequest provides a mock function with given fields: _a0, _a1 +func (_m *Client) CreateForkSyncRequest(_a0 context.Context, _a1 git.CreateForkSyncRequestArgs) (*git.GitForkSyncRequest, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitForkSyncRequest + if rf, ok := ret.Get(0).(func(context.Context, git.CreateForkSyncRequestArgs) *git.GitForkSyncRequest); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitForkSyncRequest) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.CreateForkSyncRequestArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateImportRequest provides a mock function with given fields: _a0, _a1 +func (_m *Client) CreateImportRequest(_a0 context.Context, _a1 git.CreateImportRequestArgs) (*git.GitImportRequest, 
error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitImportRequest + if rf, ok := ret.Get(0).(func(context.Context, git.CreateImportRequestArgs) *git.GitImportRequest); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitImportRequest) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.CreateImportRequestArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateLike provides a mock function with given fields: _a0, _a1 +func (_m *Client) CreateLike(_a0 context.Context, _a1 git.CreateLikeArgs) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, git.CreateLikeArgs) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CreateMergeRequest provides a mock function with given fields: _a0, _a1 +func (_m *Client) CreateMergeRequest(_a0 context.Context, _a1 git.CreateMergeRequestArgs) (*git.GitMerge, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitMerge + if rf, ok := ret.Get(0).(func(context.Context, git.CreateMergeRequestArgs) *git.GitMerge); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitMerge) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.CreateMergeRequestArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreatePullRequest provides a mock function with given fields: _a0, _a1 +func (_m *Client) CreatePullRequest(_a0 context.Context, _a1 git.CreatePullRequestArgs) (*git.GitPullRequest, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitPullRequest + if rf, ok := ret.Get(0).(func(context.Context, git.CreatePullRequestArgs) *git.GitPullRequest); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitPullRequest) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, 
git.CreatePullRequestArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreatePullRequestIterationStatus provides a mock function with given fields: _a0, _a1 +func (_m *Client) CreatePullRequestIterationStatus(_a0 context.Context, _a1 git.CreatePullRequestIterationStatusArgs) (*git.GitPullRequestStatus, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitPullRequestStatus + if rf, ok := ret.Get(0).(func(context.Context, git.CreatePullRequestIterationStatusArgs) *git.GitPullRequestStatus); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitPullRequestStatus) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.CreatePullRequestIterationStatusArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreatePullRequestLabel provides a mock function with given fields: _a0, _a1 +func (_m *Client) CreatePullRequestLabel(_a0 context.Context, _a1 git.CreatePullRequestLabelArgs) (*core.WebApiTagDefinition, error) { + ret := _m.Called(_a0, _a1) + + var r0 *core.WebApiTagDefinition + if rf, ok := ret.Get(0).(func(context.Context, git.CreatePullRequestLabelArgs) *core.WebApiTagDefinition); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*core.WebApiTagDefinition) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.CreatePullRequestLabelArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreatePullRequestReviewer provides a mock function with given fields: _a0, _a1 +func (_m *Client) CreatePullRequestReviewer(_a0 context.Context, _a1 git.CreatePullRequestReviewerArgs) (*git.IdentityRefWithVote, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.IdentityRefWithVote + if rf, ok := ret.Get(0).(func(context.Context, git.CreatePullRequestReviewerArgs) *git.IdentityRefWithVote); ok { + r0 = rf(_a0, _a1) + } else { + if 
ret.Get(0) != nil { + r0 = ret.Get(0).(*git.IdentityRefWithVote) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.CreatePullRequestReviewerArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreatePullRequestReviewers provides a mock function with given fields: _a0, _a1 +func (_m *Client) CreatePullRequestReviewers(_a0 context.Context, _a1 git.CreatePullRequestReviewersArgs) (*[]git.IdentityRefWithVote, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.IdentityRefWithVote + if rf, ok := ret.Get(0).(func(context.Context, git.CreatePullRequestReviewersArgs) *[]git.IdentityRefWithVote); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.IdentityRefWithVote) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.CreatePullRequestReviewersArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreatePullRequestStatus provides a mock function with given fields: _a0, _a1 +func (_m *Client) CreatePullRequestStatus(_a0 context.Context, _a1 git.CreatePullRequestStatusArgs) (*git.GitPullRequestStatus, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitPullRequestStatus + if rf, ok := ret.Get(0).(func(context.Context, git.CreatePullRequestStatusArgs) *git.GitPullRequestStatus); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitPullRequestStatus) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.CreatePullRequestStatusArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreatePush provides a mock function with given fields: _a0, _a1 +func (_m *Client) CreatePush(_a0 context.Context, _a1 git.CreatePushArgs) (*git.GitPush, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitPush + if rf, ok := ret.Get(0).(func(context.Context, git.CreatePushArgs) *git.GitPush); ok { + r0 = 
rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitPush) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.CreatePushArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateRepository provides a mock function with given fields: _a0, _a1 +func (_m *Client) CreateRepository(_a0 context.Context, _a1 git.CreateRepositoryArgs) (*git.GitRepository, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitRepository + if rf, ok := ret.Get(0).(func(context.Context, git.CreateRepositoryArgs) *git.GitRepository); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitRepository) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.CreateRepositoryArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateRevert provides a mock function with given fields: _a0, _a1 +func (_m *Client) CreateRevert(_a0 context.Context, _a1 git.CreateRevertArgs) (*git.GitRevert, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitRevert + if rf, ok := ret.Get(0).(func(context.Context, git.CreateRevertArgs) *git.GitRevert); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitRevert) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.CreateRevertArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateThread provides a mock function with given fields: _a0, _a1 +func (_m *Client) CreateThread(_a0 context.Context, _a1 git.CreateThreadArgs) (*git.GitPullRequestCommentThread, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitPullRequestCommentThread + if rf, ok := ret.Get(0).(func(context.Context, git.CreateThreadArgs) *git.GitPullRequestCommentThread); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitPullRequestCommentThread) + } + } 
+ + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.CreateThreadArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DeleteAttachment provides a mock function with given fields: _a0, _a1 +func (_m *Client) DeleteAttachment(_a0 context.Context, _a1 git.DeleteAttachmentArgs) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, git.DeleteAttachmentArgs) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteComment provides a mock function with given fields: _a0, _a1 +func (_m *Client) DeleteComment(_a0 context.Context, _a1 git.DeleteCommentArgs) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, git.DeleteCommentArgs) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteLike provides a mock function with given fields: _a0, _a1 +func (_m *Client) DeleteLike(_a0 context.Context, _a1 git.DeleteLikeArgs) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, git.DeleteLikeArgs) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeletePullRequestIterationStatus provides a mock function with given fields: _a0, _a1 +func (_m *Client) DeletePullRequestIterationStatus(_a0 context.Context, _a1 git.DeletePullRequestIterationStatusArgs) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, git.DeletePullRequestIterationStatusArgs) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeletePullRequestLabels provides a mock function with given fields: _a0, _a1 +func (_m *Client) DeletePullRequestLabels(_a0 context.Context, _a1 git.DeletePullRequestLabelsArgs) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, 
git.DeletePullRequestLabelsArgs) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeletePullRequestReviewer provides a mock function with given fields: _a0, _a1 +func (_m *Client) DeletePullRequestReviewer(_a0 context.Context, _a1 git.DeletePullRequestReviewerArgs) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, git.DeletePullRequestReviewerArgs) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeletePullRequestStatus provides a mock function with given fields: _a0, _a1 +func (_m *Client) DeletePullRequestStatus(_a0 context.Context, _a1 git.DeletePullRequestStatusArgs) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, git.DeletePullRequestStatusArgs) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteRefFavorite provides a mock function with given fields: _a0, _a1 +func (_m *Client) DeleteRefFavorite(_a0 context.Context, _a1 git.DeleteRefFavoriteArgs) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, git.DeleteRefFavoriteArgs) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteRepository provides a mock function with given fields: _a0, _a1 +func (_m *Client) DeleteRepository(_a0 context.Context, _a1 git.DeleteRepositoryArgs) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, git.DeleteRepositoryArgs) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteRepositoryFromRecycleBin provides a mock function with given fields: _a0, _a1 +func (_m *Client) DeleteRepositoryFromRecycleBin(_a0 context.Context, _a1 git.DeleteRepositoryFromRecycleBinArgs) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := 
ret.Get(0).(func(context.Context, git.DeleteRepositoryFromRecycleBinArgs) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetAnnotatedTag provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetAnnotatedTag(_a0 context.Context, _a1 git.GetAnnotatedTagArgs) (*git.GitAnnotatedTag, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitAnnotatedTag + if rf, ok := ret.Get(0).(func(context.Context, git.GetAnnotatedTagArgs) *git.GitAnnotatedTag); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitAnnotatedTag) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetAnnotatedTagArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAttachmentContent provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetAttachmentContent(_a0 context.Context, _a1 git.GetAttachmentContentArgs) (io.ReadCloser, error) { + ret := _m.Called(_a0, _a1) + + var r0 io.ReadCloser + if rf, ok := ret.Get(0).(func(context.Context, git.GetAttachmentContentArgs) io.ReadCloser); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.ReadCloser) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetAttachmentContentArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAttachmentZip provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetAttachmentZip(_a0 context.Context, _a1 git.GetAttachmentZipArgs) (io.ReadCloser, error) { + ret := _m.Called(_a0, _a1) + + var r0 io.ReadCloser + if rf, ok := ret.Get(0).(func(context.Context, git.GetAttachmentZipArgs) io.ReadCloser); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.ReadCloser) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetAttachmentZipArgs) error); ok { + r1 = rf(_a0, _a1) + 
} else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAttachments provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetAttachments(_a0 context.Context, _a1 git.GetAttachmentsArgs) (*[]git.Attachment, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.Attachment + if rf, ok := ret.Get(0).(func(context.Context, git.GetAttachmentsArgs) *[]git.Attachment); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.Attachment) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetAttachmentsArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetBlob provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetBlob(_a0 context.Context, _a1 git.GetBlobArgs) (*git.GitBlobRef, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitBlobRef + if rf, ok := ret.Get(0).(func(context.Context, git.GetBlobArgs) *git.GitBlobRef); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitBlobRef) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetBlobArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetBlobContent provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetBlobContent(_a0 context.Context, _a1 git.GetBlobContentArgs) (io.ReadCloser, error) { + ret := _m.Called(_a0, _a1) + + var r0 io.ReadCloser + if rf, ok := ret.Get(0).(func(context.Context, git.GetBlobContentArgs) io.ReadCloser); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.ReadCloser) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetBlobContentArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetBlobZip provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetBlobZip(_a0 context.Context, _a1 
git.GetBlobZipArgs) (io.ReadCloser, error) { + ret := _m.Called(_a0, _a1) + + var r0 io.ReadCloser + if rf, ok := ret.Get(0).(func(context.Context, git.GetBlobZipArgs) io.ReadCloser); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.ReadCloser) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetBlobZipArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetBlobsZip provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetBlobsZip(_a0 context.Context, _a1 git.GetBlobsZipArgs) (io.ReadCloser, error) { + ret := _m.Called(_a0, _a1) + + var r0 io.ReadCloser + if rf, ok := ret.Get(0).(func(context.Context, git.GetBlobsZipArgs) io.ReadCloser); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.ReadCloser) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetBlobsZipArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetBranch provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetBranch(_a0 context.Context, _a1 git.GetBranchArgs) (*git.GitBranchStats, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitBranchStats + if rf, ok := ret.Get(0).(func(context.Context, git.GetBranchArgs) *git.GitBranchStats); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitBranchStats) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetBranchArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetBranches provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetBranches(_a0 context.Context, _a1 git.GetBranchesArgs) (*[]git.GitBranchStats, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.GitBranchStats + if rf, ok := ret.Get(0).(func(context.Context, git.GetBranchesArgs) *[]git.GitBranchStats); ok 
{ + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.GitBranchStats) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetBranchesArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetChanges provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetChanges(_a0 context.Context, _a1 git.GetChangesArgs) (*git.GitCommitChanges, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitCommitChanges + if rf, ok := ret.Get(0).(func(context.Context, git.GetChangesArgs) *git.GitCommitChanges); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitCommitChanges) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetChangesArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetCherryPick provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetCherryPick(_a0 context.Context, _a1 git.GetCherryPickArgs) (*git.GitCherryPick, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitCherryPick + if rf, ok := ret.Get(0).(func(context.Context, git.GetCherryPickArgs) *git.GitCherryPick); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitCherryPick) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetCherryPickArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetCherryPickForRefName provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetCherryPickForRefName(_a0 context.Context, _a1 git.GetCherryPickForRefNameArgs) (*git.GitCherryPick, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitCherryPick + if rf, ok := ret.Get(0).(func(context.Context, git.GetCherryPickForRefNameArgs) *git.GitCherryPick); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(*git.GitCherryPick) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetCherryPickForRefNameArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetComment provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetComment(_a0 context.Context, _a1 git.GetCommentArgs) (*git.Comment, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.Comment + if rf, ok := ret.Get(0).(func(context.Context, git.GetCommentArgs) *git.Comment); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.Comment) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetCommentArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetComments provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetComments(_a0 context.Context, _a1 git.GetCommentsArgs) (*[]git.Comment, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.Comment + if rf, ok := ret.Get(0).(func(context.Context, git.GetCommentsArgs) *[]git.Comment); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.Comment) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetCommentsArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetCommit provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetCommit(_a0 context.Context, _a1 git.GetCommitArgs) (*git.GitCommit, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitCommit + if rf, ok := ret.Get(0).(func(context.Context, git.GetCommitArgs) *git.GitCommit); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitCommit) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetCommitArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// 
GetCommitDiffs provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetCommitDiffs(_a0 context.Context, _a1 git.GetCommitDiffsArgs) (*git.GitCommitDiffs, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitCommitDiffs + if rf, ok := ret.Get(0).(func(context.Context, git.GetCommitDiffsArgs) *git.GitCommitDiffs); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitCommitDiffs) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetCommitDiffsArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetCommits provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetCommits(_a0 context.Context, _a1 git.GetCommitsArgs) (*[]git.GitCommitRef, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.GitCommitRef + if rf, ok := ret.Get(0).(func(context.Context, git.GetCommitsArgs) *[]git.GitCommitRef); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.GitCommitRef) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetCommitsArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetCommitsBatch provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetCommitsBatch(_a0 context.Context, _a1 git.GetCommitsBatchArgs) (*[]git.GitCommitRef, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.GitCommitRef + if rf, ok := ret.Get(0).(func(context.Context, git.GetCommitsBatchArgs) *[]git.GitCommitRef); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.GitCommitRef) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetCommitsBatchArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetDeletedRepositories provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetDeletedRepositories(_a0 
context.Context, _a1 git.GetDeletedRepositoriesArgs) (*[]git.GitDeletedRepository, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.GitDeletedRepository + if rf, ok := ret.Get(0).(func(context.Context, git.GetDeletedRepositoriesArgs) *[]git.GitDeletedRepository); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.GitDeletedRepository) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetDeletedRepositoriesArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetForkSyncRequest provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetForkSyncRequest(_a0 context.Context, _a1 git.GetForkSyncRequestArgs) (*git.GitForkSyncRequest, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitForkSyncRequest + if rf, ok := ret.Get(0).(func(context.Context, git.GetForkSyncRequestArgs) *git.GitForkSyncRequest); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitForkSyncRequest) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetForkSyncRequestArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetForkSyncRequests provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetForkSyncRequests(_a0 context.Context, _a1 git.GetForkSyncRequestsArgs) (*[]git.GitForkSyncRequest, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.GitForkSyncRequest + if rf, ok := ret.Get(0).(func(context.Context, git.GetForkSyncRequestsArgs) *[]git.GitForkSyncRequest); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.GitForkSyncRequest) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetForkSyncRequestsArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetForks provides a mock function with given fields: _a0, _a1 +func (_m 
*Client) GetForks(_a0 context.Context, _a1 git.GetForksArgs) (*[]git.GitRepositoryRef, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.GitRepositoryRef + if rf, ok := ret.Get(0).(func(context.Context, git.GetForksArgs) *[]git.GitRepositoryRef); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.GitRepositoryRef) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetForksArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetImportRequest provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetImportRequest(_a0 context.Context, _a1 git.GetImportRequestArgs) (*git.GitImportRequest, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitImportRequest + if rf, ok := ret.Get(0).(func(context.Context, git.GetImportRequestArgs) *git.GitImportRequest); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitImportRequest) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetImportRequestArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetItem provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetItem(_a0 context.Context, _a1 git.GetItemArgs) (*git.GitItem, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitItem + if rf, ok := ret.Get(0).(func(context.Context, git.GetItemArgs) *git.GitItem); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitItem) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetItemArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetItemContent provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetItemContent(_a0 context.Context, _a1 git.GetItemContentArgs) (io.ReadCloser, error) { + ret := _m.Called(_a0, _a1) + + var r0 io.ReadCloser + if 
rf, ok := ret.Get(0).(func(context.Context, git.GetItemContentArgs) io.ReadCloser); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.ReadCloser) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetItemContentArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetItemText provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetItemText(_a0 context.Context, _a1 git.GetItemTextArgs) (io.ReadCloser, error) { + ret := _m.Called(_a0, _a1) + + var r0 io.ReadCloser + if rf, ok := ret.Get(0).(func(context.Context, git.GetItemTextArgs) io.ReadCloser); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.ReadCloser) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetItemTextArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetItemZip provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetItemZip(_a0 context.Context, _a1 git.GetItemZipArgs) (io.ReadCloser, error) { + ret := _m.Called(_a0, _a1) + + var r0 io.ReadCloser + if rf, ok := ret.Get(0).(func(context.Context, git.GetItemZipArgs) io.ReadCloser); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.ReadCloser) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetItemZipArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetItems provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetItems(_a0 context.Context, _a1 git.GetItemsArgs) (*[]git.GitItem, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.GitItem + if rf, ok := ret.Get(0).(func(context.Context, git.GetItemsArgs) *[]git.GitItem); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.GitItem) + } + } + + var r1 error + if rf, ok := 
ret.Get(1).(func(context.Context, git.GetItemsArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetItemsBatch provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetItemsBatch(_a0 context.Context, _a1 git.GetItemsBatchArgs) (*[][]git.GitItem, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[][]git.GitItem + if rf, ok := ret.Get(0).(func(context.Context, git.GetItemsBatchArgs) *[][]git.GitItem); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[][]git.GitItem) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetItemsBatchArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetLikes provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetLikes(_a0 context.Context, _a1 git.GetLikesArgs) (*[]webapi.IdentityRef, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]webapi.IdentityRef + if rf, ok := ret.Get(0).(func(context.Context, git.GetLikesArgs) *[]webapi.IdentityRef); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]webapi.IdentityRef) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetLikesArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetMergeBases provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetMergeBases(_a0 context.Context, _a1 git.GetMergeBasesArgs) (*[]git.GitCommitRef, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.GitCommitRef + if rf, ok := ret.Get(0).(func(context.Context, git.GetMergeBasesArgs) *[]git.GitCommitRef); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.GitCommitRef) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetMergeBasesArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// 
GetMergeRequest provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetMergeRequest(_a0 context.Context, _a1 git.GetMergeRequestArgs) (*git.GitMerge, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitMerge + if rf, ok := ret.Get(0).(func(context.Context, git.GetMergeRequestArgs) *git.GitMerge); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitMerge) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetMergeRequestArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPolicyConfigurations provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetPolicyConfigurations(_a0 context.Context, _a1 git.GetPolicyConfigurationsArgs) (*git.GitPolicyConfigurationResponse, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitPolicyConfigurationResponse + if rf, ok := ret.Get(0).(func(context.Context, git.GetPolicyConfigurationsArgs) *git.GitPolicyConfigurationResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitPolicyConfigurationResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetPolicyConfigurationsArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPullRequest provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetPullRequest(_a0 context.Context, _a1 git.GetPullRequestArgs) (*git.GitPullRequest, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitPullRequest + if rf, ok := ret.Get(0).(func(context.Context, git.GetPullRequestArgs) *git.GitPullRequest); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitPullRequest) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetPullRequestArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// 
GetPullRequestById provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetPullRequestById(_a0 context.Context, _a1 git.GetPullRequestByIdArgs) (*git.GitPullRequest, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitPullRequest + if rf, ok := ret.Get(0).(func(context.Context, git.GetPullRequestByIdArgs) *git.GitPullRequest); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitPullRequest) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetPullRequestByIdArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPullRequestCommits provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetPullRequestCommits(_a0 context.Context, _a1 git.GetPullRequestCommitsArgs) (*git.GetPullRequestCommitsResponseValue, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GetPullRequestCommitsResponseValue + if rf, ok := ret.Get(0).(func(context.Context, git.GetPullRequestCommitsArgs) *git.GetPullRequestCommitsResponseValue); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GetPullRequestCommitsResponseValue) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetPullRequestCommitsArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPullRequestIteration provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetPullRequestIteration(_a0 context.Context, _a1 git.GetPullRequestIterationArgs) (*git.GitPullRequestIteration, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitPullRequestIteration + if rf, ok := ret.Get(0).(func(context.Context, git.GetPullRequestIterationArgs) *git.GitPullRequestIteration); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitPullRequestIteration) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, 
git.GetPullRequestIterationArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPullRequestIterationChanges provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetPullRequestIterationChanges(_a0 context.Context, _a1 git.GetPullRequestIterationChangesArgs) (*git.GitPullRequestIterationChanges, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitPullRequestIterationChanges + if rf, ok := ret.Get(0).(func(context.Context, git.GetPullRequestIterationChangesArgs) *git.GitPullRequestIterationChanges); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitPullRequestIterationChanges) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetPullRequestIterationChangesArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPullRequestIterationCommits provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetPullRequestIterationCommits(_a0 context.Context, _a1 git.GetPullRequestIterationCommitsArgs) (*[]git.GitCommitRef, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.GitCommitRef + if rf, ok := ret.Get(0).(func(context.Context, git.GetPullRequestIterationCommitsArgs) *[]git.GitCommitRef); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.GitCommitRef) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetPullRequestIterationCommitsArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPullRequestIterationStatus provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetPullRequestIterationStatus(_a0 context.Context, _a1 git.GetPullRequestIterationStatusArgs) (*git.GitPullRequestStatus, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitPullRequestStatus + if rf, ok := ret.Get(0).(func(context.Context, 
git.GetPullRequestIterationStatusArgs) *git.GitPullRequestStatus); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitPullRequestStatus) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetPullRequestIterationStatusArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPullRequestIterationStatuses provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetPullRequestIterationStatuses(_a0 context.Context, _a1 git.GetPullRequestIterationStatusesArgs) (*[]git.GitPullRequestStatus, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.GitPullRequestStatus + if rf, ok := ret.Get(0).(func(context.Context, git.GetPullRequestIterationStatusesArgs) *[]git.GitPullRequestStatus); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.GitPullRequestStatus) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetPullRequestIterationStatusesArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPullRequestIterations provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetPullRequestIterations(_a0 context.Context, _a1 git.GetPullRequestIterationsArgs) (*[]git.GitPullRequestIteration, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.GitPullRequestIteration + if rf, ok := ret.Get(0).(func(context.Context, git.GetPullRequestIterationsArgs) *[]git.GitPullRequestIteration); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.GitPullRequestIteration) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetPullRequestIterationsArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPullRequestLabel provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetPullRequestLabel(_a0 context.Context, _a1 
git.GetPullRequestLabelArgs) (*core.WebApiTagDefinition, error) { + ret := _m.Called(_a0, _a1) + + var r0 *core.WebApiTagDefinition + if rf, ok := ret.Get(0).(func(context.Context, git.GetPullRequestLabelArgs) *core.WebApiTagDefinition); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*core.WebApiTagDefinition) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetPullRequestLabelArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPullRequestLabels provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetPullRequestLabels(_a0 context.Context, _a1 git.GetPullRequestLabelsArgs) (*[]core.WebApiTagDefinition, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]core.WebApiTagDefinition + if rf, ok := ret.Get(0).(func(context.Context, git.GetPullRequestLabelsArgs) *[]core.WebApiTagDefinition); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]core.WebApiTagDefinition) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetPullRequestLabelsArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPullRequestProperties provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetPullRequestProperties(_a0 context.Context, _a1 git.GetPullRequestPropertiesArgs) (interface{}, error) { + ret := _m.Called(_a0, _a1) + + var r0 interface{} + if rf, ok := ret.Get(0).(func(context.Context, git.GetPullRequestPropertiesArgs) interface{}); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(interface{}) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetPullRequestPropertiesArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPullRequestQuery provides a mock function with given fields: _a0, _a1 +func (_m *Client) 
GetPullRequestQuery(_a0 context.Context, _a1 git.GetPullRequestQueryArgs) (*git.GitPullRequestQuery, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitPullRequestQuery + if rf, ok := ret.Get(0).(func(context.Context, git.GetPullRequestQueryArgs) *git.GitPullRequestQuery); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitPullRequestQuery) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetPullRequestQueryArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPullRequestReviewer provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetPullRequestReviewer(_a0 context.Context, _a1 git.GetPullRequestReviewerArgs) (*git.IdentityRefWithVote, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.IdentityRefWithVote + if rf, ok := ret.Get(0).(func(context.Context, git.GetPullRequestReviewerArgs) *git.IdentityRefWithVote); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.IdentityRefWithVote) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetPullRequestReviewerArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPullRequestReviewers provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetPullRequestReviewers(_a0 context.Context, _a1 git.GetPullRequestReviewersArgs) (*[]git.IdentityRefWithVote, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.IdentityRefWithVote + if rf, ok := ret.Get(0).(func(context.Context, git.GetPullRequestReviewersArgs) *[]git.IdentityRefWithVote); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.IdentityRefWithVote) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetPullRequestReviewersArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPullRequestStatus 
provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetPullRequestStatus(_a0 context.Context, _a1 git.GetPullRequestStatusArgs) (*git.GitPullRequestStatus, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitPullRequestStatus + if rf, ok := ret.Get(0).(func(context.Context, git.GetPullRequestStatusArgs) *git.GitPullRequestStatus); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitPullRequestStatus) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetPullRequestStatusArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPullRequestStatuses provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetPullRequestStatuses(_a0 context.Context, _a1 git.GetPullRequestStatusesArgs) (*[]git.GitPullRequestStatus, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.GitPullRequestStatus + if rf, ok := ret.Get(0).(func(context.Context, git.GetPullRequestStatusesArgs) *[]git.GitPullRequestStatus); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.GitPullRequestStatus) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetPullRequestStatusesArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPullRequestThread provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetPullRequestThread(_a0 context.Context, _a1 git.GetPullRequestThreadArgs) (*git.GitPullRequestCommentThread, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitPullRequestCommentThread + if rf, ok := ret.Get(0).(func(context.Context, git.GetPullRequestThreadArgs) *git.GitPullRequestCommentThread); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitPullRequestCommentThread) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetPullRequestThreadArgs) error); ok { + 
r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPullRequestWorkItemRefs provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetPullRequestWorkItemRefs(_a0 context.Context, _a1 git.GetPullRequestWorkItemRefsArgs) (*[]webapi.ResourceRef, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]webapi.ResourceRef + if rf, ok := ret.Get(0).(func(context.Context, git.GetPullRequestWorkItemRefsArgs) *[]webapi.ResourceRef); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]webapi.ResourceRef) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetPullRequestWorkItemRefsArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPullRequests provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetPullRequests(_a0 context.Context, _a1 git.GetPullRequestsArgs) (*[]git.GitPullRequest, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.GitPullRequest + if rf, ok := ret.Get(0).(func(context.Context, git.GetPullRequestsArgs) *[]git.GitPullRequest); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.GitPullRequest) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetPullRequestsArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPullRequestsByProject provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetPullRequestsByProject(_a0 context.Context, _a1 git.GetPullRequestsByProjectArgs) (*[]git.GitPullRequest, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.GitPullRequest + if rf, ok := ret.Get(0).(func(context.Context, git.GetPullRequestsByProjectArgs) *[]git.GitPullRequest); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.GitPullRequest) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, 
git.GetPullRequestsByProjectArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPush provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetPush(_a0 context.Context, _a1 git.GetPushArgs) (*git.GitPush, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitPush + if rf, ok := ret.Get(0).(func(context.Context, git.GetPushArgs) *git.GitPush); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitPush) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetPushArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPushCommits provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetPushCommits(_a0 context.Context, _a1 git.GetPushCommitsArgs) (*[]git.GitCommitRef, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.GitCommitRef + if rf, ok := ret.Get(0).(func(context.Context, git.GetPushCommitsArgs) *[]git.GitCommitRef); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.GitCommitRef) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetPushCommitsArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPushes provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetPushes(_a0 context.Context, _a1 git.GetPushesArgs) (*[]git.GitPush, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.GitPush + if rf, ok := ret.Get(0).(func(context.Context, git.GetPushesArgs) *[]git.GitPush); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.GitPush) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetPushesArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetRecycleBinRepositories provides a mock function with given fields: _a0, _a1 +func 
(_m *Client) GetRecycleBinRepositories(_a0 context.Context, _a1 git.GetRecycleBinRepositoriesArgs) (*[]git.GitDeletedRepository, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.GitDeletedRepository + if rf, ok := ret.Get(0).(func(context.Context, git.GetRecycleBinRepositoriesArgs) *[]git.GitDeletedRepository); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.GitDeletedRepository) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetRecycleBinRepositoriesArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetRefFavorite provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetRefFavorite(_a0 context.Context, _a1 git.GetRefFavoriteArgs) (*git.GitRefFavorite, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitRefFavorite + if rf, ok := ret.Get(0).(func(context.Context, git.GetRefFavoriteArgs) *git.GitRefFavorite); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitRefFavorite) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetRefFavoriteArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetRefFavorites provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetRefFavorites(_a0 context.Context, _a1 git.GetRefFavoritesArgs) (*[]git.GitRefFavorite, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.GitRefFavorite + if rf, ok := ret.Get(0).(func(context.Context, git.GetRefFavoritesArgs) *[]git.GitRefFavorite); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.GitRefFavorite) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetRefFavoritesArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetRefs provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetRefs(_a0 
context.Context, _a1 git.GetRefsArgs) (*git.GetRefsResponseValue, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GetRefsResponseValue + if rf, ok := ret.Get(0).(func(context.Context, git.GetRefsArgs) *git.GetRefsResponseValue); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GetRefsResponseValue) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetRefsArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetRepositories provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetRepositories(_a0 context.Context, _a1 git.GetRepositoriesArgs) (*[]git.GitRepository, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.GitRepository + if rf, ok := ret.Get(0).(func(context.Context, git.GetRepositoriesArgs) *[]git.GitRepository); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.GitRepository) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetRepositoriesArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetRepository provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetRepository(_a0 context.Context, _a1 git.GetRepositoryArgs) (*git.GitRepository, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitRepository + if rf, ok := ret.Get(0).(func(context.Context, git.GetRepositoryArgs) *git.GitRepository); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitRepository) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetRepositoryArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetRepositoryWithParent provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetRepositoryWithParent(_a0 context.Context, _a1 git.GetRepositoryWithParentArgs) (*git.GitRepository, error) { + 
ret := _m.Called(_a0, _a1) + + var r0 *git.GitRepository + if rf, ok := ret.Get(0).(func(context.Context, git.GetRepositoryWithParentArgs) *git.GitRepository); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitRepository) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetRepositoryWithParentArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetRevert provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetRevert(_a0 context.Context, _a1 git.GetRevertArgs) (*git.GitRevert, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitRevert + if rf, ok := ret.Get(0).(func(context.Context, git.GetRevertArgs) *git.GitRevert); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitRevert) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetRevertArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetRevertForRefName provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetRevertForRefName(_a0 context.Context, _a1 git.GetRevertForRefNameArgs) (*git.GitRevert, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitRevert + if rf, ok := ret.Get(0).(func(context.Context, git.GetRevertForRefNameArgs) *git.GitRevert); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitRevert) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetRevertForRefNameArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetStatuses provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetStatuses(_a0 context.Context, _a1 git.GetStatusesArgs) (*[]git.GitStatus, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.GitStatus + if rf, ok := ret.Get(0).(func(context.Context, git.GetStatusesArgs) *[]git.GitStatus); 
ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.GitStatus) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetStatusesArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetSuggestions provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetSuggestions(_a0 context.Context, _a1 git.GetSuggestionsArgs) (*[]git.GitSuggestion, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.GitSuggestion + if rf, ok := ret.Get(0).(func(context.Context, git.GetSuggestionsArgs) *[]git.GitSuggestion); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.GitSuggestion) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetSuggestionsArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetThreads provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetThreads(_a0 context.Context, _a1 git.GetThreadsArgs) (*[]git.GitPullRequestCommentThread, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.GitPullRequestCommentThread + if rf, ok := ret.Get(0).(func(context.Context, git.GetThreadsArgs) *[]git.GitPullRequestCommentThread); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.GitPullRequestCommentThread) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetThreadsArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTree provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetTree(_a0 context.Context, _a1 git.GetTreeArgs) (*git.GitTreeRef, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitTreeRef + if rf, ok := ret.Get(0).(func(context.Context, git.GetTreeArgs) *git.GitTreeRef); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitTreeRef) + } + } + + 
var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetTreeArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTreeZip provides a mock function with given fields: _a0, _a1 +func (_m *Client) GetTreeZip(_a0 context.Context, _a1 git.GetTreeZipArgs) (io.ReadCloser, error) { + ret := _m.Called(_a0, _a1) + + var r0 io.ReadCloser + if rf, ok := ret.Get(0).(func(context.Context, git.GetTreeZipArgs) io.ReadCloser); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.ReadCloser) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.GetTreeZipArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QueryImportRequests provides a mock function with given fields: _a0, _a1 +func (_m *Client) QueryImportRequests(_a0 context.Context, _a1 git.QueryImportRequestsArgs) (*[]git.GitImportRequest, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.GitImportRequest + if rf, ok := ret.Get(0).(func(context.Context, git.QueryImportRequestsArgs) *[]git.GitImportRequest); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.GitImportRequest) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.QueryImportRequestsArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RestoreRepositoryFromRecycleBin provides a mock function with given fields: _a0, _a1 +func (_m *Client) RestoreRepositoryFromRecycleBin(_a0 context.Context, _a1 git.RestoreRepositoryFromRecycleBinArgs) (*git.GitRepository, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitRepository + if rf, ok := ret.Get(0).(func(context.Context, git.RestoreRepositoryFromRecycleBinArgs) *git.GitRepository); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitRepository) + } + } + + var r1 error + if rf, ok := 
ret.Get(1).(func(context.Context, git.RestoreRepositoryFromRecycleBinArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SharePullRequest provides a mock function with given fields: _a0, _a1 +func (_m *Client) SharePullRequest(_a0 context.Context, _a1 git.SharePullRequestArgs) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, git.SharePullRequestArgs) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateComment provides a mock function with given fields: _a0, _a1 +func (_m *Client) UpdateComment(_a0 context.Context, _a1 git.UpdateCommentArgs) (*git.Comment, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.Comment + if rf, ok := ret.Get(0).(func(context.Context, git.UpdateCommentArgs) *git.Comment); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.Comment) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.UpdateCommentArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UpdateImportRequest provides a mock function with given fields: _a0, _a1 +func (_m *Client) UpdateImportRequest(_a0 context.Context, _a1 git.UpdateImportRequestArgs) (*git.GitImportRequest, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitImportRequest + if rf, ok := ret.Get(0).(func(context.Context, git.UpdateImportRequestArgs) *git.GitImportRequest); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitImportRequest) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.UpdateImportRequestArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UpdatePullRequest provides a mock function with given fields: _a0, _a1 +func (_m *Client) UpdatePullRequest(_a0 context.Context, _a1 git.UpdatePullRequestArgs) (*git.GitPullRequest, error) { 
+ ret := _m.Called(_a0, _a1) + + var r0 *git.GitPullRequest + if rf, ok := ret.Get(0).(func(context.Context, git.UpdatePullRequestArgs) *git.GitPullRequest); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitPullRequest) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.UpdatePullRequestArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UpdatePullRequestIterationStatuses provides a mock function with given fields: _a0, _a1 +func (_m *Client) UpdatePullRequestIterationStatuses(_a0 context.Context, _a1 git.UpdatePullRequestIterationStatusesArgs) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, git.UpdatePullRequestIterationStatusesArgs) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdatePullRequestProperties provides a mock function with given fields: _a0, _a1 +func (_m *Client) UpdatePullRequestProperties(_a0 context.Context, _a1 git.UpdatePullRequestPropertiesArgs) (interface{}, error) { + ret := _m.Called(_a0, _a1) + + var r0 interface{} + if rf, ok := ret.Get(0).(func(context.Context, git.UpdatePullRequestPropertiesArgs) interface{}); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(interface{}) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.UpdatePullRequestPropertiesArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UpdatePullRequestReviewers provides a mock function with given fields: _a0, _a1 +func (_m *Client) UpdatePullRequestReviewers(_a0 context.Context, _a1 git.UpdatePullRequestReviewersArgs) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, git.UpdatePullRequestReviewersArgs) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// 
UpdatePullRequestStatuses provides a mock function with given fields: _a0, _a1 +func (_m *Client) UpdatePullRequestStatuses(_a0 context.Context, _a1 git.UpdatePullRequestStatusesArgs) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, git.UpdatePullRequestStatusesArgs) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateRef provides a mock function with given fields: _a0, _a1 +func (_m *Client) UpdateRef(_a0 context.Context, _a1 git.UpdateRefArgs) (*git.GitRef, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitRef + if rf, ok := ret.Get(0).(func(context.Context, git.UpdateRefArgs) *git.GitRef); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitRef) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.UpdateRefArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UpdateRefs provides a mock function with given fields: _a0, _a1 +func (_m *Client) UpdateRefs(_a0 context.Context, _a1 git.UpdateRefsArgs) (*[]git.GitRefUpdateResult, error) { + ret := _m.Called(_a0, _a1) + + var r0 *[]git.GitRefUpdateResult + if rf, ok := ret.Get(0).(func(context.Context, git.UpdateRefsArgs) *[]git.GitRefUpdateResult); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*[]git.GitRefUpdateResult) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.UpdateRefsArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UpdateRepository provides a mock function with given fields: _a0, _a1 +func (_m *Client) UpdateRepository(_a0 context.Context, _a1 git.UpdateRepositoryArgs) (*git.GitRepository, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitRepository + if rf, ok := ret.Get(0).(func(context.Context, git.UpdateRepositoryArgs) *git.GitRepository); ok { + r0 = rf(_a0, _a1) + } else { 
+ if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitRepository) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.UpdateRepositoryArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UpdateThread provides a mock function with given fields: _a0, _a1 +func (_m *Client) UpdateThread(_a0 context.Context, _a1 git.UpdateThreadArgs) (*git.GitPullRequestCommentThread, error) { + ret := _m.Called(_a0, _a1) + + var r0 *git.GitPullRequestCommentThread + if rf, ok := ret.Get(0).(func(context.Context, git.UpdateThreadArgs) *git.GitPullRequestCommentThread); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*git.GitPullRequestCommentThread) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, git.UpdateThreadArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/applicationset/services/scm_provider/azure_devops_test.go b/applicationset/services/scm_provider/azure_devops_test.go new file mode 100644 index 0000000000000..219e770d71250 --- /dev/null +++ b/applicationset/services/scm_provider/azure_devops_test.go @@ -0,0 +1,530 @@ +package scm_provider + +import ( + "context" + "fmt" + "testing" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "k8s.io/utils/pointer" + + azureMock "github.com/argoproj/argo-cd/v2/applicationset/services/scm_provider/azure_devops/git/mocks" + "github.com/microsoft/azure-devops-go-api/azuredevops" + azureGit "github.com/microsoft/azure-devops-go-api/azuredevops/git" +) + +func s(input string) *string { + return pointer.String(input) +} + +func TestAzureDevopsRepoHasPath(t *testing.T) { + organization := "myorg" + teamProject := "myorg_project" + repoName := "myorg_project_repo" + path := "dir/subdir/item.yaml" + branchName := "my/featurebranch" + + ctx := context.Background() + uuid := uuid.New().String() + + testCases 
:= []struct { + name string + pathFound bool + azureDevopsError error + returnError bool + errorMessage string + clientError error + }{ + { + name: "RepoHasPath when Azure DevOps client factory fails returns error", + clientError: fmt.Errorf("Client factory error"), + }, + { + name: "RepoHasPath when found returns true", + pathFound: true, + }, + { + name: "RepoHasPath when no path found returns false", + pathFound: false, + azureDevopsError: azuredevops.WrappedError{TypeKey: s(AzureDevOpsErrorsTypeKeyValues.GitItemNotFound)}, + }, + { + name: "RepoHasPath when unknown Azure DevOps WrappedError occurs returns error", + pathFound: false, + azureDevopsError: azuredevops.WrappedError{TypeKey: s("OtherAzureDevopsException")}, + returnError: true, + errorMessage: "failed to check for path existence", + }, + { + name: "RepoHasPath when unknown Azure DevOps error occurs returns error", + pathFound: false, + azureDevopsError: fmt.Errorf("Undefined error from Azure Devops"), + returnError: true, + errorMessage: "failed to check for path existence", + }, + { + name: "RepoHasPath when wrapped Azure DevOps error occurs without TypeKey returns error", + pathFound: false, + azureDevopsError: azuredevops.WrappedError{}, + returnError: true, + errorMessage: "failed to check for path existence", + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + gitClientMock := azureMock.Client{} + + clientFactoryMock := &AzureClientFactoryMock{mock: &mock.Mock{}} + clientFactoryMock.mock.On("GetClient", mock.Anything).Return(&gitClientMock, testCase.clientError) + + repoId := &uuid + gitClientMock.On("GetItem", ctx, azureGit.GetItemArgs{Project: &teamProject, Path: &path, VersionDescriptor: &azureGit.GitVersionDescriptor{Version: &branchName}, RepositoryId: repoId}).Return(nil, testCase.azureDevopsError) + + provider := AzureDevOpsProvider{organization: organization, teamProject: teamProject, clientFactory: clientFactoryMock} + + repo := 
&Repository{Organization: organization, Repository: repoName, RepositoryId: uuid, Branch: branchName} + hasPath, err := provider.RepoHasPath(ctx, repo, path) + + if testCase.clientError != nil { + assert.ErrorContains(t, err, testCase.clientError.Error()) + gitClientMock.AssertNotCalled(t, "GetItem", ctx, azureGit.GetItemArgs{Project: &teamProject, Path: &path, VersionDescriptor: &azureGit.GitVersionDescriptor{Version: &branchName}, RepositoryId: repoId}) + + return + } + + if testCase.returnError { + assert.ErrorContains(t, err, testCase.errorMessage) + } + + assert.Equal(t, testCase.pathFound, hasPath) + + gitClientMock.AssertCalled(t, "GetItem", ctx, azureGit.GetItemArgs{Project: &teamProject, Path: &path, VersionDescriptor: &azureGit.GitVersionDescriptor{Version: &branchName}, RepositoryId: repoId}) + + }) + } +} + +func TestGetDefaultBranchOnDisabledRepo(t *testing.T) { + + organization := "myorg" + teamProject := "myorg_project" + repoName := "myorg_project_repo" + defaultBranch := "main" + + ctx := context.Background() + + testCases := []struct { + name string + azureDevOpsError error + shouldReturnError bool + }{ + { + name: "azure devops error when disabled repo causes empty return value", + azureDevOpsError: azuredevops.WrappedError{TypeKey: s(AzureDevOpsErrorsTypeKeyValues.GitRepositoryNotFound)}, + shouldReturnError: false, + }, + { + name: "azure devops error with unknown error type returns error", + azureDevOpsError: azuredevops.WrappedError{TypeKey: s("OtherError")}, + shouldReturnError: true, + }, + { + name: "other error when calling azure devops returns error", + azureDevOpsError: fmt.Errorf("some unknown error"), + shouldReturnError: true, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + uuid := uuid.New().String() + + gitClientMock := azureMock.Client{} + + clientFactoryMock := &AzureClientFactoryMock{mock: &mock.Mock{}} + clientFactoryMock.mock.On("GetClient", mock.Anything).Return(&gitClientMock, 
nil) + + gitClientMock.On("GetBranch", ctx, azureGit.GetBranchArgs{RepositoryId: &repoName, Project: &teamProject, Name: &defaultBranch}).Return(nil, testCase.azureDevOpsError) + + repo := &Repository{Organization: organization, Repository: repoName, RepositoryId: uuid, Branch: defaultBranch} + + provider := AzureDevOpsProvider{organization: organization, teamProject: teamProject, clientFactory: clientFactoryMock, allBranches: false} + branches, err := provider.GetBranches(ctx, repo) + + if testCase.shouldReturnError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + + assert.Empty(t, branches) + + gitClientMock.AssertExpectations(t) + }) + } +} + +func TestGetAllBranchesOnDisabledRepo(t *testing.T) { + + organization := "myorg" + teamProject := "myorg_project" + repoName := "myorg_project_repo" + defaultBranch := "main" + + ctx := context.Background() + + testCases := []struct { + name string + azureDevOpsError error + shouldReturnError bool + }{ + { + name: "azure devops error when disabled repo causes empty return value", + azureDevOpsError: azuredevops.WrappedError{TypeKey: s(AzureDevOpsErrorsTypeKeyValues.GitRepositoryNotFound)}, + shouldReturnError: false, + }, + { + name: "azure devops error with unknown error type returns error", + azureDevOpsError: azuredevops.WrappedError{TypeKey: s("OtherError")}, + shouldReturnError: true, + }, + { + name: "other error when calling azure devops returns error", + azureDevOpsError: fmt.Errorf("some unknown error"), + shouldReturnError: true, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + uuid := uuid.New().String() + + gitClientMock := azureMock.Client{} + + clientFactoryMock := &AzureClientFactoryMock{mock: &mock.Mock{}} + clientFactoryMock.mock.On("GetClient", mock.Anything).Return(&gitClientMock, nil) + + gitClientMock.On("GetBranches", ctx, azureGit.GetBranchesArgs{RepositoryId: &repoName, Project: &teamProject}).Return(nil, testCase.azureDevOpsError) + 
+ repo := &Repository{Organization: organization, Repository: repoName, RepositoryId: uuid, Branch: defaultBranch} + + provider := AzureDevOpsProvider{organization: organization, teamProject: teamProject, clientFactory: clientFactoryMock, allBranches: true} + branches, err := provider.GetBranches(ctx, repo) + + if testCase.shouldReturnError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + + assert.Empty(t, branches) + + gitClientMock.AssertExpectations(t) + }) + } +} + +func TestAzureDevOpsGetDefaultBranchStripsRefsName(t *testing.T) { + + t.Run("Get branches only default branch removes characters before querying azure devops", func(t *testing.T) { + + organization := "myorg" + teamProject := "myorg_project" + repoName := "myorg_project_repo" + + ctx := context.Background() + uuid := uuid.New().String() + strippedBranchName := "somebranch" + defaultBranch := fmt.Sprintf("refs/heads/%v", strippedBranchName) + + branchReturn := &azureGit.GitBranchStats{Name: &strippedBranchName, Commit: &azureGit.GitCommitRef{CommitId: s("abc123233223")}} + repo := &Repository{Organization: organization, Repository: repoName, RepositoryId: uuid, Branch: defaultBranch} + + gitClientMock := azureMock.Client{} + + clientFactoryMock := &AzureClientFactoryMock{mock: &mock.Mock{}} + clientFactoryMock.mock.On("GetClient", mock.Anything).Return(&gitClientMock, nil) + + gitClientMock.On("GetBranch", ctx, azureGit.GetBranchArgs{RepositoryId: &repoName, Project: &teamProject, Name: &strippedBranchName}).Return(branchReturn, nil) + + provider := AzureDevOpsProvider{organization: organization, teamProject: teamProject, clientFactory: clientFactoryMock, allBranches: false} + branches, err := provider.GetBranches(ctx, repo) + + assert.NoError(t, err) + assert.Len(t, branches, 1) + assert.Equal(t, strippedBranchName, branches[0].Branch) + + gitClientMock.AssertCalled(t, "GetBranch", ctx, azureGit.GetBranchArgs{RepositoryId: &repoName, Project: &teamProject, Name: 
&strippedBranchName}) + }) +} + +func TestAzureDevOpsGetBranchesDefultBranchOnly(t *testing.T) { + organization := "myorg" + teamProject := "myorg_project" + repoName := "myorg_project_repo" + + ctx := context.Background() + uuid := uuid.New().String() + + defaultBranch := "main" + + testCases := []struct { + name string + expectedBranch *azureGit.GitBranchStats + getBranchesApiError error + clientError error + }{ + { + name: "GetBranches AllBranches false when single branch returned returns branch", + expectedBranch: &azureGit.GitBranchStats{Name: &defaultBranch, Commit: &azureGit.GitCommitRef{CommitId: s("abc123233223")}}, + }, + { + name: "GetBranches AllBranches false when request fails returns error and empty result", + getBranchesApiError: fmt.Errorf("Remote Azure Devops GetBranches error"), + }, + { + name: "GetBranches AllBranches false when Azure DevOps client fails returns error", + clientError: fmt.Errorf("Could not get Azure Devops API client"), + }, + { + name: "GetBranches AllBranches false when branch returned with long commit SHA", + expectedBranch: &azureGit.GitBranchStats{Name: &defaultBranch, Commit: &azureGit.GitCommitRef{CommitId: s("53863052ADF24229AB72154B4D83DAB7")}}, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + gitClientMock := azureMock.Client{} + + clientFactoryMock := &AzureClientFactoryMock{mock: &mock.Mock{}} + clientFactoryMock.mock.On("GetClient", mock.Anything).Return(&gitClientMock, testCase.clientError) + + gitClientMock.On("GetBranch", ctx, azureGit.GetBranchArgs{RepositoryId: &repoName, Project: &teamProject, Name: &defaultBranch}).Return(testCase.expectedBranch, testCase.getBranchesApiError) + + repo := &Repository{Organization: organization, Repository: repoName, RepositoryId: uuid, Branch: defaultBranch} + + provider := AzureDevOpsProvider{organization: organization, teamProject: teamProject, clientFactory: clientFactoryMock, allBranches: false} + branches, err := 
provider.GetBranches(ctx, repo) + + if testCase.clientError != nil { + assert.ErrorContains(t, err, testCase.clientError.Error()) + gitClientMock.AssertNotCalled(t, "GetBranch", ctx, azureGit.GetBranchArgs{RepositoryId: &repoName, Project: &teamProject, Name: &defaultBranch}) + + return + } + + if testCase.getBranchesApiError != nil { + assert.Empty(t, branches) + assert.ErrorContains(t, err, testCase.getBranchesApiError.Error()) + } else { + if testCase.expectedBranch != nil { + assert.NotEmpty(t, branches) + } + assert.Len(t, branches, 1) + assert.Equal(t, repo.RepositoryId, branches[0].RepositoryId) + } + + gitClientMock.AssertCalled(t, "GetBranch", ctx, azureGit.GetBranchArgs{RepositoryId: &repoName, Project: &teamProject, Name: &defaultBranch}) + }) + } +} + +func TestAzureDevopsGetBranches(t *testing.T) { + organization := "myorg" + teamProject := "myorg_project" + repoName := "myorg_project_repo" + + ctx := context.Background() + uuid := uuid.New().String() + + testCases := []struct { + name string + expectedBranches *[]azureGit.GitBranchStats + getBranchesApiError error + clientError error + allBranches bool + expectedProcessingErrorMsg string + }{ + { + name: "GetBranches when single branch returned returns this branch info", + expectedBranches: &[]azureGit.GitBranchStats{{Name: s("feature-feat1"), Commit: &azureGit.GitCommitRef{CommitId: s("abc123233223")}}}, + allBranches: true, + }, + { + name: "GetBranches when Azure DevOps request fails returns error and empty result", + getBranchesApiError: fmt.Errorf("Remote Azure Devops GetBranches error"), + allBranches: true, + }, + { + name: "GetBranches when no branches returned returns error", + allBranches: true, + expectedProcessingErrorMsg: "empty branch result", + }, + { + name: "GetBranches when git client retrievel fails returns error", + clientError: fmt.Errorf("Could not get Azure Devops API client"), + allBranches: true, + }, + { + name: "GetBranches when multiple branches returned returns branch info 
for all branches", + expectedBranches: &[]azureGit.GitBranchStats{ + {Name: s("feature-feat1"), Commit: &azureGit.GitCommitRef{CommitId: s("abc123233223")}}, + {Name: s("feature/feat2"), Commit: &azureGit.GitCommitRef{CommitId: s("4334")}}, + {Name: s("feature/feat2"), Commit: &azureGit.GitCommitRef{CommitId: s("53863052ADF24229AB72154B4D83DAB7")}}, + }, + allBranches: true, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + gitClientMock := azureMock.Client{} + + clientFactoryMock := &AzureClientFactoryMock{mock: &mock.Mock{}} + clientFactoryMock.mock.On("GetClient", mock.Anything).Return(&gitClientMock, testCase.clientError) + + gitClientMock.On("GetBranches", ctx, azureGit.GetBranchesArgs{RepositoryId: &repoName, Project: &teamProject}).Return(testCase.expectedBranches, testCase.getBranchesApiError) + + repo := &Repository{Organization: organization, Repository: repoName, RepositoryId: uuid} + + provider := AzureDevOpsProvider{organization: organization, teamProject: teamProject, clientFactory: clientFactoryMock, allBranches: testCase.allBranches} + branches, err := provider.GetBranches(ctx, repo) + + if testCase.expectedProcessingErrorMsg != "" { + assert.ErrorContains(t, err, testCase.expectedProcessingErrorMsg) + assert.Nil(t, branches) + + return + } + if testCase.clientError != nil { + assert.ErrorContains(t, err, testCase.clientError.Error()) + gitClientMock.AssertNotCalled(t, "GetBranches", ctx, azureGit.GetBranchesArgs{RepositoryId: &repoName, Project: &teamProject}) + return + + } + + if testCase.getBranchesApiError != nil { + assert.Empty(t, branches) + assert.ErrorContains(t, err, testCase.getBranchesApiError.Error()) + } else { + if len(*testCase.expectedBranches) > 0 { + assert.NotEmpty(t, branches) + } + assert.Len(t, branches, len(*testCase.expectedBranches)) + for _, branch := range branches { + assert.NotEmpty(t, branch.RepositoryId) + assert.Equal(t, repo.RepositoryId, branch.RepositoryId) + } + } + 
+ gitClientMock.AssertCalled(t, "GetBranches", ctx, azureGit.GetBranchesArgs{RepositoryId: &repoName, Project: &teamProject}) + }) + } +} + +func TestGetAzureDevopsRepositories(t *testing.T) { + organization := "myorg" + teamProject := "myorg_project" + + uuid := uuid.New() + ctx := context.Background() + + repoId := &uuid + + testCases := []struct { + name string + getRepositoriesError error + repositories []azureGit.GitRepository + expectedNumberOfRepos int + }{ + { + name: "ListRepos when single repo found returns repo info", + repositories: []azureGit.GitRepository{{Name: s("repo1"), DefaultBranch: s("main"), RemoteUrl: s("https://remoteurl.u"), Id: repoId}}, + expectedNumberOfRepos: 1, + }, + { + name: "ListRepos when repo has no default branch returns empty list", + repositories: []azureGit.GitRepository{{Name: s("repo2"), RemoteUrl: s("https://remoteurl.u"), Id: repoId}}, + }, + { + name: "ListRepos when Azure DevOps request fails returns error", + getRepositoriesError: fmt.Errorf("Could not get repos"), + }, + { + name: "ListRepos when repo has no name returns empty list", + repositories: []azureGit.GitRepository{{DefaultBranch: s("main"), RemoteUrl: s("https://remoteurl.u"), Id: repoId}}, + }, + { + name: "ListRepos when repo has no remote URL returns empty list", + repositories: []azureGit.GitRepository{{DefaultBranch: s("main"), Name: s("repo_name"), Id: repoId}}, + }, + { + name: "ListRepos when repo has no ID returns empty list", + repositories: []azureGit.GitRepository{{DefaultBranch: s("main"), Name: s("repo_name"), RemoteUrl: s("https://remoteurl.u")}}, + }, + { + name: "ListRepos when multiple repos returned returns list of eligible repos only", + repositories: []azureGit.GitRepository{ + {Name: s("returned1"), DefaultBranch: s("main"), RemoteUrl: s("https://remoteurl.u"), Id: repoId}, + {Name: s("missing_default_branch"), RemoteUrl: s("https://remoteurl.u"), Id: repoId}, + {DefaultBranch: s("missing_name"), RemoteUrl: s("https://remoteurl.u"), Id: 
repoId}, + {Name: s("missing_remote_url"), DefaultBranch: s("main"), Id: repoId}, + {Name: s("missing_id"), DefaultBranch: s("main"), RemoteUrl: s("https://remoteurl.u")}}, + expectedNumberOfRepos: 1, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + + gitClientMock := azureMock.Client{} + gitClientMock.On("GetRepositories", ctx, azureGit.GetRepositoriesArgs{Project: s(teamProject)}).Return(&testCase.repositories, testCase.getRepositoriesError) + + clientFactoryMock := &AzureClientFactoryMock{mock: &mock.Mock{}} + clientFactoryMock.mock.On("GetClient", mock.Anything).Return(&gitClientMock) + + provider := AzureDevOpsProvider{organization: organization, teamProject: teamProject, clientFactory: clientFactoryMock} + + repositories, err := provider.ListRepos(ctx, "https") + + if testCase.getRepositoriesError != nil { + assert.Error(t, err, "Expected an error from test case %v", testCase.name) + } + + if testCase.expectedNumberOfRepos == 0 { + assert.Empty(t, repositories) + } else { + assert.NotEmpty(t, repositories) + assert.Len(t, repositories, testCase.expectedNumberOfRepos) + } + + gitClientMock.AssertExpectations(t) + }) + } +} + +type AzureClientFactoryMock struct { + mock *mock.Mock +} + +func (m *AzureClientFactoryMock) GetClient(ctx context.Context) (azureGit.Client, error) { + args := m.mock.Called(ctx) + + var client azureGit.Client + c := args.Get(0) + if c != nil { + client = c.(azureGit.Client) + } + + var err error + if len(args) > 1 { + if e, ok := args.Get(1).(error); ok { + err = e + } + } + + return client, err +} diff --git a/applicationset/services/scm_provider/bitbucket_cloud.go b/applicationset/services/scm_provider/bitbucket_cloud.go new file mode 100644 index 0000000000000..3c453f6b9c17d --- /dev/null +++ b/applicationset/services/scm_provider/bitbucket_cloud.go @@ -0,0 +1,177 @@ +package scm_provider + +import ( + "context" + "fmt" + "net/http" + "strings" + + bitbucket 
"github.com/ktrysmt/go-bitbucket"
)

// BitBucketCloudProvider implements SCMProviderService against the Bitbucket
// Cloud REST API, scoped to a single workspace (owner).
type BitBucketCloudProvider struct {
	client      *ExtendedClient
	allBranches bool
	owner       string
}

// ExtendedClient wraps the go-bitbucket client and keeps the basic-auth
// credentials so that raw HTTP calls not covered by the library (e.g. the
// /src endpoint) can be made directly.
type ExtendedClient struct {
	*bitbucket.Client
	username string
	password string
	owner    string
}

// GetContents reports whether path exists in the repository at the commit
// recorded in repo.SHA. A 404 yields (false, nil); any status other than
// 200/404 is returned as an error.
func (c *ExtendedClient) GetContents(repo *Repository, path string) (bool, error) {
	// Getting file contents from V2 defined at https://developer.atlassian.com/cloud/bitbucket/rest/api-group-source/#api-repositories-workspace-repo-slug-src-commit-path-get
	urlStr := c.GetApiBaseURL() + fmt.Sprintf("/repositories/%s/%s/src/%s/%s?format=meta", c.owner, repo.Repository, repo.SHA, path)

	req, err := http.NewRequest(http.MethodGet, urlStr, strings.NewReader(""))
	if err != nil {
		return false, err
	}
	req.SetBasicAuth(c.username, c.password)
	resp, err := c.HttpClient.Do(req)
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	switch resp.StatusCode {
	case http.StatusNotFound:
		return false, nil
	case http.StatusOK:
		return true, nil
	default:
		// Fix: the status text is data, not a format string. The previous
		// fmt.Errorf(resp.Status) would misinterpret any '%' in it.
		return false, fmt.Errorf("%s", resp.Status)
	}
}

var _ SCMProviderService = &BitBucketCloudProvider{}

// NewBitBucketCloudProvider builds a provider for the given workspace using
// basic auth. The error return is kept for interface symmetry; it is always
// nil today.
func NewBitBucketCloudProvider(ctx context.Context, owner string, user string, password string, allBranches bool) (*BitBucketCloudProvider, error) {
	client := &ExtendedClient{
		bitbucket.NewBasicAuth(user, password),
		user,
		password,
		owner,
	}
	return &BitBucketCloudProvider{client: client, owner: owner, allBranches: allBranches}, nil
}

// GetBranches expands repo into one Repository entry per branch (or just the
// default branch when allBranches is false), copying the branch head SHA.
func (g *BitBucketCloudProvider) GetBranches(ctx context.Context, repo *Repository) ([]*Repository, error) {
	repos := []*Repository{}
	branches, err := g.listBranches(repo)
	if err != nil {
		return nil, fmt.Errorf("error listing branches for %s/%s: %v", repo.Organization, repo.Repository, err)
	}

	for _, branch := range branches {
		hash, ok := branch.Target["hash"].(string)
		if !ok {
			// Bug fix: the old message formatted err here, but err is always
			// nil at this point — the failure is the missing/untyped "hash"
			// field in the branch target itself.
			return nil, fmt.Errorf("error getting SHA for branch for %s/%s/%s: branch target hash is missing or not a string", g.owner, repo.Repository, branch.Name)
		}
		repos = append(repos, &Repository{
			Organization: repo.Organization,
			Repository:   repo.Repository,
			URL:          repo.URL,
			Branch:       branch.Name,
			SHA:          hash,
			Labels:       repo.Labels,
			RepositoryId: repo.RepositoryId,
		})
	}
	return repos, nil
}

// ListRepos lists every repository in the workspace the authenticated user is
// a member of, resolving the clone URL for the requested protocol
// (defaulting to SSH when cloneProtocol is empty).
func (g *BitBucketCloudProvider) ListRepos(ctx context.Context, cloneProtocol string) ([]*Repository, error) {
	if cloneProtocol == "" {
		cloneProtocol = "ssh"
	}
	opt := &bitbucket.RepositoriesOptions{
		Owner: g.owner,
		Role:  "member",
	}
	repos := []*Repository{}
	accountReposResp, err := g.client.Repositories.ListForAccount(opt)
	if err != nil {
		return nil, fmt.Errorf("error listing repositories for %s: %v", g.owner, err)
	}
	for _, bitBucketRepo := range accountReposResp.Items {
		cloneUrl, err := findCloneURL(cloneProtocol, &bitBucketRepo)
		if err != nil {
			return nil, fmt.Errorf("error fetching clone url for repo %s: %v", bitBucketRepo.Slug, err)
		}
		repos = append(repos, &Repository{
			Organization: g.owner,
			Repository:   bitBucketRepo.Slug,
			Branch:       bitBucketRepo.Mainbranch.Name,
			URL:          *cloneUrl,
			Labels:       []string{},
			RepositoryId: bitBucketRepo.Uuid,
		})
	}
	return repos, nil
}

// RepoHasPath reports whether the given path exists at repo's pinned SHA.
func (g *BitBucketCloudProvider) RepoHasPath(ctx context.Context, repo *Repository, path string) (bool, error) {
	contents, err := g.client.GetContents(repo, path)
	if err != nil {
		return false, err
	}
	// Idiom: return the boolean directly instead of an if/else ladder.
	return contents, nil
}

// listBranches returns either just the repo's configured branch or, when the
// provider was built with allBranches, every branch of the repository.
func (g *BitBucketCloudProvider) listBranches(repo *Repository) ([]bitbucket.RepositoryBranch, error) {
	if !g.allBranches {
		repoBranch, err := g.client.Repositories.Repository.GetBranch(&bitbucket.RepositoryBranchOptions{
			Owner:      g.owner,
			RepoSlug:   repo.Repository,
			BranchName: repo.Branch,
		})
		if err != nil {
			return nil, err
		}
		return []bitbucket.RepositoryBranch{
			*repoBranch,
		}, nil
	}

	branches, err :=
g.client.Repositories.Repository.ListBranches(&bitbucket.RepositoryBranchOptions{ + Owner: g.owner, + RepoSlug: repo.Repository, + }) + if err != nil { + return nil, err + } + return branches.Branches, nil + +} + +func findCloneURL(cloneProtocol string, repo *bitbucket.Repository) (*string, error) { + + cloneLinks, ok := repo.Links["clone"].([]interface{}) + if !ok { + return nil, fmt.Errorf("unknown type returned from repo links") + } + for _, link := range cloneLinks { + linkEntry, ok := link.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("unknown type returned from clone link") + } + if linkEntry["name"] == cloneProtocol { + url, ok := linkEntry["href"].(string) + if !ok { + return nil, fmt.Errorf("could not find href for clone link") + } + return &url, nil + } + } + return nil, fmt.Errorf("unknown clone protocol for Bitbucket cloud %v", cloneProtocol) +} diff --git a/applicationset/services/scm_provider/bitbucket_cloud_test.go b/applicationset/services/scm_provider/bitbucket_cloud_test.go new file mode 100644 index 0000000000000..fca03e1693ade --- /dev/null +++ b/applicationset/services/scm_provider/bitbucket_cloud_test.go @@ -0,0 +1,510 @@ +package scm_provider + +import ( + "context" + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +func TestBitbucketHasRepo(t *testing.T) { + testServer := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + if req.URL.Path == "/repositories/test-owner/testmike/src/dc1edb6c7d650d8ba67719ddf7b662ad8f8fb798/.gitignore2" { + res.WriteHeader(http.StatusNotFound) + _, err := res.Write([]byte("")) + if err != nil { + assert.NoError(t, fmt.Errorf("Error in mock response %v", err)) + } + } + if req.URL.Path == "/repositories/test-owner/testmike/src/dc1edb6c7d650d8ba67719ddf7b662ad8f8fb798/.gitignore" { + res.WriteHeader(http.StatusOK) + _, err := res.Write([]byte(`{ + 
"mimetype": null, + "links": { + "self": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/src/dc1edb6c7d650d8ba67719ddf7b662ad8f8fb798/.gitignore" + }, + "meta": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/src/dc1edb6c7d650d8ba67719ddf7b662ad8f8fb798/.gitignore?format=meta" + }, + "history": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/filehistory/dc1edb6c7d650d8ba67719ddf7b662ad8f8fb798/.gitignore" + } + }, + "escaped_path": ".gitignore", + "path": ".gitignore", + "commit": { + "type": "commit", + "hash": "dc1edb6c7d650d8ba67719ddf7b662ad8f8fb798", + "links": { + "self": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/commit/dc1edb6c7d650d8ba67719ddf7b662ad8f8fb798" + }, + "html": { + "href": "https://bitbucket.org/test-owner/testmike/commits/dc1edb6c7d650d8ba67719ddf7b662ad8f8fb798" + } + } + }, + "attributes": [], + "type": "commit_file", + "size": 624 + }`)) + if err != nil { + assert.NoError(t, fmt.Errorf("Error in mock response %v", err)) + } + } + })) + defer func() { testServer.Close() }() + + t.Setenv("BITBUCKET_API_BASE_URL", testServer.URL) + cases := []struct { + name, path, repo, owner, sha string + status int + }{ + { + name: "exists", + owner: "test-owner", + repo: "testmike", + path: ".gitignore", + sha: "dc1edb6c7d650d8ba67719ddf7b662ad8f8fb798", + status: http.StatusOK, + }, + { + name: "not exists", + owner: "test-owner", + repo: "testmike", + path: ".gitignore2", + sha: "dc1edb6c7d650d8ba67719ddf7b662ad8f8fb798", + status: http.StatusNotFound, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + provider, _ := NewBitBucketCloudProvider(context.Background(), c.owner, "user", "password", false) + repo := &Repository{ + Organization: c.owner, + Repository: c.repo, + SHA: c.sha, + Branch: "main", + } + hasPath, err := provider.RepoHasPath(context.Background(), repo, c.path) + if err != nil { + assert.Error(t, 
fmt.Errorf("Error in test %v", err)) + } + if c.status != http.StatusOK { + assert.False(t, hasPath) + } else { + assert.True(t, hasPath) + } + }) + } +} + +func TestBitbucketListRepos(t *testing.T) { + testServer := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + res.WriteHeader(http.StatusOK) + if req.URL.Path == "/repositories/test-owner/testmike/refs/branches" { + _, err := res.Write([]byte(`{ + "pagelen": 10, + "values": [ + { + "name": "main", + "links": { + "commits": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/commits/main" + }, + "self": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/refs/branches/main" + }, + "html": { + "href": "https://bitbucket.org/test-owner/testmike/branch/main" + } + }, + "default_merge_strategy": "merge_commit", + "merge_strategies": [ + "merge_commit", + "squash", + "fast_forward" + ], + "type": "branch", + "target": { + "hash": "dc1edb6c7d650d8ba67719ddf7b662ad8f8fb798", + "repository": { + "links": { + "self": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike" + }, + "html": { + "href": "https://bitbucket.org/test-owner/testmike" + }, + "avatar": { + "href": "https://bytebucket.org/ravatar/%7B76606e75-8aeb-4a87-9396-4abee652ec63%7D?ts=default" + } + }, + "type": "repository", + "name": "testMike", + "full_name": "test-owner/testmike", + "uuid": "{76606e75-8aeb-4a87-9396-4abee652ec63}" + }, + "links": { + "self": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/commit/dc1edb6c7d650d8ba67719ddf7b662ad8f8fb798" + }, + "comments": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/commit/dc1edb6c7d650d8ba67719ddf7b662ad8f8fb798/comments" + }, + "patch": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/patch/dc1edb6c7d650d8ba67719ddf7b662ad8f8fb798" + }, + "html": { + "href": 
"https://bitbucket.org/test-owner/testmike/commits/dc1edb6c7d650d8ba67719ddf7b662ad8f8fb798" + }, + "diff": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/diff/dc1edb6c7d650d8ba67719ddf7b662ad8f8fb798" + }, + "approve": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/commit/dc1edb6c7d650d8ba67719ddf7b662ad8f8fb798/approve" + }, + "statuses": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/commit/dc1edb6c7d650d8ba67719ddf7b662ad8f8fb798/statuses" + } + }, + "author": { + "raw": "Mike Tester ", + "type": "author", + "user": { + "display_name": "Mike Tester", + "uuid": "{ca84788f-050b-456b-5cac-93fb4484a686}", + "links": { + "self": { + "href": "https://api.bitbucket.org/2.0/users/%7Bca84788f-050b-456b-5cac-93fb4484a686%7D" + }, + "html": { + "href": "https://bitbucket.org/%7Bca84788f-050b-456b-5cac-93fb4484a686%7D/" + }, + "avatar": { + "href": "https://secure.gravatar.com/avatar/03450fe11788d0dbb39b804110c07b9f?d=https%3A%2F%2Favatar-management--avatars.us-west-2.prod.public.atl-paas.net%2Finitials%2FMM-4.png" + } + }, + "type": "user", + "nickname": "Mike Tester", + "account_id": "61ec57859d174000690f702b" + } + }, + "parents": [], + "date": "2022-03-07T19:37:58+00:00", + "message": "Initial commit", + "type": "commit" + } + } + ], + "page": 1, + "size": 1 + }`)) + if err != nil { + assert.NoError(t, fmt.Errorf("Error in mock response %v", err)) + } + } + if req.URL.Path == "/repositories/test-owner/testmike/refs/branches/main" { + _, err := res.Write([]byte(`{ + "name": "main", + "links": { + "commits": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/commits/main" + }, + "self": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/refs/branches/main" + }, + "html": { + "href": "https://bitbucket.org/test-owner/testmike/branch/main" + } + }, + "default_merge_strategy": "merge_commit", + "merge_strategies": [ + "merge_commit", + 
"squash", + "fast_forward" + ], + "type": "branch", + "target": { + "hash": "dc1edb6c7d650d8ba67719ddf7b662ad8f8fb798", + "repository": { + "links": { + "self": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike" + }, + "html": { + "href": "https://bitbucket.org/test-owner/testmike" + }, + "avatar": { + "href": "https://bytebucket.org/ravatar/%7B76606e75-8aeb-4a87-9396-4abee652ec63%7D?ts=default" + } + }, + "type": "repository", + "name": "testMike", + "full_name": "test-owner/testmike", + "uuid": "{76606e75-8aeb-4a87-9396-4abee652ec63}" + }, + "links": { + "self": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/commit/dc1edb6c7d650d8ba67719ddf7b662ad8f8fb798" + }, + "comments": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/commit/dc1edb6c7d650d8ba67719ddf7b662ad8f8fb798/comments" + }, + "patch": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/patch/dc1edb6c7d650d8ba67719ddf7b662ad8f8fb798" + }, + "html": { + "href": "https://bitbucket.org/test-owner/testmike/commits/dc1edb6c7d650d8ba67719ddf7b662ad8f8fb798" + }, + "diff": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/diff/dc1edb6c7d650d8ba67719ddf7b662ad8f8fb798" + }, + "approve": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/commit/dc1edb6c7d650d8ba67719ddf7b662ad8f8fb798/approve" + }, + "statuses": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/commit/dc1edb6c7d650d8ba67719ddf7b662ad8f8fb798/statuses" + } + }, + "author": { + "raw": "Mike Tester ", + "type": "author", + "user": { + "display_name": "Mike Tester", + "uuid": "{ca84788f-050b-456b-5cac-93fb4484a686}", + "links": { + "self": { + "href": "https://api.bitbucket.org/2.0/users/%7Bca84788f-050b-456b-5cac-93fb4484a686%7D" + }, + "html": { + "href": "https://bitbucket.org/%7Bca84788f-050b-456b-5cac-93fb4484a686%7D/" + }, + "avatar": { + "href": 
"https://secure.gravatar.com/avatar/03450fe11788d0dbb39b804110c07b9f?d=https%3A%2F%2Favatar-management--avatars.us-west-2.prod.public.atl-paas.net%2Finitials%2FMM-4.png" + } + }, + "type": "user", + "nickname": "Mike Tester", + "account_id": "61ec57859d174000690f702b" + } + }, + "parents": [], + "date": "2022-03-07T19:37:58+00:00", + "message": "Initial commit", + "type": "commit" + } + }`)) + if err != nil { + assert.NoError(t, fmt.Errorf("Error in mock response %v", err)) + } + } + if req.URL.Path == "/repositories/test-owner" { + _, err := res.Write([]byte(`{ + "pagelen": 10, + "values": [ + { + "scm": "git", + "has_wiki": false, + "links": { + "watchers": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/watchers" + }, + "branches": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/refs/branches" + }, + "tags": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/refs/tags" + }, + "commits": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/commits" + }, + "clone": [ + { + "href": "https://test-owner@bitbucket.org/test-owner/testmike.git", + "name": "https" + }, + { + "href": "git@bitbucket.org:test-owner/testmike.git", + "name": "ssh" + } + ], + "self": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike" + }, + "source": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/src" + }, + "html": { + "href": "https://bitbucket.org/test-owner/testmike" + }, + "avatar": { + "href": "https://bytebucket.org/ravatar/%7B76606e75-8aeb-4a87-9396-4abee652ec63%7D?ts=default" + }, + "hooks": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/hooks" + }, + "forks": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/forks" + }, + "downloads": { + "href": "https://api.bitbucket.org/2.0/repositories/test-owner/testmike/downloads" + }, + "pullrequests": { + "href": 
"https://api.bitbucket.org/2.0/repositories/test-owner/testmike/pullrequests" + } + }, + "created_on": "2022-03-07T19:37:58.199968+00:00", + "full_name": "test-owner/testmike", + "owner": { + "display_name": "Mike Tester", + "uuid": "{ca84788f-050b-456b-5cac-93fb4484a686}", + "links": { + "self": { + "href": "https://api.bitbucket.org/2.0/users/%7Bca84788f-050b-456b-5cac-93fb4484a686%7D" + }, + "html": { + "href": "https://bitbucket.org/%7Bca84788f-050b-456b-5cac-93fb4484a686%7D/" + }, + "avatar": { + "href": "https://secure.gravatar.com/avatar/03450fe11788d0dbb39b804110c07b9f?d=https%3A%2F%2Favatar-management--avatars.us-west-2.prod.public.atl-paas.net%2Finitials%2FMM-4.png" + } + }, + "type": "user", + "nickname": "Mike Tester", + "account_id": "61ec57859d174000690f702b" + }, + "size": 58894, + "uuid": "{76606e75-8aeb-4a87-9396-4abee652ec63}", + "type": "repository", + "website": null, + "override_settings": { + "branching_model": true, + "default_merge_strategy": true, + "branch_restrictions": true + }, + "description": "", + "has_issues": false, + "slug": "testmike", + "is_private": false, + "name": "testMike", + "language": "", + "fork_policy": "allow_forks", + "project": { + "links": { + "self": { + "href": "https://api.bitbucket.org/2.0/workspaces/test-owner/projects/TEST" + }, + "html": { + "href": "https://bitbucket.org/test-owner/workspace/projects/TEST" + }, + "avatar": { + "href": "https://bitbucket.org/account/user/test-owner/projects/TEST/avatar/32?ts=1642881431" + } + }, + "type": "project", + "name": "test", + "key": "TEST", + "uuid": "{603a1564-1509-4c97-b2a6-300a3fad2758}" + }, + "mainbranch": { + "type": "branch", + "name": "main" + }, + "workspace": { + "slug": "test-owner", + "type": "workspace", + "name": "Mike Tester", + "links": { + "self": { + "href": "https://api.bitbucket.org/2.0/workspaces/test-owner" + }, + "html": { + "href": "https://bitbucket.org/test-owner/" + }, + "avatar": { + "href": 
"https://bitbucket.org/workspaces/test-owner/avatar/?ts=1642878863" + } + }, + "uuid": "{ca84788f-050b-456b-5cac-93fb4484a686}" + }, + "updated_on": "2022-03-07T19:37:59.933133+00:00" + } + ], + "page": 1, + "size": 1 + }`)) + if err != nil { + assert.NoError(t, fmt.Errorf("Error in mock response %v", err)) + } + } + })) + defer func() { testServer.Close() }() + + t.Setenv("BITBUCKET_API_BASE_URL", testServer.URL) + cases := []struct { + name, proto, owner string + hasError, allBranches bool + branches []string + filters []v1alpha1.SCMProviderGeneratorFilter + }{ + { + name: "blank protocol", + owner: "test-owner", + branches: []string{"main"}, + }, + { + name: "ssh protocol", + proto: "ssh", + owner: "test-owner", + }, + { + name: "https protocol", + proto: "https", + owner: "test-owner", + }, + { + name: "other protocol", + proto: "other", + owner: "test-owner", + hasError: true, + }, + { + name: "all branches", + allBranches: true, + owner: "test-owner", + branches: []string{"main"}, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + provider, _ := NewBitBucketCloudProvider(context.Background(), c.owner, "user", "password", c.allBranches) + rawRepos, err := ListRepos(context.Background(), provider, c.filters, c.proto) + if c.hasError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + repos := []*Repository{} + branches := []string{} + for _, r := range rawRepos { + if r.Repository == "testmike" { + repos = append(repos, r) + branches = append(branches, r.Branch) + } + } + assert.NotEmpty(t, repos) + for _, b := range c.branches { + assert.Contains(t, branches, b) + } + } + }) + } +} diff --git a/applicationset/services/scm_provider/bitbucket_server.go b/applicationset/services/scm_provider/bitbucket_server.go new file mode 100644 index 0000000000000..9e46569512156 --- /dev/null +++ b/applicationset/services/scm_provider/bitbucket_server.go @@ -0,0 +1,209 @@ +package scm_provider + +import ( + "context" + "fmt" + "io" + + 
"github.com/argoproj/argo-cd/v2/applicationset/utils"
	bitbucketv1 "github.com/gfleury/go-bitbucket-v1"
	log "github.com/sirupsen/logrus"
)

// BitbucketServerProvider implements SCMProviderService against a self-hosted
// Bitbucket Server (Stash) instance, scoped to a single project key.
type BitbucketServerProvider struct {
	client      *bitbucketv1.APIClient
	projectKey  string
	allBranches bool
}

var _ SCMProviderService = &BitbucketServerProvider{}

// NewBitbucketServerProviderBasicAuth builds a provider that authenticates
// every request with HTTP basic auth.
func NewBitbucketServerProviderBasicAuth(ctx context.Context, username, password, url, projectKey string, allBranches bool) (*BitbucketServerProvider, error) {
	cfg := bitbucketv1.NewConfiguration(url)
	// Avoid the XSRF check
	cfg.AddDefaultHeader("x-atlassian-token", "no-check")
	cfg.AddDefaultHeader("x-requested-with", "XMLHttpRequest")

	// The go-bitbucket-v1 client picks credentials up from the context.
	ctx = context.WithValue(ctx, bitbucketv1.ContextBasicAuth, bitbucketv1.BasicAuth{
		UserName: username,
		Password: password,
	})
	return newBitbucketServerProvider(ctx, cfg, projectKey, allBranches)
}

// NewBitbucketServerProviderNoAuth builds a provider for an anonymous-access
// Bitbucket Server instance.
func NewBitbucketServerProviderNoAuth(ctx context.Context, url, projectKey string, allBranches bool) (*BitbucketServerProvider, error) {
	return newBitbucketServerProvider(ctx, bitbucketv1.NewConfiguration(url), projectKey, allBranches)
}

// newBitbucketServerProvider is the shared constructor: it normalizes the
// base path and wires up the API client.
func newBitbucketServerProvider(ctx context.Context, cfg *bitbucketv1.Configuration, projectKey string, allBranches bool) (*BitbucketServerProvider, error) {
	cfg.BasePath = utils.NormalizeBitbucketBasePath(cfg.BasePath)
	return &BitbucketServerProvider{
		client:      bitbucketv1.NewAPIClient(ctx, cfg),
		projectKey:  projectKey,
		allBranches: allBranches,
	}, nil
}

// ListRepos pages through every repository of the configured project,
// resolving each repo's clone URL for the requested protocol and its default
// branch (repos without one are skipped).
func (b *BitbucketServerProvider) ListRepos(_ context.Context, cloneProtocol string) ([]*Repository, error) {
	query := map[string]interface{}{
		"limit": 100,
	}
	result := []*Repository{}
	for {
		response, err := b.client.DefaultApi.GetRepositoriesWithOptions(b.projectKey, query)
		if err != nil {
			return nil, fmt.Errorf("error listing repositories for %s: %v", b.projectKey, err)
		}
		pageRepos, err := bitbucketv1.GetRepositoriesResponse(response)
		if err != nil {
			log.Errorf("error parsing repositories response '%v'", response.Values)
			return nil, fmt.Errorf("error parsing repositories response %s: %v", b.projectKey, err)
		}
		for _, remoteRepo := range pageRepos {
			var cloneURL string
			switch cloneProtocol {
			// Default to SSH if unspecified (i.e. if "").
			case "", "ssh":
				cloneURL = getCloneURLFromLinks(remoteRepo.Links.Clone, "ssh")
			case "https":
				cloneURL = getCloneURLFromLinks(remoteRepo.Links.Clone, "http")
			default:
				return nil, fmt.Errorf("unknown clone protocol for Bitbucket Server %v", cloneProtocol)
			}

			org := remoteRepo.Project.Key
			name := remoteRepo.Name
			// Bitbucket doesn't return the default branch in the repo query, fetch it here
			defaultBranch, err := b.getDefaultBranch(org, name)
			if err != nil {
				return nil, err
			}
			if defaultBranch == nil {
				log.Debugf("%s/%s does not have a default branch, skipping", org, name)
				continue
			}

			result = append(result, &Repository{
				Organization: org,
				Repository:   name,
				URL:          cloneURL,
				Branch:       defaultBranch.DisplayID,
				SHA:          defaultBranch.LatestCommit,
				Labels:       []string{}, // Not supported by library
				RepositoryId: remoteRepo.ID,
			})
		}
		more, nextPageStart := bitbucketv1.HasNextPage(response)
		if !more {
			return result, nil
		}
		query["start"] = nextPageStart
	}
}

// RepoHasPath reports whether path exists on repo's branch. A 404 from the
// content endpoint means "not found" rather than an error.
func (b *BitbucketServerProvider) RepoHasPath(_ context.Context, repo *Repository, path string) (bool, error) {
	opts := map[string]interface{}{
		"limit": 100,
		"at":    repo.Branch,
		"type_": true,
	}
	// No need to query for all pages here
	response, err := b.client.DefaultApi.GetContent_0(repo.Organization, repo.Repository, path, opts)
	switch {
	case response != nil && response.StatusCode == 404:
		// File/directory not found
		return false, nil
	case err != nil:
		return false, err
	default:
		return true, nil
	}
}

// GetBranches expands repo into one Repository entry per branch (or just the
// default branch when allBranches is false).
func (b *BitbucketServerProvider) GetBranches(_ context.Context, repo *Repository) ([]*Repository, error) {
	branches, err := b.listBranches(repo)
	if err != nil {
		return nil, fmt.Errorf("error listing branches for %s/%s: %v", repo.Organization, repo.Repository, err)
	}

	result := make([]*Repository, 0, len(branches))
	for _, branch := range branches {
		result = append(result, &Repository{
			Organization: repo.Organization,
			Repository:   repo.Repository,
			URL:          repo.URL,
			Branch:       branch.DisplayID,
			SHA:          branch.LatestCommit,
			Labels:       repo.Labels,
			RepositoryId: repo.RepositoryId,
		})
	}
	return result, nil
}

func (b *BitbucketServerProvider) listBranches(repo *Repository) ([]bitbucketv1.Branch, error) {
	// If we don't specifically want to query for all branches, just use the default branch and call it a day.
	if !b.allBranches {
		branch, err := b.getDefaultBranch(repo.Organization, repo.Repository)
		if err != nil {
			return nil, err
		}
		if branch == nil {
			return []bitbucketv1.Branch{}, nil
		}
		return []bitbucketv1.Branch{*branch}, nil
	}
	// Otherwise, scrape the GetBranches API.
	branches := []bitbucketv1.Branch{}
	paged := map[string]interface{}{
		"limit": 100,
	}
	for {
		response, err := b.client.DefaultApi.GetBranches(repo.Organization, repo.Repository, paged)
		if err != nil {
			return nil, fmt.Errorf("error listing branches for %s/%s: %v", repo.Organization, repo.Repository, err)
		}
		bitbucketBranches, err := bitbucketv1.GetBranchesResponse(response)
		if err != nil {
			log.Errorf("error parsing branches response '%v'", response.Values)
			return nil, fmt.Errorf("error parsing branches response for %s/%s: %v", repo.Organization, repo.Repository, err)
		}

		branches = append(branches, bitbucketBranches...)
+ + hasNextPage, nextPageStart := bitbucketv1.HasNextPage(response) + if !hasNextPage { + break + } + paged["start"] = nextPageStart + } + return branches, nil +} + +func (b *BitbucketServerProvider) getDefaultBranch(org string, repo string) (*bitbucketv1.Branch, error) { + response, err := b.client.DefaultApi.GetDefaultBranch(org, repo) + // The API will return 404 if a default branch is set but doesn't exist. In case the repo is empty and default branch is unset, + // we will get an EOF and a nil response. + if (response != nil && response.StatusCode == 404) || (response == nil && err == io.EOF) { + return nil, nil + } + if err != nil { + return nil, err + } + branch, err := bitbucketv1.GetBranchResponse(response) + if err != nil { + return nil, err + } + return &branch, nil +} + +func getCloneURLFromLinks(links []bitbucketv1.CloneLink, name string) string { + for _, link := range links { + if link.Name == name { + return link.Href + } + } + return "" +} diff --git a/applicationset/services/scm_provider/bitbucket_server_test.go b/applicationset/services/scm_provider/bitbucket_server_test.go new file mode 100644 index 0000000000000..d403bd72caaac --- /dev/null +++ b/applicationset/services/scm_provider/bitbucket_server_test.go @@ -0,0 +1,594 @@ +package scm_provider + +import ( + "context" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" +) + +func defaultHandler(t *testing.T) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + var err error + switch r.RequestURI { + case "/rest/api/1.0/projects/PROJECT/repos?limit=100": + _, err = io.WriteString(w, `{ + "size": 1, + "limit": 100, + "isLastPage": true, + "values": [ + { + "id": 1, + "name": "REPO", + "project": { + "key": "PROJECT" + }, + "links": { + "clone": [ + { + "href": "ssh://git@mycompany.bitbucket.org/PROJECT/REPO.git", + "name": "ssh" + }, + { + "href": 
"https://mycompany.bitbucket.org/scm/PROJECT/REPO.git", + "name": "http" + } + ] + } + } + ], + "start": 0 + }`) + case "/rest/api/1.0/projects/PROJECT/repos/REPO/branches?limit=100": + _, err = io.WriteString(w, `{ + "size": 1, + "limit": 100, + "isLastPage": true, + "values": [ + { + "id": "refs/heads/main", + "displayId": "main", + "type": "BRANCH", + "latestCommit": "8d51122def5632836d1cb1026e879069e10a1e13", + "latestChangeset": "8d51122def5632836d1cb1026e879069e10a1e13", + "isDefault": true + } + ], + "start": 0 + }`) + case "/rest/api/1.0/projects/PROJECT/repos/REPO/branches/default": + _, err = io.WriteString(w, `{ + "id": "refs/heads/main", + "displayId": "main", + "type": "BRANCH", + "latestCommit": "8d51122def5632836d1cb1026e879069e10a1e13", + "latestChangeset": "8d51122def5632836d1cb1026e879069e10a1e13", + "isDefault": true + }`) + default: + t.Fail() + } + if err != nil { + t.Fail() + } + } +} + +func verifyDefaultRepo(t *testing.T, err error, repos []*Repository) { + assert.NoError(t, err) + assert.Equal(t, 1, len(repos)) + assert.Equal(t, Repository{ + Organization: "PROJECT", + Repository: "REPO", + URL: "ssh://git@mycompany.bitbucket.org/PROJECT/REPO.git", + Branch: "main", + SHA: "8d51122def5632836d1cb1026e879069e10a1e13", + Labels: []string{}, + RepositoryId: 1, + }, *repos[0]) +} + +func TestListReposNoAuth(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Empty(t, r.Header.Get("Authorization")) + defaultHandler(t)(w, r) + })) + defer ts.Close() + provider, err := NewBitbucketServerProviderNoAuth(context.Background(), ts.URL, "PROJECT", true) + assert.NoError(t, err) + repos, err := provider.ListRepos(context.Background(), "ssh") + verifyDefaultRepo(t, err, repos) +} + +func TestListReposPagination(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Empty(t, r.Header.Get("Authorization")) + var err error + switch 
r.RequestURI { + case "/rest/api/1.0/projects/PROJECT/repos?limit=100": + _, err = io.WriteString(w, `{ + "size": 1, + "limit": 100, + "isLastPage": false, + "values": [ + { + "id": 100, + "name": "REPO", + "project": { + "key": "PROJECT" + }, + "links": { + "clone": [ + { + "href": "ssh://git@mycompany.bitbucket.org/PROJECT/REPO.git", + "name": "ssh" + }, + { + "href": "https://mycompany.bitbucket.org/scm/PROJECT/REPO.git", + "name": "http" + } + ] + } + } + ], + "start": 0, + "nextPageStart": 200 + }`) + case "/rest/api/1.0/projects/PROJECT/repos?limit=100&start=200": + _, err = io.WriteString(w, `{ + "size": 1, + "limit": 100, + "isLastPage": true, + "values": [ + { + "id": 200, + "name": "REPO2", + "project": { + "key": "PROJECT" + }, + "links": { + "clone": [ + { + "href": "ssh://git@mycompany.bitbucket.org/PROJECT/REPO2.git", + "name": "ssh" + }, + { + "href": "https://mycompany.bitbucket.org/scm/PROJECT/REPO2.git", + "name": "http" + } + ] + } + } + ], + "start": 200 + }`) + case "/rest/api/1.0/projects/PROJECT/repos/REPO/branches/default": + _, err = io.WriteString(w, `{ + "id": "refs/heads/main", + "displayId": "main", + "type": "BRANCH", + "latestCommit": "8d51122def5632836d1cb1026e879069e10a1e13", + "isDefault": true + }`) + case "/rest/api/1.0/projects/PROJECT/repos/REPO2/branches/default": + _, err = io.WriteString(w, `{ + "id": "refs/heads/development", + "displayId": "development", + "type": "BRANCH", + "latestCommit": "2d51122def5632836d1cb1026e879069e10a1e13", + "isDefault": true + }`) + default: + t.Fail() + } + if err != nil { + t.Fail() + } + })) + defer ts.Close() + provider, err := NewBitbucketServerProviderNoAuth(context.Background(), ts.URL, "PROJECT", true) + assert.NoError(t, err) + repos, err := provider.ListRepos(context.Background(), "ssh") + assert.NoError(t, err) + assert.Equal(t, 2, len(repos)) + assert.Equal(t, Repository{ + Organization: "PROJECT", + Repository: "REPO", + URL: "ssh://git@mycompany.bitbucket.org/PROJECT/REPO.git", + 
Branch: "main", + SHA: "8d51122def5632836d1cb1026e879069e10a1e13", + Labels: []string{}, + RepositoryId: 100, + }, *repos[0]) + + assert.Equal(t, Repository{ + Organization: "PROJECT", + Repository: "REPO2", + URL: "ssh://git@mycompany.bitbucket.org/PROJECT/REPO2.git", + Branch: "development", + SHA: "2d51122def5632836d1cb1026e879069e10a1e13", + Labels: []string{}, + RepositoryId: 200, + }, *repos[1]) +} + +func TestGetBranchesBranchPagination(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Empty(t, r.Header.Get("Authorization")) + switch r.RequestURI { + case "/rest/api/1.0/projects/PROJECT/repos/REPO/branches?limit=100": + _, err := io.WriteString(w, `{ + "size": 1, + "limit": 100, + "isLastPage": false, + "values": [ + { + "id": "refs/heads/main", + "displayId": "main", + "type": "BRANCH", + "latestCommit": "8d51122def5632836d1cb1026e879069e10a1e13", + "latestChangeset": "8d51122def5632836d1cb1026e879069e10a1e13", + "isDefault": true + } + ], + "start": 0, + "nextPageStart": 200 + }`) + if err != nil { + t.Fail() + } + return + case "/rest/api/1.0/projects/PROJECT/repos/REPO/branches?limit=100&start=200": + _, err := io.WriteString(w, `{ + "size": 1, + "limit": 100, + "isLastPage": true, + "values": [ + { + "id": "refs/heads/feature", + "displayId": "feature", + "type": "BRANCH", + "latestCommit": "9d51122def5632836d1cb1026e879069e10a1e13", + "latestChangeset": "9d51122def5632836d1cb1026e879069e10a1e13", + "isDefault": true + } + ], + "start": 200 + }`) + if err != nil { + t.Fail() + } + return + } + defaultHandler(t)(w, r) + })) + defer ts.Close() + provider, err := NewBitbucketServerProviderNoAuth(context.Background(), ts.URL, "PROJECT", true) + assert.NoError(t, err) + repos, err := provider.GetBranches(context.Background(), &Repository{ + Organization: "PROJECT", + Repository: "REPO", + URL: "ssh://git@mycompany.bitbucket.org/PROJECT/REPO.git", + Labels: []string{}, + RepositoryId: 1, + }) + 
assert.NoError(t, err) + assert.Equal(t, 2, len(repos)) + assert.Equal(t, Repository{ + Organization: "PROJECT", + Repository: "REPO", + URL: "ssh://git@mycompany.bitbucket.org/PROJECT/REPO.git", + Branch: "main", + SHA: "8d51122def5632836d1cb1026e879069e10a1e13", + Labels: []string{}, + RepositoryId: 1, + }, *repos[0]) + + assert.Equal(t, Repository{ + Organization: "PROJECT", + Repository: "REPO", + URL: "ssh://git@mycompany.bitbucket.org/PROJECT/REPO.git", + Branch: "feature", + SHA: "9d51122def5632836d1cb1026e879069e10a1e13", + Labels: []string{}, + RepositoryId: 1, + }, *repos[1]) +} + +func TestGetBranchesDefaultOnly(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Empty(t, r.Header.Get("Authorization")) + switch r.RequestURI { + case "/rest/api/1.0/projects/PROJECT/repos/REPO/branches/default": + _, err := io.WriteString(w, `{ + "id": "refs/heads/default", + "displayId": "default", + "type": "BRANCH", + "latestCommit": "ab51122def5632836d1cb1026e879069e10a1e13", + "latestChangeset": "ab51122def5632836d1cb1026e879069e10a1e13", + "isDefault": true + }`) + if err != nil { + t.Fail() + } + return + } + defaultHandler(t)(w, r) + })) + defer ts.Close() + provider, err := NewBitbucketServerProviderNoAuth(context.Background(), ts.URL, "PROJECT", false) + assert.NoError(t, err) + repos, err := provider.GetBranches(context.Background(), &Repository{ + Organization: "PROJECT", + Repository: "REPO", + URL: "ssh://git@mycompany.bitbucket.org/PROJECT/REPO.git", + Labels: []string{}, + RepositoryId: 1, + }) + assert.NoError(t, err) + assert.Equal(t, 1, len(repos)) + assert.Equal(t, Repository{ + Organization: "PROJECT", + Repository: "REPO", + URL: "ssh://git@mycompany.bitbucket.org/PROJECT/REPO.git", + Branch: "default", + SHA: "ab51122def5632836d1cb1026e879069e10a1e13", + Labels: []string{}, + RepositoryId: 1, + }, *repos[0]) +} + +func TestGetBranchesMissingDefault(t *testing.T) { + ts := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Empty(t, r.Header.Get("Authorization")) + switch r.RequestURI { + case "/rest/api/1.0/projects/PROJECT/repos/REPO/branches/default": + http.Error(w, "Not found", http.StatusNotFound) + } + defaultHandler(t)(w, r) + })) + defer ts.Close() + provider, err := NewBitbucketServerProviderNoAuth(context.Background(), ts.URL, "PROJECT", false) + assert.NoError(t, err) + repos, err := provider.GetBranches(context.Background(), &Repository{ + Organization: "PROJECT", + Repository: "REPO", + URL: "ssh://git@mycompany.bitbucket.org/PROJECT/REPO.git", + Labels: []string{}, + RepositoryId: 1, + }) + assert.NoError(t, err) + assert.Empty(t, repos) +} + +func TestGetBranchesEmptyRepo(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Empty(t, r.Header.Get("Authorization")) + switch r.RequestURI { + case "/rest/api/1.0/projects/PROJECT/repos/REPO/branches/default": + return + } + })) + defer ts.Close() + provider, err := NewBitbucketServerProviderNoAuth(context.Background(), ts.URL, "PROJECT", false) + assert.NoError(t, err) + repos, err := provider.GetBranches(context.Background(), &Repository{ + Organization: "PROJECT", + Repository: "REPO", + URL: "ssh://git@mycompany.bitbucket.org/PROJECT/REPO.git", + Labels: []string{}, + RepositoryId: 1, + }) + assert.Empty(t, repos) + assert.NoError(t, err) +} + +func TestGetBranchesErrorDefaultBranch(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Empty(t, r.Header.Get("Authorization")) + switch r.RequestURI { + case "/rest/api/1.0/projects/PROJECT/repos/REPO/branches/default": + http.Error(w, "Internal server error", http.StatusInternalServerError) + } + defaultHandler(t)(w, r) + })) + defer ts.Close() + provider, err := NewBitbucketServerProviderNoAuth(context.Background(), ts.URL, "PROJECT", false) + 
assert.NoError(t, err) + _, err = provider.GetBranches(context.Background(), &Repository{ + Organization: "PROJECT", + Repository: "REPO", + URL: "ssh://git@mycompany.bitbucket.org/PROJECT/REPO.git", + Labels: []string{}, + RepositoryId: 1, + }) + assert.Error(t, err) +} + +func TestListReposBasicAuth(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "Basic dXNlcjpwYXNzd29yZA==", r.Header.Get("Authorization")) + assert.Equal(t, "no-check", r.Header.Get("X-Atlassian-Token")) + defaultHandler(t)(w, r) + })) + defer ts.Close() + provider, err := NewBitbucketServerProviderBasicAuth(context.Background(), "user", "password", ts.URL, "PROJECT", true) + assert.NoError(t, err) + repos, err := provider.ListRepos(context.Background(), "ssh") + verifyDefaultRepo(t, err, repos) +} + +func TestListReposDefaultBranch(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Empty(t, r.Header.Get("Authorization")) + switch r.RequestURI { + case "/rest/api/1.0/projects/PROJECT/repos/REPO/branches/default": + _, err := io.WriteString(w, `{ + "id": "refs/heads/default", + "displayId": "default", + "type": "BRANCH", + "latestCommit": "1d51122def5632836d1cb1026e879069e10a1e13", + "latestChangeset": "1d51122def5632836d1cb1026e879069e10a1e13", + "isDefault": true + }`) + if err != nil { + t.Fail() + } + return + } + defaultHandler(t)(w, r) + })) + defer ts.Close() + provider, err := NewBitbucketServerProviderNoAuth(context.Background(), ts.URL, "PROJECT", false) + assert.NoError(t, err) + repos, err := provider.ListRepos(context.Background(), "ssh") + assert.NoError(t, err) + assert.Equal(t, 1, len(repos)) + assert.Equal(t, Repository{ + Organization: "PROJECT", + Repository: "REPO", + URL: "ssh://git@mycompany.bitbucket.org/PROJECT/REPO.git", + Branch: "default", + SHA: "1d51122def5632836d1cb1026e879069e10a1e13", + Labels: []string{}, + RepositoryId: 1, + 
}, *repos[0]) +} + +func TestListReposMissingDefaultBranch(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Empty(t, r.Header.Get("Authorization")) + switch r.RequestURI { + case "/rest/api/1.0/projects/PROJECT/repos/REPO/branches/default": + http.Error(w, "Not found", http.StatusNotFound) + } + defaultHandler(t)(w, r) + })) + defer ts.Close() + provider, err := NewBitbucketServerProviderNoAuth(context.Background(), ts.URL, "PROJECT", false) + assert.NoError(t, err) + repos, err := provider.ListRepos(context.Background(), "ssh") + assert.NoError(t, err) + assert.Empty(t, repos) +} + +func TestListReposErrorDefaultBranch(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Empty(t, r.Header.Get("Authorization")) + switch r.RequestURI { + case "/rest/api/1.0/projects/PROJECT/repos/REPO/branches/default": + http.Error(w, "Internal server error", http.StatusInternalServerError) + } + defaultHandler(t)(w, r) + })) + defer ts.Close() + provider, err := NewBitbucketServerProviderNoAuth(context.Background(), ts.URL, "PROJECT", false) + assert.NoError(t, err) + _, err = provider.ListRepos(context.Background(), "ssh") + assert.Error(t, err) +} + +func TestListReposCloneProtocol(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Empty(t, r.Header.Get("Authorization")) + defaultHandler(t)(w, r) + })) + defer ts.Close() + provider, err := NewBitbucketServerProviderNoAuth(context.Background(), ts.URL, "PROJECT", true) + assert.NoError(t, err) + repos, err := provider.ListRepos(context.Background(), "https") + assert.NoError(t, err) + assert.Equal(t, 1, len(repos)) + assert.Equal(t, Repository{ + Organization: "PROJECT", + Repository: "REPO", + URL: "https://mycompany.bitbucket.org/scm/PROJECT/REPO.git", + Branch: "main", + SHA: "8d51122def5632836d1cb1026e879069e10a1e13", + Labels: 
[]string{}, + RepositoryId: 1, + }, *repos[0]) +} + +func TestListReposUnknownProtocol(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Empty(t, r.Header.Get("Authorization")) + defaultHandler(t)(w, r) + })) + defer ts.Close() + provider, err := NewBitbucketServerProviderNoAuth(context.Background(), ts.URL, "PROJECT", true) + assert.NoError(t, err) + _, errProtocol := provider.ListRepos(context.Background(), "http") + assert.NotNil(t, errProtocol) +} + +func TestBitbucketServerHasPath(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var err error + switch r.RequestURI { + case "/rest/api/1.0/projects/PROJECT/repos/REPO/browse/pkg?at=main&limit=100&type=true": + _, err = io.WriteString(w, `{"type":"DIRECTORY"}`) + case "/rest/api/1.0/projects/PROJECT/repos/REPO/browse/pkg/?at=main&limit=100&type=true": + _, err = io.WriteString(w, `{"type":"DIRECTORY"}`) + case "/rest/api/1.0/projects/PROJECT/repos/REPO/browse/anotherpkg/file.txt?at=main&limit=100&type=true": + _, err = io.WriteString(w, `{"type":"FILE"}`) + + case "/rest/api/1.0/projects/PROJECT/repos/REPO/browse/anotherpkg/missing.txt?at=main&limit=100&type=true": + http.Error(w, "The path \"anotherpkg/missing.txt\" does not exist at revision \"main\"", http.StatusNotFound) + case "/rest/api/1.0/projects/PROJECT/repos/REPO/browse/notathing?at=main&limit=100&type=true": + http.Error(w, "The path \"notathing\" does not exist at revision \"main\"", http.StatusNotFound) + + case "/rest/api/1.0/projects/PROJECT/repos/REPO/browse/return-redirect?at=main&limit=100&type=true": + http.Redirect(w, r, "http://"+r.Host+"/rest/api/1.0/projects/PROJECT/repos/REPO/browse/redirected?at=main&limit=100&type=true", http.StatusMovedPermanently) + case "/rest/api/1.0/projects/PROJECT/repos/REPO/browse/redirected?at=main&limit=100&type=true": + _, err = io.WriteString(w, `{"type":"DIRECTORY"}`) + + case 
"/rest/api/1.0/projects/PROJECT/repos/REPO/browse/unauthorized-response?at=main&limit=100&type=true": + http.Error(w, "Authentication failed", http.StatusUnauthorized) + + default: + t.Fail() + } + if err != nil { + t.Fail() + } + })) + defer ts.Close() + provider, err := NewBitbucketServerProviderNoAuth(context.Background(), ts.URL, "PROJECT", true) + assert.NoError(t, err) + repo := &Repository{ + Organization: "PROJECT", + Repository: "REPO", + Branch: "main", + } + ok, err := provider.RepoHasPath(context.Background(), repo, "pkg") + assert.NoError(t, err) + assert.True(t, ok) + + ok, err = provider.RepoHasPath(context.Background(), repo, "pkg/") + assert.NoError(t, err) + assert.True(t, ok) + + ok, err = provider.RepoHasPath(context.Background(), repo, "anotherpkg/file.txt") + assert.NoError(t, err) + assert.True(t, ok) + + ok, err = provider.RepoHasPath(context.Background(), repo, "anotherpkg/missing.txt") + assert.NoError(t, err) + assert.False(t, ok) + + ok, err = provider.RepoHasPath(context.Background(), repo, "notathing") + assert.NoError(t, err) + assert.False(t, ok) + + ok, err = provider.RepoHasPath(context.Background(), repo, "return-redirect") + assert.NoError(t, err) + assert.True(t, ok) + + _, err = provider.RepoHasPath(context.Background(), repo, "unauthorized-response") + assert.Error(t, err) +} diff --git a/applicationset/services/scm_provider/gitea.go b/applicationset/services/scm_provider/gitea.go new file mode 100644 index 0000000000000..25554d52af85f --- /dev/null +++ b/applicationset/services/scm_provider/gitea.go @@ -0,0 +1,141 @@ +package scm_provider + +import ( + "context" + "crypto/tls" + "fmt" + "net/http" + "net/http/cookiejar" + "os" + + "code.gitea.io/sdk/gitea" +) + +type GiteaProvider struct { + client *gitea.Client + owner string + allBranches bool +} + +var _ SCMProviderService = &GiteaProvider{} + +func NewGiteaProvider(ctx context.Context, owner, token, url string, allBranches, insecure bool) (*GiteaProvider, error) { + if 
token == "" { + token = os.Getenv("GITEA_TOKEN") + } + httpClient := &http.Client{} + if insecure { + cookieJar, _ := cookiejar.New(nil) + + tr := http.DefaultTransport.(*http.Transport).Clone() + tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} + + httpClient = &http.Client{ + Jar: cookieJar, + Transport: tr, + } + } + client, err := gitea.NewClient(url, gitea.SetToken(token), gitea.SetHTTPClient(httpClient)) + if err != nil { + return nil, fmt.Errorf("error creating a new gitea client: %w", err) + } + return &GiteaProvider{ + client: client, + owner: owner, + allBranches: allBranches, + }, nil +} + +func (g *GiteaProvider) GetBranches(ctx context.Context, repo *Repository) ([]*Repository, error) { + if !g.allBranches { + branch, status, err := g.client.GetRepoBranch(g.owner, repo.Repository, repo.Branch) + if status.StatusCode == http.StatusNotFound { + return nil, fmt.Errorf("got 404 while getting default branch %q for repo %q - check your repo config: %w", repo.Branch, repo.Repository, err) + } + if err != nil { + return nil, err + } + return []*Repository{ + { + Organization: repo.Organization, + Repository: repo.Repository, + Branch: repo.Branch, + URL: repo.URL, + SHA: branch.Commit.ID, + Labels: repo.Labels, + RepositoryId: repo.RepositoryId, + }, + }, nil + } + repos := []*Repository{} + opts := gitea.ListRepoBranchesOptions{} + branches, _, err := g.client.ListRepoBranches(g.owner, repo.Repository, opts) + if err != nil { + return nil, err + } + for _, branch := range branches { + repos = append(repos, &Repository{ + Organization: repo.Organization, + Repository: repo.Repository, + Branch: branch.Name, + URL: repo.URL, + SHA: branch.Commit.ID, + Labels: repo.Labels, + RepositoryId: repo.RepositoryId, + }) + } + return repos, nil +} + +func (g *GiteaProvider) ListRepos(ctx context.Context, cloneProtocol string) ([]*Repository, error) { + repos := []*Repository{} + repoOpts := gitea.ListOrgReposOptions{} + giteaRepos, _, err := 
g.client.ListOrgRepos(g.owner, repoOpts)
+	if err != nil {
+		return nil, err
+	}
+	for _, repo := range giteaRepos {
+		var url string
+		switch cloneProtocol {
+		// Default to SSH if unspecified (i.e. if "").
+		case "", "ssh":
+			url = repo.SSHURL
+		case "https":
+			url = repo.HTMLURL
+		default:
+			return nil, fmt.Errorf("unknown clone protocol for Gitea %v", cloneProtocol)
+		}
+		labelOpts := gitea.ListLabelsOptions{}
+		giteaLabels, _, err := g.client.ListRepoLabels(g.owner, repo.Name, labelOpts)
+		if err != nil {
+			return nil, err
+		}
+		labels := []string{}
+		for _, label := range giteaLabels {
+			labels = append(labels, label.Name)
+		}
+		repos = append(repos, &Repository{
+			Organization: g.owner,
+			Repository:   repo.Name,
+			Branch:       repo.DefaultBranch,
+			URL:          url,
+			Labels:       labels,
+			RepositoryId: int(repo.ID),
+		})
+	}
+	return repos, nil
+}
+
+func (g *GiteaProvider) RepoHasPath(ctx context.Context, repo *Repository, path string) (bool, error) {
+	_, resp, err := g.client.GetContents(repo.Organization, repo.Repository, repo.Branch, path)
+	if resp != nil && resp.StatusCode == 404 {
+		return false, nil
+	}
+	if fmt.Sprint(err) == "expect file, got directory" {
+		return true, nil
+	}
+	if err != nil {
+		return false, err
+	}
+	return true, nil
+}
diff --git a/applicationset/services/scm_provider/gitea_test.go b/applicationset/services/scm_provider/gitea_test.go
new file mode 100644
index 0000000000000..3d17e3175c4f8
--- /dev/null
+++ b/applicationset/services/scm_provider/gitea_test.go
@@ -0,0 +1,359 @@
+package scm_provider
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/argoproj/argo-cd/v2/applicationset/services/scm_provider/testdata"
+	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
+)
+
+func giteaMockHandler(t *testing.T) func(http.ResponseWriter, *http.Request) {
+	return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json") + switch r.RequestURI { + case "/api/v1/version": + _, err := io.WriteString(w, `{"version":"1.17.0+dev-452-g1f0541780"}`) + if err != nil { + t.Fail() + } + case "/api/v1/orgs/test-argocd/repos?limit=0&page=1": + _, err := io.WriteString(w, `[{ + "id": 21618, + "owner": { + "id": 31480, + "login": "test-argocd", + "full_name": "", + "email": "", + "avatar_url": "https://gitea.com/avatars/22d1b1d3f61abf95951c4a958731d848", + "language": "", + "is_admin": false, + "last_login": "0001-01-01T00:00:00Z", + "created": "2022-04-06T02:28:06+08:00", + "restricted": false, + "active": false, + "prohibit_login": false, + "location": "", + "website": "", + "description": "", + "visibility": "public", + "followers_count": 0, + "following_count": 0, + "starred_repos_count": 0, + "username": "test-argocd" + }, + "name": "pr-test", + "full_name": "test-argocd/pr-test", + "description": "", + "empty": false, + "private": false, + "fork": false, + "template": false, + "parent": null, + "mirror": false, + "size": 28, + "language": "", + "languages_url": "https://gitea.com/api/v1/repos/test-argocd/pr-test/languages", + "html_url": "https://gitea.com/test-argocd/pr-test", + "ssh_url": "git@gitea.com:test-argocd/pr-test.git", + "clone_url": "https://gitea.com/test-argocd/pr-test.git", + "original_url": "", + "website": "", + "stars_count": 0, + "forks_count": 0, + "watchers_count": 1, + "open_issues_count": 0, + "open_pr_counter": 1, + "release_counter": 0, + "default_branch": "main", + "archived": false, + "created_at": "2022-04-06T02:32:09+08:00", + "updated_at": "2022-04-06T02:33:12+08:00", + "permissions": { + "admin": false, + "push": false, + "pull": true + }, + "has_issues": true, + "internal_tracker": { + "enable_time_tracker": true, + "allow_only_contributors_to_track_time": true, + "enable_issue_dependencies": true + }, + "has_wiki": true, + "has_pull_requests": true, + "has_projects": true, + "ignore_whitespace_conflicts": 
false, + "allow_merge_commits": true, + "allow_rebase": true, + "allow_rebase_explicit": true, + "allow_squash_merge": true, + "default_merge_style": "merge", + "avatar_url": "", + "internal": false, + "mirror_interval": "", + "mirror_updated": "0001-01-01T00:00:00Z", + "repo_transfer": null + }]`) + if err != nil { + t.Fail() + } + case "/api/v1/repos/test-argocd/pr-test/branches/main": + _, err := io.WriteString(w, `{ + "name": "main", + "commit": { + "id": "72687815ccba81ef014a96201cc2e846a68789d8", + "message": "initial commit\n", + "url": "https://gitea.com/test-argocd/pr-test/commit/72687815ccba81ef014a96201cc2e846a68789d8", + "author": { + "name": "Dan Molik", + "email": "dan@danmolik.com", + "username": "graytshirt" + }, + "committer": { + "name": "Dan Molik", + "email": "dan@danmolik.com", + "username": "graytshirt" + }, + "verification": { + "verified": false, + "reason": "gpg.error.no_gpg_keys_found", + "signature": "-----BEGIN PGP SIGNATURE-----\n\niQEzBAABCAAdFiEEXYAkwEBRpXzXgHFWlgCr7m50zBMFAmJMiqUACgkQlgCr7m50\nzBPSmQgAiVVEIxC42tuks4iGFNURrtYvypZAEIc+hJgt2kBpmdCrAphYPeAj+Wtr\n9KT7dDscCZIba2wx39HEXO2S7wNCXESvAzrA8rdfbXjR4L2miZ1urfBkEoqK5i/F\noblWGuAyjurX4KPa2ARROd0H4AXxt6gNAXaFPgZO+xXCyNKZfad/lkEP1AiPRknD\nvTTMbEkIzFHK9iVwZ9DORGpfF1wnLzxWmMfhYatZnBgFNnoeJNtFhCJo05rHBgqc\nqVZWXt1iF7nysBoXSzyx1ZAsmBr/Qerkuj0nonh0aPVa6NKJsdmeJyPX4zXXoi6E\ne/jpxX2UQJkpFezg3IjUpvE5FvIiYg==\n=3Af2\n-----END PGP SIGNATURE-----\n", + "signer": null, + "payload": "tree 64d47c7fc6e31dcf00654223ec4ab749dd0a464e\nauthor Dan Molik \u003cdan@danmolik.com\u003e 1649183391 -0400\ncommitter Dan Molik \u003cdan@danmolik.com\u003e 1649183391 -0400\n\ninitial commit\n" + }, + "timestamp": "2022-04-05T14:29:51-04:00", + "added": null, + "removed": null, + "modified": null + }, + "protected": false, + "required_approvals": 0, + "enable_status_check": false, + "status_check_contexts": [], + "user_can_push": false, + "user_can_merge": false, + "effective_branch_protection_name": "" + }`) + if 
err != nil { + t.Fail() + } + case "/api/v1/repos/test-argocd/pr-test/branches?limit=0&page=1": + _, err := io.WriteString(w, `[{ + "name": "main", + "commit": { + "id": "72687815ccba81ef014a96201cc2e846a68789d8", + "message": "initial commit\n", + "url": "https://gitea.com/test-argocd/pr-test/commit/72687815ccba81ef014a96201cc2e846a68789d8", + "author": { + "name": "Dan Molik", + "email": "dan@danmolik.com", + "username": "graytshirt" + }, + "committer": { + "name": "Dan Molik", + "email": "dan@danmolik.com", + "username": "graytshirt" + }, + "verification": { + "verified": false, + "reason": "gpg.error.no_gpg_keys_found", + "signature": "-----BEGIN PGP SIGNATURE-----\n\niQEzBAABCAAdFiEEXYAkwEBRpXzXgHFWlgCr7m50zBMFAmJMiqUACgkQlgCr7m50\nzBPSmQgAiVVEIxC42tuks4iGFNURrtYvypZAEIc+hJgt2kBpmdCrAphYPeAj+Wtr\n9KT7dDscCZIba2wx39HEXO2S7wNCXESvAzrA8rdfbXjR4L2miZ1urfBkEoqK5i/F\noblWGuAyjurX4KPa2ARROd0H4AXxt6gNAXaFPgZO+xXCyNKZfad/lkEP1AiPRknD\nvTTMbEkIzFHK9iVwZ9DORGpfF1wnLzxWmMfhYatZnBgFNnoeJNtFhCJo05rHBgqc\nqVZWXt1iF7nysBoXSzyx1ZAsmBr/Qerkuj0nonh0aPVa6NKJsdmeJyPX4zXXoi6E\ne/jpxX2UQJkpFezg3IjUpvE5FvIiYg==\n=3Af2\n-----END PGP SIGNATURE-----\n", + "signer": null, + "payload": "tree 64d47c7fc6e31dcf00654223ec4ab749dd0a464e\nauthor Dan Molik \u003cdan@danmolik.com\u003e 1649183391 -0400\ncommitter Dan Molik \u003cdan@danmolik.com\u003e 1649183391 -0400\n\ninitial commit\n" + }, + "timestamp": "2022-04-05T14:29:51-04:00", + "added": null, + "removed": null, + "modified": null + }, + "protected": false, + "required_approvals": 0, + "enable_status_check": false, + "status_check_contexts": [], + "user_can_push": false, + "user_can_merge": false, + "effective_branch_protection_name": "" + }, { + "name": "test", + "commit": { + "id": "7bbaf62d92ddfafd9cc8b340c619abaec32bc09f", + "message": "add an empty file\n", + "url": "https://gitea.com/test-argocd/pr-test/commit/7bbaf62d92ddfafd9cc8b340c619abaec32bc09f", + "author": { + "name": "Dan Molik", + "email": "dan@danmolik.com", + 
"username": "graytshirt" + }, + "committer": { + "name": "Dan Molik", + "email": "dan@danmolik.com", + "username": "graytshirt" + }, + "verification": { + "verified": false, + "reason": "gpg.error.no_gpg_keys_found", + "signature": "-----BEGIN PGP SIGNATURE-----\n\niQEzBAABCAAdFiEEXYAkwEBRpXzXgHFWlgCr7m50zBMFAmJMiugACgkQlgCr7m50\nzBN+7wgAkCHD3KfX3Ffkqv2qPwqgHNYM1bA6Hmffzhv0YeD9jWCI3tp0JulP4iFZ\ncQ7jqx9xP9tCQMSFCaijLRHaE6Js1xrVtf0OKRkbpdlvkyrIM3sQhqyQgAsISrDG\nLzSqeoQQjglzeWESYh2Tjn1CgqQNKjI6LLepSwvF1pIeV4pJpJobaEbIfTgStdzM\nWEk8o0I+EZaYqK0C0vU9N0LK/LR/jnlaHsb4OUjvk+S7lRjZwBkrsg7P/QsqtCVd\nw5nkxDiCx1J58zKMnQ7ZinJEK9A5WYdnMYc6aBn7ARgZrblXPPBkkKUhEv3ZSPeW\nKv9i4GQy838xkVSTFkHNj1+a5o6zEA==\n=JiFw\n-----END PGP SIGNATURE-----\n", + "signer": null, + "payload": "tree cdddf3e1d6a8a7e6899a044d0e1bc73bf798e2f5\nparent 72687815ccba81ef014a96201cc2e846a68789d8\nauthor Dan Molik \u003cdan@danmolik.com\u003e 1649183458 -0400\ncommitter Dan Molik \u003cdan@danmolik.com\u003e 1649183458 -0400\n\nadd an empty file\n" + }, + "timestamp": "2022-04-05T14:30:58-04:00", + "added": null, + "removed": null, + "modified": null + }, + "protected": false, + "required_approvals": 0, + "enable_status_check": false, + "status_check_contexts": [], + "user_can_push": false, + "user_can_merge": false, + "effective_branch_protection_name": "" + }]`) + if err != nil { + t.Fail() + } + case "/api/v1/repos/gitea/go-sdk/contents/README.md?ref=master": + _, err := io.WriteString(w, `{ + "name": "README.md", + "path": "README.md", + "sha": "3605625ef3f80dc092167b54e3f55eb0663d729f", + "last_commit_sha": "6b6fdd91ce769bb4641084e15f76554fb841bf27", + "type": "file", + "size": 1673, + "encoding": "base64", + "content": 
"IyBHaXRlYSBTREsgZm9yIEdvCgpbIVtMaWNlbnNlOiBNSVRdKGh0dHBzOi8vaW1nLnNoaWVsZHMuaW8vYmFkZ2UvTGljZW5zZS1NSVQtYmx1ZS5zdmcpXShodHRwczovL29wZW5zb3VyY2Uub3JnL2xpY2Vuc2VzL01JVCkgWyFbUmVsZWFzZV0oaHR0cHM6Ly9yYXN0ZXIuc2hpZWxkcy5pby9iYWRnZS9keW5hbWljL2pzb24uc3ZnP2xhYmVsPXJlbGVhc2UmdXJsPWh0dHBzOi8vZ2l0ZWEuY29tL2FwaS92MS9yZXBvcy9naXRlYS9nby1zZGsvcmVsZWFzZXMmcXVlcnk9JFswXS50YWdfbmFtZSldKGh0dHBzOi8vZ2l0ZWEuY29tL2dpdGVhL2dvLXNkay9yZWxlYXNlcykgWyFbQnVpbGQgU3RhdHVzXShodHRwczovL2Ryb25lLmdpdGVhLmNvbS9hcGkvYmFkZ2VzL2dpdGVhL2dvLXNkay9zdGF0dXMuc3ZnKV0oaHR0cHM6Ly9kcm9uZS5naXRlYS5jb20vZ2l0ZWEvZ28tc2RrKSBbIVtKb2luIHRoZSBjaGF0IGF0IGh0dHBzOi8vaW1nLnNoaWVsZHMuaW8vZGlzY29yZC8zMjI1Mzg5NTQxMTkxODQzODQuc3ZnXShodHRwczovL2ltZy5zaGllbGRzLmlvL2Rpc2NvcmQvMzIyNTM4OTU0MTE5MTg0Mzg0LnN2ZyldKGh0dHBzOi8vZGlzY29yZC5nZy9HaXRlYSkgWyFbXShodHRwczovL2ltYWdlcy5taWNyb2JhZGdlci5jb20vYmFkZ2VzL2ltYWdlL2dpdGVhL2dpdGVhLnN2ZyldKGh0dHA6Ly9taWNyb2JhZGdlci5jb20vaW1hZ2VzL2dpdGVhL2dpdGVhICJHZXQgeW91ciBvd24gaW1hZ2UgYmFkZ2Ugb24gbWljcm9iYWRnZXIuY29tIikgWyFbR28gUmVwb3J0IENhcmRdKGh0dHBzOi8vZ29yZXBvcnRjYXJkLmNvbS9iYWRnZS9jb2RlLmdpdGVhLmlvL3NkayldKGh0dHBzOi8vZ29yZXBvcnRjYXJkLmNvbS9yZXBvcnQvY29kZS5naXRlYS5pby9zZGspIFshW0dvRG9jXShodHRwczovL2dvZG9jLm9yZy9jb2RlLmdpdGVhLmlvL3Nkay9naXRlYT9zdGF0dXMuc3ZnKV0oaHR0cHM6Ly9nb2RvYy5vcmcvY29kZS5naXRlYS5pby9zZGsvZ2l0ZWEpCgpUaGlzIHByb2plY3QgYWN0cyBhcyBhIGNsaWVudCBTREsgaW1wbGVtZW50YXRpb24gd3JpdHRlbiBpbiBHbyB0byBpbnRlcmFjdCB3aXRoIHRoZSBHaXRlYSBBUEkgaW1wbGVtZW50YXRpb24uIEZvciBmdXJ0aGVyIGluZm9ybWF0aW9ucyB0YWtlIGEgbG9vayBhdCB0aGUgY3VycmVudCBbZG9jdW1lbnRhdGlvbl0oaHR0cHM6Ly9nb2RvYy5vcmcvY29kZS5naXRlYS5pby9zZGsvZ2l0ZWEpLgoKTm90ZTogZnVuY3Rpb24gYXJndW1lbnRzIGFyZSBlc2NhcGVkIGJ5IHRoZSBTREsuCgojIyBVc2UgaXQKCmBgYGdvCmltcG9ydCAiY29kZS5naXRlYS5pby9zZGsvZ2l0ZWEiCmBgYAoKIyMgVmVyc2lvbiBSZXF1aXJlbWVudHMKICogZ28gPj0gMS4xMwogKiBnaXRlYSA+PSAxLjExCgojIyBDb250cmlidXRpbmcKCkZvcmsgLT4gUGF0Y2ggLT4gUHVzaCAtPiBQdWxsIFJlcXVlc3QKCiMjIEF1dGhvcnMKCiogW01haW50YWluZXJzXShodHRwczovL2dpdGh1Yi5jb20vb3Jncy9nby1naXRlYS9wZW9wbGUpCiogW0NvbnRyaWJ
1dG9yc10oaHR0cHM6Ly9naXRodWIuY29tL2dvLWdpdGVhL2dvLXNkay9ncmFwaHMvY29udHJpYnV0b3JzKQoKIyMgTGljZW5zZQoKVGhpcyBwcm9qZWN0IGlzIHVuZGVyIHRoZSBNSVQgTGljZW5zZS4gU2VlIHRoZSBbTElDRU5TRV0oTElDRU5TRSkgZmlsZSBmb3IgdGhlIGZ1bGwgbGljZW5zZSB0ZXh0Lgo=", + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/README.md?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/README.md", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/3605625ef3f80dc092167b54e3f55eb0663d729f", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/README.md", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/README.md?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/3605625ef3f80dc092167b54e3f55eb0663d729f", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/README.md" + } +} +`) + require.NoError(t, err) + case "/api/v1/repos/gitea/go-sdk/contents/gitea?ref=master": + _, err := io.WriteString(w, testdata.ReposGiteaGoSdkContentsGiteaResponse) + require.NoError(t, err) + case "/api/v1/repos/gitea/go-sdk/contents/notathing?ref=master": + w.WriteHeader(http.StatusNotFound) + _, err := io.WriteString(w, `{"errors":["object does not exist [id: , rel_path: notathing]"],"message":"GetContentsOrList","url":"https://gitea.com/api/swagger"}`) + require.NoError(t, err) + default: + _, err := io.WriteString(w, `[]`) + if err != nil { + t.Fail() + } + } + } +} +func TestGiteaListRepos(t *testing.T) { + cases := []struct { + name, proto, url string + hasError, allBranches, includeSubgroups bool + branches []string + filters []v1alpha1.SCMProviderGeneratorFilter + }{ + { + name: "blank protocol", + allBranches: false, + url: "git@gitea.com:test-argocd/pr-test.git", + branches: []string{"main"}, + }, + { + name: "ssh protocol", + allBranches: false, + proto: "ssh", + url: "git@gitea.com:test-argocd/pr-test.git", + }, + { + name: "https protocol", + 
allBranches: false, + proto: "https", + url: "https://gitea.com/test-argocd/pr-test", + }, + { + name: "other protocol", + allBranches: false, + proto: "other", + hasError: true, + }, + { + name: "all branches", + allBranches: true, + url: "git@gitea.com:test-argocd/pr-test.git", + branches: []string{"main"}, + }, + } + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + giteaMockHandler(t)(w, r) + })) + defer ts.Close() + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + provider, _ := NewGiteaProvider(context.Background(), "test-argocd", "", ts.URL, c.allBranches, false) + rawRepos, err := ListRepos(context.Background(), provider, c.filters, c.proto) + if c.hasError { + assert.NotNil(t, err) + } else { + assert.Nil(t, err) + // Just check that this one project shows up. Not a great test but better thing nothing? + repos := []*Repository{} + branches := []string{} + for _, r := range rawRepos { + if r.Repository == "pr-test" { + repos = append(repos, r) + branches = append(branches, r.Branch) + } + } + assert.NotEmpty(t, repos) + assert.Equal(t, c.url, repos[0].URL) + for _, b := range c.branches { + assert.Contains(t, branches, b) + } + } + }) + } +} + +func TestGiteaHasPath(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + giteaMockHandler(t)(w, r) + })) + defer ts.Close() + host, _ := NewGiteaProvider(context.Background(), "gitea", "", ts.URL, false, false) + repo := &Repository{ + Organization: "gitea", + Repository: "go-sdk", + Branch: "master", + } + + t.Run("file exists", func(t *testing.T) { + ok, err := host.RepoHasPath(context.Background(), repo, "README.md") + assert.Nil(t, err) + assert.True(t, ok) + }) + + t.Run("directory exists", func(t *testing.T) { + ok, err := host.RepoHasPath(context.Background(), repo, "gitea") + assert.Nil(t, err) + assert.True(t, ok) + }) + + t.Run("does not exists", func(t *testing.T) { + ok, err := 
host.RepoHasPath(context.Background(), repo, "notathing") + assert.Nil(t, err) + assert.False(t, ok) + }) +} diff --git a/applicationset/services/scm_provider/github.go b/applicationset/services/scm_provider/github.go new file mode 100644 index 0000000000000..1a6edae5837e9 --- /dev/null +++ b/applicationset/services/scm_provider/github.go @@ -0,0 +1,156 @@ +package scm_provider + +import ( + "context" + "errors" + "fmt" + "net/http" + "os" + + "github.com/google/go-github/v35/github" + "golang.org/x/oauth2" +) + +type GithubProvider struct { + client *github.Client + organization string + allBranches bool +} + +var _ SCMProviderService = &GithubProvider{} + +func NewGithubProvider(ctx context.Context, organization string, token string, url string, allBranches bool) (*GithubProvider, error) { + var ts oauth2.TokenSource + // Undocumented environment variable to set a default token, to be used in testing to dodge anonymous rate limits. + if token == "" { + token = os.Getenv("GITHUB_TOKEN") + } + if token != "" { + ts = oauth2.StaticTokenSource( + &oauth2.Token{AccessToken: token}, + ) + } + httpClient := oauth2.NewClient(ctx, ts) + var client *github.Client + if url == "" { + client = github.NewClient(httpClient) + } else { + var err error + client, err = github.NewEnterpriseClient(url, url, httpClient) + if err != nil { + return nil, err + } + } + return &GithubProvider{client: client, organization: organization, allBranches: allBranches}, nil +} + +func (g *GithubProvider) GetBranches(ctx context.Context, repo *Repository) ([]*Repository, error) { + repos := []*Repository{} + branches, err := g.listBranches(ctx, repo) + if err != nil { + return nil, fmt.Errorf("error listing branches for %s/%s: %w", repo.Organization, repo.Repository, err) + } + + for _, branch := range branches { + repos = append(repos, &Repository{ + Organization: repo.Organization, + Repository: repo.Repository, + URL: repo.URL, + Branch: branch.GetName(), + SHA: branch.GetCommit().GetSHA(), + 
Labels: repo.Labels, + RepositoryId: repo.RepositoryId, + }) + } + return repos, nil +} + +func (g *GithubProvider) ListRepos(ctx context.Context, cloneProtocol string) ([]*Repository, error) { + opt := &github.RepositoryListByOrgOptions{ + ListOptions: github.ListOptions{PerPage: 100}, + } + repos := []*Repository{} + for { + githubRepos, resp, err := g.client.Repositories.ListByOrg(ctx, g.organization, opt) + if err != nil { + return nil, fmt.Errorf("error listing repositories for %s: %w", g.organization, err) + } + for _, githubRepo := range githubRepos { + var url string + switch cloneProtocol { + // Default to SSH if unspecified (i.e. if ""). + case "", "ssh": + url = githubRepo.GetSSHURL() + case "https": + url = githubRepo.GetCloneURL() + default: + return nil, fmt.Errorf("unknown clone protocol for GitHub %v", cloneProtocol) + } + repos = append(repos, &Repository{ + Organization: githubRepo.Owner.GetLogin(), + Repository: githubRepo.GetName(), + Branch: githubRepo.GetDefaultBranch(), + URL: url, + Labels: githubRepo.Topics, + RepositoryId: githubRepo.ID, + }) + } + if resp.NextPage == 0 { + break + } + opt.Page = resp.NextPage + } + return repos, nil +} + +func (g *GithubProvider) RepoHasPath(ctx context.Context, repo *Repository, path string) (bool, error) { + _, _, resp, err := g.client.Repositories.GetContents(ctx, repo.Organization, repo.Repository, path, &github.RepositoryContentGetOptions{ + Ref: repo.Branch, + }) + // 404s are not an error here, just a normal false. + if resp != nil && resp.StatusCode == 404 { + return false, nil + } + if err != nil { + return false, err + } + return true, nil +} + +func (g *GithubProvider) listBranches(ctx context.Context, repo *Repository) ([]github.Branch, error) { + // If we don't specifically want to query for all branches, just use the default branch and call it a day. 
+ if !g.allBranches { + defaultBranch, _, err := g.client.Repositories.GetBranch(ctx, repo.Organization, repo.Repository, repo.Branch) + if err != nil { + var githubErrorResponse *github.ErrorResponse + if errors.As(err, &githubErrorResponse) { + if githubErrorResponse.Response.StatusCode == http.StatusNotFound { + // Default branch doesn't exist, so the repo is empty. + return []github.Branch{}, nil + } + } + return nil, err + } + return []github.Branch{*defaultBranch}, nil + } + // Otherwise, scrape the ListBranches API. + opt := &github.BranchListOptions{ + ListOptions: github.ListOptions{PerPage: 100}, + } + branches := []github.Branch{} + for { + githubBranches, resp, err := g.client.Repositories.ListBranches(ctx, repo.Organization, repo.Repository, opt) + if err != nil { + return nil, err + } + for _, githubBranch := range githubBranches { + branches = append(branches, *githubBranch) + } + + if resp.NextPage == 0 { + break + } + opt.Page = resp.NextPage + } + return branches, nil +} diff --git a/applicationset/services/scm_provider/github_app.go b/applicationset/services/scm_provider/github_app.go new file mode 100644 index 0000000000000..5429ed48ee8ab --- /dev/null +++ b/applicationset/services/scm_provider/github_app.go @@ -0,0 +1,14 @@ +package scm_provider + +import ( + "github.com/argoproj/argo-cd/v2/applicationset/services/github_app_auth" + "github.com/argoproj/argo-cd/v2/applicationset/services/internal/github_app" +) + +func NewGithubAppProviderFor(g github_app_auth.Authentication, organization string, url string, allBranches bool) (*GithubProvider, error) { + client, err := github_app.Client(g, url) + if err != nil { + return nil, err + } + return &GithubProvider{client: client, organization: organization, allBranches: allBranches}, nil +} diff --git a/applicationset/services/scm_provider/github_test.go b/applicationset/services/scm_provider/github_test.go new file mode 100644 index 0000000000000..d413250f03126 --- /dev/null +++ 
b/applicationset/services/scm_provider/github_test.go @@ -0,0 +1,324 @@ +package scm_provider + +import ( + "context" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +func githubMockHandler(t *testing.T) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + switch r.RequestURI { + case "/api/v3/orgs/argoproj/repos?per_page=100": + _, err := io.WriteString(w, `[ + { + "id": 1296269, + "node_id": "MDEwOlJlcG9zaXRvcnkxMjk2MjY5", + "name": "argo-cd", + "full_name": "argoproj/argo-cd", + "owner": { + "login": "argoproj", + "id": 1, + "node_id": "MDQ6VXNlcjE=", + "avatar_url": "https://github.com/images/error/argoproj_happy.gif", + "gravatar_id": "", + "url": "https://api.github.com/users/argoproj", + "html_url": "https://github.com/argoproj", + "followers_url": "https://api.github.com/users/argoproj/followers", + "following_url": "https://api.github.com/users/argoproj/following{/other_user}", + "gists_url": "https://api.github.com/users/argoproj/gists{/gist_id}", + "starred_url": "https://api.github.com/users/argoproj/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/argoproj/subscriptions", + "organizations_url": "https://api.github.com/users/argoproj/orgs", + "repos_url": "https://api.github.com/users/argoproj/repos", + "events_url": "https://api.github.com/users/argoproj/events{/privacy}", + "received_events_url": "https://api.github.com/users/argoproj/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/argoproj/argo-cd", + "description": "This your first repo!", + "fork": false, + "url": "https://api.github.com/repos/argoproj/argo-cd", + "archive_url": "https://api.github.com/repos/argoproj/argo-cd/{archive_format}{/ref}", + "assignees_url": 
"https://api.github.com/repos/argoproj/argo-cd/assignees{/user}", + "blobs_url": "https://api.github.com/repos/argoproj/argo-cd/git/blobs{/sha}", + "branches_url": "https://api.github.com/repos/argoproj/argo-cd/branches{/branch}", + "collaborators_url": "https://api.github.com/repos/argoproj/argo-cd/collaborators{/collaborator}", + "comments_url": "https://api.github.com/repos/argoproj/argo-cd/comments{/number}", + "commits_url": "https://api.github.com/repos/argoproj/argo-cd/commits{/sha}", + "compare_url": "https://api.github.com/repos/argoproj/argo-cd/compare/{base}...{head}", + "contents_url": "https://api.github.com/repos/argoproj/argo-cd/contents/{path}", + "contributors_url": "https://api.github.com/repos/argoproj/argo-cd/contributors", + "deployments_url": "https://api.github.com/repos/argoproj/argo-cd/deployments", + "downloads_url": "https://api.github.com/repos/argoproj/argo-cd/downloads", + "events_url": "https://api.github.com/repos/argoproj/argo-cd/events", + "forks_url": "https://api.github.com/repos/argoproj/argo-cd/forks", + "git_commits_url": "https://api.github.com/repos/argoproj/argo-cd/git/commits{/sha}", + "git_refs_url": "https://api.github.com/repos/argoproj/argo-cd/git/refs{/sha}", + "git_tags_url": "https://api.github.com/repos/argoproj/argo-cd/git/tags{/sha}", + "git_url": "git:github.com/argoproj/argo-cd.git", + "issue_comment_url": "https://api.github.com/repos/argoproj/argo-cd/issues/comments{/number}", + "issue_events_url": "https://api.github.com/repos/argoproj/argo-cd/issues/events{/number}", + "issues_url": "https://api.github.com/repos/argoproj/argo-cd/issues{/number}", + "keys_url": "https://api.github.com/repos/argoproj/argo-cd/keys{/key_id}", + "labels_url": "https://api.github.com/repos/argoproj/argo-cd/labels{/name}", + "languages_url": "https://api.github.com/repos/argoproj/argo-cd/languages", + "merges_url": "https://api.github.com/repos/argoproj/argo-cd/merges", + "milestones_url": 
"https://api.github.com/repos/argoproj/argo-cd/milestones{/number}", + "notifications_url": "https://api.github.com/repos/argoproj/argo-cd/notifications{?since,all,participating}", + "pulls_url": "https://api.github.com/repos/argoproj/argo-cd/pulls{/number}", + "releases_url": "https://api.github.com/repos/argoproj/argo-cd/releases{/id}", + "ssh_url": "git@github.com:argoproj/argo-cd.git", + "stargazers_url": "https://api.github.com/repos/argoproj/argo-cd/stargazers", + "statuses_url": "https://api.github.com/repos/argoproj/argo-cd/statuses/{sha}", + "subscribers_url": "https://api.github.com/repos/argoproj/argo-cd/subscribers", + "subscription_url": "https://api.github.com/repos/argoproj/argo-cd/subscription", + "tags_url": "https://api.github.com/repos/argoproj/argo-cd/tags", + "teams_url": "https://api.github.com/repos/argoproj/argo-cd/teams", + "trees_url": "https://api.github.com/repos/argoproj/argo-cd/git/trees{/sha}", + "clone_url": "https://github.com/argoproj/argo-cd.git", + "mirror_url": "git:git.example.com/argoproj/argo-cd", + "hooks_url": "https://api.github.com/repos/argoproj/argo-cd/hooks", + "svn_url": "https://svn.github.com/argoproj/argo-cd", + "homepage": "https://github.com", + "language": null, + "forks_count": 9, + "stargazers_count": 80, + "watchers_count": 80, + "size": 108, + "default_branch": "master", + "open_issues_count": 0, + "is_template": false, + "topics": [ + "argoproj", + "atom", + "electron", + "api" + ], + "has_issues": true, + "has_projects": true, + "has_wiki": true, + "has_pages": false, + "has_downloads": true, + "archived": false, + "disabled": false, + "visibility": "public", + "pushed_at": "2011-01-26T19:06:43Z", + "created_at": "2011-01-26T19:01:12Z", + "updated_at": "2011-01-26T19:14:43Z", + "permissions": { + "admin": false, + "push": false, + "pull": true + }, + "template_repository": null + } + ]`) + if err != nil { + t.Fail() + } + case "/api/v3/repos/argoproj/argo-cd/branches?per_page=100": + _, err := 
io.WriteString(w, `[ + { + "name": "master", + "commit": { + "sha": "c5b97d5ae6c19d5c5df71a34c7fbeeda2479ccbc", + "url": "https://api.github.com/repos/argoproj/argo-cd/commits/c5b97d5ae6c19d5c5df71a34c7fbeeda2479ccbc" + }, + "protected": true, + "protection": { + "required_status_checks": { + "enforcement_level": "non_admins", + "contexts": [ + "ci-test", + "linter" + ] + } + }, + "protection_url": "https://api.github.com/repos/argoproj/hello-world/branches/master/protection" + } + ] + `) + if err != nil { + t.Fail() + } + case "/api/v3/repos/argoproj/argo-cd/contents/pkg?ref=master": + _, err := io.WriteString(w, `{ + "type": "file", + "encoding": "base64", + "size": 5362, + "name": "pkg/", + "path": "pkg/", + "content": "encoded content ...", + "sha": "3d21ec53a331a6f037a91c368710b99387d012c1", + "url": "https://api.github.com/repos/octokit/octokit.rb/contents/README.md", + "git_url": "https://api.github.com/repos/octokit/octokit.rb/git/blobs/3d21ec53a331a6f037a91c368710b99387d012c1", + "html_url": "https://github.com/octokit/octokit.rb/blob/master/README.md", + "download_url": "https://raw.githubusercontent.com/octokit/octokit.rb/master/README.md", + "_links": { + "git": "https://api.github.com/repos/octokit/octokit.rb/git/blobs/3d21ec53a331a6f037a91c368710b99387d012c1", + "self": "https://api.github.com/repos/octokit/octokit.rb/contents/README.md", + "html": "https://github.com/octokit/octokit.rb/blob/master/README.md" + } + }`) + if err != nil { + t.Fail() + } + case "/api/v3/repos/argoproj/argo-cd/branches/master": + _, err := io.WriteString(w, `{ + "name": "master", + "commit": { + "sha": "c5b97d5ae6c19d5c5df71a34c7fbeeda2479ccbc", + "url": "https://api.github.com/repos/octocat/Hello-World/commits/c5b97d5ae6c19d5c5df71a34c7fbeeda2479ccbc" + }, + "protected": true, + "protection": { + "required_status_checks": { + "enforcement_level": "non_admins", + "contexts": [ + "ci-test", + "linter" + ] + } + }, + "protection_url": 
"https://api.github.com/repos/octocat/hello-world/branches/master/protection" + }`) + if err != nil { + t.Fail() + } + default: + w.WriteHeader(http.StatusNotFound) + } + } +} + +func TestGithubListRepos(t *testing.T) { + cases := []struct { + name, proto, url string + hasError, allBranches bool + branches []string + filters []v1alpha1.SCMProviderGeneratorFilter + }{ + { + name: "blank protocol", + url: "git@github.com:argoproj/argo-cd.git", + branches: []string{"master"}, + }, + { + name: "ssh protocol", + proto: "ssh", + url: "git@github.com:argoproj/argo-cd.git", + }, + { + name: "https protocol", + proto: "https", + url: "https://github.com/argoproj/argo-cd.git", + }, + { + name: "other protocol", + proto: "other", + hasError: true, + }, + { + name: "all branches", + allBranches: true, + url: "git@github.com:argoproj/argo-cd.git", + branches: []string{"master"}, + }, + } + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + githubMockHandler(t)(w, r) + })) + defer ts.Close() + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + provider, _ := NewGithubProvider(context.Background(), "argoproj", "", ts.URL, c.allBranches) + rawRepos, err := ListRepos(context.Background(), provider, c.filters, c.proto) + if c.hasError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + // Just check that this one project shows up. Not a great test but better thing nothing? 
+ repos := []*Repository{} + branches := []string{} + for _, r := range rawRepos { + if r.Repository == "argo-cd" { + repos = append(repos, r) + branches = append(branches, r.Branch) + } + } + assert.NotEmpty(t, repos) + assert.Equal(t, c.url, repos[0].URL) + for _, b := range c.branches { + assert.Contains(t, branches, b) + } + } + }) + } +} + +func TestGithubHasPath(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + githubMockHandler(t)(w, r) + })) + defer ts.Close() + host, _ := NewGithubProvider(context.Background(), "argoproj", "", ts.URL, false) + repo := &Repository{ + Organization: "argoproj", + Repository: "argo-cd", + Branch: "master", + } + ok, err := host.RepoHasPath(context.Background(), repo, "pkg/") + assert.Nil(t, err) + assert.True(t, ok) + + ok, err = host.RepoHasPath(context.Background(), repo, "notathing/") + assert.Nil(t, err) + assert.False(t, ok) +} + +func TestGithubGetBranches(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + githubMockHandler(t)(w, r) + })) + defer ts.Close() + host, _ := NewGithubProvider(context.Background(), "argoproj", "", ts.URL, false) + repo := &Repository{ + Organization: "argoproj", + Repository: "argo-cd", + Branch: "master", + } + repos, err := host.GetBranches(context.Background(), repo) + if err != nil { + assert.NoError(t, err) + } else { + assert.Equal(t, repos[0].Branch, "master") + } + //Branch Doesn't exists instead of error will return no error + repo2 := &Repository{ + Organization: "argoproj", + Repository: "applicationset", + Branch: "main", + } + _, err = host.GetBranches(context.Background(), repo2) + assert.NoError(t, err) + + // Get all branches + host.allBranches = true + repos, err = host.GetBranches(context.Background(), repo) + if err != nil { + assert.NoError(t, err) + } else { + // considering master branch to exist. 
+ assert.Equal(t, len(repos), 1) + } +} diff --git a/applicationset/services/scm_provider/gitlab.go b/applicationset/services/scm_provider/gitlab.go new file mode 100644 index 0000000000000..f4b92b3ed9e5f --- /dev/null +++ b/applicationset/services/scm_provider/gitlab.go @@ -0,0 +1,197 @@ +package scm_provider + +import ( + "context" + "fmt" + "net/http" + "os" + pathpkg "path" + + "github.com/argoproj/argo-cd/v2/applicationset/utils" + "github.com/hashicorp/go-retryablehttp" + "github.com/xanzy/go-gitlab" +) + +type GitlabProvider struct { + client *gitlab.Client + organization string + allBranches bool + includeSubgroups bool + includeSharedProjects bool + topic string +} + +var _ SCMProviderService = &GitlabProvider{} + +func NewGitlabProvider(ctx context.Context, organization string, token string, url string, allBranches, includeSubgroups, includeSharedProjects, insecure bool, scmRootCAPath, topic string) (*GitlabProvider, error) { + // Undocumented environment variable to set a default token, to be used in testing to dodge anonymous rate limits. 
+ if token == "" { + token = os.Getenv("GITLAB_TOKEN") + } + var client *gitlab.Client + + tr := http.DefaultTransport.(*http.Transport).Clone() + tr.TLSClientConfig = utils.GetTlsConfig(scmRootCAPath, insecure) + + retryClient := retryablehttp.NewClient() + retryClient.HTTPClient.Transport = tr + + if url == "" { + var err error + client, err = gitlab.NewClient(token, gitlab.WithHTTPClient(retryClient.HTTPClient)) + if err != nil { + return nil, err + } + } else { + var err error + client, err = gitlab.NewClient(token, gitlab.WithBaseURL(url), gitlab.WithHTTPClient(retryClient.HTTPClient)) + if err != nil { + return nil, err + } + } + + return &GitlabProvider{client: client, organization: organization, allBranches: allBranches, includeSubgroups: includeSubgroups, includeSharedProjects: includeSharedProjects, topic: topic}, nil +} + +func (g *GitlabProvider) GetBranches(ctx context.Context, repo *Repository) ([]*Repository, error) { + repos := []*Repository{} + branches, err := g.listBranches(ctx, repo) + if err != nil { + return nil, fmt.Errorf("error listing branches for %s/%s: %v", repo.Organization, repo.Repository, err) + } + + for _, branch := range branches { + repos = append(repos, &Repository{ + Organization: repo.Organization, + Repository: repo.Repository, + URL: repo.URL, + Branch: branch.Name, + SHA: branch.Commit.ID, + Labels: repo.Labels, + RepositoryId: repo.RepositoryId, + }) + } + return repos, nil +} + +func (g *GitlabProvider) ListRepos(ctx context.Context, cloneProtocol string) ([]*Repository, error) { + opt := &gitlab.ListGroupProjectsOptions{ + ListOptions: gitlab.ListOptions{PerPage: 100}, + IncludeSubGroups: &g.includeSubgroups, + WithShared: &g.includeSharedProjects, + Topic: &g.topic, + } + + repos := []*Repository{} + for { + gitlabRepos, resp, err := g.client.Groups.ListGroupProjects(g.organization, opt) + if err != nil { + return nil, fmt.Errorf("error listing projects for %s: %v", g.organization, err) + } + for _, gitlabRepo := range 
gitlabRepos { + var url string + switch cloneProtocol { + // Default to SSH if unspecified (i.e. if ""). + case "", "ssh": + url = gitlabRepo.SSHURLToRepo + case "https": + url = gitlabRepo.HTTPURLToRepo + default: + return nil, fmt.Errorf("unknown clone protocol for Gitlab %v", cloneProtocol) + } + + repos = append(repos, &Repository{ + Organization: gitlabRepo.Namespace.FullPath, + Repository: gitlabRepo.Path, + URL: url, + Branch: gitlabRepo.DefaultBranch, + Labels: gitlabRepo.TagList, + RepositoryId: gitlabRepo.ID, + }) + } + if resp.CurrentPage >= resp.TotalPages { + break + } + opt.Page = resp.NextPage + } + return repos, nil +} + +func (g *GitlabProvider) RepoHasPath(_ context.Context, repo *Repository, path string) (bool, error) { + p, _, err := g.client.Projects.GetProject(repo.Organization+"/"+repo.Repository, nil) + if err != nil { + return false, err + } + directories := []string{ + path, + pathpkg.Dir(path), + } + for _, directory := range directories { + options := gitlab.ListTreeOptions{ + Path: &directory, + Ref: &repo.Branch, + } + for { + treeNode, resp, err := g.client.Repositories.ListTree(p.ID, &options) + if err != nil { + return false, err + } + if path == directory { + if resp.TotalItems > 0 { + return true, nil + } + } + for i := range treeNode { + if treeNode[i].Path == path { + return true, nil + } + } + if resp.NextPage == 0 { + // no future pages + break + } + options.Page = resp.NextPage + } + } + return false, nil +} + +func (g *GitlabProvider) listBranches(_ context.Context, repo *Repository) ([]gitlab.Branch, error) { + branches := []gitlab.Branch{} + // If we don't specifically want to query for all branches, just use the default branch and call it a day. + if !g.allBranches { + gitlabBranch, resp, err := g.client.Branches.GetBranch(repo.RepositoryId, repo.Branch, nil) + // 404s are not an error here, just a normal false. 
+ if resp != nil && resp.StatusCode == http.StatusNotFound { + return []gitlab.Branch{}, nil + } + if err != nil { + return nil, err + } + branches = append(branches, *gitlabBranch) + return branches, nil + } + // Otherwise, scrape the ListBranches API. + opt := &gitlab.ListBranchesOptions{ + ListOptions: gitlab.ListOptions{PerPage: 100}, + } + for { + gitlabBranches, resp, err := g.client.Branches.ListBranches(repo.RepositoryId, opt) + // 404s are not an error here, just a normal false. + if resp != nil && resp.StatusCode == http.StatusNotFound { + return []gitlab.Branch{}, nil + } + if err != nil { + return nil, err + } + for _, gitlabBranch := range gitlabBranches { + branches = append(branches, *gitlabBranch) + } + + if resp.NextPage == 0 { + break + } + opt.Page = resp.NextPage + } + return branches, nil +} diff --git a/applicationset/services/scm_provider/gitlab_test.go b/applicationset/services/scm_provider/gitlab_test.go new file mode 100644 index 0000000000000..11b21cb6da6d4 --- /dev/null +++ b/applicationset/services/scm_provider/gitlab_test.go @@ -0,0 +1,1217 @@ +package scm_provider + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +func gitlabMockHandler(t *testing.T) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + switch r.RequestURI { + case "/api/v4": + fmt.Println("here1") + case "/api/v4/groups/test-argocd-proton/projects?include_subgroups=false&per_page=100", "/api/v4/groups/test-argocd-proton/projects?include_subgroups=false&per_page=100&topic=&with_shared=false": + fmt.Println("here") + _, err := io.WriteString(w, `[{ + "id": 27084533, + "description": "", + "name": "argocd", + "name_with_namespace": "test argocd proton / argocd", + "path": "argocd", + "path_with_namespace": 
"test-argocd-proton/argocd", + "created_at": "2021-06-01T17:30:44.724Z", + "default_branch": "master", + "tag_list": [ + "test-topic" + ], + "topics": [ + "test-topic" + ], + "ssh_url_to_repo": "git@gitlab.com:test-argocd-proton/argocd.git", + "http_url_to_repo": "https://gitlab.com/test-argocd-proton/argocd.git", + "web_url": "https://gitlab.com/test-argocd-proton/argocd", + "readme_url": null, + "avatar_url": null, + "forks_count": 0, + "star_count": 0, + "last_activity_at": "2021-06-04T08:19:51.656Z", + "namespace": { + "id": 12258515, + "name": "test argocd proton", + "path": "test-argocd-proton", + "kind": "gro* Connection #0 to host gitlab.com left intact up ", + "full_path ": "test - argocd - proton ", + "parent_id ": null, + "avatar_url ": null, + "web_url ": "https: //gitlab.com/groups/test-argocd-proton" + }, + "container_registry_image_prefix": "registry.gitlab.com/test-argocd-proton/argocd", + "_links": { + "self": "https://gitlab.com/api/v4/projects/27084533", + "issues": "https://gitlab.com/api/v4/projects/27084533/issues", + "merge_requests": "https://gitlab.com/api/v4/projects/27084533/merge_requests", + "repo_branches": "https://gitlab.com/api/v4/projects/27084533/repository/branches", + "labels": "https://gitlab.com/api/v4/projects/27084533/labels", + "events": "https://gitlab.com/api/v4/projects/27084533/events", + "members": "https://gitlab.com/api/v4/projects/27084533/members", + "cluster_agents": "https://gitlab.com/api/v4/projects/27084533/cluster_agents" + }, + "packages_enabled": true, + "empty_repo": false, + "archived": false, + "visibility": "public", + "resolve_outdated_diff_discussions": false, + "container_expiration_policy": { + "cadence": "1d", + "enabled": false, + "keep_n": 10, + "older_than": "90d", + "name_regex": ".*", + "name_regex_keep": null, + "next_run_at": "2021-06-02T17:30:44.740Z" + }, + "issues_enabled": true, + "merge_requests_enabled": true, + "wiki_enabled": true, + "jobs_enabled": true, + "snippets_enabled": true, 
+ "container_registry_enabled": true, + "service_desk_enabled": true, + "can_create_merge_request_in": false, + "issues_access_level": "enabled", + "repository_access_level": "enabled", + "merge_requests_access_level": "enabled", + "forking_access_level": "enabled", + "wiki_access_level": "enabled", + "builds_access_level": "enabled", + "snippets_access_level": "enabled", + "pages_access_level": "enabled", + "operations_access_level": "enabled", + "analytics_access_level": "enabled", + "container_registry_access_level": "enabled", + "security_and_compliance_access_level": "private", + "emails_disabled": null, + "shared_runners_enabled": true, + "lfs_enabled": true, + "creator_id": 2378866, + "import_status": "none", + "open_issues_count": 0, + "ci_default_git_depth": 50, + "ci_forward_deployment_enabled": true, + "ci_job_token_scope_enabled": false, + "public_jobs": true, + "build_timeout": 3600, + "auto_cancel_pending_pipelines": "enabled", + "ci_config_path": "", + "shared_with_groups": [], + "only_allow_merge_if_pipeline_succeeds": false, + "allow_merge_on_skipped_pipeline": null, + "restrict_user_defined_variables": false, + "request_access_enabled": true, + "only_allow_merge_if_all_discussions_are_resolved": false, + "remove_source_branch_after_merge": true, + "printing_merge_request_link_enabled": true, + "merge_method": "merge", + "squash_option": "default_off", + "suggestion_commit_message": null, + "merge_commit_template": null, + "squash_commit_template": null, + "auto_devops_enabled": false, + "auto_devops_deploy_strategy": "continuous", + "autoclose_referenced_issues": true, + "keep_latest_artifact": true, + "runner_token_expiration_interval": null, + "approvals_before_merge": 0, + "mirror": false, + "external_authorization_classification_label": "", + "marked_for_deletion_at": null, + "marked_for_deletion_on": null, + "requirements_enabled": true, + "requirements_access_level": "enabled", + "security_and_compliance_enabled": false, + 
"compliance_frameworks": [], + "issues_template": null, + "merge_requests_template": null, + "merge_pipelines_enabled": false, + "merge_trains_enabled": false + }]`) + if err != nil { + t.Fail() + } + case "/api/v4/groups/test-argocd-proton/projects?include_subgroups=true&per_page=100&topic=&with_shared=false": + fmt.Println("here") + _, err := io.WriteString(w, `[{ + "id": 27084533, + "description": "", + "name": "argocd", + "name_with_namespace": "test argocd proton / argocd", + "path": "argocd", + "path_with_namespace": "test-argocd-proton/argocd", + "created_at": "2021-06-01T17:30:44.724Z", + "default_branch": "master", + "tag_list": [ + "test-topic", + "specific-topic" + ], + "topics": [ + "test-topic", + "specific-topic" + ], + "ssh_url_to_repo": "git@gitlab.com:test-argocd-proton/argocd.git", + "http_url_to_repo": "https://gitlab.com/test-argocd-proton/argocd.git", + "web_url": "https://gitlab.com/test-argocd-proton/argocd", + "readme_url": null, + "avatar_url": null, + "forks_count": 0, + "star_count": 0, + "last_activity_at": "2021-06-04T08:19:51.656Z", + "namespace": { + "id": 12258515, + "name": "test argocd proton", + "path": "test-argocd-proton", + "kind": "gro* Connection #0 to host gitlab.com left intact up ", + "full_path ": "test - argocd - proton ", + "parent_id ": null, + "avatar_url ": null, + "web_url ": "https: //gitlab.com/groups/test-argocd-proton" + }, + "container_registry_image_prefix": "registry.gitlab.com/test-argocd-proton/argocd", + "_links": { + "self": "https://gitlab.com/api/v4/projects/27084533", + "issues": "https://gitlab.com/api/v4/projects/27084533/issues", + "merge_requests": "https://gitlab.com/api/v4/projects/27084533/merge_requests", + "repo_branches": "https://gitlab.com/api/v4/projects/27084533/repository/branches", + "labels": "https://gitlab.com/api/v4/projects/27084533/labels", + "events": "https://gitlab.com/api/v4/projects/27084533/events", + "members": "https://gitlab.com/api/v4/projects/27084533/members", + 
"cluster_agents": "https://gitlab.com/api/v4/projects/27084533/cluster_agents" + }, + "packages_enabled": true, + "empty_repo": false, + "archived": false, + "visibility": "public", + "resolve_outdated_diff_discussions": false, + "container_expiration_policy": { + "cadence": "1d", + "enabled": false, + "keep_n": 10, + "older_than": "90d", + "name_regex": ".*", + "name_regex_keep": null, + "next_run_at": "2021-06-02T17:30:44.740Z" + }, + "issues_enabled": true, + "merge_requests_enabled": true, + "wiki_enabled": true, + "jobs_enabled": true, + "snippets_enabled": true, + "container_registry_enabled": true, + "service_desk_enabled": true, + "can_create_merge_request_in": false, + "issues_access_level": "enabled", + "repository_access_level": "enabled", + "merge_requests_access_level": "enabled", + "forking_access_level": "enabled", + "wiki_access_level": "enabled", + "builds_access_level": "enabled", + "snippets_access_level": "enabled", + "pages_access_level": "enabled", + "operations_access_level": "enabled", + "analytics_access_level": "enabled", + "container_registry_access_level": "enabled", + "security_and_compliance_access_level": "private", + "emails_disabled": null, + "shared_runners_enabled": true, + "lfs_enabled": true, + "creator_id": 2378866, + "import_status": "none", + "open_issues_count": 0, + "ci_default_git_depth": 50, + "ci_forward_deployment_enabled": true, + "ci_job_token_scope_enabled": false, + "public_jobs": true, + "build_timeout": 3600, + "auto_cancel_pending_pipelines": "enabled", + "ci_config_path": "", + "shared_with_groups": [], + "only_allow_merge_if_pipeline_succeeds": false, + "allow_merge_on_skipped_pipeline": null, + "restrict_user_defined_variables": false, + "request_access_enabled": true, + "only_allow_merge_if_all_discussions_are_resolved": false, + "remove_source_branch_after_merge": true, + "printing_merge_request_link_enabled": true, + "merge_method": "merge", + "squash_option": "default_off", + "suggestion_commit_message": 
null, + "merge_commit_template": null, + "squash_commit_template": null, + "auto_devops_enabled": false, + "auto_devops_deploy_strategy": "continuous", + "autoclose_referenced_issues": true, + "keep_latest_artifact": true, + "runner_token_expiration_interval": null, + "approvals_before_merge": 0, + "mirror": false, + "external_authorization_classification_label": "", + "marked_for_deletion_at": null, + "marked_for_deletion_on": null, + "requirements_enabled": true, + "requirements_access_level": "enabled", + "security_and_compliance_enabled": false, + "compliance_frameworks": [], + "issues_template": null, + "merge_requests_template": null, + "merge_pipelines_enabled": false, + "merge_trains_enabled": false + }, + { + "id": 27084538, + "description": "This is a Project from a Subgroup", + "name": "argocd-subgroup", + "name_with_namespace": "test argocd proton / subgroup / argocd-subgroup", + "path": "argocd-subgroup", + "path_with_namespace": "test-argocd-proton/subgroup/argocd-subgroup", + "created_at": "2021-06-01T17:30:44.724Z", + "default_branch": "master", + "tag_list": [ + "test-topic" + ], + "topics": [ + "test-topic" + ], + "ssh_url_to_repo": "git@gitlab.com:test-argocd-proton/subgroup/argocd-subgroup.git", + "http_url_to_repo": "https://gitlab.com/test-argocd-proton/subgroup/argocd-subgroup.git", + "web_url": "https://gitlab.com/test-argocd-proton/subgroup/argocd-subgroup", + "readme_url": null, + "avatar_url": null, + "forks_count": 0, + "star_count": 0, + "last_activity_at": "2021-06-04T08:19:51.656Z", + "namespace": { + "id": 12258542, + "name": "subgroup", + "path": "subgroup", + "kind": "group ", + "full_path ": "test-argocd-proton/subgroup", + "parent_id ": 12258515, + "avatar_url ": null, + "web_url ": "https: //gitlab.com/groups/test-argocd-proton/subgroup" + }, + "container_registry_image_prefix": "registry.gitlab.com/test-argocd-proton/subgroup/argocd", + "_links": { + "self": "https://gitlab.com/api/v4/projects/27084538", + "issues": 
"https://gitlab.com/api/v4/projects/27084538/issues", + "merge_requests": "https://gitlab.com/api/v4/projects/27084538/merge_requests", + "repo_branches": "https://gitlab.com/api/v4/projects/27084538/repository/branches", + "labels": "https://gitlab.com/api/v4/projects/27084538/labels", + "events": "https://gitlab.com/api/v4/projects/27084538/events", + "members": "https://gitlab.com/api/v4/projects/27084538/members", + "cluster_agents": "https://gitlab.com/api/v4/projects/27084538/cluster_agents" + }, + "packages_enabled": true, + "empty_repo": false, + "archived": false, + "visibility": "public", + "resolve_outdated_diff_discussions": false, + "container_expiration_policy": { + "cadence": "1d", + "enabled": false, + "keep_n": 10, + "older_than": "90d", + "name_regex": ".*", + "name_regex_keep": null, + "next_run_at": "2021-06-02T17:30:44.740Z" + }, + "issues_enabled": true, + "merge_requests_enabled": true, + "wiki_enabled": true, + "jobs_enabled": true, + "snippets_enabled": true, + "container_registry_enabled": true, + "service_desk_enabled": true, + "can_create_merge_request_in": false, + "issues_access_level": "enabled", + "repository_access_level": "enabled", + "merge_requests_access_level": "enabled", + "forking_access_level": "enabled", + "wiki_access_level": "enabled", + "builds_access_level": "enabled", + "snippets_access_level": "enabled", + "pages_access_level": "enabled", + "operations_access_level": "enabled", + "analytics_access_level": "enabled", + "container_registry_access_level": "enabled", + "security_and_compliance_access_level": "private", + "emails_disabled": null, + "shared_runners_enabled": true, + "lfs_enabled": true, + "creator_id": 2378866, + "import_status": "none", + "open_issues_count": 0, + "ci_default_git_depth": 50, + "ci_forward_deployment_enabled": true, + "ci_job_token_scope_enabled": false, + "public_jobs": true, + "build_timeout": 3600, + "auto_cancel_pending_pipelines": "enabled", + "ci_config_path": "", + 
"shared_with_groups": [], + "only_allow_merge_if_pipeline_succeeds": false, + "allow_merge_on_skipped_pipeline": null, + "restrict_user_defined_variables": false, + "request_access_enabled": true, + "only_allow_merge_if_all_discussions_are_resolved": false, + "remove_source_branch_after_merge": true, + "printing_merge_request_link_enabled": true, + "merge_method": "merge", + "squash_option": "default_off", + "suggestion_commit_message": null, + "merge_commit_template": null, + "squash_commit_template": null, + "auto_devops_enabled": false, + "auto_devops_deploy_strategy": "continuous", + "autoclose_referenced_issues": true, + "keep_latest_artifact": true, + "runner_token_expiration_interval": null, + "approvals_before_merge": 0, + "mirror": false, + "external_authorization_classification_label": "", + "marked_for_deletion_at": null, + "marked_for_deletion_on": null, + "requirements_enabled": true, + "requirements_access_level": "enabled", + "security_and_compliance_enabled": false, + "compliance_frameworks": [], + "issues_template": null, + "merge_requests_template": null, + "merge_pipelines_enabled": false, + "merge_trains_enabled": false + } + ]`) + if err != nil { + t.Fail() + } + case "/api/v4/groups/test-argocd-proton/projects?include_subgroups=false&per_page=100&topic=specific-topic&with_shared=false": + fmt.Println("here") + _, err := io.WriteString(w, `[{ + "id": 27084533, + "description": "", + "name": "argocd", + "name_with_namespace": "test argocd proton / argocd", + "path": "argocd", + "path_with_namespace": "test-argocd-proton/argocd", + "created_at": "2021-06-01T17:30:44.724Z", + "default_branch": "master", + "tag_list": [ + "test-topic", + "specific-topic" + ], + "topics": [ + "test-topic", + "specific-topic" + ], + "ssh_url_to_repo": "git@gitlab.com:test-argocd-proton/argocd.git", + "http_url_to_repo": "https://gitlab.com/test-argocd-proton/argocd.git", + "web_url": "https://gitlab.com/test-argocd-proton/argocd", + "readme_url": null, + 
"avatar_url": null, + "forks_count": 0, + "star_count": 0, + "last_activity_at": "2021-06-04T08:19:51.656Z", + "namespace": { + "id": 12258515, + "name": "test argocd proton", + "path": "test-argocd-proton", + "kind": "gro* Connection #0 to host gitlab.com left intact up ", + "full_path ": "test - argocd - proton ", + "parent_id ": null, + "avatar_url ": null, + "web_url ": "https: //gitlab.com/groups/test-argocd-proton" + }, + "container_registry_image_prefix": "registry.gitlab.com/test-argocd-proton/argocd", + "_links": { + "self": "https://gitlab.com/api/v4/projects/27084533", + "issues": "https://gitlab.com/api/v4/projects/27084533/issues", + "merge_requests": "https://gitlab.com/api/v4/projects/27084533/merge_requests", + "repo_branches": "https://gitlab.com/api/v4/projects/27084533/repository/branches", + "labels": "https://gitlab.com/api/v4/projects/27084533/labels", + "events": "https://gitlab.com/api/v4/projects/27084533/events", + "members": "https://gitlab.com/api/v4/projects/27084533/members", + "cluster_agents": "https://gitlab.com/api/v4/projects/27084533/cluster_agents" + }, + "packages_enabled": true, + "empty_repo": false, + "archived": false, + "visibility": "public", + "resolve_outdated_diff_discussions": false, + "container_expiration_policy": { + "cadence": "1d", + "enabled": false, + "keep_n": 10, + "older_than": "90d", + "name_regex": ".*", + "name_regex_keep": null, + "next_run_at": "2021-06-02T17:30:44.740Z" + }, + "issues_enabled": true, + "merge_requests_enabled": true, + "wiki_enabled": true, + "jobs_enabled": true, + "snippets_enabled": true, + "container_registry_enabled": true, + "service_desk_enabled": true, + "can_create_merge_request_in": false, + "issues_access_level": "enabled", + "repository_access_level": "enabled", + "merge_requests_access_level": "enabled", + "forking_access_level": "enabled", + "wiki_access_level": "enabled", + "builds_access_level": "enabled", + "snippets_access_level": "enabled", + "pages_access_level": 
"enabled", + "operations_access_level": "enabled", + "analytics_access_level": "enabled", + "container_registry_access_level": "enabled", + "security_and_compliance_access_level": "private", + "emails_disabled": null, + "shared_runners_enabled": true, + "lfs_enabled": true, + "creator_id": 2378866, + "import_status": "none", + "open_issues_count": 0, + "ci_default_git_depth": 50, + "ci_forward_deployment_enabled": true, + "ci_job_token_scope_enabled": false, + "public_jobs": true, + "build_timeout": 3600, + "auto_cancel_pending_pipelines": "enabled", + "ci_config_path": "", + "shared_with_groups": [], + "only_allow_merge_if_pipeline_succeeds": false, + "allow_merge_on_skipped_pipeline": null, + "restrict_user_defined_variables": false, + "request_access_enabled": true, + "only_allow_merge_if_all_discussions_are_resolved": false, + "remove_source_branch_after_merge": true, + "printing_merge_request_link_enabled": true, + "merge_method": "merge", + "squash_option": "default_off", + "suggestion_commit_message": null, + "merge_commit_template": null, + "squash_commit_template": null, + "auto_devops_enabled": false, + "auto_devops_deploy_strategy": "continuous", + "autoclose_referenced_issues": true, + "keep_latest_artifact": true, + "runner_token_expiration_interval": null, + "approvals_before_merge": 0, + "mirror": false, + "external_authorization_classification_label": "", + "marked_for_deletion_at": null, + "marked_for_deletion_on": null, + "requirements_enabled": true, + "requirements_access_level": "enabled", + "security_and_compliance_enabled": false, + "compliance_frameworks": [], + "issues_template": null, + "merge_requests_template": null, + "merge_pipelines_enabled": false, + "merge_trains_enabled": false + } + ]`) + if err != nil { + t.Fail() + } + case "/api/v4/groups/test-argocd-proton/projects?include_subgroups=true&per_page=100&topic=&with_shared=true": + fmt.Println("here") + _, err := io.WriteString(w, `[{ + "id": 27084533, + "description": "", + 
"name": "argocd", + "name_with_namespace": "test argocd proton / argocd", + "path": "argocd", + "path_with_namespace": "test-argocd-proton/argocd", + "created_at": "2021-06-01T17:30:44.724Z", + "default_branch": "master", + "tag_list": [ + "test-topic" + ], + "topics": [ + "test-topic" + ], + "ssh_url_to_repo": "git@gitlab.com:test-argocd-proton/argocd.git", + "http_url_to_repo": "https://gitlab.com/test-argocd-proton/argocd.git", + "web_url": "https://gitlab.com/test-argocd-proton/argocd", + "readme_url": null, + "avatar_url": null, + "forks_count": 0, + "star_count": 0, + "last_activity_at": "2021-06-04T08:19:51.656Z", + "namespace": { + "id": 12258515, + "name": "test argocd proton", + "path": "test-argocd-proton", + "kind": "gro* Connection #0 to host gitlab.com left intact up ", + "full_path ": "test - argocd - proton ", + "parent_id ": null, + "avatar_url ": null, + "web_url ": "https: //gitlab.com/groups/test-argocd-proton" + }, + "container_registry_image_prefix": "registry.gitlab.com/test-argocd-proton/argocd", + "_links": { + "self": "https://gitlab.com/api/v4/projects/27084533", + "issues": "https://gitlab.com/api/v4/projects/27084533/issues", + "merge_requests": "https://gitlab.com/api/v4/projects/27084533/merge_requests", + "repo_branches": "https://gitlab.com/api/v4/projects/27084533/repository/branches", + "labels": "https://gitlab.com/api/v4/projects/27084533/labels", + "events": "https://gitlab.com/api/v4/projects/27084533/events", + "members": "https://gitlab.com/api/v4/projects/27084533/members", + "cluster_agents": "https://gitlab.com/api/v4/projects/27084533/cluster_agents" + }, + "packages_enabled": true, + "empty_repo": false, + "archived": false, + "visibility": "public", + "resolve_outdated_diff_discussions": false, + "container_expiration_policy": { + "cadence": "1d", + "enabled": false, + "keep_n": 10, + "older_than": "90d", + "name_regex": ".*", + "name_regex_keep": null, + "next_run_at": "2021-06-02T17:30:44.740Z" + }, + 
"issues_enabled": true, + "merge_requests_enabled": true, + "wiki_enabled": true, + "jobs_enabled": true, + "snippets_enabled": true, + "container_registry_enabled": true, + "service_desk_enabled": true, + "can_create_merge_request_in": false, + "issues_access_level": "enabled", + "repository_access_level": "enabled", + "merge_requests_access_level": "enabled", + "forking_access_level": "enabled", + "wiki_access_level": "enabled", + "builds_access_level": "enabled", + "snippets_access_level": "enabled", + "pages_access_level": "enabled", + "operations_access_level": "enabled", + "analytics_access_level": "enabled", + "container_registry_access_level": "enabled", + "security_and_compliance_access_level": "private", + "emails_disabled": null, + "shared_runners_enabled": true, + "lfs_enabled": true, + "creator_id": 2378866, + "import_status": "none", + "open_issues_count": 0, + "ci_default_git_depth": 50, + "ci_forward_deployment_enabled": true, + "ci_job_token_scope_enabled": false, + "public_jobs": true, + "build_timeout": 3600, + "auto_cancel_pending_pipelines": "enabled", + "ci_config_path": "", + "shared_with_groups": [], + "only_allow_merge_if_pipeline_succeeds": false, + "allow_merge_on_skipped_pipeline": null, + "restrict_user_defined_variables": false, + "request_access_enabled": true, + "only_allow_merge_if_all_discussions_are_resolved": false, + "remove_source_branch_after_merge": true, + "printing_merge_request_link_enabled": true, + "merge_method": "merge", + "squash_option": "default_off", + "suggestion_commit_message": null, + "merge_commit_template": null, + "squash_commit_template": null, + "auto_devops_enabled": false, + "auto_devops_deploy_strategy": "continuous", + "autoclose_referenced_issues": true, + "keep_latest_artifact": true, + "runner_token_expiration_interval": null, + "approvals_before_merge": 0, + "mirror": false, + "external_authorization_classification_label": "", + "marked_for_deletion_at": null, + "marked_for_deletion_on": null, + 
"requirements_enabled": true, + "requirements_access_level": "enabled", + "security_and_compliance_enabled": false, + "compliance_frameworks": [], + "issues_template": null, + "merge_requests_template": null, + "merge_pipelines_enabled": false, + "merge_trains_enabled": false + }, + { + "id": 27084534, + "description": "This is a Shared Project", + "name": "shared-argocd", + "name_with_namespace": "shared project to test argocd proton / argocd", + "path": "shared-argocd", + "path_with_namespace": "test-shared-argocd-proton/shared-argocd", + "created_at": "2021-06-11T17:30:44.724Z", + "default_branch": "master", + "tag_list": [ + "test-topic" + ], + "topics": [ + "test-topic" + ], + "ssh_url_to_repo": "git@gitlab.com:test-shared-argocd-proton/shared-argocd.git", + "http_url_to_repo": "https://gitlab.com/test-shared-argocd-proton/shared-argocd.git", + "web_url": "https://gitlab.com/test-shared-argocd-proton/shared-argocd", + "readme_url": null, + "avatar_url": null, + "forks_count": 0, + "star_count": 0, + "last_activity_at": "2021-06-04T08:19:51.656Z", + "namespace": { + "id": 12258518, + "name": "test shared argocd proton", + "path": "test-shared-argocd-proton", + "kind": "group", + "full_path ": "test-shared-argocd-proton", + "parent_id ": null, + "avatar_url ": null, + "web_url ": "https: //gitlab.com/groups/test-shared-argocd-proton" + }, + "container_registry_image_prefix": "registry.gitlab.com/test-shared-argocd-proton/shared-argocd", + "_links": { + "self": "https://gitlab.com/api/v4/projects/27084534", + "issues": "https://gitlab.com/api/v4/projects/27084534/issues", + "merge_requests": "https://gitlab.com/api/v4/projects/27084534/merge_requests", + "repo_branches": "https://gitlab.com/api/v4/projects/27084534/repository/branches", + "labels": "https://gitlab.com/api/v4/projects/27084534/labels", + "events": "https://gitlab.com/api/v4/projects/27084534/events", + "members": "https://gitlab.com/api/v4/projects/27084534/members", + "cluster_agents": 
"https://gitlab.com/api/v4/projects/27084534/cluster_agents" + }, + "packages_enabled": true, + "empty_repo": false, + "archived": false, + "visibility": "public", + "resolve_outdated_diff_discussions": false, + "container_expiration_policy": { + "cadence": "1d", + "enabled": false, + "keep_n": 10, + "older_than": "90d", + "name_regex": ".*", + "name_regex_keep": null, + "next_run_at": "2021-06-12T17:30:44.740Z" + }, + "issues_enabled": true, + "merge_requests_enabled": true, + "wiki_enabled": true, + "jobs_enabled": true, + "snippets_enabled": true, + "container_registry_enabled": true, + "service_desk_enabled": true, + "can_create_merge_request_in": false, + "issues_access_level": "enabled", + "repository_access_level": "enabled", + "merge_requests_access_level": "enabled", + "forking_access_level": "enabled", + "wiki_access_level": "enabled", + "builds_access_level": "enabled", + "snippets_access_level": "enabled", + "pages_access_level": "enabled", + "operations_access_level": "enabled", + "analytics_access_level": "enabled", + "container_registry_access_level": "enabled", + "security_and_compliance_access_level": "private", + "emails_disabled": null, + "shared_runners_enabled": true, + "lfs_enabled": true, + "creator_id": 2378866, + "import_status": "none", + "open_issues_count": 0, + "ci_default_git_depth": 50, + "ci_forward_deployment_enabled": true, + "ci_job_token_scope_enabled": false, + "public_jobs": true, + "build_timeout": 3600, + "auto_cancel_pending_pipelines": "enabled", + "ci_config_path": "", + "shared_with_groups": [ + { + "group_id": 12258515, + "group_name": "test-argocd-proton", + "group_full_path": "test-shared-argocd-proton", + "group_access_level": 30, + "expires_at": null + } + ], + "only_allow_merge_if_pipeline_succeeds": false, + "allow_merge_on_skipped_pipeline": null, + "restrict_user_defined_variables": false, + "request_access_enabled": true, + "only_allow_merge_if_all_discussions_are_resolved": false, + 
"remove_source_branch_after_merge": true, + "printing_merge_request_link_enabled": true, + "merge_method": "merge", + "squash_option": "default_off", + "suggestion_commit_message": null, + "merge_commit_template": null, + "squash_commit_template": null, + "auto_devops_enabled": false, + "auto_devops_deploy_strategy": "continuous", + "autoclose_referenced_issues": true, + "keep_latest_artifact": true, + "runner_token_expiration_interval": null, + "approvals_before_merge": 0, + "mirror": false, + "external_authorization_classification_label": "", + "marked_for_deletion_at": null, + "marked_for_deletion_on": null, + "requirements_enabled": true, + "requirements_access_level": "enabled", + "security_and_compliance_enabled": false, + "compliance_frameworks": [], + "issues_template": null, + "merge_requests_template": null, + "merge_pipelines_enabled": false, + "merge_trains_enabled": false + }]`) + if err != nil { + t.Fail() + } + case "/api/v4/projects/27084533/repository/branches/master": + fmt.Println("returning") + _, err := io.WriteString(w, `{ + "name": "master", + "commit": { + "id": "8898d7999fc99dd0fd578650b58b244fc63f6b53", + "short_id": "8898d799", + "created_at": "2021-06-04T08:24:44.000+00:00", + "parent_ids": ["3c9d50be1ef949ad28674e238c7e12a17b1e9706", "56482e001731640b4123cf177e51c696f08a3005"], + "title": "Merge branch 'pipeline-1317911429' into 'master'", + "message": "Merge branch 'pipeline-1317911429' into 'master'\n\n[testapp-ci] manifests/demo/test-app.yaml: release v1.1.0\n\nSee merge request test-argocd-proton/argocd!3", + "author_name": "Martin Vozník", + "author_email": "martin@voznik.cz", + "authored_date": "2021-06-04T08:24:44.000+00:00", + "committer_name": "Martin Vozník", + "committer_email": "martin@voznik.cz", + "committed_date": "2021-06-04T08:24:44.000+00:00", + "trailers": {}, + "web_url": "https://gitlab.com/test-argocd-proton/argocd/-/commit/8898d7999fc99dd0fd578650b58b244fc63f6b53" + }, + "merged": false, + "protected": true, + 
"developers_can_push": false, + "developers_can_merge": false, + "can_push": false, + "default": true, + "web_url": "https://gitlab.com/test-argocd-proton/argocd/-/tree/master" + }`) + if err != nil { + t.Fail() + } + case "/api/v4/projects/27084533/repository/branches?per_page=100": + _, err := io.WriteString(w, `[{ + "name": "master", + "commit": { + "id": "8898d7999fc99dd0fd578650b58b244fc63f6b53", + "short_id": "8898d799", + "created_at": "2021-06-04T08:24:44.000+00:00", + "parent_ids": null, + "title": "Merge branch 'pipeline-1317911429' into 'master'", + "message": "Merge branch 'pipeline-1317911429' into 'master'", + "author_name": "Martin Vozník", + "author_email": "martin@voznik.cz", + "authored_date": "2021-06-04T08:24:44.000+00:00", + "committer_name": "Martin Vozník", + "committer_email": "martin@voznik.cz", + "committed_date": "2021-06-04T08:24:44.000+00:00", + "trailers": null, + "web_url": "https://gitlab.com/test-argocd-proton/argocd/-/commit/8898d7999fc99dd0fd578650b58b244fc63f6b53" + }, + "merged": false, + "protected": true, + "developers_can_push": false, + "developers_can_merge": false, + "can_push": false, + "default": true, + "web_url": "https://gitlab.com/test-argocd-proton/argocd/-/tree/master" + }, { + "name": "pipeline-1310077506", + "commit": { + "id": "0f92540e5f396ba960adea4ed0aa905baf3f73d1", + "short_id": "0f92540e", + "created_at": "2021-06-01T18:39:59.000+00:00", + "parent_ids": null, + "title": "[testapp-ci] manifests/demo/test-app.yaml: release v1.0.1", + "message": "[testapp-ci] manifests/demo/test-app.yaml: release v1.0.1", + "author_name": "ci-test-app", + "author_email": "mvoznik+cicd@protonmail.com", + "authored_date": "2021-06-01T18:39:59.000+00:00", + "committer_name": "ci-test-app", + "committer_email": "mvoznik+cicd@protonmail.com", + "committed_date": "2021-06-01T18:39:59.000+00:00", + "trailers": null, + "web_url": "https://gitlab.com/test-argocd-proton/argocd/-/commit/0f92540e5f396ba960adea4ed0aa905baf3f73d1" + }, + 
"merged": false, + "protected": false, + "developers_can_push": false, + "developers_can_merge": false, + "can_push": false, + "default": false, + "web_url": "https://gitlab.com/test-argocd-proton/argocd/-/tree/pipeline-1310077506" + }]`) + if err != nil { + t.Fail() + } + case "/api/v4/projects/27084534/repository/branches?per_page=100": + _, err := io.WriteString(w, `[{ + "name": "master", + "commit": { + "id": "8898d7999fc99dd0fd578650b58b244fc63f6b53", + "short_id": "8898d799", + "created_at": "2021-06-04T08:24:44.000+00:00", + "parent_ids": null, + "title": "Merge branch 'pipeline-1317911429' into 'master'", + "message": "Merge branch 'pipeline-1317911429' into 'master'", + "author_name": "Martin Vozník", + "author_email": "martin@voznik.cz", + "authored_date": "2021-06-04T08:24:44.000+00:00", + "committer_name": "Martin Vozník", + "committer_email": "martin@voznik.cz", + "committed_date": "2021-06-04T08:24:44.000+00:00", + "trailers": null, + "web_url": "https://gitlab.com/test-shared-argocd-proton/shared-argocd/-/commit/8898d7999fc99dd0fd578650b58b244fc63f6b53" + }, + "merged": false, + "protected": true, + "developers_can_push": false, + "developers_can_merge": false, + "can_push": false, + "default": true, + "web_url": "https://gitlab.com/test-shared-argocd-proton/shared-argocd/-/tree/master" + }, { + "name": "pipeline-2310077506", + "commit": { + "id": "0f92540e5f396ba960adea4ed0aa905baf3f73d1", + "short_id": "0f92540e", + "created_at": "2021-06-01T18:39:59.000+00:00", + "parent_ids": null, + "title": "[testapp-ci] manifests/demo/test-app.yaml: release v1.0.1", + "message": "[testapp-ci] manifests/demo/test-app.yaml: release v1.0.1", + "author_name": "ci-test-app", + "author_email": "mvoznik+cicd@protonmail.com", + "authored_date": "2021-06-01T18:39:59.000+00:00", + "committer_name": "ci-test-app", + "committer_email": "mvoznik+cicd@protonmail.com", + "committed_date": "2021-06-01T18:39:59.000+00:00", + "trailers": null, + "web_url": 
"https://gitlab.com/test-shared-argocd-proton/shared-argocd/-/commit/0f92540e5f396ba960adea4ed0aa905baf3f73d1" + }, + "merged": false, + "protected": false, + "developers_can_push": false, + "developers_can_merge": false, + "can_push": false, + "default": false, + "web_url": "https://gitlab.com/test-shared-argocd-proton/shared-argocd/-/tree/pipeline-1310077506" + }]`) + if err != nil { + t.Fail() + } + case "/api/v4/projects/27084538/repository/branches?per_page=100": + _, err := io.WriteString(w, `[{ + "name": "master", + "commit": { + "id": "8898d7999fc99dd0fd578650b58b244fc63f6b58", + "short_id": "8898d801", + "created_at": "2021-06-04T08:24:44.000+00:00", + "parent_ids": null, + "title": "Merge branch 'pipeline-1317911429' into 'master'", + "message": "Merge branch 'pipeline-1317911429' into 'master'", + "author_name": "Martin Vozník", + "author_email": "martin@voznik.cz", + "authored_date": "2021-06-04T08:24:44.000+00:00", + "committer_name": "Martin Vozník", + "committer_email": "martin@voznik.cz", + "committed_date": "2021-06-04T08:24:44.000+00:00", + "trailers": null, + "web_url": "https://gitlab.com/test-argocd-proton/subgroup/argocd-subgroup/-/commit/8898d7999fc99dd0fd578650b58b244fc63f6b53" + }, + "merged": false, + "protected": true, + "developers_can_push": false, + "developers_can_merge": false, + "can_push": false, + "default": true, + "web_url": "https://gitlab.com/test-argocd-proton/subgroup/argocd-subgroup/-/tree/master" + }, { + "name": "pipeline-2310077506", + "commit": { + "id": "0f92540e5f396ba960adea4ed0aa905baf3f73d1", + "short_id": "0f92540e", + "created_at": "2021-06-01T18:39:59.000+00:00", + "parent_ids": null, + "title": "[testapp-ci] manifests/demo/test-app.yaml: release v1.0.1", + "message": "[testapp-ci] manifests/demo/test-app.yaml: release v1.0.1", + "author_name": "ci-test-app", + "author_email": "mvoznik+cicd@protonmail.com", + "authored_date": "2021-06-01T18:39:59.000+00:00", + "committer_name": "ci-test-app", + 
"committer_email": "mvoznik+cicd@protonmail.com", + "committed_date": "2021-06-01T18:39:59.000+00:00", + "trailers": null, + "web_url": "https://gitlab.com/test-argocd-proton/subgroup/argocd-subgroup/-/commit/0f92540e5f396ba960adea4ed0aa905baf3f73d1" + }, + "merged": false, + "protected": false, + "developers_can_push": false, + "developers_can_merge": false, + "can_push": false, + "default": false, + "web_url": "https://gitlab.com/test-argocd-proton/subgroup/argocd-subgroup/-/tree/pipeline-1310077506" + }]`) + if err != nil { + t.Fail() + } + case "/api/v4/projects/test-argocd-proton%2Fargocd": + fmt.Println("auct") + _, err := io.WriteString(w, `{ + "id": 27084533, + "description": "", + "name": "argocd", + "name_with_namespace": "test argocd proton / argocd", + "path": "argocd", + "path_with_namespace": "test-argocd-proton/argocd", + "created_at": "2021-06-01T17:30:44.724Z", + "default_branch": "master", + "tag_list": [ + "test-topic" + ], + "topics": [ + "test-topic" + ], + "ssh_url_to_repo": "git@gitlab.com:test-argocd-proton/argocd.git", + "http_url_to_repo": "https://gitlab.com/test-argocd-proton/argocd.git", + "web_url": "https://gitlab.com/test-argocd-proton/argocd", + "readme_url": null, + "avatar_url": null, + "forks_count": 0, + "star_count": 0, + "last_activity_at": "2021-06-04T08:19:51.656Z", + "namespace": { + "id": 12258515, + "name": "test argocd proton", + "path": "test-argocd-proton", + "kind": "group", + "full_path": "test-argocd-proton", + "parent_id": null, + "avatar_url": null, + "web_url": "https://gitlab.com/groups/test-argocd-proton" + } + }`) + if err != nil { + t.Fail() + } + case "/api/v4/projects/27084533/repository/tree?path=argocd&ref=master": + _, err := io.WriteString(w, 
`[{"id":"ca14f2a3718159c74572a5325fb4bfb0662a2d3e","name":"ingress.yaml","type":"blob","path":"argocd/ingress.yaml","mode":"100644"},{"id":"de2a53a73b1550b3e0f4d37ea0a6d878bf9c5096","name":"install.yaml","type":"blob","path":"argocd/install.yaml","mode":"100644"}]`) + if err != nil { + t.Fail() + } + case "/api/v4/projects/27084533/repository/tree?path=.&ref=master": + _, err := io.WriteString(w, `[{"id":"f2bf99fa8f7a27df9c43d2dffc8c8cd747f3181a","name":"argocd","type":"tree","path":"argocd","mode":"040000"},{"id":"68a3125232e01c1583a6a6299534ce10c5e7dd83","name":"manifests","type":"tree","path":"manifests","mode":"040000"}]`) + if err != nil { + t.Fail() + } + case "/api/v4/projects/27084533/repository/branches/foo": + w.WriteHeader(http.StatusNotFound) + default: + _, err := io.WriteString(w, `[]`) + if err != nil { + t.Fail() + } + } + } +} +func TestGitlabListRepos(t *testing.T) { + cases := []struct { + name, proto, url, topic string + hasError, allBranches, includeSubgroups, includeSharedProjects, insecure bool + branches []string + filters []v1alpha1.SCMProviderGeneratorFilter + }{ + { + name: "blank protocol", + url: "git@gitlab.com:test-argocd-proton/argocd.git", + branches: []string{"master"}, + }, + { + name: "ssh protocol", + proto: "ssh", + url: "git@gitlab.com:test-argocd-proton/argocd.git", + }, + { + name: "https protocol", + proto: "https", + url: "https://gitlab.com/test-argocd-proton/argocd.git", + }, + { + name: "other protocol", + proto: "other", + hasError: true, + }, + { + name: "all branches", + allBranches: true, + url: "git@gitlab.com:test-argocd-proton/argocd.git", + branches: []string{"master"}, + }, + { + name: "all subgroups", + allBranches: true, + url: "git@gitlab.com:test-argocd-proton/argocd.git", + branches: []string{"master"}, + includeSharedProjects: false, + includeSubgroups: true, + }, + { + name: "all subgroups and shared projects", + allBranches: true, + url: "git@gitlab.com:test-argocd-proton/argocd.git", + branches: 
[]string{"master"}, + includeSharedProjects: true, + includeSubgroups: true, + }, + { + name: "specific topic", + allBranches: true, + url: "git@gitlab.com:test-argocd-proton/argocd.git", + branches: []string{"master"}, + includeSubgroups: false, + topic: "specific-topic", + }, + } + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + gitlabMockHandler(t)(w, r) + })) + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + provider, _ := NewGitlabProvider(context.Background(), "test-argocd-proton", "", ts.URL, c.allBranches, c.includeSubgroups, c.includeSharedProjects, c.insecure, "", c.topic) + rawRepos, err := ListRepos(context.Background(), provider, c.filters, c.proto) + if c.hasError { + assert.NotNil(t, err) + } else { + assert.Nil(t, err) + // Just check that this one project shows up. Not a great test but better than nothing? + repos := []*Repository{} + uniqueRepos := map[string]int{} + branches := []string{} + for _, r := range rawRepos { + if r.Repository == "argocd" { + repos = append(repos, r) + branches = append(branches, r.Branch) + } + uniqueRepos[r.Repository]++ + } + assert.NotEmpty(t, repos) + assert.Equal(t, c.url, repos[0].URL) + for _, b := range c.branches { + assert.Contains(t, branches, b) + } + // In case of listing subgroups, validate the number of returned projects + if c.includeSubgroups || c.includeSharedProjects { + assert.Equal(t, 2, len(uniqueRepos)) + } + // In case we filter on the topic, ensure we got only one repo returned + if c.topic != "" { + assert.Equal(t, 1, len(uniqueRepos)) + } + } + }) + } +} + +func TestGitlabHasPath(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + gitlabMockHandler(t)(w, r) + })) + host, _ := NewGitlabProvider(context.Background(), "test-argocd-proton", "", ts.URL, false, true, true, false, "", "") + repo := &Repository{ + Organization: "test-argocd-proton", + Repository: "argocd", + Branch: 
"master", + } + + cases := []struct { + name, path string + exists bool + }{ + { + name: "directory exists", + path: "argocd", + exists: true, + }, + { + name: "file exists", + path: "argocd/install.yaml", + exists: true, + }, + { + name: "directory does not exist", + path: "notathing", + exists: false, + }, + { + name: "file does not exist", + path: "argocd/notathing.yaml", + exists: false, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + ok, err := host.RepoHasPath(context.Background(), repo, c.path) + assert.Nil(t, err) + assert.Equal(t, c.exists, ok) + }) + } +} + +func TestGitlabGetBranches(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + gitlabMockHandler(t)(w, r) + })) + host, _ := NewGitlabProvider(context.Background(), "test-argocd-proton", "", ts.URL, false, true, true, false, "", "") + + repo := &Repository{ + RepositoryId: 27084533, + Branch: "master", + } + t.Run("branch exists", func(t *testing.T) { + repos, err := host.GetBranches(context.Background(), repo) + assert.Nil(t, err) + assert.Equal(t, repos[0].Branch, "master") + }) + + repo2 := &Repository{ + RepositoryId: 27084533, + Branch: "foo", + } + t.Run("unknown branch", func(t *testing.T) { + _, err := host.GetBranches(context.Background(), repo2) + assert.NoError(t, err) + }) +} diff --git a/applicationset/services/scm_provider/mock.go b/applicationset/services/scm_provider/mock.go new file mode 100644 index 0000000000000..bf7e452c81c3a --- /dev/null +++ b/applicationset/services/scm_provider/mock.go @@ -0,0 +1,50 @@ +package scm_provider + +import "context" + +type MockProvider struct { + Repos []*Repository +} + +var _ SCMProviderService = &MockProvider{} + +func (m *MockProvider) ListRepos(_ context.Context, _ string) ([]*Repository, error) { + repos := []*Repository{} + for _, candidateRepo := range m.Repos { + found := false + for _, alreadySetRepo := range repos { + if alreadySetRepo.Repository == 
candidateRepo.Repository { + found = true + break + } + } + if !found { + repos = append(repos, candidateRepo) + } + } + return repos, nil +} + +func (*MockProvider) RepoHasPath(_ context.Context, repo *Repository, path string) (bool, error) { + return path == repo.Repository, nil +} + +func (m *MockProvider) GetBranches(_ context.Context, repo *Repository) ([]*Repository, error) { + branchRepos := []*Repository{} + for _, candidateRepo := range m.Repos { + if candidateRepo.Repository == repo.Repository { + found := false + for _, alreadySetRepo := range branchRepos { + if alreadySetRepo.Branch == candidateRepo.Branch { + found = true + break + } + } + if !found { + branchRepos = append(branchRepos, candidateRepo) + } + } + + } + return branchRepos, nil +} diff --git a/applicationset/services/scm_provider/testdata/data.go b/applicationset/services/scm_provider/testdata/data.go new file mode 100644 index 0000000000000..1958177a04ad2 --- /dev/null +++ b/applicationset/services/scm_provider/testdata/data.go @@ -0,0 +1,6 @@ +package testdata + +import _ "embed" + +//go:embed repos_gitea_go-sdk_contents_gitea.json +var ReposGiteaGoSdkContentsGiteaResponse string diff --git a/applicationset/services/scm_provider/testdata/repos_gitea_go-sdk_contents_gitea.json b/applicationset/services/scm_provider/testdata/repos_gitea_go-sdk_contents_gitea.json new file mode 100644 index 0000000000000..752ad5bf026eb --- /dev/null +++ b/applicationset/services/scm_provider/testdata/repos_gitea_go-sdk_contents_gitea.json @@ -0,0 +1,2039 @@ +[ + { + "name": "admin_cron.go", + "path": "gitea/admin_cron.go", + "sha": "84316da2b141a493cd738b9d7f64480309d240a1", + "last_commit_sha": "6b6fdd91ce769bb4641084e15f76554fb841bf27", + "type": "file", + "size": 1390, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/admin_cron.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/admin_cron.go", 
+ "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/84316da2b141a493cd738b9d7f64480309d240a1", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/admin_cron.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/admin_cron.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/84316da2b141a493cd738b9d7f64480309d240a1", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/admin_cron.go" + } + }, + { + "name": "admin_org.go", + "path": "gitea/admin_org.go", + "sha": "26bf81fb4d9087978875c8f557237fd80a67d007", + "last_commit_sha": "6b6fdd91ce769bb4641084e15f76554fb841bf27", + "type": "file", + "size": 1215, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/admin_org.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/admin_org.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/26bf81fb4d9087978875c8f557237fd80a67d007", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/admin_org.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/admin_org.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/26bf81fb4d9087978875c8f557237fd80a67d007", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/admin_org.go" + } + }, + { + "name": "admin_repo.go", + "path": "gitea/admin_repo.go", + "sha": "8666690cde763762e23a0afe13fef4cb53024792", + "last_commit_sha": "6b6fdd91ce769bb4641084e15f76554fb841bf27", + "type": "file", + "size": 696, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/admin_repo.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/admin_repo.go", + "git_url": 
"https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/8666690cde763762e23a0afe13fef4cb53024792", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/admin_repo.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/admin_repo.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/8666690cde763762e23a0afe13fef4cb53024792", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/admin_repo.go" + } + }, + { + "name": "admin_test.go", + "path": "gitea/admin_test.go", + "sha": "e5a49e37f21b7f9526a0a620de100a27a4f960b2", + "last_commit_sha": "23e13163375a05e827c98e567e9b9fcfa086972b", + "type": "file", + "size": 1270, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/admin_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/admin_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/e5a49e37f21b7f9526a0a620de100a27a4f960b2", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/admin_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/admin_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/e5a49e37f21b7f9526a0a620de100a27a4f960b2", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/admin_test.go" + } + }, + { + "name": "admin_user.go", + "path": "gitea/admin_user.go", + "sha": "172f0645c9bb7c5179a9291b6527effaff4deeb1", + "last_commit_sha": "13d2d23dfc79d0b346589a7981569876da6917e6", + "type": "file", + "size": 4622, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/admin_user.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/admin_user.go", + "git_url": 
"https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/172f0645c9bb7c5179a9291b6527effaff4deeb1", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/admin_user.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/admin_user.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/172f0645c9bb7c5179a9291b6527effaff4deeb1", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/admin_user.go" + } + }, + { + "name": "agent_darwin.go", + "path": "gitea/agent_darwin.go", + "sha": "d6aeab245caa313e0290539853bff45cf511daaf", + "last_commit_sha": "e5f0c189f2c720238cdcea8aaa15eff33a901a68", + "type": "symlink", + "size": 14, + "encoding": null, + "content": null, + "target": "agent_linux.go", + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/agent_darwin.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/agent_darwin.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/d6aeab245caa313e0290539853bff45cf511daaf", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/agent_darwin.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/agent_darwin.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/d6aeab245caa313e0290539853bff45cf511daaf", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/agent_darwin.go" + } + }, + { + "name": "agent_linux.go", + "path": "gitea/agent_linux.go", + "sha": "a375525badefc0748af860745059cc87711ca9df", + "last_commit_sha": "e5f0c189f2c720238cdcea8aaa15eff33a901a68", + "type": "file", + "size": 708, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/agent_linux.go?ref=master", + "html_url": 
"https://gitea.com/gitea/go-sdk/src/branch/master/gitea/agent_linux.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/a375525badefc0748af860745059cc87711ca9df", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/agent_linux.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/agent_linux.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/a375525badefc0748af860745059cc87711ca9df", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/agent_linux.go" + } + }, + { + "name": "agent_windows.go", + "path": "gitea/agent_windows.go", + "sha": "865fa379c962fa407a8d7a9d0fb5840ea229c3bb", + "last_commit_sha": "e5f0c189f2c720238cdcea8aaa15eff33a901a68", + "type": "file", + "size": 538, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/agent_windows.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/agent_windows.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/865fa379c962fa407a8d7a9d0fb5840ea229c3bb", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/agent_windows.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/agent_windows.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/865fa379c962fa407a8d7a9d0fb5840ea229c3bb", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/agent_windows.go" + } + }, + { + "name": "attachment.go", + "path": "gitea/attachment.go", + "sha": "f02c721418739faee065740139e0ba066d7ea3d9", + "last_commit_sha": "8fab37e7407b734426a5a8aab31f464598b43706", + "type": "file", + "size": 3879, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/attachment.go?ref=master", + 
"html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/attachment.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/f02c721418739faee065740139e0ba066d7ea3d9", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/attachment.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/attachment.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/f02c721418739faee065740139e0ba066d7ea3d9", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/attachment.go" + } + }, + { + "name": "client.go", + "path": "gitea/client.go", + "sha": "b8c610bcc65f567ac6ae6868bb6fb08fbd00afd1", + "last_commit_sha": "cc14c63cccfaf871f9e684cabbd0d8adf26c1e58", + "type": "file", + "size": 10977, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/client.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/client.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/b8c610bcc65f567ac6ae6868bb6fb08fbd00afd1", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/client.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/client.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/b8c610bcc65f567ac6ae6868bb6fb08fbd00afd1", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/client.go" + } + }, + { + "name": "doc.go", + "path": "gitea/doc.go", + "sha": "6bd327db4623c07be1ac3c0b4a3a61acdae0c883", + "last_commit_sha": "0d0c73819d94d0cc14ee466d4170a068a2dbb818", + "type": "file", + "size": 213, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/doc.go?ref=master", + "html_url": 
"https://gitea.com/gitea/go-sdk/src/branch/master/gitea/doc.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/6bd327db4623c07be1ac3c0b4a3a61acdae0c883", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/doc.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/doc.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/6bd327db4623c07be1ac3c0b4a3a61acdae0c883", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/doc.go" + } + }, + { + "name": "fork.go", + "path": "gitea/fork.go", + "sha": "0373c352bc30396f5119a1d2185fa29f0a71e43d", + "last_commit_sha": "8fab37e7407b734426a5a8aab31f464598b43706", + "type": "file", + "size": 1537, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/fork.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/fork.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/0373c352bc30396f5119a1d2185fa29f0a71e43d", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/fork.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/fork.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/0373c352bc30396f5119a1d2185fa29f0a71e43d", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/fork.go" + } + }, + { + "name": "git_blob.go", + "path": "gitea/git_blob.go", + "sha": "7668672dc07e9584495a86b475dba5ad76078f8e", + "last_commit_sha": "6b6fdd91ce769bb4641084e15f76554fb841bf27", + "type": "file", + "size": 834, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/git_blob.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/git_blob.go", + "git_url": 
"https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/7668672dc07e9584495a86b475dba5ad76078f8e", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/git_blob.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/git_blob.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/7668672dc07e9584495a86b475dba5ad76078f8e", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/git_blob.go" + } + }, + { + "name": "git_hook.go", + "path": "gitea/git_hook.go", + "sha": "d8fbf71bd964052dddae752ab2a5448b3dc0c62e", + "last_commit_sha": "6b6fdd91ce769bb4641084e15f76554fb841bf27", + "type": "file", + "size": 2369, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/git_hook.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/git_hook.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/d8fbf71bd964052dddae752ab2a5448b3dc0c62e", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/git_hook.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/git_hook.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/d8fbf71bd964052dddae752ab2a5448b3dc0c62e", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/git_hook.go" + } + }, + { + "name": "go.mod", + "path": "gitea/go.mod", + "sha": "f549458bd761942824833d19ea77d8cbd1059fc9", + "last_commit_sha": "e5f0c189f2c720238cdcea8aaa15eff33a901a68", + "type": "file", + "size": 219, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/go.mod?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/go.mod", + "git_url": 
"https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/f549458bd761942824833d19ea77d8cbd1059fc9", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/go.mod", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/go.mod?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/f549458bd761942824833d19ea77d8cbd1059fc9", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/go.mod" + } + }, + { + "name": "go.sum", + "path": "gitea/go.sum", + "sha": "419cfb963daeb05584d1a24603a1dcf65a185671", + "last_commit_sha": "e5f0c189f2c720238cdcea8aaa15eff33a901a68", + "type": "file", + "size": 3134, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/go.sum?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/go.sum", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/419cfb963daeb05584d1a24603a1dcf65a185671", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/go.sum", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/go.sum?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/419cfb963daeb05584d1a24603a1dcf65a185671", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/go.sum" + } + }, + { + "name": "helper.go", + "path": "gitea/helper.go", + "sha": "ff8038b1285c38f32acb9316c2ff6dea47af0254", + "last_commit_sha": "aa13606bc631b708eff7b5233ba3ae9335b715f9", + "type": "file", + "size": 492, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/helper.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/helper.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/ff8038b1285c38f32acb9316c2ff6dea47af0254", + 
"download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/helper.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/helper.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/ff8038b1285c38f32acb9316c2ff6dea47af0254", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/helper.go" + } + }, + { + "name": "hook.go", + "path": "gitea/hook.go", + "sha": "67a7518b68da21ceb590c3ad161ea1d99b27c980", + "last_commit_sha": "13d2d23dfc79d0b346589a7981569876da6917e6", + "type": "file", + "size": 6675, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/hook.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/hook.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/67a7518b68da21ceb590c3ad161ea1d99b27c980", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/hook.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/hook.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/67a7518b68da21ceb590c3ad161ea1d99b27c980", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/hook.go" + } + }, + { + "name": "hook_validate.go", + "path": "gitea/hook_validate.go", + "sha": "5ef614938170e753d39cfc999252ce3978e93a42", + "last_commit_sha": "321bd56d939c37dfc3a6e455a02c3672a5f31ff8", + "type": "file", + "size": 1628, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/hook_validate.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/hook_validate.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/5ef614938170e753d39cfc999252ce3978e93a42", + "download_url": 
"https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/hook_validate.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/hook_validate.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/5ef614938170e753d39cfc999252ce3978e93a42", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/hook_validate.go" + } + }, + { + "name": "hook_validate_test.go", + "path": "gitea/hook_validate_test.go", + "sha": "946d4c28f5a05da545df953eb3c557edc17103fe", + "last_commit_sha": "321bd56d939c37dfc3a6e455a02c3672a5f31ff8", + "type": "file", + "size": 3116, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/hook_validate_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/hook_validate_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/946d4c28f5a05da545df953eb3c557edc17103fe", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/hook_validate_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/hook_validate_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/946d4c28f5a05da545df953eb3c557edc17103fe", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/hook_validate_test.go" + } + }, + { + "name": "httpsign.go", + "path": "gitea/httpsign.go", + "sha": "49b005954e5e5171ebd5dd4887ee8bbd7bc7b9bb", + "last_commit_sha": "e5f0c189f2c720238cdcea8aaa15eff33a901a68", + "type": "file", + "size": 6373, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/httpsign.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/httpsign.go", + "git_url": 
"https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/49b005954e5e5171ebd5dd4887ee8bbd7bc7b9bb", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/httpsign.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/httpsign.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/49b005954e5e5171ebd5dd4887ee8bbd7bc7b9bb", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/httpsign.go" + } + }, + { + "name": "issue.go", + "path": "gitea/issue.go", + "sha": "660e7c5130bf9c62da9958d00abbb6bbd05106a9", + "last_commit_sha": "559cc2fb2a8ac8071aa1389df753d912c69f83ef", + "type": "file", + "size": 8914, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/issue.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/issue.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/660e7c5130bf9c62da9958d00abbb6bbd05106a9", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/issue.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/issue.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/660e7c5130bf9c62da9958d00abbb6bbd05106a9", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/issue.go" + } + }, + { + "name": "issue_comment.go", + "path": "gitea/issue_comment.go", + "sha": "8131a6edc279c96e9c72143533d257681f96f061", + "last_commit_sha": "6b6fdd91ce769bb4641084e15f76554fb841bf27", + "type": "file", + "size": 5177, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/issue_comment.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/issue_comment.go", + "git_url": 
"https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/8131a6edc279c96e9c72143533d257681f96f061", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/issue_comment.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/issue_comment.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/8131a6edc279c96e9c72143533d257681f96f061", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/issue_comment.go" + } + }, + { + "name": "issue_comment_test.go", + "path": "gitea/issue_comment_test.go", + "sha": "10a96d098b1d151a72e700f4cb977f70aa2f3a7c", + "last_commit_sha": "63e97e127ca1834e4faff8c84d25a7b668e84484", + "type": "file", + "size": 2855, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/issue_comment_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/issue_comment_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/10a96d098b1d151a72e700f4cb977f70aa2f3a7c", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/issue_comment_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/issue_comment_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/10a96d098b1d151a72e700f4cb977f70aa2f3a7c", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/issue_comment_test.go" + } + }, + { + "name": "issue_label.go", + "path": "gitea/issue_label.go", + "sha": "f343ee5ef6a529bfdc0ec70ae5728d81c5a7782b", + "last_commit_sha": "6b6fdd91ce769bb4641084e15f76554fb841bf27", + "type": "file", + "size": 6828, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/issue_label.go?ref=master", + "html_url": 
"https://gitea.com/gitea/go-sdk/src/branch/master/gitea/issue_label.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/f343ee5ef6a529bfdc0ec70ae5728d81c5a7782b", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/issue_label.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/issue_label.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/f343ee5ef6a529bfdc0ec70ae5728d81c5a7782b", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/issue_label.go" + } + }, + { + "name": "issue_label_test.go", + "path": "gitea/issue_label_test.go", + "sha": "d57736b72cfd254202798cfa55d5d5fb2b021d5c", + "last_commit_sha": "4debc6ca4b111808c3ad376eedd7ddef50d81d2d", + "type": "file", + "size": 4313, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/issue_label_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/issue_label_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/d57736b72cfd254202798cfa55d5d5fb2b021d5c", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/issue_label_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/issue_label_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/d57736b72cfd254202798cfa55d5d5fb2b021d5c", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/issue_label_test.go" + } + }, + { + "name": "issue_milestone.go", + "path": "gitea/issue_milestone.go", + "sha": "4e0d79a9f93e03f8c3cedddb39d474a32c2e5a70", + "last_commit_sha": "8fab37e7407b734426a5a8aab31f464598b43706", + "type": "file", + "size": 7777, + "encoding": null, + "content": null, + "target": null, + "url": 
"https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/issue_milestone.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/issue_milestone.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/4e0d79a9f93e03f8c3cedddb39d474a32c2e5a70", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/issue_milestone.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/issue_milestone.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/4e0d79a9f93e03f8c3cedddb39d474a32c2e5a70", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/issue_milestone.go" + } + }, + { + "name": "issue_milestone_test.go", + "path": "gitea/issue_milestone_test.go", + "sha": "e751689930f8034aecdf1189335b1acd1f063c5a", + "last_commit_sha": "8fab37e7407b734426a5a8aab31f464598b43706", + "type": "file", + "size": 2919, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/issue_milestone_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/issue_milestone_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/e751689930f8034aecdf1189335b1acd1f063c5a", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/issue_milestone_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/issue_milestone_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/e751689930f8034aecdf1189335b1acd1f063c5a", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/issue_milestone_test.go" + } + }, + { + "name": "issue_reaction.go", + "path": "gitea/issue_reaction.go", + "sha": "b45c0666466af2ce596803aebcdc7c1caabdfa91", + "last_commit_sha": "6b6fdd91ce769bb4641084e15f76554fb841bf27", + "type": 
"file", + "size": 3831, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/issue_reaction.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/issue_reaction.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/b45c0666466af2ce596803aebcdc7c1caabdfa91", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/issue_reaction.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/issue_reaction.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/b45c0666466af2ce596803aebcdc7c1caabdfa91", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/issue_reaction.go" + } + }, + { + "name": "issue_stopwatch.go", + "path": "gitea/issue_stopwatch.go", + "sha": "ebb0b8ae4042a168ae02fa007fd26e762f2b961b", + "last_commit_sha": "6b6fdd91ce769bb4641084e15f76554fb841bf27", + "type": "file", + "size": 2091, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/issue_stopwatch.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/issue_stopwatch.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/ebb0b8ae4042a168ae02fa007fd26e762f2b961b", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/issue_stopwatch.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/issue_stopwatch.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/ebb0b8ae4042a168ae02fa007fd26e762f2b961b", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/issue_stopwatch.go" + } + }, + { + "name": "issue_subscription.go", + "path": "gitea/issue_subscription.go", + "sha": "86853c718683cb1e22fcbdf2c49fb99785ece2e5", + 
"last_commit_sha": "6b6fdd91ce769bb4641084e15f76554fb841bf27", + "type": "file", + "size": 3059, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/issue_subscription.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/issue_subscription.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/86853c718683cb1e22fcbdf2c49fb99785ece2e5", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/issue_subscription.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/issue_subscription.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/86853c718683cb1e22fcbdf2c49fb99785ece2e5", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/issue_subscription.go" + } + }, + { + "name": "issue_subscription_test.go", + "path": "gitea/issue_subscription_test.go", + "sha": "585e9f3da8c189832dc00fac1c84c17fe22e0789", + "last_commit_sha": "688ee1978eddf7d0721b4a87bb64e5f35292a66e", + "type": "file", + "size": 1646, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/issue_subscription_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/issue_subscription_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/585e9f3da8c189832dc00fac1c84c17fe22e0789", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/issue_subscription_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/issue_subscription_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/585e9f3da8c189832dc00fac1c84c17fe22e0789", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/issue_subscription_test.go" + } + }, + 
{ + "name": "issue_test.go", + "path": "gitea/issue_test.go", + "sha": "9db6fd8f0dd9b8ede88f5d836adefa7763c93c1f", + "last_commit_sha": "603e4358f80d195dfe4d1c677716096df95656ce", + "type": "file", + "size": 4878, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/issue_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/issue_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/9db6fd8f0dd9b8ede88f5d836adefa7763c93c1f", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/issue_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/issue_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/9db6fd8f0dd9b8ede88f5d836adefa7763c93c1f", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/issue_test.go" + } + }, + { + "name": "issue_tracked_time.go", + "path": "gitea/issue_tracked_time.go", + "sha": "c558516237db0ec901e47baefb15e97955a3399c", + "last_commit_sha": "6b6fdd91ce769bb4641084e15f76554fb841bf27", + "type": "file", + "size": 4541, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/issue_tracked_time.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/issue_tracked_time.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/c558516237db0ec901e47baefb15e97955a3399c", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/issue_tracked_time.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/issue_tracked_time.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/c558516237db0ec901e47baefb15e97955a3399c", + "html": 
"https://gitea.com/gitea/go-sdk/src/branch/master/gitea/issue_tracked_time.go" + } + }, + { + "name": "list_options.go", + "path": "gitea/list_options.go", + "sha": "fb1aff4e8321b87a990c7f7e742dd7d12877ed9d", + "last_commit_sha": "223f0a75e02d6c7545f80f588436f950d270b58d", + "type": "file", + "size": 1086, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/list_options.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/list_options.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/fb1aff4e8321b87a990c7f7e742dd7d12877ed9d", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/list_options.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/list_options.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/fb1aff4e8321b87a990c7f7e742dd7d12877ed9d", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/list_options.go" + } + }, + { + "name": "main_test.go", + "path": "gitea/main_test.go", + "sha": "40c81e52c994577d0eb3de8b996f4d19cafebfcf", + "last_commit_sha": "8fab37e7407b734426a5a8aab31f464598b43706", + "type": "file", + "size": 3628, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/main_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/main_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/40c81e52c994577d0eb3de8b996f4d19cafebfcf", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/main_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/main_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/40c81e52c994577d0eb3de8b996f4d19cafebfcf", + 
"html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/main_test.go" + } + }, + { + "name": "notifications.go", + "path": "gitea/notifications.go", + "sha": "640cc4daecefb7f68d71b49c3d1836af61dadcc5", + "last_commit_sha": "99a9de3172a04322ce7d59e5ff30de3025bdd949", + "type": "file", + "size": 9164, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/notifications.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/notifications.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/640cc4daecefb7f68d71b49c3d1836af61dadcc5", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/notifications.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/notifications.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/640cc4daecefb7f68d71b49c3d1836af61dadcc5", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/notifications.go" + } + }, + { + "name": "notifications_test.go", + "path": "gitea/notifications_test.go", + "sha": "bdaffdc79c40cce2e2f63e7ec222f68399903b4a", + "last_commit_sha": "99a9de3172a04322ce7d59e5ff30de3025bdd949", + "type": "file", + "size": 4688, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/notifications_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/notifications_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/bdaffdc79c40cce2e2f63e7ec222f68399903b4a", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/notifications_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/notifications_test.go?ref=master", + "git": 
"https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/bdaffdc79c40cce2e2f63e7ec222f68399903b4a", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/notifications_test.go" + } + }, + { + "name": "oauth2.go", + "path": "gitea/oauth2.go", + "sha": "bbdfdafb671bebe338c80dfde624931f075d52e8", + "last_commit_sha": "9c81fa936f743956e75496a8610997c10c1b2b42", + "type": "file", + "size": 3110, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/oauth2.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/oauth2.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/bbdfdafb671bebe338c80dfde624931f075d52e8", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/oauth2.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/oauth2.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/bbdfdafb671bebe338c80dfde624931f075d52e8", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/oauth2.go" + } + }, + { + "name": "oauth2_test.go", + "path": "gitea/oauth2_test.go", + "sha": "e18c760601faf7a122fd68d2aec4dbcfe3ff73fe", + "last_commit_sha": "688ee1978eddf7d0721b4a87bb64e5f35292a66e", + "type": "file", + "size": 1287, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/oauth2_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/oauth2_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/e18c760601faf7a122fd68d2aec4dbcfe3ff73fe", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/oauth2_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/oauth2_test.go?ref=master", + "git": 
"https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/e18c760601faf7a122fd68d2aec4dbcfe3ff73fe", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/oauth2_test.go" + } + }, + { + "name": "org.go", + "path": "gitea/org.go", + "sha": "82e1bf540d9f84975531d06c8c3d6aaa4cd4da93", + "last_commit_sha": "13d2d23dfc79d0b346589a7981569876da6917e6", + "type": "file", + "size": 4958, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/org.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/org.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/82e1bf540d9f84975531d06c8c3d6aaa4cd4da93", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/org.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/org.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/82e1bf540d9f84975531d06c8c3d6aaa4cd4da93", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/org.go" + } + }, + { + "name": "org_member.go", + "path": "gitea/org_member.go", + "sha": "79dad4e87c65bea975e01bdbbbc1a9a703f7007f", + "last_commit_sha": "de34275bb64efe8baccdd54ee1a337202d4364ce", + "type": "file", + "size": 4540, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/org_member.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/org_member.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/79dad4e87c65bea975e01bdbbbc1a9a703f7007f", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/org_member.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/org_member.go?ref=master", + "git": 
"https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/79dad4e87c65bea975e01bdbbbc1a9a703f7007f", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/org_member.go" + } + }, + { + "name": "org_member_test.go", + "path": "gitea/org_member_test.go", + "sha": "e017e1d06ae8774da273bc2035468caed48c44ea", + "last_commit_sha": "de34275bb64efe8baccdd54ee1a337202d4364ce", + "type": "file", + "size": 1954, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/org_member_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/org_member_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/e017e1d06ae8774da273bc2035468caed48c44ea", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/org_member_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/org_member_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/e017e1d06ae8774da273bc2035468caed48c44ea", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/org_member_test.go" + } + }, + { + "name": "org_team.go", + "path": "gitea/org_team.go", + "sha": "e4de31e05ae328b098a05be02bce3e105e382b9d", + "last_commit_sha": "8fab37e7407b734426a5a8aab31f464598b43706", + "type": "file", + "size": 9809, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/org_team.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/org_team.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/e4de31e05ae328b098a05be02bce3e105e382b9d", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/org_team.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/org_team.go?ref=master", 
+ "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/e4de31e05ae328b098a05be02bce3e105e382b9d", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/org_team.go" + } + }, + { + "name": "org_team_test.go", + "path": "gitea/org_team_test.go", + "sha": "1869182b2dff09a8a0ca5ed9fcdb2152c8efcceb", + "last_commit_sha": "468d48c978d64da85819dc2f6cc5a4b1c8b95303", + "type": "file", + "size": 1471, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/org_team_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/org_team_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/1869182b2dff09a8a0ca5ed9fcdb2152c8efcceb", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/org_team_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/org_team_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/1869182b2dff09a8a0ca5ed9fcdb2152c8efcceb", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/org_team_test.go" + } + }, + { + "name": "org_test.go", + "path": "gitea/org_test.go", + "sha": "93a1ba75713ecca6bb45ed55c210acf4e98726b1", + "last_commit_sha": "b81847d03d3f5dc546807e179f32b604370141ab", + "type": "file", + "size": 1124, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/org_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/org_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/93a1ba75713ecca6bb45ed55c210acf4e98726b1", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/org_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/org_test.go?ref=master", + 
"git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/93a1ba75713ecca6bb45ed55c210acf4e98726b1", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/org_test.go" + } + }, + { + "name": "pull.go", + "path": "gitea/pull.go", + "sha": "e277847976462bee899c84dba87acf841a44e57e", + "last_commit_sha": "8fab37e7407b734426a5a8aab31f464598b43706", + "type": "file", + "size": 11902, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/pull.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/pull.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/e277847976462bee899c84dba87acf841a44e57e", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/pull.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/pull.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/e277847976462bee899c84dba87acf841a44e57e", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/pull.go" + } + }, + { + "name": "pull_review.go", + "path": "gitea/pull_review.go", + "sha": "6d32c4f12468bfa8bf4e22f7995806361f42dc93", + "last_commit_sha": "319a978c6c717c754717ceccf8c784f07ba9b582", + "type": "file", + "size": 10930, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/pull_review.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/pull_review.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/6d32c4f12468bfa8bf4e22f7995806361f42dc93", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/pull_review.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/pull_review.go?ref=master", + "git": 
"https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/6d32c4f12468bfa8bf4e22f7995806361f42dc93", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/pull_review.go" + } + }, + { + "name": "pull_review_test.go", + "path": "gitea/pull_review_test.go", + "sha": "949ab9d3ea868be7db94fb8fc021870ec2a8203e", + "last_commit_sha": "8fab37e7407b734426a5a8aab31f464598b43706", + "type": "file", + "size": 7151, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/pull_review_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/pull_review_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/949ab9d3ea868be7db94fb8fc021870ec2a8203e", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/pull_review_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/pull_review_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/949ab9d3ea868be7db94fb8fc021870ec2a8203e", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/pull_review_test.go" + } + }, + { + "name": "pull_test.go", + "path": "gitea/pull_test.go", + "sha": "f3b31b99b91caed430eef14e8c4d74c96a9668c7", + "last_commit_sha": "8fab37e7407b734426a5a8aab31f464598b43706", + "type": "file", + "size": 6596, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/pull_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/pull_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/f3b31b99b91caed430eef14e8c4d74c96a9668c7", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/pull_test.go", + "submodule_git_url": null, + "_links": { + "self": 
"https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/pull_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/f3b31b99b91caed430eef14e8c4d74c96a9668c7", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/pull_test.go" + } + }, + { + "name": "release.go", + "path": "gitea/release.go", + "sha": "3200f2034309912f0e73669680181da5274c29eb", + "last_commit_sha": "8fab37e7407b734426a5a8aab31f464598b43706", + "type": "file", + "size": 6240, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/release.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/release.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/3200f2034309912f0e73669680181da5274c29eb", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/release.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/release.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/3200f2034309912f0e73669680181da5274c29eb", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/release.go" + } + }, + { + "name": "release_test.go", + "path": "gitea/release_test.go", + "sha": "450fe8f54f4e7563dd6b0beb3308715291d3d099", + "last_commit_sha": "603e4358f80d195dfe4d1c677716096df95656ce", + "type": "file", + "size": 3805, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/release_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/release_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/450fe8f54f4e7563dd6b0beb3308715291d3d099", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/release_test.go", + "submodule_git_url": null, + "_links": { + "self": 
"https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/release_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/450fe8f54f4e7563dd6b0beb3308715291d3d099", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/release_test.go" + } + }, + { + "name": "repo.go", + "path": "gitea/repo.go", + "sha": "8f0e346abef035f2b265b0918b3c7db976ac787c", + "last_commit_sha": "8fab37e7407b734426a5a8aab31f464598b43706", + "type": "file", + "size": 20691, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/8f0e346abef035f2b265b0918b3c7db976ac787c", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/8f0e346abef035f2b265b0918b3c7db976ac787c", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo.go" + } + }, + { + "name": "repo_branch.go", + "path": "gitea/repo_branch.go", + "sha": "0b7e873c4fa153d72ec3593ac07618363adf4e92", + "last_commit_sha": "6b6fdd91ce769bb4641084e15f76554fb841bf27", + "type": "file", + "size": 5098, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_branch.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_branch.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/0b7e873c4fa153d72ec3593ac07618363adf4e92", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_branch.go", + "submodule_git_url": null, + "_links": { + "self": 
"https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_branch.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/0b7e873c4fa153d72ec3593ac07618363adf4e92", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_branch.go" + } + }, + { + "name": "repo_branch_protection.go", + "path": "gitea/repo_branch_protection.go", + "sha": "22bd7b962954e5876e14f4d6887528fd3f009482", + "last_commit_sha": "6b6fdd91ce769bb4641084e15f76554fb841bf27", + "type": "file", + "size": 8401, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_branch_protection.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_branch_protection.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/22bd7b962954e5876e14f4d6887528fd3f009482", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_branch_protection.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_branch_protection.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/22bd7b962954e5876e14f4d6887528fd3f009482", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_branch_protection.go" + } + }, + { + "name": "repo_branch_test.go", + "path": "gitea/repo_branch_test.go", + "sha": "c4f3e0d0015761c0b910e1ad0043e353f0fdfbe7", + "last_commit_sha": "603e4358f80d195dfe4d1c677716096df95656ce", + "type": "file", + "size": 5553, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_branch_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_branch_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/c4f3e0d0015761c0b910e1ad0043e353f0fdfbe7", + "download_url": 
"https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_branch_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_branch_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/c4f3e0d0015761c0b910e1ad0043e353f0fdfbe7", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_branch_test.go" + } + }, + { + "name": "repo_collaborator.go", + "path": "gitea/repo_collaborator.go", + "sha": "ab730df1689467e28e3aa57901d531ba39770214", + "last_commit_sha": "8fab37e7407b734426a5a8aab31f464598b43706", + "type": "file", + "size": 4612, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_collaborator.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_collaborator.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/ab730df1689467e28e3aa57901d531ba39770214", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_collaborator.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_collaborator.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/ab730df1689467e28e3aa57901d531ba39770214", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_collaborator.go" + } + }, + { + "name": "repo_collaborator_test.go", + "path": "gitea/repo_collaborator_test.go", + "sha": "686021419cc2317ab5d3ef3bbce1591682c68a0c", + "last_commit_sha": "8fab37e7407b734426a5a8aab31f464598b43706", + "type": "file", + "size": 2117, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_collaborator_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_collaborator_test.go", + "git_url": 
"https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/686021419cc2317ab5d3ef3bbce1591682c68a0c", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_collaborator_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_collaborator_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/686021419cc2317ab5d3ef3bbce1591682c68a0c", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_collaborator_test.go" + } + }, + { + "name": "repo_commit.go", + "path": "gitea/repo_commit.go", + "sha": "62c0ab31c745c00b100d3ba804694d4d6ef55997", + "last_commit_sha": "f3ebdb8afe832310703fa4522e8ea586e69b7afb", + "type": "file", + "size": 4608, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_commit.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_commit.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/62c0ab31c745c00b100d3ba804694d4d6ef55997", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_commit.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_commit.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/62c0ab31c745c00b100d3ba804694d4d6ef55997", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_commit.go" + } + }, + { + "name": "repo_commit_test.go", + "path": "gitea/repo_commit_test.go", + "sha": "4369080ccdc52cbf51c1127546d251237d2d2b2b", + "last_commit_sha": "f3ebdb8afe832310703fa4522e8ea586e69b7afb", + "type": "file", + "size": 2391, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_commit_test.go?ref=master", + "html_url": 
"https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_commit_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/4369080ccdc52cbf51c1127546d251237d2d2b2b", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_commit_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_commit_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/4369080ccdc52cbf51c1127546d251237d2d2b2b", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_commit_test.go" + } + }, + { + "name": "repo_file.go", + "path": "gitea/repo_file.go", + "sha": "bcba705129c377cf381a25d12331812b58685a35", + "last_commit_sha": "cc14c63cccfaf871f9e684cabbd0d8adf26c1e58", + "type": "file", + "size": 10282, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_file.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_file.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/bcba705129c377cf381a25d12331812b58685a35", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_file.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_file.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/bcba705129c377cf381a25d12331812b58685a35", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_file.go" + } + }, + { + "name": "repo_file_test.go", + "path": "gitea/repo_file_test.go", + "sha": "32c0137260562b0c35039aff8b443539dfc3dec9", + "last_commit_sha": "cc14c63cccfaf871f9e684cabbd0d8adf26c1e58", + "type": "file", + "size": 4207, + "encoding": null, + "content": null, + "target": null, + "url": 
"https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_file_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_file_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/32c0137260562b0c35039aff8b443539dfc3dec9", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_file_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_file_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/32c0137260562b0c35039aff8b443539dfc3dec9", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_file_test.go" + } + }, + { + "name": "repo_key.go", + "path": "gitea/repo_key.go", + "sha": "ee2ff4084e32c0d5f364cc111342749e4082a555", + "last_commit_sha": "6b6fdd91ce769bb4641084e15f76554fb841bf27", + "type": "file", + "size": 2946, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_key.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_key.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/ee2ff4084e32c0d5f364cc111342749e4082a555", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_key.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_key.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/ee2ff4084e32c0d5f364cc111342749e4082a555", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_key.go" + } + }, + { + "name": "repo_migrate.go", + "path": "gitea/repo_migrate.go", + "sha": "3ab690e53e0cc4b19d1bddcb119d3f3167ac3c2b", + "last_commit_sha": "8fab37e7407b734426a5a8aab31f464598b43706", + "type": "file", + "size": 4364, + "encoding": null, + "content": null, + "target": null, + "url": 
"https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_migrate.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_migrate.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/3ab690e53e0cc4b19d1bddcb119d3f3167ac3c2b", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_migrate.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_migrate.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/3ab690e53e0cc4b19d1bddcb119d3f3167ac3c2b", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_migrate.go" + } + }, + { + "name": "repo_refs.go", + "path": "gitea/repo_refs.go", + "sha": "c954a80ef2f300a60a10b4941f2a76de203ad0a0", + "last_commit_sha": "6b6fdd91ce769bb4641084e15f76554fb841bf27", + "type": "file", + "size": 2206, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_refs.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_refs.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/c954a80ef2f300a60a10b4941f2a76de203ad0a0", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_refs.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_refs.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/c954a80ef2f300a60a10b4941f2a76de203ad0a0", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_refs.go" + } + }, + { + "name": "repo_stars.go", + "path": "gitea/repo_stars.go", + "sha": "01243c2505d3adf745a20dbfffad1f6bb9871a9f", + "last_commit_sha": "6b6fdd91ce769bb4641084e15f76554fb841bf27", + "type": "file", + "size": 3241, + "encoding": null, + "content": null, + "target": null, + "url": 
"https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_stars.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_stars.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/01243c2505d3adf745a20dbfffad1f6bb9871a9f", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_stars.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_stars.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/01243c2505d3adf745a20dbfffad1f6bb9871a9f", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_stars.go" + } + }, + { + "name": "repo_stars_test.go", + "path": "gitea/repo_stars_test.go", + "sha": "d677e8cc57e62826a39d6d101a9e6ed5824985f5", + "last_commit_sha": "63e97e127ca1834e4faff8c84d25a7b668e84484", + "type": "file", + "size": 1836, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_stars_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_stars_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/d677e8cc57e62826a39d6d101a9e6ed5824985f5", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_stars_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_stars_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/d677e8cc57e62826a39d6d101a9e6ed5824985f5", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_stars_test.go" + } + }, + { + "name": "repo_tag.go", + "path": "gitea/repo_tag.go", + "sha": "7317d3f3931e23b52d85642549e1d012bbd3199a", + "last_commit_sha": "f5cc003900ff51c62b4db8d3f0275c2349717fe9", + "type": "file", + "size": 4245, + "encoding": null, + "content": null, + 
"target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_tag.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_tag.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/7317d3f3931e23b52d85642549e1d012bbd3199a", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_tag.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_tag.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/7317d3f3931e23b52d85642549e1d012bbd3199a", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_tag.go" + } + }, + { + "name": "repo_tag_test.go", + "path": "gitea/repo_tag_test.go", + "sha": "62665311c135326142669e19228d69c0e6e6c1e1", + "last_commit_sha": "603e4358f80d195dfe4d1c677716096df95656ce", + "type": "file", + "size": 1818, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_tag_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_tag_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/62665311c135326142669e19228d69c0e6e6c1e1", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_tag_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_tag_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/62665311c135326142669e19228d69c0e6e6c1e1", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_tag_test.go" + } + }, + { + "name": "repo_team.go", + "path": "gitea/repo_team.go", + "sha": "b983d8748a7fa00a6256587ef7379637c6fa8610", + "last_commit_sha": "b81847d03d3f5dc546807e179f32b604370141ab", + "type": "file", + "size": 2289, + "encoding": null, + "content": null, + 
"target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_team.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_team.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/b983d8748a7fa00a6256587ef7379637c6fa8610", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_team.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_team.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/b983d8748a7fa00a6256587ef7379637c6fa8610", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_team.go" + } + }, + { + "name": "repo_team_test.go", + "path": "gitea/repo_team_test.go", + "sha": "b205a9f13d4c117090ba413e3ac5510b66015b0b", + "last_commit_sha": "b81847d03d3f5dc546807e179f32b604370141ab", + "type": "file", + "size": 2369, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_team_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_team_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/b205a9f13d4c117090ba413e3ac5510b66015b0b", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_team_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_team_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/b205a9f13d4c117090ba413e3ac5510b66015b0b", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_team_test.go" + } + }, + { + "name": "repo_template.go", + "path": "gitea/repo_template.go", + "sha": "8b689bea49e28c4925eab57a4c720dbb60b1c11c", + "last_commit_sha": "71d2bf01d1d8943f3b088c9cd0005caffd82f8b9", + "type": "file", + "size": 2108, + "encoding": null, + 
"content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_template.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_template.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/8b689bea49e28c4925eab57a4c720dbb60b1c11c", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_template.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_template.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/8b689bea49e28c4925eab57a4c720dbb60b1c11c", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_template.go" + } + }, + { + "name": "repo_template_test.go", + "path": "gitea/repo_template_test.go", + "sha": "3f8d891657ab834f0b1af5883db0f9b44ed8c683", + "last_commit_sha": "71d2bf01d1d8943f3b088c9cd0005caffd82f8b9", + "type": "file", + "size": 1407, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_template_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_template_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/3f8d891657ab834f0b1af5883db0f9b44ed8c683", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_template_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_template_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/3f8d891657ab834f0b1af5883db0f9b44ed8c683", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_template_test.go" + } + }, + { + "name": "repo_test.go", + "path": "gitea/repo_test.go", + "sha": "3f3ebc888dab5e40d20ca6ea66f9a8e50eb86ec4", + "last_commit_sha": "603e4358f80d195dfe4d1c677716096df95656ce", 
+ "type": "file", + "size": 5606, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/3f3ebc888dab5e40d20ca6ea66f9a8e50eb86ec4", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/3f3ebc888dab5e40d20ca6ea66f9a8e50eb86ec4", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_test.go" + } + }, + { + "name": "repo_topics.go", + "path": "gitea/repo_topics.go", + "sha": "92f2228cd9327f93539d3119ca5d51f8e79b1211", + "last_commit_sha": "6b6fdd91ce769bb4641084e15f76554fb841bf27", + "type": "file", + "size": 2133, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_topics.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_topics.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/92f2228cd9327f93539d3119ca5d51f8e79b1211", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_topics.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_topics.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/92f2228cd9327f93539d3119ca5d51f8e79b1211", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_topics.go" + } + }, + { + "name": "repo_topics_test.go", + "path": "gitea/repo_topics_test.go", + "sha": "837e4d7905fd56d943b67389922fa774350fa727", + "last_commit_sha": "688ee1978eddf7d0721b4a87bb64e5f35292a66e", 
+ "type": "file", + "size": 1617, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_topics_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_topics_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/837e4d7905fd56d943b67389922fa774350fa727", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_topics_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_topics_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/837e4d7905fd56d943b67389922fa774350fa727", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_topics_test.go" + } + }, + { + "name": "repo_transfer.go", + "path": "gitea/repo_transfer.go", + "sha": "534a6e42c8aa298d0248c0f8b5b1d1975a12e3f8", + "last_commit_sha": "ad3580e44d67674e31d4b84757591990226200c2", + "type": "file", + "size": 2212, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_transfer.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_transfer.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/534a6e42c8aa298d0248c0f8b5b1d1975a12e3f8", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_transfer.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_transfer.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/534a6e42c8aa298d0248c0f8b5b1d1975a12e3f8", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_transfer.go" + } + }, + { + "name": "repo_transfer_test.go", + "path": "gitea/repo_transfer_test.go", + "sha": "7ec0694b85b69340c1ace94ed61e3ecf79451056", + 
"last_commit_sha": "603e4358f80d195dfe4d1c677716096df95656ce", + "type": "file", + "size": 1264, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_transfer_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_transfer_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/7ec0694b85b69340c1ace94ed61e3ecf79451056", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_transfer_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_transfer_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/7ec0694b85b69340c1ace94ed61e3ecf79451056", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_transfer_test.go" + } + }, + { + "name": "repo_tree.go", + "path": "gitea/repo_tree.go", + "sha": "c9ffc55e30b01a79b7acb3ddc3a359c7ff712ba0", + "last_commit_sha": "8fab37e7407b734426a5a8aab31f464598b43706", + "type": "file", + "size": 1291, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_tree.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_tree.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/c9ffc55e30b01a79b7acb3ddc3a359c7ff712ba0", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_tree.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_tree.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/c9ffc55e30b01a79b7acb3ddc3a359c7ff712ba0", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_tree.go" + } + }, + { + "name": "repo_watch.go", + "path": "gitea/repo_watch.go", + "sha": 
"c36ca6403d9c4fb455710e87e63901b16288fa22", + "last_commit_sha": "8fab37e7407b734426a5a8aab31f464598b43706", + "type": "file", + "size": 2783, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_watch.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_watch.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/c36ca6403d9c4fb455710e87e63901b16288fa22", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_watch.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_watch.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/c36ca6403d9c4fb455710e87e63901b16288fa22", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_watch.go" + } + }, + { + "name": "repo_watch_test.go", + "path": "gitea/repo_watch_test.go", + "sha": "8e74d7bac4b7a7171de1218910d66b5e220b8c52", + "last_commit_sha": "8fab37e7407b734426a5a8aab31f464598b43706", + "type": "file", + "size": 1369, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_watch_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_watch_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/8e74d7bac4b7a7171de1218910d66b5e220b8c52", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/repo_watch_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/repo_watch_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/8e74d7bac4b7a7171de1218910d66b5e220b8c52", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/repo_watch_test.go" + } + }, + { + "name": "settings.go", + "path": 
"gitea/settings.go", + "sha": "fb94248f63fcec16e97f951710ff38658cea1086", + "last_commit_sha": "13d2d23dfc79d0b346589a7981569876da6917e6", + "type": "file", + "size": 3204, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/settings.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/settings.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/fb94248f63fcec16e97f951710ff38658cea1086", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/settings.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/settings.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/fb94248f63fcec16e97f951710ff38658cea1086", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/settings.go" + } + }, + { + "name": "settings_test.go", + "path": "gitea/settings_test.go", + "sha": "e025464c00128a019d45bd32ea139f62a2227645", + "last_commit_sha": "cc14c63cccfaf871f9e684cabbd0d8adf26c1e58", + "type": "file", + "size": 1389, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/settings_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/settings_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/e025464c00128a019d45bd32ea139f62a2227645", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/settings_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/settings_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/e025464c00128a019d45bd32ea139f62a2227645", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/settings_test.go" + } + }, + { + "name": "status.go", + "path": 
"gitea/status.go", + "sha": "fe5d9711f98bf3f66dff50fd3871c6b5c2643993", + "last_commit_sha": "6b6fdd91ce769bb4641084e15f76554fb841bf27", + "type": "file", + "size": 3797, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/status.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/status.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/fe5d9711f98bf3f66dff50fd3871c6b5c2643993", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/status.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/status.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/fe5d9711f98bf3f66dff50fd3871c6b5c2643993", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/status.go" + } + }, + { + "name": "status_test.go", + "path": "gitea/status_test.go", + "sha": "a1f78d57564c8c780886d0cdf0007267dbaf5145", + "last_commit_sha": "8fab37e7407b734426a5a8aab31f464598b43706", + "type": "file", + "size": 2631, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/status_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/status_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/a1f78d57564c8c780886d0cdf0007267dbaf5145", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/status_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/status_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/a1f78d57564c8c780886d0cdf0007267dbaf5145", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/status_test.go" + } + }, + { + "name": "user.go", + "path": "gitea/user.go", + "sha": 
"67208fb06389c91cd5338dac3dccb3ffffd49cb6", + "last_commit_sha": "8fab37e7407b734426a5a8aab31f464598b43706", + "type": "file", + "size": 2406, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/user.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/user.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/67208fb06389c91cd5338dac3dccb3ffffd49cb6", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/user.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/user.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/67208fb06389c91cd5338dac3dccb3ffffd49cb6", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/user.go" + } + }, + { + "name": "user_app.go", + "path": "gitea/user_app.go", + "sha": "eb3ef21c68da7e857b2ee41f28d8bf1af446895e", + "last_commit_sha": "8fab37e7407b734426a5a8aab31f464598b43706", + "type": "file", + "size": 2777, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/user_app.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/user_app.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/eb3ef21c68da7e857b2ee41f28d8bf1af446895e", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/user_app.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/user_app.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/eb3ef21c68da7e857b2ee41f28d8bf1af446895e", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/user_app.go" + } + }, + { + "name": "user_email.go", + "path": "gitea/user_email.go", + "sha": "4962b082b407e20d146c686f0d95a8393e38f5d2", + 
"last_commit_sha": "688ee1978eddf7d0721b4a87bb64e5f35292a66e", + "type": "file", + "size": 1833, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/user_email.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/user_email.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/4962b082b407e20d146c686f0d95a8393e38f5d2", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/user_email.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/user_email.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/4962b082b407e20d146c686f0d95a8393e38f5d2", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/user_email.go" + } + }, + { + "name": "user_follow.go", + "path": "gitea/user_follow.go", + "sha": "7bd340ca26668019b15346addf792c2e3f4ab150", + "last_commit_sha": "6b6fdd91ce769bb4641084e15f76554fb841bf27", + "type": "file", + "size": 3364, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/user_follow.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/user_follow.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/7bd340ca26668019b15346addf792c2e3f4ab150", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/user_follow.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/user_follow.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/7bd340ca26668019b15346addf792c2e3f4ab150", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/user_follow.go" + } + }, + { + "name": "user_gpgkey.go", + "path": "gitea/user_gpgkey.go", + "sha": "6c1b9d10f345a8277ceddb29e7f47448d9923421", + 
"last_commit_sha": "6b6fdd91ce769bb4641084e15f76554fb841bf27", + "type": "file", + "size": 3031, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/user_gpgkey.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/user_gpgkey.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/6c1b9d10f345a8277ceddb29e7f47448d9923421", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/user_gpgkey.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/user_gpgkey.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/6c1b9d10f345a8277ceddb29e7f47448d9923421", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/user_gpgkey.go" + } + }, + { + "name": "user_key.go", + "path": "gitea/user_key.go", + "sha": "02795baefc6567a69d1be2f7bc0527b7784e5140", + "last_commit_sha": "6b6fdd91ce769bb4641084e15f76554fb841bf27", + "type": "file", + "size": 2830, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/user_key.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/user_key.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/02795baefc6567a69d1be2f7bc0527b7784e5140", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/user_key.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/user_key.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/02795baefc6567a69d1be2f7bc0527b7784e5140", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/user_key.go" + } + }, + { + "name": "user_search.go", + "path": "gitea/user_search.go", + "sha": "5ea0c45f815aead8c468f030929a8ae392ee014a", + "last_commit_sha": 
"e11a4f7f3bdb5251a25f754125887c88f88f2f63", + "type": "file", + "size": 1209, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/user_search.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/user_search.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/5ea0c45f815aead8c468f030929a8ae392ee014a", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/user_search.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/user_search.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/5ea0c45f815aead8c468f030929a8ae392ee014a", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/user_search.go" + } + }, + { + "name": "user_settings.go", + "path": "gitea/user_settings.go", + "sha": "494cab3629784a6e9e65586a71773b644f5aff1f", + "last_commit_sha": "79f379313cf9a2481e16c7802457b58151a8211b", + "type": "file", + "size": 2108, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/user_settings.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/user_settings.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/494cab3629784a6e9e65586a71773b644f5aff1f", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/user_settings.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/user_settings.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/494cab3629784a6e9e65586a71773b644f5aff1f", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/user_settings.go" + } + }, + { + "name": "user_settings_test.go", + "path": "gitea/user_settings_test.go", + "sha": 
"641089bd216515f338c9a0f4e94497d199a14324", + "last_commit_sha": "22f2853429d4355e16bc7ec16e6510a6acfc6b1e", + "type": "file", + "size": 1173, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/user_settings_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/user_settings_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/641089bd216515f338c9a0f4e94497d199a14324", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/user_settings_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/user_settings_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/641089bd216515f338c9a0f4e94497d199a14324", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/user_settings_test.go" + } + }, + { + "name": "user_test.go", + "path": "gitea/user_test.go", + "sha": "727fb1b8c676c825f1695341094853476b1cfd94", + "last_commit_sha": "22f2853429d4355e16bc7ec16e6510a6acfc6b1e", + "type": "file", + "size": 5850, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/user_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/user_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/727fb1b8c676c825f1695341094853476b1cfd94", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/user_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/user_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/727fb1b8c676c825f1695341094853476b1cfd94", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/user_test.go" + } + }, + { + "name": "version.go", + "path": 
"gitea/version.go", + "sha": "f11210117f363462e5cf97d1d92c00a316c3f9f2", + "last_commit_sha": "e5f0c189f2c720238cdcea8aaa15eff33a901a68", + "type": "file", + "size": 2974, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/version.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/version.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/f11210117f363462e5cf97d1d92c00a316c3f9f2", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/version.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/version.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/f11210117f363462e5cf97d1d92c00a316c3f9f2", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/version.go" + } + }, + { + "name": "version_test.go", + "path": "gitea/version_test.go", + "sha": "7383649c7abc2d6dce4fa236c58c42fcd7a5a81d", + "last_commit_sha": "635de1b8215ef584976bcee6067aa07a5a9be68d", + "type": "file", + "size": 956, + "encoding": null, + "content": null, + "target": null, + "url": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/version_test.go?ref=master", + "html_url": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/version_test.go", + "git_url": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/7383649c7abc2d6dce4fa236c58c42fcd7a5a81d", + "download_url": "https://gitea.com/gitea/go-sdk/raw/branch/master/gitea/version_test.go", + "submodule_git_url": null, + "_links": { + "self": "https://gitea.com/api/v1/repos/gitea/go-sdk/contents/gitea/version_test.go?ref=master", + "git": "https://gitea.com/api/v1/repos/gitea/go-sdk/git/blobs/7383649c7abc2d6dce4fa236c58c42fcd7a5a81d", + "html": "https://gitea.com/gitea/go-sdk/src/branch/master/gitea/version_test.go" + } + } +] diff --git 
a/applicationset/services/scm_provider/types.go b/applicationset/services/scm_provider/types.go new file mode 100644 index 0000000000000..dde6db03c7c27 --- /dev/null +++ b/applicationset/services/scm_provider/types.go @@ -0,0 +1,43 @@ +package scm_provider + +import ( + "context" + "regexp" +) + +// An abstract repository from an API provider. +type Repository struct { + Organization string + Repository string + URL string + Branch string + SHA string + Labels []string + RepositoryId interface{} +} + +type SCMProviderService interface { + ListRepos(context.Context, string) ([]*Repository, error) + RepoHasPath(context.Context, *Repository, string) (bool, error) + GetBranches(context.Context, *Repository) ([]*Repository, error) +} + +// A compiled version of SCMProviderGeneratorFilter for performance. +type Filter struct { + RepositoryMatch *regexp.Regexp + PathsExist []string + PathsDoNotExist []string + LabelMatch *regexp.Regexp + BranchMatch *regexp.Regexp + FilterType FilterType +} + +// A convenience type for indicating where to apply a filter +type FilterType int64 + +// The enum of filter types +const ( + FilterTypeUndefined FilterType = iota + FilterTypeBranch + FilterTypeRepo +) diff --git a/applicationset/services/scm_provider/utils.go b/applicationset/services/scm_provider/utils.go new file mode 100644 index 0000000000000..e92923f52707b --- /dev/null +++ b/applicationset/services/scm_provider/utils.go @@ -0,0 +1,182 @@ +package scm_provider + +import ( + "context" + "fmt" + "regexp" + "strings" + + argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +func compileFilters(filters []argoprojiov1alpha1.SCMProviderGeneratorFilter) ([]*Filter, error) { + outFilters := make([]*Filter, 0, len(filters)) + for _, filter := range filters { + outFilter := &Filter{} + var err error + if filter.RepositoryMatch != nil { + outFilter.RepositoryMatch, err = regexp.Compile(*filter.RepositoryMatch) + if err != nil { + return nil, 
fmt.Errorf("error compiling RepositoryMatch regexp %q: %v", *filter.RepositoryMatch, err) + } + outFilter.FilterType = FilterTypeRepo + } + if filter.LabelMatch != nil { + outFilter.LabelMatch, err = regexp.Compile(*filter.LabelMatch) + if err != nil { + return nil, fmt.Errorf("error compiling LabelMatch regexp %q: %v", *filter.LabelMatch, err) + } + outFilter.FilterType = FilterTypeRepo + } + if filter.PathsExist != nil { + outFilter.PathsExist = filter.PathsExist + outFilter.FilterType = FilterTypeBranch + } + if filter.PathsDoNotExist != nil { + outFilter.PathsDoNotExist = filter.PathsDoNotExist + outFilter.FilterType = FilterTypeBranch + } + if filter.BranchMatch != nil { + outFilter.BranchMatch, err = regexp.Compile(*filter.BranchMatch) + if err != nil { + return nil, fmt.Errorf("error compiling BranchMatch regexp %q: %v", *filter.BranchMatch, err) + } + outFilter.FilterType = FilterTypeBranch + } + outFilters = append(outFilters, outFilter) + } + return outFilters, nil +} + +func matchFilter(ctx context.Context, provider SCMProviderService, repo *Repository, filter *Filter) (bool, error) { + if filter.RepositoryMatch != nil && !filter.RepositoryMatch.MatchString(repo.Repository) { + return false, nil + } + + if filter.BranchMatch != nil && !filter.BranchMatch.MatchString(repo.Branch) { + return false, nil + } + + if filter.LabelMatch != nil { + found := false + for _, label := range repo.Labels { + if filter.LabelMatch.MatchString(label) { + found = true + break + } + } + if !found { + return false, nil + } + } + + if len(filter.PathsExist) != 0 { + for _, path := range filter.PathsExist { + path = strings.TrimRight(path, "/") + hasPath, err := provider.RepoHasPath(ctx, repo, path) + if err != nil { + return false, err + } + if !hasPath { + return false, nil + } + } + } + if len(filter.PathsDoNotExist) != 0 { + for _, path := range filter.PathsDoNotExist { + path = strings.TrimRight(path, "/") + hasPath, err := provider.RepoHasPath(ctx, repo, path) + if err 
!= nil { + return false, err + } + if hasPath { + return false, nil + } + } + } + + return true, nil +} + +func ListRepos(ctx context.Context, provider SCMProviderService, filters []argoprojiov1alpha1.SCMProviderGeneratorFilter, cloneProtocol string) ([]*Repository, error) { + compiledFilters, err := compileFilters(filters) + if err != nil { + return nil, err + } + repos, err := provider.ListRepos(ctx, cloneProtocol) + if err != nil { + return nil, err + } + repoFilters := getApplicableFilters(compiledFilters)[FilterTypeRepo] + if len(repoFilters) == 0 { + repos, err := getBranches(ctx, provider, repos, compiledFilters) + if err != nil { + return nil, err + } + return repos, nil + } + filteredRepos := make([]*Repository, 0, len(repos)) + for _, repo := range repos { + for _, filter := range repoFilters { + matches, err := matchFilter(ctx, provider, repo, filter) + if err != nil { + return nil, err + } + if matches { + filteredRepos = append(filteredRepos, repo) + break + } + } + } + + repos, err = getBranches(ctx, provider, filteredRepos, compiledFilters) + if err != nil { + return nil, err + } + return repos, nil +} + +func getBranches(ctx context.Context, provider SCMProviderService, repos []*Repository, compiledFilters []*Filter) ([]*Repository, error) { + reposWithBranches := []*Repository{} + for _, repo := range repos { + reposFilled, err := provider.GetBranches(ctx, repo) + if err != nil { + return nil, err + } + reposWithBranches = append(reposWithBranches, reposFilled...) 
+ } + branchFilters := getApplicableFilters(compiledFilters)[FilterTypeBranch] + if len(branchFilters) == 0 { + return reposWithBranches, nil + } + filteredRepos := make([]*Repository, 0, len(reposWithBranches)) + for _, repo := range reposWithBranches { + for _, filter := range branchFilters { + matches, err := matchFilter(ctx, provider, repo, filter) + if err != nil { + return nil, err + } + if matches { + filteredRepos = append(filteredRepos, repo) + break + } + } + } + return filteredRepos, nil +} + +// getApplicableFilters returns a map of filters separated by type. +func getApplicableFilters(filters []*Filter) map[FilterType][]*Filter { + filterMap := map[FilterType][]*Filter{ + FilterTypeBranch: {}, + FilterTypeRepo: {}, + } + for _, filter := range filters { + if filter.FilterType == FilterTypeBranch { + filterMap[FilterTypeBranch] = append(filterMap[FilterTypeBranch], filter) + } else if filter.FilterType == FilterTypeRepo { + filterMap[FilterTypeRepo] = append(filterMap[FilterTypeRepo], filter) + } + } + return filterMap +} diff --git a/applicationset/services/scm_provider/utils_test.go b/applicationset/services/scm_provider/utils_test.go new file mode 100644 index 0000000000000..5ef6d582f8d34 --- /dev/null +++ b/applicationset/services/scm_provider/utils_test.go @@ -0,0 +1,319 @@ +package scm_provider + +import ( + "context" + "regexp" + "testing" + + "github.com/stretchr/testify/assert" + + argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +func strp(s string) *string { + return &s +} + +func TestFilterRepoMatch(t *testing.T) { + provider := &MockProvider{ + Repos: []*Repository{ + { + Repository: "one", + }, + { + Repository: "two", + }, + { + Repository: "three", + }, + { + Repository: "four", + }, + }, + } + filters := []argoprojiov1alpha1.SCMProviderGeneratorFilter{ + { + RepositoryMatch: strp("n|hr"), + }, + } + repos, err := ListRepos(context.Background(), provider, filters, "") + assert.Nil(t, err) + 
assert.Len(t, repos, 2) + assert.Equal(t, "one", repos[0].Repository) + assert.Equal(t, "three", repos[1].Repository) +} + +func TestFilterLabelMatch(t *testing.T) { + provider := &MockProvider{ + Repos: []*Repository{ + { + Repository: "one", + Labels: []string{"prod-one", "prod-two", "staging"}, + }, + { + Repository: "two", + Labels: []string{"prod-two"}, + }, + { + Repository: "three", + Labels: []string{"staging"}, + }, + }, + } + filters := []argoprojiov1alpha1.SCMProviderGeneratorFilter{ + { + LabelMatch: strp("^prod-.*$"), + }, + } + repos, err := ListRepos(context.Background(), provider, filters, "") + assert.Nil(t, err) + assert.Len(t, repos, 2) + assert.Equal(t, "one", repos[0].Repository) + assert.Equal(t, "two", repos[1].Repository) +} + +func TestFilterPathExists(t *testing.T) { + provider := &MockProvider{ + Repos: []*Repository{ + { + Repository: "one", + }, + { + Repository: "two", + }, + { + Repository: "three", + }, + }, + } + filters := []argoprojiov1alpha1.SCMProviderGeneratorFilter{ + { + PathsExist: []string{"two"}, + }, + } + repos, err := ListRepos(context.Background(), provider, filters, "") + assert.Nil(t, err) + assert.Len(t, repos, 1) + assert.Equal(t, "two", repos[0].Repository) +} + +func TestFilterPathDoesntExists(t *testing.T) { + provider := &MockProvider{ + Repos: []*Repository{ + { + Repository: "one", + }, + { + Repository: "two", + }, + { + Repository: "three", + }, + }, + } + filters := []argoprojiov1alpha1.SCMProviderGeneratorFilter{ + { + PathsDoNotExist: []string{"two"}, + }, + } + repos, err := ListRepos(context.Background(), provider, filters, "") + assert.Nil(t, err) + assert.Len(t, repos, 2) +} +func TestFilterRepoMatchBadRegexp(t *testing.T) { + provider := &MockProvider{ + Repos: []*Repository{ + { + Repository: "one", + }, + }, + } + filters := []argoprojiov1alpha1.SCMProviderGeneratorFilter{ + { + RepositoryMatch: strp("("), + }, + } + _, err := ListRepos(context.Background(), provider, filters, "") + 
assert.NotNil(t, err) +} + +func TestFilterLabelMatchBadRegexp(t *testing.T) { + provider := &MockProvider{ + Repos: []*Repository{ + { + Repository: "one", + }, + }, + } + filters := []argoprojiov1alpha1.SCMProviderGeneratorFilter{ + { + LabelMatch: strp("("), + }, + } + _, err := ListRepos(context.Background(), provider, filters, "") + assert.NotNil(t, err) +} + +func TestFilterBranchMatch(t *testing.T) { + provider := &MockProvider{ + Repos: []*Repository{ + { + Repository: "one", + Branch: "one", + }, + { + Repository: "one", + Branch: "two", + }, + { + Repository: "two", + Branch: "one", + }, + { + Repository: "three", + Branch: "one", + }, + { + Repository: "three", + Branch: "two", + }, + }, + } + filters := []argoprojiov1alpha1.SCMProviderGeneratorFilter{ + { + BranchMatch: strp("w"), + }, + } + repos, err := ListRepos(context.Background(), provider, filters, "") + assert.Nil(t, err) + assert.Len(t, repos, 2) + assert.Equal(t, "one", repos[0].Repository) + assert.Equal(t, "two", repos[0].Branch) + assert.Equal(t, "three", repos[1].Repository) + assert.Equal(t, "two", repos[1].Branch) +} + +func TestMultiFilterAnd(t *testing.T) { + provider := &MockProvider{ + Repos: []*Repository{ + { + Repository: "one", + Labels: []string{"prod-one", "prod-two", "staging"}, + }, + { + Repository: "two", + Labels: []string{"prod-two"}, + }, + { + Repository: "three", + Labels: []string{"staging"}, + }, + }, + } + filters := []argoprojiov1alpha1.SCMProviderGeneratorFilter{ + { + RepositoryMatch: strp("w"), + LabelMatch: strp("^prod-.*$"), + }, + } + repos, err := ListRepos(context.Background(), provider, filters, "") + assert.Nil(t, err) + assert.Len(t, repos, 1) + assert.Equal(t, "two", repos[0].Repository) +} + +func TestMultiFilterOr(t *testing.T) { + provider := &MockProvider{ + Repos: []*Repository{ + { + Repository: "one", + Labels: []string{"prod-one", "prod-two", "staging"}, + }, + { + Repository: "two", + Labels: []string{"prod-two"}, + }, + { + Repository: 
"three", + Labels: []string{"staging"}, + }, + }, + } + filters := []argoprojiov1alpha1.SCMProviderGeneratorFilter{ + { + RepositoryMatch: strp("e"), + }, + { + LabelMatch: strp("^prod-.*$"), + }, + } + repos, err := ListRepos(context.Background(), provider, filters, "") + assert.Nil(t, err) + assert.Len(t, repos, 3) + assert.Equal(t, "one", repos[0].Repository) + assert.Equal(t, "two", repos[1].Repository) + assert.Equal(t, "three", repos[2].Repository) +} + +func TestNoFilters(t *testing.T) { + provider := &MockProvider{ + Repos: []*Repository{ + { + Repository: "one", + Labels: []string{"prod-one", "prod-two", "staging"}, + }, + { + Repository: "two", + Labels: []string{"prod-two"}, + }, + { + Repository: "three", + Labels: []string{"staging"}, + }, + }, + } + filters := []argoprojiov1alpha1.SCMProviderGeneratorFilter{} + repos, err := ListRepos(context.Background(), provider, filters, "") + assert.Nil(t, err) + assert.Len(t, repos, 3) + assert.Equal(t, "one", repos[0].Repository) + assert.Equal(t, "two", repos[1].Repository) + assert.Equal(t, "three", repos[2].Repository) +} + +// tests the getApplicableFilters function, passing in all the filters, and an unset filter, plus an additional +// branch filter +func TestApplicableFilterMap(t *testing.T) { + branchFilter := Filter{ + BranchMatch: ®exp.Regexp{}, + FilterType: FilterTypeBranch, + } + repoFilter := Filter{ + RepositoryMatch: ®exp.Regexp{}, + FilterType: FilterTypeRepo, + } + pathExistsFilter := Filter{ + PathsExist: []string{"test"}, + FilterType: FilterTypeBranch, + } + pathDoesntExistsFilter := Filter{ + PathsDoNotExist: []string{"test"}, + FilterType: FilterTypeBranch, + } + labelMatchFilter := Filter{ + LabelMatch: ®exp.Regexp{}, + FilterType: FilterTypeRepo, + } + unsetFilter := Filter{ + LabelMatch: ®exp.Regexp{}, + } + additionalBranchFilter := Filter{ + BranchMatch: ®exp.Regexp{}, + FilterType: FilterTypeBranch, + } + filterMap := getApplicableFilters([]*Filter{&branchFilter, &repoFilter, + 
&pathExistsFilter, &labelMatchFilter, &unsetFilter, &additionalBranchFilter, &pathDoesntExistsFilter}) + + assert.Len(t, filterMap[FilterTypeRepo], 2) + assert.Len(t, filterMap[FilterTypeBranch], 4) +} diff --git a/applicationset/utils/clusterUtils.go b/applicationset/utils/clusterUtils.go new file mode 100644 index 0000000000000..ee9832f533e5e --- /dev/null +++ b/applicationset/utils/clusterUtils.go @@ -0,0 +1,196 @@ +package utils + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + "strings" + "sync" + "time" + + log "github.com/sirupsen/logrus" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/argoproj/argo-cd/v2/common" + appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + + "k8s.io/client-go/kubernetes" + "k8s.io/utils/pointer" +) + +// The contents of this file are from +// github.com/argoproj/argo-cd/util/db/cluster.go +// +// The main difference is that ListClusters(...) calls the kubeclient directly, +// via `g.clientset.CoreV1().Secrets`, rather than using the `db.listClusterSecrets()`` +// which appears to have a race condition on when it is called. +// +// I was reminded of this issue that I opened, which might be related: +// https://github.com/argoproj/argo-cd/issues/4755 +// +// I hope to upstream this change in some form, so that we do not need to worry about +// Argo CD changing the logic on us. 
+ +var ( + localCluster = appv1.Cluster{ + Name: "in-cluster", + Server: appv1.KubernetesInternalAPIServerAddr, + ConnectionState: appv1.ConnectionState{Status: appv1.ConnectionStatusSuccessful}, + } + initLocalCluster sync.Once +) + +const ( + ArgoCDSecretTypeLabel = "argocd.argoproj.io/secret-type" + ArgoCDSecretTypeCluster = "cluster" +) + +// ValidateDestination checks: +// if we used destination name we infer the server url +// if we used both name and server then we return an invalid spec error +func ValidateDestination(ctx context.Context, dest *appv1.ApplicationDestination, clientset kubernetes.Interface, argoCDNamespace string) error { + if dest.Name != "" { + if dest.Server == "" { + server, err := getDestinationServer(ctx, dest.Name, clientset, argoCDNamespace) + if err != nil { + return fmt.Errorf("unable to find destination server: %v", err) + } + if server == "" { + return fmt.Errorf("application references destination cluster %s which does not exist", dest.Name) + } + dest.SetInferredServer(server) + } else { + if !dest.IsServerInferred() { + return fmt.Errorf("application destination can't have both name and server defined: %s %s", dest.Name, dest.Server) + } + } + } + return nil +} + +func getDestinationServer(ctx context.Context, clusterName string, clientset kubernetes.Interface, argoCDNamespace string) (string, error) { + // settingsMgr := settings.NewSettingsManager(context.TODO(), clientset, namespace) + // argoDB := db.NewDB(namespace, settingsMgr, clientset) + // clusterList, err := argoDB.ListClusters(ctx) + clusterList, err := ListClusters(ctx, clientset, argoCDNamespace) + if err != nil { + return "", err + } + var servers []string + for _, c := range clusterList.Items { + if c.Name == clusterName { + servers = append(servers, c.Server) + } + } + if len(servers) > 1 { + return "", fmt.Errorf("there are %d clusters with the same name: %v", len(servers), servers) + } else if len(servers) == 0 { + return "", fmt.Errorf("there are no clusters 
with this name: %s", clusterName) + } + return servers[0], nil +} + +func ListClusters(ctx context.Context, clientset kubernetes.Interface, namespace string) (*appv1.ClusterList, error) { + + clusterSecretsList, err := clientset.CoreV1().Secrets(namespace).List(ctx, + metav1.ListOptions{LabelSelector: common.LabelKeySecretType + "=" + common.LabelValueSecretTypeCluster}) + if err != nil { + return nil, err + } + + if clusterSecretsList == nil { + return nil, nil + } + + clusterSecrets := clusterSecretsList.Items + + clusterList := appv1.ClusterList{ + Items: make([]appv1.Cluster, len(clusterSecrets)), + } + hasInClusterCredentials := false + for i, clusterSecret := range clusterSecrets { + // This line has changed from the original Argo CD code: now receives an error, and handles it + cluster, err := secretToCluster(&clusterSecret) + if err != nil || cluster == nil { + return nil, fmt.Errorf("unable to convert cluster secret to cluster object '%s': %v", clusterSecret.Name, err) + } + + clusterList.Items[i] = *cluster + if cluster.Server == appv1.KubernetesInternalAPIServerAddr { + hasInClusterCredentials = true + } + } + if !hasInClusterCredentials { + localCluster := getLocalCluster(clientset) + if localCluster != nil { + clusterList.Items = append(clusterList.Items, *localCluster) + } + } + return &clusterList, nil +} + +func getLocalCluster(clientset kubernetes.Interface) *appv1.Cluster { + initLocalCluster.Do(func() { + info, err := clientset.Discovery().ServerVersion() + if err == nil { + localCluster.ServerVersion = fmt.Sprintf("%s.%s", info.Major, info.Minor) + localCluster.ConnectionState = appv1.ConnectionState{Status: appv1.ConnectionStatusSuccessful} + } else { + localCluster.ConnectionState = appv1.ConnectionState{ + Status: appv1.ConnectionStatusFailed, + Message: err.Error(), + } + } + }) + cluster := localCluster.DeepCopy() + now := metav1.Now() + cluster.ConnectionState.ModifiedAt = &now + return cluster +} + +// secretToCluster converts a secret 
into a Cluster object +func secretToCluster(s *corev1.Secret) (*appv1.Cluster, error) { + var config appv1.ClusterConfig + if len(s.Data["config"]) > 0 { + if err := json.Unmarshal(s.Data["config"], &config); err != nil { + // This line has changed from the original Argo CD: now returns an error rather than panicing. + return nil, err + } + } + + var namespaces []string + for _, ns := range strings.Split(string(s.Data["namespaces"]), ",") { + if ns = strings.TrimSpace(ns); ns != "" { + namespaces = append(namespaces, ns) + } + } + var refreshRequestedAt *metav1.Time + if v, found := s.Annotations[appv1.AnnotationKeyRefresh]; found { + requestedAt, err := time.Parse(time.RFC3339, v) + if err != nil { + log.Warnf("Error while parsing date in cluster secret '%s': %v", s.Name, err) + } else { + refreshRequestedAt = &metav1.Time{Time: requestedAt} + } + } + var shard *int64 + if shardStr := s.Data["shard"]; shardStr != nil { + if val, err := strconv.Atoi(string(shardStr)); err != nil { + log.Warnf("Error while parsing shard in cluster secret '%s': %v", s.Name, err) + } else { + shard = pointer.Int64Ptr(int64(val)) + } + } + cluster := appv1.Cluster{ + ID: string(s.UID), + Server: strings.TrimRight(string(s.Data["server"]), "/"), + Name: string(s.Data["name"]), + Namespaces: namespaces, + Config: config, + RefreshRequestedAt: refreshRequestedAt, + Shard: shard, + } + return &cluster, nil +} diff --git a/applicationset/utils/clusterUtils_test.go b/applicationset/utils/clusterUtils_test.go new file mode 100644 index 0000000000000..70332afdd80fb --- /dev/null +++ b/applicationset/utils/clusterUtils_test.go @@ -0,0 +1,178 @@ +package utils + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + kubetesting "k8s.io/client-go/testing" + + argoappv1 
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +const ( + fakeNamespace = "fake-ns" +) + +// From Argo CD util/db/cluster_test.go +func Test_secretToCluster(t *testing.T) { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mycluster", + Namespace: fakeNamespace, + }, + Data: map[string][]byte{ + "name": []byte("test"), + "server": []byte("http://mycluster"), + "config": []byte("{\"username\":\"foo\"}"), + }, + } + cluster, err := secretToCluster(secret) + assert.Nil(t, err) + assert.Equal(t, *cluster, argoappv1.Cluster{ + Name: "test", + Server: "http://mycluster", + Config: argoappv1.ClusterConfig{ + Username: "foo", + }, + }) +} + +// From Argo CD util/db/cluster_test.go +func Test_secretToCluster_NoConfig(t *testing.T) { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mycluster", + Namespace: fakeNamespace, + }, + Data: map[string][]byte{ + "name": []byte("test"), + "server": []byte("http://mycluster"), + }, + } + cluster, err := secretToCluster(secret) + assert.Nil(t, err) + assert.Equal(t, *cluster, argoappv1.Cluster{ + Name: "test", + Server: "http://mycluster", + }) +} + +func createClusterSecret(secretName string, clusterName string, clusterServer string) *corev1.Secret { + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: fakeNamespace, + Labels: map[string]string{ + ArgoCDSecretTypeLabel: ArgoCDSecretTypeCluster, + }, + }, + Data: map[string][]byte{ + "name": []byte(clusterName), + "server": []byte(clusterServer), + "config": []byte("{\"username\":\"foo\",\"password\":\"foo\"}"), + }, + } + + return secret + +} + +// From util/argo/argo_test.go +// (ported to use kubeclientset) +func TestValidateDestination(t *testing.T) { + + t.Run("Validate destination with server url", func(t *testing.T) { + + dest := argoappv1.ApplicationDestination{ + Server: "https://127.0.0.1:6443", + Namespace: "default", + } + + appCond := 
ValidateDestination(context.Background(), &dest, nil, fakeNamespace) + assert.Nil(t, appCond) + assert.False(t, dest.IsServerInferred()) + }) + + t.Run("Validate destination with server name", func(t *testing.T) { + dest := argoappv1.ApplicationDestination{ + Name: "minikube", + } + + secret := createClusterSecret("my-secret", "minikube", "https://127.0.0.1:6443") + objects := []runtime.Object{} + objects = append(objects, secret) + kubeclientset := fake.NewSimpleClientset(objects...) + + appCond := ValidateDestination(context.Background(), &dest, kubeclientset, fakeNamespace) + assert.Nil(t, appCond) + assert.Equal(t, "https://127.0.0.1:6443", dest.Server) + assert.True(t, dest.IsServerInferred()) + }) + + t.Run("Error when having both server url and name", func(t *testing.T) { + dest := argoappv1.ApplicationDestination{ + Server: "https://127.0.0.1:6443", + Name: "minikube", + Namespace: "default", + } + + err := ValidateDestination(context.Background(), &dest, nil, fakeNamespace) + assert.Equal(t, "application destination can't have both name and server defined: minikube https://127.0.0.1:6443", err.Error()) + assert.False(t, dest.IsServerInferred()) + }) + + t.Run("List clusters fails", func(t *testing.T) { + dest := argoappv1.ApplicationDestination{ + Name: "minikube", + } + kubeclientset := fake.NewSimpleClientset() + + kubeclientset.PrependReactor("list", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, fmt.Errorf("an error occurred") + }) + + err := ValidateDestination(context.Background(), &dest, kubeclientset, fakeNamespace) + assert.Equal(t, "unable to find destination server: an error occurred", err.Error()) + assert.False(t, dest.IsServerInferred()) + }) + + t.Run("Destination cluster does not exist", func(t *testing.T) { + dest := argoappv1.ApplicationDestination{ + Name: "minikube", + } + + secret := createClusterSecret("dind", "dind", "https://127.0.0.1:6443") + objects := []runtime.Object{} + 
objects = append(objects, secret) + kubeclientset := fake.NewSimpleClientset(objects...) + + err := ValidateDestination(context.Background(), &dest, kubeclientset, fakeNamespace) + assert.Equal(t, "unable to find destination server: there are no clusters with this name: minikube", err.Error()) + assert.False(t, dest.IsServerInferred()) + }) + + t.Run("Validate too many clusters with the same name", func(t *testing.T) { + dest := argoappv1.ApplicationDestination{ + Name: "dind", + } + + secret := createClusterSecret("dind", "dind", "https://127.0.0.1:2443") + secret2 := createClusterSecret("dind2", "dind", "https://127.0.0.1:8443") + + objects := []runtime.Object{} + objects = append(objects, secret, secret2) + kubeclientset := fake.NewSimpleClientset(objects...) + + err := ValidateDestination(context.Background(), &dest, kubeclientset, fakeNamespace) + assert.Equal(t, "unable to find destination server: there are 2 clusters with the same name: [https://127.0.0.1:2443 https://127.0.0.1:8443]", err.Error()) + assert.False(t, dest.IsServerInferred()) + }) + +} diff --git a/applicationset/utils/createOrUpdate.go b/applicationset/utils/createOrUpdate.go new file mode 100644 index 0000000000000..096be5a9a97d3 --- /dev/null +++ b/applicationset/utils/createOrUpdate.go @@ -0,0 +1,101 @@ +package utils + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +// CreateOrUpdate overrides "sigs.k8s.io/controller-runtime" function +// in sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go +// to add equality for argov1alpha1.ApplicationDestination 
+// argov1alpha1.ApplicationDestination has a private variable, so the default +// implementation fails to compare it. +// +// CreateOrUpdate creates or updates the given object in the Kubernetes +// cluster. The object's desired state must be reconciled with the existing +// state inside the passed in callback MutateFn. +// +// The MutateFn is called regardless of creating or updating an object. +// +// It returns the executed operation and an error. +func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f controllerutil.MutateFn) (controllerutil.OperationResult, error) { + + key := client.ObjectKeyFromObject(obj) + if err := c.Get(ctx, key, obj); err != nil { + if !errors.IsNotFound(err) { + return controllerutil.OperationResultNone, err + } + if err := mutate(f, key, obj); err != nil { + return controllerutil.OperationResultNone, err + } + if err := c.Create(ctx, obj); err != nil { + return controllerutil.OperationResultNone, err + } + return controllerutil.OperationResultCreated, nil + } + + existingObj := obj.DeepCopyObject() + existing, ok := existingObj.(client.Object) + if !ok { + panic(fmt.Errorf("existing object is not a client.Object")) + } + if err := mutate(f, key, obj); err != nil { + return controllerutil.OperationResultNone, err + } + + equality := conversion.EqualitiesOrDie( + func(a, b resource.Quantity) bool { + // Ignore formatting, only care that numeric value stayed the same. + // TODO: if we decide it's important, it should be safe to start comparing the format. + // + // Uninitialized quantities are equivalent to 0 quantities. 
+ return a.Cmp(b) == 0 + }, + func(a, b metav1.MicroTime) bool { + return a.UTC() == b.UTC() + }, + func(a, b metav1.Time) bool { + return a.UTC() == b.UTC() + }, + func(a, b labels.Selector) bool { + return a.String() == b.String() + }, + func(a, b fields.Selector) bool { + return a.String() == b.String() + }, + func(a, b argov1alpha1.ApplicationDestination) bool { + return a.Namespace == b.Namespace && a.Name == b.Name && a.Server == b.Server + }, + ) + + if equality.DeepEqual(existing, obj) { + return controllerutil.OperationResultNone, nil + } + + if err := c.Patch(ctx, obj, client.MergeFrom(existing)); err != nil { + return controllerutil.OperationResultNone, err + } + return controllerutil.OperationResultUpdated, nil +} + +// mutate wraps a MutateFn and applies validation to its result +func mutate(f controllerutil.MutateFn, key client.ObjectKey, obj client.Object) error { + if err := f(); err != nil { + return fmt.Errorf("error while wrapping using MutateFn: %w", err) + } + if newKey := client.ObjectKeyFromObject(obj); key != newKey { + return fmt.Errorf("MutateFn cannot mutate object name and/or object namespace") + } + return nil +} diff --git a/applicationset/utils/map.go b/applicationset/utils/map.go new file mode 100644 index 0000000000000..4e45e1c3fe2d2 --- /dev/null +++ b/applicationset/utils/map.go @@ -0,0 +1,67 @@ +package utils + +import ( + "fmt" +) + +func ConvertToMapStringString(mapStringInterface map[string]interface{}) map[string]string { + mapStringString := make(map[string]string, len(mapStringInterface)) + + for key, value := range mapStringInterface { + strKey := fmt.Sprintf("%v", key) + strValue := fmt.Sprintf("%v", value) + + mapStringString[strKey] = strValue + } + return mapStringString +} + +func ConvertToMapStringInterface(mapStringString map[string]string) map[string]interface{} { + mapStringInterface := make(map[string]interface{}, len(mapStringString)) + + for key, value := range mapStringString { + mapStringInterface[key] = 
value + } + return mapStringInterface +} + +func CombineStringMaps(aSI map[string]interface{}, bSI map[string]interface{}) (map[string]string, error) { + + a := ConvertToMapStringString(aSI) + b := ConvertToMapStringString(bSI) + + res := map[string]string{} + + for k, v := range a { + res[k] = v + } + + for k, v := range b { + current, present := res[k] + if present && current != v { + return nil, fmt.Errorf("found duplicate key %s with different value, a: %s ,b: %s", k, current, v) + } + res[k] = v + } + + return res, nil +} + +// CombineStringMapsAllowDuplicates merges two maps. Where there are duplicates, take the latter map's value. +func CombineStringMapsAllowDuplicates(aSI map[string]interface{}, bSI map[string]interface{}) (map[string]string, error) { + + a := ConvertToMapStringString(aSI) + b := ConvertToMapStringString(bSI) + + res := map[string]string{} + + for k, v := range a { + res[k] = v + } + + for k, v := range b { + res[k] = v + } + + return res, nil +} diff --git a/applicationset/utils/map_test.go b/applicationset/utils/map_test.go new file mode 100644 index 0000000000000..860bb046cc253 --- /dev/null +++ b/applicationset/utils/map_test.go @@ -0,0 +1,58 @@ +package utils + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCombineStringMaps(t *testing.T) { + testCases := []struct { + name string + left map[string]interface{} + right map[string]interface{} + expected map[string]string + expectedErr error + }{ + { + name: "combines the maps", + left: map[string]interface{}{"foo": "bar"}, + right: map[string]interface{}{"a": "b"}, + expected: map[string]string{"a": "b", "foo": "bar"}, + expectedErr: nil, + }, + { + name: "fails if keys are the same but value isn't", + left: map[string]interface{}{"foo": "bar", "a": "fail"}, + right: map[string]interface{}{"a": "b", "c": "d"}, + expected: map[string]string{"a": "b", "foo": "bar"}, + expectedErr: fmt.Errorf("found duplicate key a with different value, a: fail ,b: b"), 
+ }, + { + name: "pass if keys & values are the same", + left: map[string]interface{}{"foo": "bar", "a": "b"}, + right: map[string]interface{}{"a": "b", "c": "d"}, + expected: map[string]string{"a": "b", "c": "d", "foo": "bar"}, + expectedErr: nil, + }, + } + + for _, testCase := range testCases { + testCaseCopy := testCase + + t.Run(testCaseCopy.name, func(t *testing.T) { + t.Parallel() + + got, err := CombineStringMaps(testCaseCopy.left, testCaseCopy.right) + + if testCaseCopy.expectedErr != nil { + assert.EqualError(t, err, testCaseCopy.expectedErr.Error()) + } else { + assert.NoError(t, err) + assert.Equal(t, testCaseCopy.expected, got) + } + + }) + } +} diff --git a/applicationset/utils/policy.go b/applicationset/utils/policy.go new file mode 100644 index 0000000000000..a06509265a540 --- /dev/null +++ b/applicationset/utils/policy.go @@ -0,0 +1,22 @@ +package utils + +import ( + argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +// Policies is a registry of available policies. 
+var Policies = map[string]argov1alpha1.ApplicationsSyncPolicy{ + "create-only": argov1alpha1.ApplicationsSyncPolicyCreateOnly, + "create-update": argov1alpha1.ApplicationsSyncPolicyCreateUpdate, + "create-delete": argov1alpha1.ApplicationsSyncPolicyCreateDelete, + "sync": argov1alpha1.ApplicationsSyncPolicySync, + // Default is "sync" + "": argov1alpha1.ApplicationsSyncPolicySync, +} + +func DefaultPolicy(appSetSyncPolicy *argov1alpha1.ApplicationSetSyncPolicy, controllerPolicy argov1alpha1.ApplicationsSyncPolicy, enablePolicyOverride bool) argov1alpha1.ApplicationsSyncPolicy { + if appSetSyncPolicy == nil || appSetSyncPolicy.ApplicationsSync == nil || !enablePolicyOverride { + return controllerPolicy + } + return *appSetSyncPolicy.ApplicationsSync +} diff --git a/applicationset/utils/selector.go b/applicationset/utils/selector.go new file mode 100644 index 0000000000000..53db73a5b3a48 --- /dev/null +++ b/applicationset/utils/selector.go @@ -0,0 +1,261 @@ +package utils + +import ( + "fmt" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/klog/v2" + "sort" + "strconv" + "strings" +) + +var ( + unaryOperators = []string{ + string(selection.Exists), string(selection.DoesNotExist), + } + binaryOperators = []string{ + string(selection.In), string(selection.NotIn), + string(selection.Equals), string(selection.DoubleEquals), string(selection.NotEquals), + string(selection.GreaterThan), string(selection.LessThan), + } + validRequirementOperators = append(binaryOperators, unaryOperators...) +) + +// Selector represents a label selector. +type Selector interface { + // Matches returns true if this selector matches the given set of labels. 
+ Matches(labels.Labels) bool + + // Add adds requirements to the Selector + Add(r ...Requirement) Selector +} + +type internalSelector []Requirement + +// ByKey sorts requirements by key to obtain deterministic parser +type ByKey []Requirement + +func (a ByKey) Len() int { return len(a) } + +func (a ByKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +func (a ByKey) Less(i, j int) bool { return a[i].key < a[j].key } + +// Matches for a internalSelector returns true if all +// its Requirements match the input Labels. If any +// Requirement does not match, false is returned. +func (s internalSelector) Matches(l labels.Labels) bool { + for ix := range s { + if matches := s[ix].Matches(l); !matches { + return false + } + } + return true +} + +// Add adds requirements to the selector. It copies the current selector returning a new one +func (s internalSelector) Add(reqs ...Requirement) Selector { + ret := make(internalSelector, 0, len(s)+len(reqs)) + ret = append(ret, s...) + ret = append(ret, reqs...) + sort.Sort(ByKey(ret)) + return ret +} + +type nothingSelector struct{} + +func (n nothingSelector) Matches(l labels.Labels) bool { + return false +} + +func (n nothingSelector) Add(r ...Requirement) Selector { + return n +} + +// Nothing returns a selector that matches no labels +func nothing() Selector { + return nothingSelector{} +} + +// Everything returns a selector that matches all labels. 
+func everything() Selector { + return internalSelector{} +} + +// LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements +// labels.Selector +// Note: This function should be kept in sync with the selector methods in pkg/labels/selector.go +func LabelSelectorAsSelector(ps *v1.LabelSelector) (Selector, error) { + if ps == nil { + return nothing(), nil + } + if len(ps.MatchLabels)+len(ps.MatchExpressions) == 0 { + return everything(), nil + } + requirements := make([]Requirement, 0, len(ps.MatchLabels)+len(ps.MatchExpressions)) + for k, v := range ps.MatchLabels { + r, err := newRequirement(k, selection.Equals, []string{v}) + if err != nil { + return nil, err + } + requirements = append(requirements, *r) + } + for _, expr := range ps.MatchExpressions { + var op selection.Operator + switch expr.Operator { + case v1.LabelSelectorOpIn: + op = selection.In + case v1.LabelSelectorOpNotIn: + op = selection.NotIn + case v1.LabelSelectorOpExists: + op = selection.Exists + case v1.LabelSelectorOpDoesNotExist: + op = selection.DoesNotExist + default: + return nil, fmt.Errorf("%q is not a valid pod selector operator", expr.Operator) + } + r, err := newRequirement(expr.Key, op, append([]string(nil), expr.Values...)) + if err != nil { + return nil, err + } + requirements = append(requirements, *r) + } + selector := newSelector() + selector = selector.Add(requirements...) + return selector, nil +} + +// NewSelector returns a nil selector +func newSelector() Selector { + return internalSelector(nil) +} + +func validateLabelKey(k string, path *field.Path) *field.Error { + if errs := validation.IsQualifiedName(k); len(errs) != 0 { + return field.Invalid(path, k, strings.Join(errs, "; ")) + } + return nil +} + +// NewRequirement is the constructor for a Requirement. +// If any of these rules is violated, an error is returned: +// (1) The operator can only be In, NotIn, Equals, DoubleEquals, Gt, Lt, NotEquals, Exists, or DoesNotExist. 
+// (2) If the operator is In or NotIn, the values set must be non-empty. +// (3) If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value. +// (4) If the operator is Exists or DoesNotExist, the value set must be empty. +// (5) If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer. +// (6) The key is invalid due to its length, or sequence +// +// of characters. See validateLabelKey for more details. +// +// The empty string is a valid value in the input values set. +// Returned error, if not nil, is guaranteed to be an aggregated field.ErrorList +func newRequirement(key string, op selection.Operator, vals []string, opts ...field.PathOption) (*Requirement, error) { + var allErrs field.ErrorList + path := field.ToPath(opts...) + if err := validateLabelKey(key, path.Child("key")); err != nil { + allErrs = append(allErrs, err) + } + + valuePath := path.Child("values") + switch op { + case selection.In, selection.NotIn: + if len(vals) == 0 { + allErrs = append(allErrs, field.Invalid(valuePath, vals, "for 'in', 'notin' operators, values set can't be empty")) + } + case selection.Equals, selection.DoubleEquals, selection.NotEquals: + if len(vals) != 1 { + allErrs = append(allErrs, field.Invalid(valuePath, vals, "exact-match compatibility requires one single value")) + } + case selection.Exists, selection.DoesNotExist: + if len(vals) != 0 { + allErrs = append(allErrs, field.Invalid(valuePath, vals, "values set must be empty for exists and does not exist")) + } + case selection.GreaterThan, selection.LessThan: + if len(vals) != 1 { + allErrs = append(allErrs, field.Invalid(valuePath, vals, "for 'Gt', 'Lt' operators, exactly one value is required")) + } + for i := range vals { + if _, err := strconv.ParseInt(vals[i], 10, 64); err != nil { + allErrs = append(allErrs, field.Invalid(valuePath.Index(i), vals[i], "for 'Gt', 'Lt' operators, the value must be an integer")) + } + } + 
default: + allErrs = append(allErrs, field.NotSupported(path.Child("operator"), op, validRequirementOperators)) + } + + return &Requirement{key: key, operator: op, strValues: vals}, allErrs.ToAggregate() +} + +// Requirement contains values, a key, and an operator that relates the key and values. +// The zero value of Requirement is invalid. +// Requirement implements both set based match and exact match +// Requirement should be initialized via NewRequirement constructor for creating a valid Requirement. +// +k8s:deepcopy-gen=true +type Requirement struct { + key string + operator selection.Operator + // In the majority of cases we have at most one value here. + // It is generally faster to operate on a single-element slice + // than on a single-element map, so we have a slice here. + strValues []string +} + +func (r *Requirement) hasValue(value string) bool { + for i := range r.strValues { + if r.strValues[i] == value { + return true + } + } + return false +} + +func (r *Requirement) Matches(ls labels.Labels) bool { + switch r.operator { + case selection.In, selection.Equals, selection.DoubleEquals: + if !ls.Has(r.key) { + return false + } + return r.hasValue(ls.Get(r.key)) + case selection.NotIn, selection.NotEquals: + if !ls.Has(r.key) { + return true + } + return !r.hasValue(ls.Get(r.key)) + case selection.Exists: + return ls.Has(r.key) + case selection.DoesNotExist: + return !ls.Has(r.key) + case selection.GreaterThan, selection.LessThan: + if !ls.Has(r.key) { + return false + } + lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64) + if err != nil { + klog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err) + return false + } + + // There should be only one strValue in r.strValues, and can be converted to an integer. 
+	if len(r.strValues) != 1 {
+		klog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r)
+		return false
+	}
+
+	var rValue int64
+	for i := range r.strValues {
+		rValue, err = strconv.ParseInt(r.strValues[i], 10, 64)
+		if err != nil {
+			klog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r)
+			return false
+		}
+	}
+	return (r.operator == selection.GreaterThan && lsValue > rValue) || (r.operator == selection.LessThan && lsValue < rValue)
+	default:
+		return false
+	}
+}
diff --git a/applicationset/utils/template_functions.go b/applicationset/utils/template_functions.go
new file mode 100644
index 0000000000000..84ab870404f1a
--- /dev/null
+++ b/applicationset/utils/template_functions.go
@@ -0,0 +1,71 @@
+package utils
+
+import (
+	"regexp"
+	"strings"
+
+	"sigs.k8s.io/yaml"
+)
+
+// SanitizeName sanitizes the name in accordance with the below rules
+// 1. contain no more than 253 characters
+// 2. contain only lowercase alphanumeric characters, '-' or '.'
+// 3. start and end with an alphanumeric character
+func SanitizeName(name string) string {
+	invalidDNSNameChars := regexp.MustCompile("[^-a-z0-9.]")
+	maxDNSNameLength := 253
+
+	name = strings.ToLower(name)
+	name = invalidDNSNameChars.ReplaceAllString(name, "-")
+	if len(name) > maxDNSNameLength {
+		name = name[:maxDNSNameLength]
+	}
+
+	return strings.Trim(name, "-.")
+}
+
+// This has been copied from helm and may be removed as soon as it is retrofitted in sprig
+// toYAML takes an interface, marshals it to yaml, and returns a string. It will
+// always return a string, even on marshal error (empty string).
+//
+// This is designed to be called from a template.
+func toYAML(v interface{}) (string, error) {
+	data, err := yaml.Marshal(v)
+	if err != nil {
+		// Swallow errors inside of a template.
+		return "", err
+	}
+	return strings.TrimSuffix(string(data), "\n"), nil
+}
+
+// This has been copied from helm and may be removed as soon as it is retrofitted in sprig
+// fromYAML converts a YAML document into a map[string]interface{}.
+//
+// This is not a general-purpose YAML parser, and will not parse all valid
+// YAML documents. Additionally, because its intended use is within templates
+// it tolerates errors. It will insert the returned error message string into
+// m["Error"] in the returned map.
+func fromYAML(str string) (map[string]interface{}, error) {
+	m := map[string]interface{}{}
+
+	if err := yaml.Unmarshal([]byte(str), &m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// This has been copied from helm and may be removed as soon as it is retrofitted in sprig
+// fromYAMLArray converts a YAML array into a []interface{}.
+//
+// This is not a general-purpose YAML parser, and will not parse all valid
+// YAML documents. Additionally, because its intended use is within templates
+// it tolerates errors. It will insert the returned error message string as
+// the first and only item in the returned array.
+func fromYAMLArray(str string) ([]interface{}, error) { + a := []interface{}{} + + if err := yaml.Unmarshal([]byte(str), &a); err != nil { + return nil, err + } + return a, nil +} diff --git a/applicationset/utils/utils.go b/applicationset/utils/utils.go new file mode 100644 index 0000000000000..089a6ff103100 --- /dev/null +++ b/applicationset/utils/utils.go @@ -0,0 +1,470 @@ +package utils + +import ( + "bytes" + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + "io" + "os" + "reflect" + "regexp" + "sort" + "strings" + "text/template" + "unsafe" + + "github.com/Masterminds/sprig/v3" + "github.com/valyala/fasttemplate" + "sigs.k8s.io/yaml" + + log "github.com/sirupsen/logrus" + + argoappsv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +var sprigFuncMap = sprig.GenericFuncMap() // a singleton for better performance + +func init() { + // Avoid allowing the user to learn things about the environment. + delete(sprigFuncMap, "env") + delete(sprigFuncMap, "expandenv") + delete(sprigFuncMap, "getHostByName") + sprigFuncMap["normalize"] = SanitizeName + sprigFuncMap["toYaml"] = toYAML + sprigFuncMap["fromYaml"] = fromYAML + sprigFuncMap["fromYamlArray"] = fromYAMLArray +} + +type Renderer interface { + RenderTemplateParams(tmpl *argoappsv1.Application, syncPolicy *argoappsv1.ApplicationSetSyncPolicy, params map[string]interface{}, useGoTemplate bool, goTemplateOptions []string) (*argoappsv1.Application, error) +} + +type Render struct { +} + +func copyValueIntoUnexported(destination, value reflect.Value) { + reflect.NewAt(destination.Type(), unsafe.Pointer(destination.UnsafeAddr())). + Elem(). 
+ Set(value) +} + +func copyUnexported(copy, original reflect.Value) { + var unexported = reflect.NewAt(original.Type(), unsafe.Pointer(original.UnsafeAddr())).Elem() + copyValueIntoUnexported(copy, unexported) +} + +func IsJSONStr(str string) bool { + str = strings.TrimSpace(str) + return len(str) > 0 && str[0] == '{' +} + +func ConvertYAMLToJSON(str string) (string, error) { + if !IsJSONStr(str) { + jsonStr, err := yaml.YAMLToJSON([]byte(str)) + if err != nil { + return str, err + } + return string(jsonStr), nil + } + return str, nil +} + +// This function is in charge of searching all String fields of the object recursively and apply templating +// thanks to https://gist.github.com/randallmlough/1fd78ec8a1034916ca52281e3b886dc7 +func (r *Render) deeplyReplace(copy, original reflect.Value, replaceMap map[string]interface{}, useGoTemplate bool, goTemplateOptions []string) error { + switch original.Kind() { + // The first cases handle nested structures and translate them recursively + // If it is a pointer we need to unwrap and call once again + case reflect.Ptr: + // To get the actual value of the original we have to call Elem() + // At the same time this unwraps the pointer so we don't end up in + // an infinite recursion + originalValue := original.Elem() + // Check if the pointer is nil + if !originalValue.IsValid() { + return nil + } + // Allocate a new object and set the pointer to it + if originalValue.CanSet() { + copy.Set(reflect.New(originalValue.Type())) + } else { + copyUnexported(copy, original) + } + // Unwrap the newly created pointer + if err := r.deeplyReplace(copy.Elem(), originalValue, replaceMap, useGoTemplate, goTemplateOptions); err != nil { + // Not wrapping the error, since this is a recursive function. Avoids excessively long error messages. + return err + } + + // If it is an interface (which is very similar to a pointer), do basically the + // same as for the pointer. 
Though a pointer is not the same as an interface so + // note that we have to call Elem() after creating a new object because otherwise + // we would end up with an actual pointer + case reflect.Interface: + // Get rid of the wrapping interface + originalValue := original.Elem() + // Create a new object. Now new gives us a pointer, but we want the value it + // points to, so we have to call Elem() to unwrap it + + if originalValue.IsValid() { + reflectType := originalValue.Type() + + reflectValue := reflect.New(reflectType) + + copyValue := reflectValue.Elem() + if err := r.deeplyReplace(copyValue, originalValue, replaceMap, useGoTemplate, goTemplateOptions); err != nil { + // Not wrapping the error, since this is a recursive function. Avoids excessively long error messages. + return err + } + copy.Set(copyValue) + } + + // If it is a struct we translate each field + case reflect.Struct: + for i := 0; i < original.NumField(); i += 1 { + var currentType = fmt.Sprintf("%s.%s", original.Type().Field(i).Name, original.Type().PkgPath()) + // specific case time + if currentType == "time.Time" { + copy.Field(i).Set(original.Field(i)) + } else if currentType == "Raw.k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" || currentType == "Raw.k8s.io/apimachinery/pkg/runtime" { + var unmarshaled interface{} + originalBytes := original.Field(i).Bytes() + convertedToJson, err := ConvertYAMLToJSON(string(originalBytes)) + if err != nil { + return fmt.Errorf("error while converting template to json %q: %w", convertedToJson, err) + } + err = json.Unmarshal([]byte(convertedToJson), &unmarshaled) + if err != nil { + return fmt.Errorf("failed to unmarshal JSON field: %w", err) + } + jsonOriginal := reflect.ValueOf(&unmarshaled) + jsonCopy := reflect.New(jsonOriginal.Type()).Elem() + err = r.deeplyReplace(jsonCopy, jsonOriginal, replaceMap, useGoTemplate, goTemplateOptions) + if err != nil { + return fmt.Errorf("failed to deeply replace JSON field contents: %w", err) + } + 
jsonCopyInterface := jsonCopy.Interface().(*interface{}) + data, err := json.Marshal(jsonCopyInterface) + if err != nil { + return fmt.Errorf("failed to marshal templated JSON field: %w", err) + } + copy.Field(i).Set(reflect.ValueOf(data)) + } else if err := r.deeplyReplace(copy.Field(i), original.Field(i), replaceMap, useGoTemplate, goTemplateOptions); err != nil { + // Not wrapping the error, since this is a recursive function. Avoids excessively long error messages. + return err + } + } + + // If it is a slice we create a new slice and translate each element + case reflect.Slice: + if copy.CanSet() { + copy.Set(reflect.MakeSlice(original.Type(), original.Len(), original.Cap())) + } else { + copyValueIntoUnexported(copy, reflect.MakeSlice(original.Type(), original.Len(), original.Cap())) + } + + for i := 0; i < original.Len(); i += 1 { + if err := r.deeplyReplace(copy.Index(i), original.Index(i), replaceMap, useGoTemplate, goTemplateOptions); err != nil { + // Not wrapping the error, since this is a recursive function. Avoids excessively long error messages. + return err + } + } + + // If it is a map we create a new map and translate each value + case reflect.Map: + if copy.CanSet() { + copy.Set(reflect.MakeMap(original.Type())) + } else { + copyValueIntoUnexported(copy, reflect.MakeMap(original.Type())) + } + for _, key := range original.MapKeys() { + originalValue := original.MapIndex(key) + if originalValue.Kind() != reflect.String && isNillable(originalValue) && originalValue.IsNil() { + continue + } + // New gives us a pointer, but again we want the value + copyValue := reflect.New(originalValue.Type()).Elem() + + if err := r.deeplyReplace(copyValue, originalValue, replaceMap, useGoTemplate, goTemplateOptions); err != nil { + // Not wrapping the error, since this is a recursive function. Avoids excessively long error messages. + return err + } + + // Keys can be templated as well as values (e.g. to template something into an annotation). 
+ if key.Kind() == reflect.String { + templatedKey, err := r.Replace(key.String(), replaceMap, useGoTemplate, goTemplateOptions) + if err != nil { + // Not wrapping the error, since this is a recursive function. Avoids excessively long error messages. + return err + } + key = reflect.ValueOf(templatedKey) + } + + copy.SetMapIndex(key, copyValue) + } + + // Otherwise we cannot traverse anywhere so this finishes the recursion + // If it is a string translate it (yay finally we're doing what we came for) + case reflect.String: + strToTemplate := original.String() + templated, err := r.Replace(strToTemplate, replaceMap, useGoTemplate, goTemplateOptions) + if err != nil { + // Not wrapping the error, since this is a recursive function. Avoids excessively long error messages. + return err + } + if copy.CanSet() { + copy.SetString(templated) + } else { + copyValueIntoUnexported(copy, reflect.ValueOf(templated)) + } + return nil + + // And everything else will simply be taken from the original + default: + if copy.CanSet() { + copy.Set(original) + } else { + copyUnexported(copy, original) + } + } + return nil +} + +// isNillable returns true if the value is something which may be set to nil. This function is meant to guard against a +// panic from calling IsNil on a non-pointer type. 
+func isNillable(v reflect.Value) bool { + switch v.Kind() { + case reflect.Map, reflect.Pointer, reflect.UnsafePointer, reflect.Interface, reflect.Slice: + return true + } + return false +} + +func (r *Render) RenderTemplateParams(tmpl *argoappsv1.Application, syncPolicy *argoappsv1.ApplicationSetSyncPolicy, params map[string]interface{}, useGoTemplate bool, goTemplateOptions []string) (*argoappsv1.Application, error) { + if tmpl == nil { + return nil, fmt.Errorf("application template is empty") + } + + if len(params) == 0 { + return tmpl, nil + } + + original := reflect.ValueOf(tmpl) + copy := reflect.New(original.Type()).Elem() + + if err := r.deeplyReplace(copy, original, params, useGoTemplate, goTemplateOptions); err != nil { + return nil, err + } + + replacedTmpl := copy.Interface().(*argoappsv1.Application) + + // Add the 'resources-finalizer' finalizer if: + // The template application doesn't have any finalizers, and: + // a) there is no syncPolicy, or + // b) there IS a syncPolicy, but preserveResourcesOnDeletion is set to false + // See TestRenderTemplateParamsFinalizers in util_test.go for test-based definition of behaviour + if (syncPolicy == nil || !syncPolicy.PreserveResourcesOnDeletion) && + ((*replacedTmpl).ObjectMeta.Finalizers == nil || len((*replacedTmpl).ObjectMeta.Finalizers) == 0) { + + (*replacedTmpl).ObjectMeta.Finalizers = []string{"resources-finalizer.argocd.argoproj.io"} + } + + return replacedTmpl, nil +} + +func (r *Render) RenderGeneratorParams(gen *argoappsv1.ApplicationSetGenerator, params map[string]interface{}, useGoTemplate bool, goTemplateOptions []string) (*argoappsv1.ApplicationSetGenerator, error) { + if gen == nil { + return nil, fmt.Errorf("generator is empty") + } + + if len(params) == 0 { + return gen, nil + } + + original := reflect.ValueOf(gen) + copy := reflect.New(original.Type()).Elem() + + if err := r.deeplyReplace(copy, original, params, useGoTemplate, goTemplateOptions); err != nil { + return nil, 
fmt.Errorf("failed to replace parameters in generator: %w", err) + } + + replacedGen := copy.Interface().(*argoappsv1.ApplicationSetGenerator) + + return replacedGen, nil +} + +var isTemplatedRegex = regexp.MustCompile(".*{{.*}}.*") + +// Replace executes basic string substitution of a template with replacement values. +// remaining in the substituted template. +func (r *Render) Replace(tmpl string, replaceMap map[string]interface{}, useGoTemplate bool, goTemplateOptions []string) (string, error) { + if useGoTemplate { + template, err := template.New("").Funcs(sprigFuncMap).Parse(tmpl) + if err != nil { + return "", fmt.Errorf("failed to parse template %s: %w", tmpl, err) + } + for _, option := range goTemplateOptions { + template = template.Option(option) + } + + var replacedTmplBuffer bytes.Buffer + if err = template.Execute(&replacedTmplBuffer, replaceMap); err != nil { + return "", fmt.Errorf("failed to execute go template %s: %w", tmpl, err) + } + + return replacedTmplBuffer.String(), nil + } + + if !isTemplatedRegex.MatchString(tmpl) { + return tmpl, nil + } + + fstTmpl, err := fasttemplate.NewTemplate(tmpl, "{{", "}}") + if err != nil { + return "", fmt.Errorf("invalid template: %w", err) + } + replacedTmpl := fstTmpl.ExecuteFuncString(func(w io.Writer, tag string) (int, error) { + trimmedTag := strings.TrimSpace(tag) + replacement, ok := replaceMap[trimmedTag].(string) + if len(trimmedTag) == 0 || !ok { + return w.Write([]byte(fmt.Sprintf("{{%s}}", tag))) + } + return w.Write([]byte(replacement)) + }) + return replacedTmpl, nil +} + +// Log a warning if there are unrecognized generators +func CheckInvalidGenerators(applicationSetInfo *argoappsv1.ApplicationSet) error { + hasInvalidGenerators, invalidGenerators := invalidGenerators(applicationSetInfo) + var errorMessage error + if len(invalidGenerators) > 0 { + gnames := []string{} + for n := range invalidGenerators { + gnames = append(gnames, n) + } + sort.Strings(gnames) + aname := 
applicationSetInfo.ObjectMeta.Name + msg := "ApplicationSet %s contains unrecognized generators: %s" + errorMessage = fmt.Errorf(msg, aname, strings.Join(gnames, ", ")) + log.Warnf(msg, aname, strings.Join(gnames, ", ")) + } else if hasInvalidGenerators { + name := applicationSetInfo.ObjectMeta.Name + msg := "ApplicationSet %s contains unrecognized generators" + errorMessage = fmt.Errorf(msg, name) + log.Warnf(msg, name) + } + return errorMessage +} + +// Return true if there are unknown generators specified in the application set. If we can discover the names +// of these generators, return the names as the keys in a map +func invalidGenerators(applicationSetInfo *argoappsv1.ApplicationSet) (bool, map[string]bool) { + names := make(map[string]bool) + hasInvalidGenerators := false + for index, generator := range applicationSetInfo.Spec.Generators { + v := reflect.Indirect(reflect.ValueOf(generator)) + found := false + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + if !field.CanInterface() { + continue + } + if !reflect.ValueOf(field.Interface()).IsNil() { + found = true + break + } + } + if !found { + hasInvalidGenerators = true + addInvalidGeneratorNames(names, applicationSetInfo, index) + } + } + return hasInvalidGenerators, names +} + +func addInvalidGeneratorNames(names map[string]bool, applicationSetInfo *argoappsv1.ApplicationSet, index int) { + // The generator names are stored in the "kubectl.kubernetes.io/last-applied-configuration" annotation + config := applicationSetInfo.ObjectMeta.Annotations["kubectl.kubernetes.io/last-applied-configuration"] + var values map[string]interface{} + err := json.Unmarshal([]byte(config), &values) + if err != nil { + log.Warnf("couldn't unmarshal kubectl.kubernetes.io/last-applied-configuration: %+v", config) + return + } + + spec, ok := values["spec"].(map[string]interface{}) + if !ok { + log.Warn("coundn't get spec from kubectl.kubernetes.io/last-applied-configuration annotation") + return + } + + 
generators, ok := spec["generators"].([]interface{}) + if !ok { + log.Warn("coundn't get generators from kubectl.kubernetes.io/last-applied-configuration annotation") + return + } + + if index >= len(generators) { + log.Warnf("index %d out of range %d for generator in kubectl.kubernetes.io/last-applied-configuration", index, len(generators)) + return + } + + generator, ok := generators[index].(map[string]interface{}) + if !ok { + log.Warn("coundn't get generator from kubectl.kubernetes.io/last-applied-configuration annotation") + return + } + + for key := range generator { + names[key] = true + break + } +} + +func NormalizeBitbucketBasePath(basePath string) string { + if strings.HasSuffix(basePath, "/rest/") { + return strings.TrimSuffix(basePath, "/") + } + if !strings.HasSuffix(basePath, "/rest") { + return basePath + "/rest" + } + return basePath +} + +func getTlsConfigWithCACert(scmRootCAPath string) *tls.Config { + + tlsConfig := &tls.Config{} + + if scmRootCAPath != "" { + _, err := os.Stat(scmRootCAPath) + if os.IsNotExist(err) { + log.Errorf("scmRootCAPath '%s' specified does not exist: %s", scmRootCAPath, err) + return tlsConfig + } + rootCA, err := os.ReadFile(scmRootCAPath) + if err != nil { + log.Errorf("error reading certificate from file '%s', proceeding without custom rootCA : %s", scmRootCAPath, err) + return tlsConfig + } + certPool := x509.NewCertPool() + ok := certPool.AppendCertsFromPEM([]byte(rootCA)) + if !ok { + log.Errorf("failed to append certificates from PEM: proceeding without custom rootCA") + } else { + tlsConfig.RootCAs = certPool + } + } + return tlsConfig +} + +func GetTlsConfig(scmRootCAPath string, insecure bool) *tls.Config { + tlsConfig := getTlsConfigWithCACert(scmRootCAPath) + + if insecure { + tlsConfig.InsecureSkipVerify = true + } + return tlsConfig +} diff --git a/applicationset/utils/utils_test.go b/applicationset/utils/utils_test.go new file mode 100644 index 0000000000000..a1c58769160cc --- /dev/null +++ 
b/applicationset/utils/utils_test.go @@ -0,0 +1,1333 @@ +package utils + +import ( + "crypto/x509" + "encoding/json" + "os" + "path" + "testing" + "time" + + "github.com/sirupsen/logrus" + logtest "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + + argoappsv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +func TestRenderTemplateParams(t *testing.T) { + + // Believe it or not, this is actually less complex than the equivalent solution using reflection + fieldMap := map[string]func(app *argoappsv1.Application) *string{} + fieldMap["Path"] = func(app *argoappsv1.Application) *string { return &app.Spec.Source.Path } + fieldMap["RepoURL"] = func(app *argoappsv1.Application) *string { return &app.Spec.Source.RepoURL } + fieldMap["TargetRevision"] = func(app *argoappsv1.Application) *string { return &app.Spec.Source.TargetRevision } + fieldMap["Chart"] = func(app *argoappsv1.Application) *string { return &app.Spec.Source.Chart } + + fieldMap["Server"] = func(app *argoappsv1.Application) *string { return &app.Spec.Destination.Server } + fieldMap["Namespace"] = func(app *argoappsv1.Application) *string { return &app.Spec.Destination.Namespace } + fieldMap["Name"] = func(app *argoappsv1.Application) *string { return &app.Spec.Destination.Name } + + fieldMap["Project"] = func(app *argoappsv1.Application) *string { return &app.Spec.Project } + + emptyApplication := &argoappsv1.Application{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{"annotation-key": "annotation-value", "annotation-key2": "annotation-value2"}, + Labels: map[string]string{"label-key": "label-value", "label-key2": "label-value2"}, + CreationTimestamp: metav1.NewTime(time.Now()), + UID: 
types.UID("d546da12-06b7-4f9a-8ea2-3adb16a20e2b"), + Name: "application-one", + Namespace: "default", + }, + Spec: argoappsv1.ApplicationSpec{ + Source: &argoappsv1.ApplicationSource{ + Path: "", + RepoURL: "", + TargetRevision: "", + Chart: "", + }, + Destination: argoappsv1.ApplicationDestination{ + Server: "", + Namespace: "", + Name: "", + }, + Project: "", + }, + } + + tests := []struct { + name string + fieldVal string + params map[string]interface{} + expectedVal string + }{ + { + name: "simple substitution", + fieldVal: "{{one}}", + expectedVal: "two", + params: map[string]interface{}{ + "one": "two", + }, + }, + { + name: "simple substitution with whitespace", + fieldVal: "{{ one }}", + expectedVal: "two", + params: map[string]interface{}{ + "one": "two", + }, + }, + + { + name: "template characters but not in a template", + fieldVal: "}} {{", + expectedVal: "}} {{", + params: map[string]interface{}{ + "one": "two", + }, + }, + + { + name: "nested template", + fieldVal: "{{ }}", + expectedVal: "{{ }}", + params: map[string]interface{}{ + "one": "{{ }}", + }, + }, + { + name: "field with whitespace", + fieldVal: "{{ }}", + expectedVal: "{{ }}", + params: map[string]interface{}{ + " ": "two", + "": "three", + }, + }, + + { + name: "template contains itself, containing itself", + fieldVal: "{{one}}", + expectedVal: "{{one}}", + params: map[string]interface{}{ + "{{one}}": "{{one}}", + }, + }, + + { + name: "template contains itself, containing something else", + fieldVal: "{{one}}", + expectedVal: "{{one}}", + params: map[string]interface{}{ + "{{one}}": "{{two}}", + }, + }, + + { + name: "templates are case sensitive", + fieldVal: "{{ONE}}", + expectedVal: "{{ONE}}", + params: map[string]interface{}{ + "{{one}}": "two", + }, + }, + { + name: "multiple on a line", + fieldVal: "{{one}}{{one}}", + expectedVal: "twotwo", + params: map[string]interface{}{ + "one": "two", + }, + }, + { + name: "multiple different on a line", + fieldVal: "{{one}}{{three}}", + 
expectedVal: "twofour", + params: map[string]interface{}{ + "one": "two", + "three": "four", + }, + }, + { + name: "multiple different on a line with quote", + fieldVal: "{{one}} {{three}}", + expectedVal: "\"hello\" world four", + params: map[string]interface{}{ + "one": "\"hello\" world", + "three": "four", + }, + }, + } + + for _, test := range tests { + + t.Run(test.name, func(t *testing.T) { + + for fieldName, getPtrFunc := range fieldMap { + + // Clone the template application + application := emptyApplication.DeepCopy() + + // Set the value of the target field, to the test value + *getPtrFunc(application) = test.fieldVal + + // Render the cloned application, into a new application + render := Render{} + newApplication, err := render.RenderTemplateParams(application, nil, test.params, false, nil) + + // Retrieve the value of the target field from the newApplication, then verify that + // the target field has been templated into the expected value + actualValue := *getPtrFunc(newApplication) + assert.Equal(t, test.expectedVal, actualValue, "Field '%s' had an unexpected value. 
expected: '%s' value: '%s'", fieldName, test.expectedVal, actualValue) + assert.Equal(t, newApplication.ObjectMeta.Annotations["annotation-key"], "annotation-value") + assert.Equal(t, newApplication.ObjectMeta.Annotations["annotation-key2"], "annotation-value2") + assert.Equal(t, newApplication.ObjectMeta.Labels["label-key"], "label-value") + assert.Equal(t, newApplication.ObjectMeta.Labels["label-key2"], "label-value2") + assert.Equal(t, newApplication.ObjectMeta.Name, "application-one") + assert.Equal(t, newApplication.ObjectMeta.Namespace, "default") + assert.Equal(t, newApplication.ObjectMeta.UID, types.UID("d546da12-06b7-4f9a-8ea2-3adb16a20e2b")) + assert.Equal(t, newApplication.ObjectMeta.CreationTimestamp, application.ObjectMeta.CreationTimestamp) + assert.NoError(t, err) + } + }) + } + +} + +func TestRenderHelmValuesObjectJson(t *testing.T) { + + params := map[string]interface{}{ + "test": "Hello world", + } + + application := &argoappsv1.Application{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{"annotation-key": "annotation-value", "annotation-key2": "annotation-value2"}, + Labels: map[string]string{"label-key": "label-value", "label-key2": "label-value2"}, + CreationTimestamp: metav1.NewTime(time.Now()), + UID: types.UID("d546da12-06b7-4f9a-8ea2-3adb16a20e2b"), + Name: "application-one", + Namespace: "default", + }, + Spec: argoappsv1.ApplicationSpec{ + Source: &argoappsv1.ApplicationSource{ + Path: "", + RepoURL: "", + TargetRevision: "", + Chart: "", + Helm: &argoappsv1.ApplicationSourceHelm{ + ValuesObject: &runtime.RawExtension{ + Raw: []byte(`{ + "some": { + "string": "{{.test}}" + } + }`), + }, + }, + }, + Destination: argoappsv1.ApplicationDestination{ + Server: "", + Namespace: "", + Name: "", + }, + Project: "", + }, + } + + // Render the cloned application, into a new application + render := Render{} + newApplication, err := render.RenderTemplateParams(application, nil, params, true, []string{}) + + assert.NoError(t, err) + 
assert.NotNil(t, newApplication) + + var unmarshaled interface{} + err = json.Unmarshal(newApplication.Spec.Source.Helm.ValuesObject.Raw, &unmarshaled) + + assert.NoError(t, err) + assert.Equal(t, unmarshaled.(map[string]interface{})["some"].(map[string]interface{})["string"], "Hello world") + +} + +func TestRenderHelmValuesObjectYaml(t *testing.T) { + + params := map[string]interface{}{ + "test": "Hello world", + } + + application := &argoappsv1.Application{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{"annotation-key": "annotation-value", "annotation-key2": "annotation-value2"}, + Labels: map[string]string{"label-key": "label-value", "label-key2": "label-value2"}, + CreationTimestamp: metav1.NewTime(time.Now()), + UID: types.UID("d546da12-06b7-4f9a-8ea2-3adb16a20e2b"), + Name: "application-one", + Namespace: "default", + }, + Spec: argoappsv1.ApplicationSpec{ + Source: &argoappsv1.ApplicationSource{ + Path: "", + RepoURL: "", + TargetRevision: "", + Chart: "", + Helm: &argoappsv1.ApplicationSourceHelm{ + ValuesObject: &runtime.RawExtension{ + Raw: []byte(`some: + string: "{{.test}}"`), + }, + }, + }, + Destination: argoappsv1.ApplicationDestination{ + Server: "", + Namespace: "", + Name: "", + }, + Project: "", + }, + } + + // Render the cloned application, into a new application + render := Render{} + newApplication, err := render.RenderTemplateParams(application, nil, params, true, []string{}) + + assert.NoError(t, err) + assert.NotNil(t, newApplication) + + var unmarshaled interface{} + err = json.Unmarshal(newApplication.Spec.Source.Helm.ValuesObject.Raw, &unmarshaled) + + assert.NoError(t, err) + assert.Equal(t, unmarshaled.(map[string]interface{})["some"].(map[string]interface{})["string"], "Hello world") + +} + +func TestRenderTemplateParamsGoTemplate(t *testing.T) { + + // Believe it or not, this is actually less complex than the equivalent solution using reflection + fieldMap := map[string]func(app *argoappsv1.Application) *string{} 
+ fieldMap["Path"] = func(app *argoappsv1.Application) *string { return &app.Spec.Source.Path } + fieldMap["RepoURL"] = func(app *argoappsv1.Application) *string { return &app.Spec.Source.RepoURL } + fieldMap["TargetRevision"] = func(app *argoappsv1.Application) *string { return &app.Spec.Source.TargetRevision } + fieldMap["Chart"] = func(app *argoappsv1.Application) *string { return &app.Spec.Source.Chart } + + fieldMap["Server"] = func(app *argoappsv1.Application) *string { return &app.Spec.Destination.Server } + fieldMap["Namespace"] = func(app *argoappsv1.Application) *string { return &app.Spec.Destination.Namespace } + fieldMap["Name"] = func(app *argoappsv1.Application) *string { return &app.Spec.Destination.Name } + + fieldMap["Project"] = func(app *argoappsv1.Application) *string { return &app.Spec.Project } + + emptyApplication := &argoappsv1.Application{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{"annotation-key": "annotation-value", "annotation-key2": "annotation-value2"}, + Labels: map[string]string{"label-key": "label-value", "label-key2": "label-value2"}, + CreationTimestamp: metav1.NewTime(time.Now()), + UID: types.UID("d546da12-06b7-4f9a-8ea2-3adb16a20e2b"), + Name: "application-one", + Namespace: "default", + }, + Spec: argoappsv1.ApplicationSpec{ + Source: &argoappsv1.ApplicationSource{ + Path: "", + RepoURL: "", + TargetRevision: "", + Chart: "", + }, + Destination: argoappsv1.ApplicationDestination{ + Server: "", + Namespace: "", + Name: "", + }, + Project: "", + }, + } + + tests := []struct { + name string + fieldVal string + params map[string]interface{} + expectedVal string + errorMessage string + templateOptions []string + }{ + { + name: "simple substitution", + fieldVal: "{{ .one }}", + expectedVal: "two", + params: map[string]interface{}{ + "one": "two", + }, + }, + { + name: "simple substitution with whitespace", + fieldVal: "{{ .one }}", + expectedVal: "two", + params: map[string]interface{}{ + "one": "two", + }, + 
}, + { + name: "template contains itself, containing itself", + fieldVal: "{{ .one }}", + expectedVal: "{{one}}", + params: map[string]interface{}{ + "one": "{{one}}", + }, + }, + + { + name: "template contains itself, containing something else", + fieldVal: "{{ .one }}", + expectedVal: "{{two}}", + params: map[string]interface{}{ + "one": "{{two}}", + }, + }, + { + name: "multiple on a line", + fieldVal: "{{.one}}{{.one}}", + expectedVal: "twotwo", + params: map[string]interface{}{ + "one": "two", + }, + }, + { + name: "multiple different on a line", + fieldVal: "{{.one}}{{.three}}", + expectedVal: "twofour", + params: map[string]interface{}{ + "one": "two", + "three": "four", + }, + }, + { + name: "multiple different on a line with quote", + fieldVal: "{{.one}} {{.three}}", + expectedVal: "\"hello\" world four", + params: map[string]interface{}{ + "one": "\"hello\" world", + "three": "four", + }, + }, + { + name: "depth", + fieldVal: "{{ .image.version }}", + expectedVal: "latest", + params: map[string]interface{}{ + "replicas": 3, + "image": map[string]interface{}{ + "name": "busybox", + "version": "latest", + }, + }, + }, + { + name: "multiple depth", + fieldVal: "{{ .image.name }}:{{ .image.version }}", + expectedVal: "busybox:latest", + params: map[string]interface{}{ + "replicas": 3, + "image": map[string]interface{}{ + "name": "busybox", + "version": "latest", + }, + }, + }, + { + name: "if ok", + fieldVal: "{{ if .hpa.enabled }}{{ .hpa.maxReplicas }}{{ else }}{{ .replicas }}{{ end }}", + expectedVal: "5", + params: map[string]interface{}{ + "replicas": 3, + "hpa": map[string]interface{}{ + "enabled": true, + "minReplicas": 1, + "maxReplicas": 5, + }, + }, + }, + { + name: "if not ok", + fieldVal: "{{ if .hpa.enabled }}{{ .hpa.maxReplicas }}{{ else }}{{ .replicas }}{{ end }}", + expectedVal: "3", + params: map[string]interface{}{ + "replicas": 3, + "hpa": map[string]interface{}{ + "enabled": false, + "minReplicas": 1, + "maxReplicas": 5, + }, + }, + }, + { 
+ name: "loop", + fieldVal: "{{ range .volumes }}[{{ .name }}]{{ end }}", + expectedVal: "[volume-one][volume-two]", + params: map[string]interface{}{ + "replicas": 3, + "volumes": []map[string]interface{}{ + { + "name": "volume-one", + "emptyDir": map[string]interface{}{}, + }, + { + "name": "volume-two", + "emptyDir": map[string]interface{}{}, + }, + }, + }, + }, + { + name: "Index", + fieldVal: `{{ index .admin "admin-ca" }}, {{ index .admin "admin-jks" }}`, + expectedVal: "value admin ca, value admin jks", + params: map[string]interface{}{ + "admin": map[string]interface{}{ + "admin-ca": "value admin ca", + "admin-jks": "value admin jks", + }, + }, + }, + { + name: "Index", + fieldVal: `{{ index .admin "admin-ca" }}, \\ "Hello world", {{ index .admin "admin-jks" }}`, + expectedVal: `value "admin" ca with \, \\ "Hello world", value admin jks`, + params: map[string]interface{}{ + "admin": map[string]interface{}{ + "admin-ca": `value "admin" ca with \`, + "admin-jks": "value admin jks", + }, + }, + }, + { + name: "quote", + fieldVal: `{{.quote}}`, + expectedVal: `"`, + params: map[string]interface{}{ + "quote": `"`, + }, + }, + { + name: "Test No Data", + fieldVal: `{{.data}}`, + expectedVal: "{{.data}}", + params: map[string]interface{}{}, + }, + { + name: "Test Parse Error", + fieldVal: `{{functiondoesnotexist}}`, + expectedVal: "", + params: map[string]interface{}{ + "data": `a data string`, + }, + errorMessage: `failed to parse template {{functiondoesnotexist}}: template: :1: function "functiondoesnotexist" not defined`, + }, + { + name: "Test template error", + fieldVal: `{{.data.test}}`, + expectedVal: "", + params: map[string]interface{}{ + "data": `a data string`, + }, + errorMessage: `failed to execute go template {{.data.test}}: template: :1:7: executing "" at <.data.test>: can't evaluate field test in type interface {}`, + }, + { + name: "lookup missing value with missingkey=default", + fieldVal: `--> {{.doesnotexist}} <--`, + expectedVal: `--> <--`, + 
params: map[string]interface{}{ + // if no params are passed then for some reason templating is skipped + "unused": "this is not used", + }, + }, + { + name: "lookup missing value with missingkey=error", + fieldVal: `--> {{.doesnotexist}} <--`, + expectedVal: "", + params: map[string]interface{}{ + // if no params are passed then for some reason templating is skipped + "unused": "this is not used", + }, + templateOptions: []string{"missingkey=error"}, + errorMessage: `failed to execute go template --> {{.doesnotexist}} <--: template: :1:6: executing "" at <.doesnotexist>: map has no entry for key "doesnotexist"`, + }, + { + name: "toYaml", + fieldVal: `{{ toYaml . | indent 2 }}`, + expectedVal: " foo:\n bar:\n bool: true\n number: 2\n str: Hello world", + params: map[string]interface{}{ + "foo": map[string]interface{}{ + "bar": map[string]interface{}{ + "bool": true, + "number": 2, + "str": "Hello world", + }, + }, + }, + }, + { + name: "toYaml Error", + fieldVal: `{{ toYaml . | indent 2 }}`, + expectedVal: " foo:\n bar:\n bool: true\n number: 2\n str: Hello world", + errorMessage: "failed to execute go template {{ toYaml . 
| indent 2 }}: template: :1:3: executing \"\" at : error calling toYaml: error marshaling into JSON: json: unsupported type: func(*string)", + params: map[string]interface{}{ + "foo": func(test *string) { + }, + }, + }, + { + name: "fromYaml", + fieldVal: `{{ get (fromYaml .value) "hello" }}`, + expectedVal: "world", + params: map[string]interface{}{ + "value": "hello: world", + }, + }, + { + name: "fromYaml error", + fieldVal: `{{ get (fromYaml .value) "hello" }}`, + expectedVal: "world", + errorMessage: "failed to execute go template {{ get (fromYaml .value) \"hello\" }}: template: :1:8: executing \"\" at : error calling fromYaml: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type map[string]interface {}", + params: map[string]interface{}{ + "value": "non\n compliant\n yaml", + }, + }, + { + name: "fromYamlArray", + fieldVal: `{{ fromYamlArray .value | last }}`, + expectedVal: "bonjour tout le monde", + params: map[string]interface{}{ + "value": "- hello world\n- bonjour tout le monde", + }, + }, + { + name: "fromYamlArray error", + fieldVal: `{{ fromYamlArray .value | last }}`, + expectedVal: "bonjour tout le monde", + errorMessage: "failed to execute go template {{ fromYamlArray .value | last }}: template: :1:3: executing \"\" at : error calling fromYamlArray: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type []interface {}", + params: map[string]interface{}{ + "value": "non\n compliant\n yaml", + }, + }, + } + + for _, test := range tests { + + t.Run(test.name, func(t *testing.T) { + + for fieldName, getPtrFunc := range fieldMap { + + // Clone the template application + application := emptyApplication.DeepCopy() + + // Set the value of the target field, to the test value + *getPtrFunc(application) = test.fieldVal + + // Render the cloned application, into a new application + render := Render{} + newApplication, err := render.RenderTemplateParams(application, nil, 
test.params, true, test.templateOptions) + + // Retrieve the value of the target field from the newApplication, then verify that + // the target field has been templated into the expected value + if test.errorMessage != "" { + assert.Error(t, err) + assert.Equal(t, test.errorMessage, err.Error()) + } else { + assert.NoError(t, err) + actualValue := *getPtrFunc(newApplication) + assert.Equal(t, test.expectedVal, actualValue, "Field '%s' had an unexpected value. expected: '%s' value: '%s'", fieldName, test.expectedVal, actualValue) + assert.Equal(t, newApplication.ObjectMeta.Annotations["annotation-key"], "annotation-value") + assert.Equal(t, newApplication.ObjectMeta.Annotations["annotation-key2"], "annotation-value2") + assert.Equal(t, newApplication.ObjectMeta.Labels["label-key"], "label-value") + assert.Equal(t, newApplication.ObjectMeta.Labels["label-key2"], "label-value2") + assert.Equal(t, newApplication.ObjectMeta.Name, "application-one") + assert.Equal(t, newApplication.ObjectMeta.Namespace, "default") + assert.Equal(t, newApplication.ObjectMeta.UID, types.UID("d546da12-06b7-4f9a-8ea2-3adb16a20e2b")) + assert.Equal(t, newApplication.ObjectMeta.CreationTimestamp, application.ObjectMeta.CreationTimestamp) + } + } + }) + } +} + +func TestRenderGeneratorParams_does_not_panic(t *testing.T) { + // This test verifies that the RenderGeneratorParams function does not panic when the value in a map is a non- + // nillable type. This is a regression test. 
+ render := Render{} + params := map[string]interface{}{ + "branch": "master", + } + generator := &argoappsv1.ApplicationSetGenerator{ + Plugin: &argoappsv1.PluginGenerator{ + ConfigMapRef: argoappsv1.PluginConfigMapRef{ + Name: "cm-plugin", + }, + Input: argoappsv1.PluginInput{ + Parameters: map[string]apiextensionsv1.JSON{ + "branch": { + Raw: []byte(`"{{.branch}}"`), + }, + "repo": { + Raw: []byte(`"argo-test"`), + }, + }, + }, + }, + } + _, err := render.RenderGeneratorParams(generator, params, true, []string{}) + assert.NoError(t, err) +} + +func TestRenderTemplateKeys(t *testing.T) { + t.Run("fasttemplate", func(t *testing.T) { + application := &argoappsv1.Application{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "annotation-{{key}}": "annotation-{{value}}", + }, + }, + } + + params := map[string]interface{}{ + "key": "some-key", + "value": "some-value", + } + + render := Render{} + newApplication, err := render.RenderTemplateParams(application, nil, params, false, nil) + require.NoError(t, err) + require.Contains(t, newApplication.ObjectMeta.Annotations, "annotation-some-key") + assert.Equal(t, newApplication.ObjectMeta.Annotations["annotation-some-key"], "annotation-some-value") + }) + t.Run("gotemplate", func(t *testing.T) { + application := &argoappsv1.Application{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "annotation-{{ .key }}": "annotation-{{ .value }}", + }, + }, + } + + params := map[string]interface{}{ + "key": "some-key", + "value": "some-value", + } + + render := Render{} + newApplication, err := render.RenderTemplateParams(application, nil, params, true, nil) + require.NoError(t, err) + require.Contains(t, newApplication.ObjectMeta.Annotations, "annotation-some-key") + assert.Equal(t, newApplication.ObjectMeta.Annotations["annotation-some-key"], "annotation-some-value") + }) +} + +func Test_Render_Replace_no_panic_on_missing_closing_brace(t *testing.T) { + r := &Render{} + assert.NotPanics(t, 
func() { + _, err := r.Replace("{{properly.closed}} {{improperly.closed}", nil, false, []string{}) + assert.Error(t, err) + }) +} + +func TestRenderTemplateParamsFinalizers(t *testing.T) { + + emptyApplication := &argoappsv1.Application{ + Spec: argoappsv1.ApplicationSpec{ + Source: &argoappsv1.ApplicationSource{ + Path: "", + RepoURL: "", + TargetRevision: "", + Chart: "", + }, + Destination: argoappsv1.ApplicationDestination{ + Server: "", + Namespace: "", + Name: "", + }, + Project: "", + }, + } + + for _, c := range []struct { + testName string + syncPolicy *argoappsv1.ApplicationSetSyncPolicy + existingFinalizers []string + expectedFinalizers []string + }{ + { + testName: "existing finalizer should be preserved", + existingFinalizers: []string{"existing-finalizer"}, + syncPolicy: nil, + expectedFinalizers: []string{"existing-finalizer"}, + }, + { + testName: "background finalizer should be preserved", + existingFinalizers: []string{"resources-finalizer.argocd.argoproj.io/background"}, + syncPolicy: nil, + expectedFinalizers: []string{"resources-finalizer.argocd.argoproj.io/background"}, + }, + + { + testName: "empty finalizer and empty sync should use standard finalizer", + existingFinalizers: nil, + syncPolicy: nil, + expectedFinalizers: []string{"resources-finalizer.argocd.argoproj.io"}, + }, + + { + testName: "standard finalizer should be preserved", + existingFinalizers: []string{"resources-finalizer.argocd.argoproj.io"}, + syncPolicy: nil, + expectedFinalizers: []string{"resources-finalizer.argocd.argoproj.io"}, + }, + { + testName: "empty array finalizers should use standard finalizer", + existingFinalizers: []string{}, + syncPolicy: nil, + expectedFinalizers: []string{"resources-finalizer.argocd.argoproj.io"}, + }, + { + testName: "non-nil sync policy should use standard finalizer", + existingFinalizers: nil, + syncPolicy: &argoappsv1.ApplicationSetSyncPolicy{}, + expectedFinalizers: []string{"resources-finalizer.argocd.argoproj.io"}, + }, + { + 
testName: "preserveResourcesOnDeletion should not have a finalizer", + existingFinalizers: nil, + syncPolicy: &argoappsv1.ApplicationSetSyncPolicy{ + PreserveResourcesOnDeletion: true, + }, + expectedFinalizers: nil, + }, + { + testName: "user-specified finalizer should overwrite preserveResourcesOnDeletion", + existingFinalizers: []string{"resources-finalizer.argocd.argoproj.io/background"}, + syncPolicy: &argoappsv1.ApplicationSetSyncPolicy{ + PreserveResourcesOnDeletion: true, + }, + expectedFinalizers: []string{"resources-finalizer.argocd.argoproj.io/background"}, + }, + } { + + t.Run(c.testName, func(t *testing.T) { + + // Clone the template application + application := emptyApplication.DeepCopy() + application.Finalizers = c.existingFinalizers + + params := map[string]interface{}{ + "one": "two", + } + + // Render the cloned application, into a new application + render := Render{} + + res, err := render.RenderTemplateParams(application, c.syncPolicy, params, true, nil) + assert.Nil(t, err) + + assert.ElementsMatch(t, res.Finalizers, c.expectedFinalizers) + + }) + + } + +} + +func TestCheckInvalidGenerators(t *testing.T) { + + scheme := runtime.NewScheme() + err := argoappsv1.AddToScheme(scheme) + assert.Nil(t, err) + err = argoappsv1.AddToScheme(scheme) + assert.Nil(t, err) + + for _, c := range []struct { + testName string + appSet argoappsv1.ApplicationSet + expectedMsg string + }{ + { + testName: "invalid generator, without annotation", + appSet: argoappsv1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app-set", + Namespace: "namespace", + }, + Spec: argoappsv1.ApplicationSetSpec{ + Generators: []argoappsv1.ApplicationSetGenerator{ + { + List: &argoappsv1.ListGenerator{}, + Clusters: nil, + Git: nil, + }, + { + List: nil, + Clusters: nil, + Git: nil, + }, + { + List: nil, + Clusters: nil, + Git: &argoappsv1.GitGenerator{}, + }, + }, + }, + }, + expectedMsg: "ApplicationSet test-app-set contains unrecognized generators", + }, + { + 
testName: "invalid generator, with annotation", + appSet: argoappsv1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app-set", + Namespace: "namespace", + Annotations: map[string]string{ + "kubectl.kubernetes.io/last-applied-configuration": `{ + "spec":{ + "generators":[ + {"list":{}}, + {"bbb":{}}, + {"git":{}}, + {"aaa":{}} + ] + } + }`, + }, + }, + Spec: argoappsv1.ApplicationSetSpec{ + Generators: []argoappsv1.ApplicationSetGenerator{ + { + List: &argoappsv1.ListGenerator{}, + Clusters: nil, + Git: nil, + }, + { + List: nil, + Clusters: nil, + Git: nil, + }, + { + List: nil, + Clusters: nil, + Git: &argoappsv1.GitGenerator{}, + }, + { + List: nil, + Clusters: nil, + Git: nil, + }, + }, + }, + }, + expectedMsg: "ApplicationSet test-app-set contains unrecognized generators: aaa, bbb", + }, + } { + oldhooks := logrus.StandardLogger().ReplaceHooks(logrus.LevelHooks{}) + defer logrus.StandardLogger().ReplaceHooks(oldhooks) + hook := logtest.NewGlobal() + + _ = CheckInvalidGenerators(&c.appSet) + assert.True(t, len(hook.Entries) >= 1, c.testName) + assert.NotNil(t, hook.LastEntry(), c.testName) + if hook.LastEntry() != nil { + assert.Equal(t, logrus.WarnLevel, hook.LastEntry().Level, c.testName) + assert.Equal(t, c.expectedMsg, hook.LastEntry().Message, c.testName) + } + hook.Reset() + } +} + +func TestInvalidGenerators(t *testing.T) { + + scheme := runtime.NewScheme() + err := argoappsv1.AddToScheme(scheme) + assert.Nil(t, err) + err = argoappsv1.AddToScheme(scheme) + assert.Nil(t, err) + + for _, c := range []struct { + testName string + appSet argoappsv1.ApplicationSet + expectedInvalid bool + expectedNames map[string]bool + }{ + { + testName: "valid generators, with annotation", + appSet: argoappsv1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + Annotations: map[string]string{ + "kubectl.kubernetes.io/last-applied-configuration": `{ + "spec":{ + "generators":[ + {"list":{}}, + {"cluster":{}}, + 
{"git":{}} + ] + } + }`, + }, + }, + Spec: argoappsv1.ApplicationSetSpec{ + Generators: []argoappsv1.ApplicationSetGenerator{ + { + List: &argoappsv1.ListGenerator{}, + Clusters: nil, + Git: nil, + }, + { + List: nil, + Clusters: &argoappsv1.ClusterGenerator{}, + Git: nil, + }, + { + List: nil, + Clusters: nil, + Git: &argoappsv1.GitGenerator{}, + }, + }, + }, + }, + expectedInvalid: false, + expectedNames: map[string]bool{}, + }, + { + testName: "invalid generators, no annotation", + appSet: argoappsv1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + Spec: argoappsv1.ApplicationSetSpec{ + Generators: []argoappsv1.ApplicationSetGenerator{ + { + List: nil, + Clusters: nil, + Git: nil, + }, + { + List: nil, + Clusters: nil, + Git: nil, + }, + }, + }, + }, + expectedInvalid: true, + expectedNames: map[string]bool{}, + }, + { + testName: "valid and invalid generators, no annotation", + appSet: argoappsv1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + Spec: argoappsv1.ApplicationSetSpec{ + Generators: []argoappsv1.ApplicationSetGenerator{ + { + List: nil, + Clusters: &argoappsv1.ClusterGenerator{}, + Git: nil, + }, + { + List: nil, + Clusters: nil, + Git: nil, + }, + { + List: nil, + Clusters: nil, + Git: &argoappsv1.GitGenerator{}, + }, + }, + }, + }, + expectedInvalid: true, + expectedNames: map[string]bool{}, + }, + { + testName: "valid and invalid generators, with annotation", + appSet: argoappsv1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + Annotations: map[string]string{ + "kubectl.kubernetes.io/last-applied-configuration": `{ + "spec":{ + "generators":[ + {"cluster":{}}, + {"bbb":{}}, + {"git":{}}, + {"aaa":{}} + ] + } + }`, + }, + }, + Spec: argoappsv1.ApplicationSetSpec{ + Generators: []argoappsv1.ApplicationSetGenerator{ + { + List: nil, + Clusters: &argoappsv1.ClusterGenerator{}, + Git: nil, + }, + { + List: nil, 
+ Clusters: nil, + Git: nil, + }, + { + List: nil, + Clusters: nil, + Git: &argoappsv1.GitGenerator{}, + }, + { + List: nil, + Clusters: nil, + Git: nil, + }, + }, + }, + }, + expectedInvalid: true, + expectedNames: map[string]bool{ + "aaa": true, + "bbb": true, + }, + }, + { + testName: "invalid generator, annotation with missing spec", + appSet: argoappsv1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + Annotations: map[string]string{ + "kubectl.kubernetes.io/last-applied-configuration": `{ + }`, + }, + }, + Spec: argoappsv1.ApplicationSetSpec{ + Generators: []argoappsv1.ApplicationSetGenerator{ + { + List: nil, + Clusters: nil, + Git: nil, + }, + }, + }, + }, + expectedInvalid: true, + expectedNames: map[string]bool{}, + }, + { + testName: "invalid generator, annotation with missing generators array", + appSet: argoappsv1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + Annotations: map[string]string{ + "kubectl.kubernetes.io/last-applied-configuration": `{ + "spec":{ + } + }`, + }, + }, + Spec: argoappsv1.ApplicationSetSpec{ + Generators: []argoappsv1.ApplicationSetGenerator{ + { + List: nil, + Clusters: nil, + Git: nil, + }, + }, + }, + }, + expectedInvalid: true, + expectedNames: map[string]bool{}, + }, + { + testName: "invalid generator, annotation with empty generators array", + appSet: argoappsv1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + Annotations: map[string]string{ + "kubectl.kubernetes.io/last-applied-configuration": `{ + "spec":{ + "generators":[ + ] + } + }`, + }, + }, + Spec: argoappsv1.ApplicationSetSpec{ + Generators: []argoappsv1.ApplicationSetGenerator{ + { + List: nil, + Clusters: nil, + Git: nil, + }, + }, + }, + }, + expectedInvalid: true, + expectedNames: map[string]bool{}, + }, + { + testName: "invalid generator, annotation with empty generator", + appSet: argoappsv1.ApplicationSet{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + Annotations: map[string]string{ + "kubectl.kubernetes.io/last-applied-configuration": `{ + "spec":{ + "generators":[ + {} + ] + } + }`, + }, + }, + Spec: argoappsv1.ApplicationSetSpec{ + Generators: []argoappsv1.ApplicationSetGenerator{ + { + List: nil, + Clusters: nil, + Git: nil, + }, + }, + }, + }, + expectedInvalid: true, + expectedNames: map[string]bool{}, + }, + } { + hasInvalid, names := invalidGenerators(&c.appSet) + assert.Equal(t, c.expectedInvalid, hasInvalid, c.testName) + assert.Equal(t, c.expectedNames, names, c.testName) + } +} + +func TestNormalizeBitbucketBasePath(t *testing.T) { + for _, c := range []struct { + testName string + basePath string + expectedBasePath string + }{ + { + testName: "default api url", + basePath: "https://company.bitbucket.com", + expectedBasePath: "https://company.bitbucket.com/rest", + }, + { + testName: "with /rest suffix", + basePath: "https://company.bitbucket.com/rest", + expectedBasePath: "https://company.bitbucket.com/rest", + }, + { + testName: "with /rest/ suffix", + basePath: "https://company.bitbucket.com/rest/", + expectedBasePath: "https://company.bitbucket.com/rest", + }, + } { + result := NormalizeBitbucketBasePath(c.basePath) + assert.Equal(t, c.expectedBasePath, result, c.testName) + } +} + +func TestGetTLSConfig(t *testing.T) { + // certParsed, err := tls.X509KeyPair(test.Cert, test.PrivateKey) + // require.NoError(t, err) + + temppath := t.TempDir() + cert := ` +-----BEGIN CERTIFICATE----- +MIIFvTCCA6WgAwIBAgIUGrTmW3qc39zqnE08e3qNDhUkeWswDQYJKoZIhvcNAQEL +BQAwbjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAklMMRAwDgYDVQQHDAdDaGljYWdv +MRQwEgYDVQQKDAtDYXBvbmUsIEluYzEQMA4GA1UECwwHU3BlY09wczEYMBYGA1UE +AwwPZm9vLmV4YW1wbGUuY29tMB4XDTE5MDcwODEzNTUwNVoXDTIwMDcwNzEzNTUw +NVowbjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAklMMRAwDgYDVQQHDAdDaGljYWdv +MRQwEgYDVQQKDAtDYXBvbmUsIEluYzEQMA4GA1UECwwHU3BlY09wczEYMBYGA1UE 
+AwwPZm9vLmV4YW1wbGUuY29tMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC +AgEA3csSO13w7qQXKeSLNcpeuAe6wAjXYbRkRl6ariqzTEDcFTKmy2QiXJTKoEGn +bvwxq0T91var7rxY88SGL/qi8Zmo0tVSR0XvKSKcghFIkQOTyDmVgMPZGCvixt4q +gQ7hUVSk4KkFmtcqBVuvnzI1d/DKfZAGKdmGcfRpuAsnVhac3swP0w4Tl1BFrK9U +vuIkz4KwXG77s5oB8rMUnyuLasLsGNpvpvXhkcQRhp6vpcCO2bS7kOTTelAPIucw +P37qkOEdZdiWCLrr57dmhg6tmcVlmBMg6JtmfLxn2HQd9ZrCKlkWxMk5NYs6CAW5 +kgbDZUWQTAsnHeoJKbcgtPkIbxDRxNpPukFMtbA4VEWv1EkODXy9FyEKDOI/PV6K +/80oLkgCIhCkP2mvwSFheU0RHTuZ0o0vVolP5TEOq5iufnDN4wrxqb12o//XLRc0 +RiLqGVVxhFdyKCjVxcLfII9AAp5Tse4PMh6bf6jDfB3OMvGkhMbJWhKXdR2NUTl0 +esKawMPRXIn5g3oBdNm8kyRsTTnvB567pU8uNSmA8j3jxfGCPynI8JdiwKQuW/+P +WgLIflgxqAfG85dVVOsFmF9o5o24dDslvv9yHnHH102c6ijPCg1EobqlyFzqqxOD +Wf2OPjIkzoTH+O27VRugnY/maIU1nshNO7ViRX5zIxEUtNMCAwEAAaNTMFEwHQYD +VR0OBBYEFNY4gDLgPBidogkmpO8nq5yAq5g+MB8GA1UdIwQYMBaAFNY4gDLgPBid +ogkmpO8nq5yAq5g+MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIB +AJ0WGioNtGNg3m6ywpmxNThorQD5ZvDMlmZlDVk78E2wfNyMhwbVhKhlAnONv0wv +kmsGjibY75nRZ+EK9PxSJ644841fryQXQ+bli5fhr7DW3uTKwaRsnzETJXRJuljq +6+c6Zyg1/mqwnyx7YvPgVh3w496DYx/jm6Fm1IEq3BzOmn6H/gGPq3gbURzEqI3h +P+kC2vJa8RZWrpa05Xk/Q1QUkErDX9vJghb9z3+GgirISZQzqWRghII/znv3NOE6 +zoIgaaWNFn8KPeBVpUoboH+IhpgibsnbTbI0G7AMtFq6qm3kn/4DZ2N2tuh1G2tT +zR2Fh7hJbU7CrqxANrgnIoHG/nLSvzE24ckLb0Vj69uGQlwnZkn9fz6F7KytU+Az +NoB2rjufaB0GQi1azdboMvdGSOxhSCAR8otWT5yDrywCqVnEvjw0oxKmuRduNe2/ +6AcG6TtK2/K+LHuhymiAwZM2qE6VD2odvb+tCzDkZOIeoIz/JcVlNpXE9FuVl250 +9NWvugeghq7tUv81iJ8ninBefJ4lUfxAehTPQqX+zXcfxgjvMRCi/ig73nLyhmjx +r2AaraPFgrprnxUibP4L7jxdr+iiw5bWN9/B81PodrS7n5TNtnfnpZD6X6rThqOP +xO7Tr5lAo74vNUkF2EHNaI28/RGnJPm2TIxZqy4rNH6L +-----END CERTIFICATE----- +` + + rootCAPath := path.Join(temppath, "foo.example.com") + err := os.WriteFile(rootCAPath, []byte(cert), 0666) + if err != nil { + panic(err) + } + + certPool := x509.NewCertPool() + ok := certPool.AppendCertsFromPEM([]byte(cert)) + assert.True(t, ok) + + testCases := []struct { + name string + scmRootCAPath string + insecure bool + 
validateCertInTlsConfig bool + }{ + { + name: "Insecure mode configured, SCM Root CA Path not set", + scmRootCAPath: "", + insecure: true, + validateCertInTlsConfig: false, + }, + { + name: "SCM Root CA Path set, Insecure mode set to false", + scmRootCAPath: rootCAPath, + insecure: false, + validateCertInTlsConfig: true, + }, + { + name: "SCM Root CA Path set, Insecure mode set to true", + scmRootCAPath: rootCAPath, + insecure: true, + validateCertInTlsConfig: true, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + tlsConfig := GetTlsConfig(testCase.scmRootCAPath, testCase.insecure) + assert.Equal(t, testCase.insecure, tlsConfig.InsecureSkipVerify) + if testCase.validateCertInTlsConfig { + assert.NotNil(t, tlsConfig) + assert.True(t, tlsConfig.RootCAs.Equal(certPool)) + } + }) + } +} diff --git a/applicationset/webhook/testdata/azuredevops-pull-request.json b/applicationset/webhook/testdata/azuredevops-pull-request.json new file mode 100644 index 0000000000000..80c5e7cb90822 --- /dev/null +++ b/applicationset/webhook/testdata/azuredevops-pull-request.json @@ -0,0 +1,85 @@ +{ + "id": "2ab4e3d3-b7a6-425e-92b1-5a9982c1269e", + "eventType": "git.pullrequest.created", + "publisherId": "tfs", + "scope": "all", + "message": { + "text": "Jamal Hartnett created a new pull request", + "html": "Jamal Hartnett created a new pull request", + "markdown": "Jamal Hartnett created a new pull request" + }, + "detailedMessage": { + "text": "Jamal Hartnett created a new pull request\r\n\r\n- Merge status: Succeeded\r\n- Merge commit: eef717(https://dev.azure.com/fabrikam/DefaultCollection/_apis/repos/git/repositories/4bc14d40-c903-45e2-872e-0462c7748079/commits/eef717f69257a6333f221566c1c987dc94cc0d72)\r\n", + "html": "Jamal Hartnett created a new pull request\r\n
<ul>\r\n<li>Merge status: Succeeded</li>\r\n<li>Merge commit: <a href=\"https://dev.azure.com/fabrikam/DefaultCollection/_apis/repos/git/repositories/4bc14d40-c903-45e2-872e-0462c7748079/commits/eef717f69257a6333f221566c1c987dc94cc0d72\">eef717</a></li>\r\n</ul>\r\n
", + "markdown": "Jamal Hartnett created a new pull request\r\n\r\n+ Merge status: Succeeded\r\n+ Merge commit: [eef717](https://dev.azure.com/fabrikam/DefaultCollection/_apis/repos/git/repositories/4bc14d40-c903-45e2-872e-0462c7748079/commits/eef717f69257a6333f221566c1c987dc94cc0d72)\r\n" + }, + "resource": { + "repository": { + "id": "4bc14d40-c903-45e2-872e-0462c7748079", + "name": "Fabrikam", + "url": "https://dev.azure.com/fabrikam/DefaultCollection/_apis/repos/git/repositories/4bc14d40-c903-45e2-872e-0462c7748079", + "project": { + "id": "6ce954b1-ce1f-45d1-b94d-e6bf2464ba2c", + "name": "DefaultCollection", + "url": "https://dev.azure.com/fabrikam/DefaultCollection/_apis/projects/6ce954b1-ce1f-45d1-b94d-e6bf2464ba2c", + "state": "wellFormed" + }, + "defaultBranch": "refs/heads/master", + "remoteUrl": "https://dev.azure.com/fabrikam/DefaultCollection/_git/Fabrikam" + }, + "pullRequestId": 1, + "status": "active", + "createdBy": { + "id": "54d125f7-69f7-4191-904f-c5b96b6261c8", + "displayName": "Jamal Hartnett", + "uniqueName": "fabrikamfiber4@hotmail.com", + "url": "https://vssps.dev.azure.com/fabrikam/_apis/Identities/54d125f7-69f7-4191-904f-c5b96b6261c8", + "imageUrl": "https://dev.azure.com/fabrikam/DefaultCollection/_api/_common/identityImage?id=54d125f7-69f7-4191-904f-c5b96b6261c8" + }, + "creationDate": "2014-06-17T16:55:46.589889Z", + "title": "my first pull request", + "description": " - test2\r\n", + "sourceRefName": "refs/heads/mytopic", + "targetRefName": "refs/heads/master", + "mergeStatus": "succeeded", + "mergeId": "a10bb228-6ba6-4362-abd7-49ea21333dbd", + "lastMergeSourceCommit": { + "commitId": "53d54ac915144006c2c9e90d2c7d3880920db49c", + "url": "https://dev.azure.com/fabrikam/DefaultCollection/_apis/repos/git/repositories/4bc14d40-c903-45e2-872e-0462c7748079/commits/53d54ac915144006c2c9e90d2c7d3880920db49c" + }, + "lastMergeTargetCommit": { + "commitId": "a511f535b1ea495ee0c903badb68fbc83772c882", + "url": 
"https://dev.azure.com/fabrikam/DefaultCollection/_apis/repos/git/repositories/4bc14d40-c903-45e2-872e-0462c7748079/commits/a511f535b1ea495ee0c903badb68fbc83772c882" + }, + "lastMergeCommit": { + "commitId": "eef717f69257a6333f221566c1c987dc94cc0d72", + "url": "https://dev.azure.com/fabrikam/DefaultCollection/_apis/repos/git/repositories/4bc14d40-c903-45e2-872e-0462c7748079/commits/eef717f69257a6333f221566c1c987dc94cc0d72" + }, + "reviewers": [ + { + "reviewerUrl": null, + "vote": 0, + "id": "2ea2d095-48f9-4cd6-9966-62f6f574096c", + "displayName": "[Mobile]\\Mobile Team", + "uniqueName": "vstfs:///Classification/TeamProject/f0811a3b-8c8a-4e43-a3bf-9a049b4835bd\\Mobile Team", + "url": "https://vssps.dev.azure.com/fabrikam/_apis/Identities/2ea2d095-48f9-4cd6-9966-62f6f574096c", + "imageUrl": "https://dev.azure.com/fabrikam/DefaultCollection/_api/_common/identityImage?id=2ea2d095-48f9-4cd6-9966-62f6f574096c", + "isContainer": true + } + ], + "url": "https://dev.azure.com/fabrikam/DefaultCollection/_apis/repos/git/repositories/4bc14d40-c903-45e2-872e-0462c7748079/pullRequests/1" + }, + "resourceVersion": "1.0", + "resourceContainers": { + "collection": { + "id": "c12d0eb8-e382-443b-9f9c-c52cba5014c2" + }, + "account": { + "id": "f844ec47-a9db-4511-8281-8b63f4eaf94e" + }, + "project": { + "id": "be9b3917-87e6-42a4-a549-2bc06a7a878f" + } + }, + "createdDate": "2016-09-19T13:03:27.2879096Z" + } \ No newline at end of file diff --git a/applicationset/webhook/testdata/azuredevops-push.json b/applicationset/webhook/testdata/azuredevops-push.json new file mode 100644 index 0000000000000..41ee074892e39 --- /dev/null +++ b/applicationset/webhook/testdata/azuredevops-push.json @@ -0,0 +1,76 @@ +{ + "id": "03c164c2-8912-4d5e-8009-3707d5f83734", + "eventType": "git.push", + "publisherId": "tfs", + "scope": "all", + "message": { + "text": "Jamal Hartnett pushed updates to branch master of repository Fabrikam-Fiber-Git.", + "html": "Jamal Hartnett pushed updates to branch master of 
repository Fabrikam-Fiber-Git.", + "markdown": "Jamal Hartnett pushed updates to branch `master` of repository `Fabrikam-Fiber-Git`." + }, + "detailedMessage": { + "text": "Jamal Hartnett pushed 1 commit to branch master of repository Fabrikam-Fiber-Git.\n - Fixed bug in web.config file 33b55f7c", + "html": "Jamal Hartnett pushed 1 commit to branch master of repository Fabrikam-Fiber-Git.\n
<ul>\n<li>Fixed bug in web.config file <a href=\"https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git/commit/33b55f7cb7e7e245323987634f960cf4a6e6bc74\">33b55f7c</a>\n</ul>
", + "markdown": "Jamal Hartnett pushed 1 commit to branch [master](https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git/#version=GBmaster) of repository [Fabrikam-Fiber-Git](https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git/).\n* Fixed bug in web.config file [33b55f7c](https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git/commit/33b55f7cb7e7e245323987634f960cf4a6e6bc74)" + }, + "resource": { + "commits": [ + { + "commitId": "33b55f7cb7e7e245323987634f960cf4a6e6bc74", + "author": { + "name": "Jamal Hartnett", + "email": "fabrikamfiber4@hotmail.com", + "date": "2015-02-25T19:01:00Z" + }, + "committer": { + "name": "Jamal Hartnett", + "email": "fabrikamfiber4@hotmail.com", + "date": "2015-02-25T19:01:00Z" + }, + "comment": "Fixed bug in web.config file", + "url": "https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git/commit/33b55f7cb7e7e245323987634f960cf4a6e6bc74" + } + ], + "refUpdates": [ + { + "name": "refs/heads/master", + "oldObjectId": "aad331d8d3b131fa9ae03cf5e53965b51942618a", + "newObjectId": "33b55f7cb7e7e245323987634f960cf4a6e6bc74" + } + ], + "repository": { + "id": "278d5cd2-584d-4b63-824a-2ba458937249", + "name": "Fabrikam-Fiber-Git", + "url": "https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_apis/repos/git/repositories/278d5cd2-584d-4b63-824a-2ba458937249", + "project": { + "id": "6ce954b1-ce1f-45d1-b94d-e6bf2464ba2c", + "name": "DefaultCollection", + "url": "https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_apis/projects/6ce954b1-ce1f-45d1-b94d-e6bf2464ba2c", + "state": "wellFormed" + }, + "defaultBranch": "refs/heads/master", + "remoteUrl": "https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git" + }, + "pushedBy": { + "id": "00067FFED5C7AF52@Live.com", + "displayName": "Jamal Hartnett", + "uniqueName": "Windows Live ID\\fabrikamfiber4@hotmail.com" + }, + "pushId": 14, + "date": 
"2014-05-02T19:17:13.3309587Z", + "url": "https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_apis/repos/git/repositories/278d5cd2-584d-4b63-824a-2ba458937249/pushes/14" + }, + "resourceVersion": "1.0", + "resourceContainers": { + "collection": { + "id": "c12d0eb8-e382-443b-9f9c-c52cba5014c2" + }, + "account": { + "id": "f844ec47-a9db-4511-8281-8b63f4eaf94e" + }, + "project": { + "id": "be9b3917-87e6-42a4-a549-2bc06a7a878f" + } + }, + "createdDate": "2016-09-19T13:03:27.0379153Z" + } \ No newline at end of file diff --git a/applicationset/webhook/testdata/github-commit-branch-event.json b/applicationset/webhook/testdata/github-commit-branch-event.json new file mode 100644 index 0000000000000..3ddb1c5dd0be3 --- /dev/null +++ b/applicationset/webhook/testdata/github-commit-branch-event.json @@ -0,0 +1,186 @@ +{ + "ref": "refs/heads/master", + "before": "d5c1ffa8e294bc18c639bfb4e0df499251034414", + "after": "63738bb582c8b540af7bcfc18f87c575c3ed66e0", + "created": false, + "deleted": false, + "forced": true, + "base_ref": null, + "compare": "https://github.com/org/repo/compare/d5c1ffa8e294...63738bb582c8", + "commits": [ + { + "id": "63738bb582c8b540af7bcfc18f87c575c3ed66e0", + "tree_id": "64897da445207e409ad05af93b1f349ad0a4ee19", + "distinct": true, + "message": "Add staging-argocd-demo environment", + "timestamp": "2018-05-04T15:40:02-07:00", + "url": "https://github.com/org/repo/commit/63738bb582c8b540af7bcfc18f87c575c3ed66e0", + "author": { + "name": "Jesse Suen", + "email": "Jesse_Suen@example.com", + "username": "org" + }, + "committer": { + "name": "Jesse Suen", + "email": "Jesse_Suen@example.com", + "username": "org" + }, + "added": [ + "ksapps/test-app/environments/staging-argocd-demo/main.jsonnet", + "ksapps/test-app/environments/staging-argocd-demo/params.libsonnet" + ], + "removed": [ + + ], + "modified": [ + "ksapps/test-app/app.yaml" + ] + } + ], + "head_commit": { + "id": "63738bb582c8b540af7bcfc18f87c575c3ed66e0", + "tree_id": 
"64897da445207e409ad05af93b1f349ad0a4ee19", + "distinct": true, + "message": "Add staging-argocd-demo environment", + "timestamp": "2018-05-04T15:40:02-07:00", + "url": "https://github.com/org/repo/commit/63738bb582c8b540af7bcfc18f87c575c3ed66e0", + "author": { + "name": "Jesse Suen", + "email": "Jesse_Suen@example.com", + "username": "org" + }, + "committer": { + "name": "Jesse Suen", + "email": "Jesse_Suen@example.com", + "username": "org" + }, + "added": [ + "ksapps/test-app/environments/staging-argocd-demo/main.jsonnet", + "ksapps/test-app/environments/staging-argocd-demo/params.libsonnet" + ], + "removed": [ + + ], + "modified": [ + "ksapps/test-app/app.yaml" + ] + }, + "repository": { + "id": 123060978, + "name": "repo", + "full_name": "org/repo", + "owner": { + "name": "org", + "email": "org@users.noreply.github.com", + "login": "org", + "id": 12677113, + "avatar_url": "https://avatars0.githubusercontent.com/u/12677113?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/org", + "html_url": "https://github.com/org", + "followers_url": "https://api.github.com/users/org/followers", + "following_url": "https://api.github.com/users/org/following{/other_user}", + "gists_url": "https://api.github.com/users/org/gists{/gist_id}", + "starred_url": "https://api.github.com/users/org/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/org/subscriptions", + "organizations_url": "https://api.github.com/users/org/orgs", + "repos_url": "https://api.github.com/users/org/repos", + "events_url": "https://api.github.com/users/org/events{/privacy}", + "received_events_url": "https://api.github.com/users/org/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/org/repo", + "description": "Test Repository", + "fork": false, + "url": "https://github.com/org/repo", + "forks_url": "https://api.github.com/repos/org/repo/forks", + "keys_url": 
"https://api.github.com/repos/org/repo/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/org/repo/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/org/repo/teams", + "hooks_url": "https://api.github.com/repos/org/repo/hooks", + "issue_events_url": "https://api.github.com/repos/org/repo/issues/events{/number}", + "events_url": "https://api.github.com/repos/org/repo/events", + "assignees_url": "https://api.github.com/repos/org/repo/assignees{/user}", + "branches_url": "https://api.github.com/repos/org/repo/branches{/branch}", + "tags_url": "https://api.github.com/repos/org/repo/tags", + "blobs_url": "https://api.github.com/repos/org/repo/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/org/repo/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/org/repo/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/org/repo/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/org/repo/statuses/{sha}", + "languages_url": "https://api.github.com/repos/org/repo/languages", + "stargazers_url": "https://api.github.com/repos/org/repo/stargazers", + "contributors_url": "https://api.github.com/repos/org/repo/contributors", + "subscribers_url": "https://api.github.com/repos/org/repo/subscribers", + "subscription_url": "https://api.github.com/repos/org/repo/subscription", + "commits_url": "https://api.github.com/repos/org/repo/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/org/repo/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/org/repo/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/org/repo/issues/comments{/number}", + "contents_url": "https://api.github.com/repos/org/repo/contents/{+path}", + "compare_url": "https://api.github.com/repos/org/repo/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/org/repo/merges", + "archive_url": "https://api.github.com/repos/org/repo/{archive_format}{/ref}", + 
"downloads_url": "https://api.github.com/repos/org/repo/downloads", + "issues_url": "https://api.github.com/repos/org/repo/issues{/number}", + "pulls_url": "https://api.github.com/repos/org/repo/pulls{/number}", + "milestones_url": "https://api.github.com/repos/org/repo/milestones{/number}", + "notifications_url": "https://api.github.com/repos/org/repo/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/org/repo/labels{/name}", + "releases_url": "https://api.github.com/repos/org/repo/releases{/id}", + "deployments_url": "https://api.github.com/repos/org/repo/deployments", + "created_at": 1519698615, + "updated_at": "2018-05-04T22:37:55Z", + "pushed_at": 1525473610, + "git_url": "git://github.com/org/repo.git", + "ssh_url": "git@github.com:org/repo.git", + "clone_url": "https://github.com/org/repo.git", + "svn_url": "https://github.com/org/repo", + "homepage": null, + "size": 538, + "stargazers_count": 0, + "watchers_count": 0, + "language": null, + "has_issues": true, + "has_projects": true, + "has_downloads": true, + "has_wiki": true, + "has_pages": false, + "forks_count": 1, + "mirror_url": null, + "archived": false, + "open_issues_count": 0, + "license": null, + "forks": 1, + "open_issues": 0, + "watchers": 0, + "default_branch": "main", + "stargazers": 0, + "master_branch": "main" + }, + "pusher": { + "name": "org", + "email": "org@users.noreply.github.com" + }, + "sender": { + "login": "org", + "id": 12677113, + "avatar_url": "https://avatars0.githubusercontent.com/u/12677113?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/org", + "html_url": "https://github.com/org", + "followers_url": "https://api.github.com/users/org/followers", + "following_url": "https://api.github.com/users/org/following{/other_user}", + "gists_url": "https://api.github.com/users/org/gists{/gist_id}", + "starred_url": "https://api.github.com/users/org/starred{/owner}{/repo}", + "subscriptions_url": 
"https://api.github.com/users/org/subscriptions", + "organizations_url": "https://api.github.com/users/org/orgs", + "repos_url": "https://api.github.com/users/org/repos", + "events_url": "https://api.github.com/users/org/events{/privacy}", + "received_events_url": "https://api.github.com/users/org/received_events", + "type": "User", + "site_admin": false + } + } \ No newline at end of file diff --git a/applicationset/webhook/testdata/github-commit-event.json b/applicationset/webhook/testdata/github-commit-event.json new file mode 100644 index 0000000000000..efe9a645eee30 --- /dev/null +++ b/applicationset/webhook/testdata/github-commit-event.json @@ -0,0 +1,186 @@ +{ + "ref": "refs/heads/master", + "before": "d5c1ffa8e294bc18c639bfb4e0df499251034414", + "after": "63738bb582c8b540af7bcfc18f87c575c3ed66e0", + "created": false, + "deleted": false, + "forced": true, + "base_ref": null, + "compare": "https://github.com/org/repo/compare/d5c1ffa8e294...63738bb582c8", + "commits": [ + { + "id": "63738bb582c8b540af7bcfc18f87c575c3ed66e0", + "tree_id": "64897da445207e409ad05af93b1f349ad0a4ee19", + "distinct": true, + "message": "Add staging-argocd-demo environment", + "timestamp": "2018-05-04T15:40:02-07:00", + "url": "https://github.com/org/repo/commit/63738bb582c8b540af7bcfc18f87c575c3ed66e0", + "author": { + "name": "Jesse Suen", + "email": "Jesse_Suen@example.com", + "username": "org" + }, + "committer": { + "name": "Jesse Suen", + "email": "Jesse_Suen@example.com", + "username": "org" + }, + "added": [ + "ksapps/test-app/environments/staging-argocd-demo/main.jsonnet", + "ksapps/test-app/environments/staging-argocd-demo/params.libsonnet" + ], + "removed": [ + + ], + "modified": [ + "ksapps/test-app/app.yaml" + ] + } + ], + "head_commit": { + "id": "63738bb582c8b540af7bcfc18f87c575c3ed66e0", + "tree_id": "64897da445207e409ad05af93b1f349ad0a4ee19", + "distinct": true, + "message": "Add staging-argocd-demo environment", + "timestamp": "2018-05-04T15:40:02-07:00", + "url": 
"https://github.com/org/repo/commit/63738bb582c8b540af7bcfc18f87c575c3ed66e0", + "author": { + "name": "Jesse Suen", + "email": "Jesse_Suen@example.com", + "username": "org" + }, + "committer": { + "name": "Jesse Suen", + "email": "Jesse_Suen@example.com", + "username": "org" + }, + "added": [ + "ksapps/test-app/environments/staging-argocd-demo/main.jsonnet", + "ksapps/test-app/environments/staging-argocd-demo/params.libsonnet" + ], + "removed": [ + + ], + "modified": [ + "ksapps/test-app/app.yaml" + ] + }, + "repository": { + "id": 123060978, + "name": "repo", + "full_name": "org/repo", + "owner": { + "name": "org", + "email": "org@users.noreply.github.com", + "login": "org", + "id": 12677113, + "avatar_url": "https://avatars0.githubusercontent.com/u/12677113?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/org", + "html_url": "https://github.com/org", + "followers_url": "https://api.github.com/users/org/followers", + "following_url": "https://api.github.com/users/org/following{/other_user}", + "gists_url": "https://api.github.com/users/org/gists{/gist_id}", + "starred_url": "https://api.github.com/users/org/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/org/subscriptions", + "organizations_url": "https://api.github.com/users/org/orgs", + "repos_url": "https://api.github.com/users/org/repos", + "events_url": "https://api.github.com/users/org/events{/privacy}", + "received_events_url": "https://api.github.com/users/org/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/org/repo", + "description": "Test Repository", + "fork": false, + "url": "https://github.com/org/repo", + "forks_url": "https://api.github.com/repos/org/repo/forks", + "keys_url": "https://api.github.com/repos/org/repo/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/org/repo/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/org/repo/teams", + 
"hooks_url": "https://api.github.com/repos/org/repo/hooks", + "issue_events_url": "https://api.github.com/repos/org/repo/issues/events{/number}", + "events_url": "https://api.github.com/repos/org/repo/events", + "assignees_url": "https://api.github.com/repos/org/repo/assignees{/user}", + "branches_url": "https://api.github.com/repos/org/repo/branches{/branch}", + "tags_url": "https://api.github.com/repos/org/repo/tags", + "blobs_url": "https://api.github.com/repos/org/repo/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/org/repo/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/org/repo/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/org/repo/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/org/repo/statuses/{sha}", + "languages_url": "https://api.github.com/repos/org/repo/languages", + "stargazers_url": "https://api.github.com/repos/org/repo/stargazers", + "contributors_url": "https://api.github.com/repos/org/repo/contributors", + "subscribers_url": "https://api.github.com/repos/org/repo/subscribers", + "subscription_url": "https://api.github.com/repos/org/repo/subscription", + "commits_url": "https://api.github.com/repos/org/repo/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/org/repo/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/org/repo/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/org/repo/issues/comments{/number}", + "contents_url": "https://api.github.com/repos/org/repo/contents/{+path}", + "compare_url": "https://api.github.com/repos/org/repo/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/org/repo/merges", + "archive_url": "https://api.github.com/repos/org/repo/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/org/repo/downloads", + "issues_url": "https://api.github.com/repos/org/repo/issues{/number}", + "pulls_url": "https://api.github.com/repos/org/repo/pulls{/number}", + 
"milestones_url": "https://api.github.com/repos/org/repo/milestones{/number}", + "notifications_url": "https://api.github.com/repos/org/repo/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/org/repo/labels{/name}", + "releases_url": "https://api.github.com/repos/org/repo/releases{/id}", + "deployments_url": "https://api.github.com/repos/org/repo/deployments", + "created_at": 1519698615, + "updated_at": "2018-05-04T22:37:55Z", + "pushed_at": 1525473610, + "git_url": "git://github.com/org/repo.git", + "ssh_url": "git@github.com:org/repo.git", + "clone_url": "https://github.com/org/repo.git", + "svn_url": "https://github.com/org/repo", + "homepage": null, + "size": 538, + "stargazers_count": 0, + "watchers_count": 0, + "language": null, + "has_issues": true, + "has_projects": true, + "has_downloads": true, + "has_wiki": true, + "has_pages": false, + "forks_count": 1, + "mirror_url": null, + "archived": false, + "open_issues_count": 0, + "license": null, + "forks": 1, + "open_issues": 0, + "watchers": 0, + "default_branch": "master", + "stargazers": 0, + "master_branch": "master" + }, + "pusher": { + "name": "org", + "email": "org@users.noreply.github.com" + }, + "sender": { + "login": "org", + "id": 12677113, + "avatar_url": "https://avatars0.githubusercontent.com/u/12677113?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/org", + "html_url": "https://github.com/org", + "followers_url": "https://api.github.com/users/org/followers", + "following_url": "https://api.github.com/users/org/following{/other_user}", + "gists_url": "https://api.github.com/users/org/gists{/gist_id}", + "starred_url": "https://api.github.com/users/org/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/org/subscriptions", + "organizations_url": "https://api.github.com/users/org/orgs", + "repos_url": "https://api.github.com/users/org/repos", + "events_url": "https://api.github.com/users/org/events{/privacy}", + 
"received_events_url": "https://api.github.com/users/org/received_events", + "type": "User", + "site_admin": false + } + } \ No newline at end of file diff --git a/applicationset/webhook/testdata/github-ping-event.json b/applicationset/webhook/testdata/github-ping-event.json new file mode 100644 index 0000000000000..9e9329129bae4 --- /dev/null +++ b/applicationset/webhook/testdata/github-ping-event.json @@ -0,0 +1,140 @@ +{ + "zen": "Anything added dilutes everything else.", + "hook_id": 109948940, + "hook": { + "type": "Repository", + "id": 109948940, + "name": "web", + "active": true, + "events": ["*"], + "config": { + "content_type": "json", + "url": "https://smee.io/****************", + "insecure_ssl": "0" + }, + "updated_at": "2019-05-15T15:20:49Z", + "created_at": "2019-05-15T15:20:49Z", + "url": "https://api.github.com/repos/Octocoders/Hello-World/hooks/109948940", + "test_url": "https://api.github.com/repos/Octocoders/Hello-World/hooks/109948940/test", + "ping_url": "https://api.github.com/repos/Octocoders/Hello-World/hooks/109948940/pings", + "last_response": { + "code": null, + "status": "unused", + "message": null + } + }, + "repository": { + "id": 186853261, + "node_id": "MDEwOlJlcG9zaXRvcnkxODY4NTMyNjE=", + "name": "Hello-World", + "full_name": "Octocoders/Hello-World", + "private": false, + "owner": { + "login": "Octocoders", + "id": 38302899, + "node_id": "MDEyOk9yZ2FuaXphdGlvbjM4MzAyODk5", + "avatar_url": "https://avatars1.githubusercontent.com/u/38302899?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/Octocoders", + "html_url": "https://github.com/Octocoders", + "followers_url": "https://api.github.com/users/Octocoders/followers", + "following_url": "https://api.github.com/users/Octocoders/following{/other_user}", + "gists_url": "https://api.github.com/users/Octocoders/gists{/gist_id}", + "starred_url": "https://api.github.com/users/Octocoders/starred{/owner}{/repo}", + "subscriptions_url": 
"https://api.github.com/users/Octocoders/subscriptions", + "organizations_url": "https://api.github.com/users/Octocoders/orgs", + "repos_url": "https://api.github.com/users/Octocoders/repos", + "events_url": "https://api.github.com/users/Octocoders/events{/privacy}", + "received_events_url": "https://api.github.com/users/Octocoders/received_events", + "type": "Organization", + "site_admin": false + }, + "html_url": "https://github.com/Octocoders/Hello-World", + "description": null, + "fork": true, + "url": "https://api.github.com/repos/Octocoders/Hello-World", + "forks_url": "https://api.github.com/repos/Octocoders/Hello-World/forks", + "keys_url": "https://api.github.com/repos/Octocoders/Hello-World/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/Octocoders/Hello-World/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/Octocoders/Hello-World/teams", + "hooks_url": "https://api.github.com/repos/Octocoders/Hello-World/hooks", + "issue_events_url": "https://api.github.com/repos/Octocoders/Hello-World/issues/events{/number}", + "events_url": "https://api.github.com/repos/Octocoders/Hello-World/events", + "assignees_url": "https://api.github.com/repos/Octocoders/Hello-World/assignees{/user}", + "branches_url": "https://api.github.com/repos/Octocoders/Hello-World/branches{/branch}", + "tags_url": "https://api.github.com/repos/Octocoders/Hello-World/tags", + "blobs_url": "https://api.github.com/repos/Octocoders/Hello-World/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/Octocoders/Hello-World/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/Octocoders/Hello-World/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/Octocoders/Hello-World/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/Octocoders/Hello-World/statuses/{sha}", + "languages_url": "https://api.github.com/repos/Octocoders/Hello-World/languages", + "stargazers_url": 
"https://api.github.com/repos/Octocoders/Hello-World/stargazers", + "contributors_url": "https://api.github.com/repos/Octocoders/Hello-World/contributors", + "subscribers_url": "https://api.github.com/repos/Octocoders/Hello-World/subscribers", + "subscription_url": "https://api.github.com/repos/Octocoders/Hello-World/subscription", + "commits_url": "https://api.github.com/repos/Octocoders/Hello-World/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/Octocoders/Hello-World/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/Octocoders/Hello-World/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/Octocoders/Hello-World/issues/comments{/number}", + "contents_url": "https://api.github.com/repos/Octocoders/Hello-World/contents/{+path}", + "compare_url": "https://api.github.com/repos/Octocoders/Hello-World/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/Octocoders/Hello-World/merges", + "archive_url": "https://api.github.com/repos/Octocoders/Hello-World/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/Octocoders/Hello-World/downloads", + "issues_url": "https://api.github.com/repos/Octocoders/Hello-World/issues{/number}", + "pulls_url": "https://api.github.com/repos/Octocoders/Hello-World/pulls{/number}", + "milestones_url": "https://api.github.com/repos/Octocoders/Hello-World/milestones{/number}", + "notifications_url": "https://api.github.com/repos/Octocoders/Hello-World/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/Octocoders/Hello-World/labels{/name}", + "releases_url": "https://api.github.com/repos/Octocoders/Hello-World/releases{/id}", + "deployments_url": "https://api.github.com/repos/Octocoders/Hello-World/deployments", + "created_at": "2019-05-15T15:20:42Z", + "updated_at": "2019-05-15T15:20:45Z", + "pushed_at": "2019-05-15T15:20:33Z", + "git_url": "git://github.com/Octocoders/Hello-World.git", + "ssh_url": 
"git@github.com:Octocoders/Hello-World.git", + "clone_url": "https://github.com/Octocoders/Hello-World.git", + "svn_url": "https://github.com/Octocoders/Hello-World", + "homepage": null, + "size": 0, + "stargazers_count": 0, + "watchers_count": 0, + "language": "Ruby", + "has_issues": false, + "has_projects": true, + "has_downloads": true, + "has_wiki": true, + "has_pages": false, + "forks_count": 0, + "mirror_url": null, + "archived": false, + "disabled": false, + "open_issues_count": 0, + "license": null, + "forks": 0, + "open_issues": 0, + "watchers": 0, + "default_branch": "master" + }, + "sender": { + "login": "Codertocat", + "id": 21031067, + "node_id": "MDQ6VXNlcjIxMDMxMDY3", + "avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/Codertocat", + "html_url": "https://github.com/Codertocat", + "followers_url": "https://api.github.com/users/Codertocat/followers", + "following_url": "https://api.github.com/users/Codertocat/following{/other_user}", + "gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions", + "organizations_url": "https://api.github.com/users/Codertocat/orgs", + "repos_url": "https://api.github.com/users/Codertocat/repos", + "events_url": "https://api.github.com/users/Codertocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/Codertocat/received_events", + "type": "User", + "site_admin": false + } +} diff --git a/applicationset/webhook/testdata/github-pull-request-assigned-event.json b/applicationset/webhook/testdata/github-pull-request-assigned-event.json new file mode 100644 index 0000000000000..d6df9354cea8d --- /dev/null +++ b/applicationset/webhook/testdata/github-pull-request-assigned-event.json @@ -0,0 +1,454 @@ +{ + "action": "assigned", + "number": 2, + 
"pull_request": { + "url": "https://api.github.com/repos/Codertocat/Hello-World/pulls/2", + "id": 279147437, + "node_id": "MDExOlB1bGxSZXF1ZXN0Mjc5MTQ3NDM3", + "html_url": "https://github.com/Codertocat/Hello-World/pull/2", + "diff_url": "https://github.com/Codertocat/Hello-World/pull/2.diff", + "patch_url": "https://github.com/Codertocat/Hello-World/pull/2.patch", + "issue_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/2", + "number": 2, + "state": "open", + "locked": false, + "title": "Update the README with new information.", + "user": { + "login": "Codertocat", + "id": 21031067, + "node_id": "MDQ6VXNlcjIxMDMxMDY3", + "avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/Codertocat", + "html_url": "https://github.com/Codertocat", + "followers_url": "https://api.github.com/users/Codertocat/followers", + "following_url": "https://api.github.com/users/Codertocat/following{/other_user}", + "gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions", + "organizations_url": "https://api.github.com/users/Codertocat/orgs", + "repos_url": "https://api.github.com/users/Codertocat/repos", + "events_url": "https://api.github.com/users/Codertocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/Codertocat/received_events", + "type": "User", + "site_admin": false + }, + "body": "This is a pretty simple change that we need to pull into master.", + "created_at": "2019-05-15T15:20:33Z", + "updated_at": "2019-05-15T15:20:33Z", + "closed_at": null, + "merged_at": null, + "merge_commit_sha": null, + "assignee": null, + "assignees": [], + "requested_reviewers": [], + "requested_teams": [], + "labels": [], + "milestone": null, + "commits_url": 
"https://api.github.com/repos/Codertocat/Hello-World/pulls/2/commits", + "review_comments_url": "https://api.github.com/repos/Codertocat/Hello-World/pulls/2/comments", + "review_comment_url": "https://api.github.com/repos/Codertocat/Hello-World/pulls/comments{/number}", + "comments_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/2/comments", + "statuses_url": "https://api.github.com/repos/Codertocat/Hello-World/statuses/ec26c3e57ca3a959ca5aad62de7213c562f8c821", + "head": { + "label": "Codertocat:changes", + "ref": "changes", + "sha": "ec26c3e57ca3a959ca5aad62de7213c562f8c821", + "user": { + "login": "Codertocat", + "id": 21031067, + "node_id": "MDQ6VXNlcjIxMDMxMDY3", + "avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/Codertocat", + "html_url": "https://github.com/Codertocat", + "followers_url": "https://api.github.com/users/Codertocat/followers", + "following_url": "https://api.github.com/users/Codertocat/following{/other_user}", + "gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions", + "organizations_url": "https://api.github.com/users/Codertocat/orgs", + "repos_url": "https://api.github.com/users/Codertocat/repos", + "events_url": "https://api.github.com/users/Codertocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/Codertocat/received_events", + "type": "User", + "site_admin": false + }, + "repo": { + "id": 186853002, + "node_id": "MDEwOlJlcG9zaXRvcnkxODY4NTMwMDI=", + "name": "Hello-World", + "full_name": "Codertocat/Hello-World", + "private": false, + "owner": { + "login": "Codertocat", + "id": 21031067, + "node_id": "MDQ6VXNlcjIxMDMxMDY3", + "avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4", + "gravatar_id": "", + "url": 
"https://api.github.com/users/Codertocat", + "html_url": "https://github.com/Codertocat", + "followers_url": "https://api.github.com/users/Codertocat/followers", + "following_url": "https://api.github.com/users/Codertocat/following{/other_user}", + "gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions", + "organizations_url": "https://api.github.com/users/Codertocat/orgs", + "repos_url": "https://api.github.com/users/Codertocat/repos", + "events_url": "https://api.github.com/users/Codertocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/Codertocat/received_events", + "type": "User", + "site_admin": false + }, + "html_url": "https://github.com/Codertocat/Hello-World", + "description": null, + "fork": false, + "url": "https://api.github.com/repos/Codertocat/Hello-World", + "forks_url": "https://api.github.com/repos/Codertocat/Hello-World/forks", + "keys_url": "https://api.github.com/repos/Codertocat/Hello-World/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/Codertocat/Hello-World/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/Codertocat/Hello-World/teams", + "hooks_url": "https://api.github.com/repos/Codertocat/Hello-World/hooks", + "issue_events_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/events{/number}", + "events_url": "https://api.github.com/repos/Codertocat/Hello-World/events", + "assignees_url": "https://api.github.com/repos/Codertocat/Hello-World/assignees{/user}", + "branches_url": "https://api.github.com/repos/Codertocat/Hello-World/branches{/branch}", + "tags_url": "https://api.github.com/repos/Codertocat/Hello-World/tags", + "blobs_url": "https://api.github.com/repos/Codertocat/Hello-World/git/blobs{/sha}", + "git_tags_url": 
"https://api.github.com/repos/Codertocat/Hello-World/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/Codertocat/Hello-World/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/Codertocat/Hello-World/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/Codertocat/Hello-World/statuses/{sha}", + "languages_url": "https://api.github.com/repos/Codertocat/Hello-World/languages", + "stargazers_url": "https://api.github.com/repos/Codertocat/Hello-World/stargazers", + "contributors_url": "https://api.github.com/repos/Codertocat/Hello-World/contributors", + "subscribers_url": "https://api.github.com/repos/Codertocat/Hello-World/subscribers", + "subscription_url": "https://api.github.com/repos/Codertocat/Hello-World/subscription", + "commits_url": "https://api.github.com/repos/Codertocat/Hello-World/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/Codertocat/Hello-World/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/Codertocat/Hello-World/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/comments{/number}", + "contents_url": "https://api.github.com/repos/Codertocat/Hello-World/contents/{+path}", + "compare_url": "https://api.github.com/repos/Codertocat/Hello-World/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/Codertocat/Hello-World/merges", + "archive_url": "https://api.github.com/repos/Codertocat/Hello-World/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/Codertocat/Hello-World/downloads", + "issues_url": "https://api.github.com/repos/Codertocat/Hello-World/issues{/number}", + "pulls_url": "https://api.github.com/repos/Codertocat/Hello-World/pulls{/number}", + "milestones_url": "https://api.github.com/repos/Codertocat/Hello-World/milestones{/number}", + "notifications_url": "https://api.github.com/repos/Codertocat/Hello-World/notifications{?since,all,participating}", + "labels_url": 
"https://api.github.com/repos/Codertocat/Hello-World/labels{/name}", + "releases_url": "https://api.github.com/repos/Codertocat/Hello-World/releases{/id}", + "deployments_url": "https://api.github.com/repos/Codertocat/Hello-World/deployments", + "created_at": "2019-05-15T15:19:25Z", + "updated_at": "2019-05-15T15:19:27Z", + "pushed_at": "2019-05-15T15:20:32Z", + "git_url": "git://github.com/Codertocat/Hello-World.git", + "ssh_url": "git@github.com:Codertocat/Hello-World.git", + "clone_url": "https://github.com/Codertocat/Hello-World.git", + "svn_url": "https://github.com/Codertocat/Hello-World", + "homepage": null, + "size": 0, + "stargazers_count": 0, + "watchers_count": 0, + "language": null, + "has_issues": true, + "has_projects": true, + "has_downloads": true, + "has_wiki": true, + "has_pages": true, + "forks_count": 0, + "mirror_url": null, + "archived": false, + "disabled": false, + "open_issues_count": 2, + "license": null, + "forks": 0, + "open_issues": 2, + "watchers": 0, + "default_branch": "master", + "allow_squash_merge": true, + "allow_merge_commit": true, + "allow_rebase_merge": true, + "delete_branch_on_merge": false + } + }, + "base": { + "label": "Codertocat:master", + "ref": "master", + "sha": "f95f852bd8fca8fcc58a9a2d6c842781e32a215e", + "user": { + "login": "Codertocat", + "id": 21031067, + "node_id": "MDQ6VXNlcjIxMDMxMDY3", + "avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/Codertocat", + "html_url": "https://github.com/Codertocat", + "followers_url": "https://api.github.com/users/Codertocat/followers", + "following_url": "https://api.github.com/users/Codertocat/following{/other_user}", + "gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions", + "organizations_url": 
"https://api.github.com/users/Codertocat/orgs", + "repos_url": "https://api.github.com/users/Codertocat/repos", + "events_url": "https://api.github.com/users/Codertocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/Codertocat/received_events", + "type": "User", + "site_admin": false + }, + "repo": { + "id": 186853002, + "node_id": "MDEwOlJlcG9zaXRvcnkxODY4NTMwMDI=", + "name": "Hello-World", + "full_name": "Codertocat/Hello-World", + "private": false, + "owner": { + "login": "Codertocat", + "id": 21031067, + "node_id": "MDQ6VXNlcjIxMDMxMDY3", + "avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/Codertocat", + "html_url": "https://github.com/Codertocat", + "followers_url": "https://api.github.com/users/Codertocat/followers", + "following_url": "https://api.github.com/users/Codertocat/following{/other_user}", + "gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions", + "organizations_url": "https://api.github.com/users/Codertocat/orgs", + "repos_url": "https://api.github.com/users/Codertocat/repos", + "events_url": "https://api.github.com/users/Codertocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/Codertocat/received_events", + "type": "User", + "site_admin": false + }, + "html_url": "https://github.com/Codertocat/Hello-World", + "description": null, + "fork": false, + "url": "https://api.github.com/repos/Codertocat/Hello-World", + "forks_url": "https://api.github.com/repos/Codertocat/Hello-World/forks", + "keys_url": "https://api.github.com/repos/Codertocat/Hello-World/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/Codertocat/Hello-World/collaborators{/collaborator}", + "teams_url": 
"https://api.github.com/repos/Codertocat/Hello-World/teams", + "hooks_url": "https://api.github.com/repos/Codertocat/Hello-World/hooks", + "issue_events_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/events{/number}", + "events_url": "https://api.github.com/repos/Codertocat/Hello-World/events", + "assignees_url": "https://api.github.com/repos/Codertocat/Hello-World/assignees{/user}", + "branches_url": "https://api.github.com/repos/Codertocat/Hello-World/branches{/branch}", + "tags_url": "https://api.github.com/repos/Codertocat/Hello-World/tags", + "blobs_url": "https://api.github.com/repos/Codertocat/Hello-World/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/Codertocat/Hello-World/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/Codertocat/Hello-World/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/Codertocat/Hello-World/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/Codertocat/Hello-World/statuses/{sha}", + "languages_url": "https://api.github.com/repos/Codertocat/Hello-World/languages", + "stargazers_url": "https://api.github.com/repos/Codertocat/Hello-World/stargazers", + "contributors_url": "https://api.github.com/repos/Codertocat/Hello-World/contributors", + "subscribers_url": "https://api.github.com/repos/Codertocat/Hello-World/subscribers", + "subscription_url": "https://api.github.com/repos/Codertocat/Hello-World/subscription", + "commits_url": "https://api.github.com/repos/Codertocat/Hello-World/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/Codertocat/Hello-World/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/Codertocat/Hello-World/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/comments{/number}", + "contents_url": "https://api.github.com/repos/Codertocat/Hello-World/contents/{+path}", + "compare_url": 
"https://api.github.com/repos/Codertocat/Hello-World/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/Codertocat/Hello-World/merges", + "archive_url": "https://api.github.com/repos/Codertocat/Hello-World/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/Codertocat/Hello-World/downloads", + "issues_url": "https://api.github.com/repos/Codertocat/Hello-World/issues{/number}", + "pulls_url": "https://api.github.com/repos/Codertocat/Hello-World/pulls{/number}", + "milestones_url": "https://api.github.com/repos/Codertocat/Hello-World/milestones{/number}", + "notifications_url": "https://api.github.com/repos/Codertocat/Hello-World/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/Codertocat/Hello-World/labels{/name}", + "releases_url": "https://api.github.com/repos/Codertocat/Hello-World/releases{/id}", + "deployments_url": "https://api.github.com/repos/Codertocat/Hello-World/deployments", + "created_at": "2019-05-15T15:19:25Z", + "updated_at": "2019-05-15T15:19:27Z", + "pushed_at": "2019-05-15T15:20:32Z", + "git_url": "git://github.com/Codertocat/Hello-World.git", + "ssh_url": "git@github.com:Codertocat/Hello-World.git", + "clone_url": "https://github.com/Codertocat/Hello-World.git", + "svn_url": "https://github.com/Codertocat/Hello-World", + "homepage": null, + "size": 0, + "stargazers_count": 0, + "watchers_count": 0, + "language": null, + "has_issues": true, + "has_projects": true, + "has_downloads": true, + "has_wiki": true, + "has_pages": true, + "forks_count": 0, + "mirror_url": null, + "archived": false, + "disabled": false, + "open_issues_count": 2, + "license": null, + "forks": 0, + "open_issues": 2, + "watchers": 0, + "default_branch": "master", + "allow_squash_merge": true, + "allow_merge_commit": true, + "allow_rebase_merge": true, + "delete_branch_on_merge": false + } + }, + "_links": { + "self": { + "href": "https://api.github.com/repos/Codertocat/Hello-World/pulls/2" + }, 
+ "html": { + "href": "https://github.com/Codertocat/Hello-World/pull/2" + }, + "issue": { + "href": "https://api.github.com/repos/Codertocat/Hello-World/issues/2" + }, + "comments": { + "href": "https://api.github.com/repos/Codertocat/Hello-World/issues/2/comments" + }, + "review_comments": { + "href": "https://api.github.com/repos/Codertocat/Hello-World/pulls/2/comments" + }, + "review_comment": { + "href": "https://api.github.com/repos/Codertocat/Hello-World/pulls/comments{/number}" + }, + "commits": { + "href": "https://api.github.com/repos/Codertocat/Hello-World/pulls/2/commits" + }, + "statuses": { + "href": "https://api.github.com/repos/Codertocat/Hello-World/statuses/ec26c3e57ca3a959ca5aad62de7213c562f8c821" + } + }, + "author_association": "OWNER", + "draft": false, + "merged": false, + "mergeable": null, + "rebaseable": null, + "mergeable_state": "unknown", + "merged_by": null, + "comments": 0, + "review_comments": 0, + "maintainer_can_modify": false, + "commits": 1, + "additions": 1, + "deletions": 1, + "changed_files": 1 + }, + "repository": { + "id": 186853002, + "node_id": "MDEwOlJlcG9zaXRvcnkxODY4NTMwMDI=", + "name": "Hello-World", + "full_name": "Codertocat/Hello-World", + "private": false, + "owner": { + "login": "Codertocat", + "id": 21031067, + "node_id": "MDQ6VXNlcjIxMDMxMDY3", + "avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/Codertocat", + "html_url": "https://github.com/Codertocat", + "followers_url": "https://api.github.com/users/Codertocat/followers", + "following_url": "https://api.github.com/users/Codertocat/following{/other_user}", + "gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions", + "organizations_url": "https://api.github.com/users/Codertocat/orgs", + "repos_url": 
"https://api.github.com/users/Codertocat/repos", + "events_url": "https://api.github.com/users/Codertocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/Codertocat/received_events", + "type": "User", + "site_admin": false + }, + "html_url": "https://github.com/Codertocat/Hello-World", + "description": null, + "fork": false, + "url": "https://api.github.com/repos/Codertocat/Hello-World", + "forks_url": "https://api.github.com/repos/Codertocat/Hello-World/forks", + "keys_url": "https://api.github.com/repos/Codertocat/Hello-World/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/Codertocat/Hello-World/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/Codertocat/Hello-World/teams", + "hooks_url": "https://api.github.com/repos/Codertocat/Hello-World/hooks", + "issue_events_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/events{/number}", + "events_url": "https://api.github.com/repos/Codertocat/Hello-World/events", + "assignees_url": "https://api.github.com/repos/Codertocat/Hello-World/assignees{/user}", + "branches_url": "https://api.github.com/repos/Codertocat/Hello-World/branches{/branch}", + "tags_url": "https://api.github.com/repos/Codertocat/Hello-World/tags", + "blobs_url": "https://api.github.com/repos/Codertocat/Hello-World/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/Codertocat/Hello-World/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/Codertocat/Hello-World/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/Codertocat/Hello-World/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/Codertocat/Hello-World/statuses/{sha}", + "languages_url": "https://api.github.com/repos/Codertocat/Hello-World/languages", + "stargazers_url": "https://api.github.com/repos/Codertocat/Hello-World/stargazers", + "contributors_url": "https://api.github.com/repos/Codertocat/Hello-World/contributors", + "subscribers_url": 
"https://api.github.com/repos/Codertocat/Hello-World/subscribers", + "subscription_url": "https://api.github.com/repos/Codertocat/Hello-World/subscription", + "commits_url": "https://api.github.com/repos/Codertocat/Hello-World/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/Codertocat/Hello-World/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/Codertocat/Hello-World/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/comments{/number}", + "contents_url": "https://api.github.com/repos/Codertocat/Hello-World/contents/{+path}", + "compare_url": "https://api.github.com/repos/Codertocat/Hello-World/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/Codertocat/Hello-World/merges", + "archive_url": "https://api.github.com/repos/Codertocat/Hello-World/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/Codertocat/Hello-World/downloads", + "issues_url": "https://api.github.com/repos/Codertocat/Hello-World/issues{/number}", + "pulls_url": "https://api.github.com/repos/Codertocat/Hello-World/pulls{/number}", + "milestones_url": "https://api.github.com/repos/Codertocat/Hello-World/milestones{/number}", + "notifications_url": "https://api.github.com/repos/Codertocat/Hello-World/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/Codertocat/Hello-World/labels{/name}", + "releases_url": "https://api.github.com/repos/Codertocat/Hello-World/releases{/id}", + "deployments_url": "https://api.github.com/repos/Codertocat/Hello-World/deployments", + "created_at": "2019-05-15T15:19:25Z", + "updated_at": "2019-05-15T15:19:27Z", + "pushed_at": "2019-05-15T15:20:32Z", + "git_url": "git://github.com/Codertocat/Hello-World.git", + "ssh_url": "git@github.com:Codertocat/Hello-World.git", + "clone_url": "https://github.com/Codertocat/Hello-World.git", + "svn_url": "https://github.com/Codertocat/Hello-World", + "homepage": null, + 
"size": 0, + "stargazers_count": 0, + "watchers_count": 0, + "language": null, + "has_issues": true, + "has_projects": true, + "has_downloads": true, + "has_wiki": true, + "has_pages": true, + "forks_count": 0, + "mirror_url": null, + "archived": false, + "disabled": false, + "open_issues_count": 2, + "license": null, + "forks": 0, + "open_issues": 2, + "watchers": 0, + "default_branch": "master" + }, + "sender": { + "login": "Codertocat", + "id": 21031067, + "node_id": "MDQ6VXNlcjIxMDMxMDY3", + "avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/Codertocat", + "html_url": "https://github.com/Codertocat", + "followers_url": "https://api.github.com/users/Codertocat/followers", + "following_url": "https://api.github.com/users/Codertocat/following{/other_user}", + "gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions", + "organizations_url": "https://api.github.com/users/Codertocat/orgs", + "repos_url": "https://api.github.com/users/Codertocat/repos", + "events_url": "https://api.github.com/users/Codertocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/Codertocat/received_events", + "type": "User", + "site_admin": false + } + } \ No newline at end of file diff --git a/applicationset/webhook/testdata/github-pull-request-opened-event.json b/applicationset/webhook/testdata/github-pull-request-opened-event.json new file mode 100644 index 0000000000000..e1dace771d7f9 --- /dev/null +++ b/applicationset/webhook/testdata/github-pull-request-opened-event.json @@ -0,0 +1,454 @@ +{ + "action": "opened", + "number": 2, + "pull_request": { + "url": "https://api.github.com/repos/Codertocat/Hello-World/pulls/2", + "id": 279147437, + "node_id": "MDExOlB1bGxSZXF1ZXN0Mjc5MTQ3NDM3", + "html_url": 
"https://github.com/Codertocat/Hello-World/pull/2", + "diff_url": "https://github.com/Codertocat/Hello-World/pull/2.diff", + "patch_url": "https://github.com/Codertocat/Hello-World/pull/2.patch", + "issue_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/2", + "number": 2, + "state": "open", + "locked": false, + "title": "Update the README with new information.", + "user": { + "login": "Codertocat", + "id": 21031067, + "node_id": "MDQ6VXNlcjIxMDMxMDY3", + "avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/Codertocat", + "html_url": "https://github.com/Codertocat", + "followers_url": "https://api.github.com/users/Codertocat/followers", + "following_url": "https://api.github.com/users/Codertocat/following{/other_user}", + "gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions", + "organizations_url": "https://api.github.com/users/Codertocat/orgs", + "repos_url": "https://api.github.com/users/Codertocat/repos", + "events_url": "https://api.github.com/users/Codertocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/Codertocat/received_events", + "type": "User", + "site_admin": false + }, + "body": "This is a pretty simple change that we need to pull into master.", + "created_at": "2019-05-15T15:20:33Z", + "updated_at": "2019-05-15T15:20:33Z", + "closed_at": null, + "merged_at": null, + "merge_commit_sha": null, + "assignee": null, + "assignees": [], + "requested_reviewers": [], + "requested_teams": [], + "labels": [], + "milestone": null, + "commits_url": "https://api.github.com/repos/Codertocat/Hello-World/pulls/2/commits", + "review_comments_url": "https://api.github.com/repos/Codertocat/Hello-World/pulls/2/comments", + "review_comment_url": 
"https://api.github.com/repos/Codertocat/Hello-World/pulls/comments{/number}", + "comments_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/2/comments", + "statuses_url": "https://api.github.com/repos/Codertocat/Hello-World/statuses/ec26c3e57ca3a959ca5aad62de7213c562f8c821", + "head": { + "label": "Codertocat:changes", + "ref": "changes", + "sha": "ec26c3e57ca3a959ca5aad62de7213c562f8c821", + "user": { + "login": "Codertocat", + "id": 21031067, + "node_id": "MDQ6VXNlcjIxMDMxMDY3", + "avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/Codertocat", + "html_url": "https://github.com/Codertocat", + "followers_url": "https://api.github.com/users/Codertocat/followers", + "following_url": "https://api.github.com/users/Codertocat/following{/other_user}", + "gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions", + "organizations_url": "https://api.github.com/users/Codertocat/orgs", + "repos_url": "https://api.github.com/users/Codertocat/repos", + "events_url": "https://api.github.com/users/Codertocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/Codertocat/received_events", + "type": "User", + "site_admin": false + }, + "repo": { + "id": 186853002, + "node_id": "MDEwOlJlcG9zaXRvcnkxODY4NTMwMDI=", + "name": "Hello-World", + "full_name": "Codertocat/Hello-World", + "private": false, + "owner": { + "login": "Codertocat", + "id": 21031067, + "node_id": "MDQ6VXNlcjIxMDMxMDY3", + "avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/Codertocat", + "html_url": "https://github.com/Codertocat", + "followers_url": "https://api.github.com/users/Codertocat/followers", + "following_url": 
"https://api.github.com/users/Codertocat/following{/other_user}", + "gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions", + "organizations_url": "https://api.github.com/users/Codertocat/orgs", + "repos_url": "https://api.github.com/users/Codertocat/repos", + "events_url": "https://api.github.com/users/Codertocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/Codertocat/received_events", + "type": "User", + "site_admin": false + }, + "html_url": "https://github.com/Codertocat/Hello-World", + "description": null, + "fork": false, + "url": "https://api.github.com/repos/Codertocat/Hello-World", + "forks_url": "https://api.github.com/repos/Codertocat/Hello-World/forks", + "keys_url": "https://api.github.com/repos/Codertocat/Hello-World/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/Codertocat/Hello-World/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/Codertocat/Hello-World/teams", + "hooks_url": "https://api.github.com/repos/Codertocat/Hello-World/hooks", + "issue_events_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/events{/number}", + "events_url": "https://api.github.com/repos/Codertocat/Hello-World/events", + "assignees_url": "https://api.github.com/repos/Codertocat/Hello-World/assignees{/user}", + "branches_url": "https://api.github.com/repos/Codertocat/Hello-World/branches{/branch}", + "tags_url": "https://api.github.com/repos/Codertocat/Hello-World/tags", + "blobs_url": "https://api.github.com/repos/Codertocat/Hello-World/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/Codertocat/Hello-World/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/Codertocat/Hello-World/git/refs{/sha}", + "trees_url": 
"https://api.github.com/repos/Codertocat/Hello-World/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/Codertocat/Hello-World/statuses/{sha}", + "languages_url": "https://api.github.com/repos/Codertocat/Hello-World/languages", + "stargazers_url": "https://api.github.com/repos/Codertocat/Hello-World/stargazers", + "contributors_url": "https://api.github.com/repos/Codertocat/Hello-World/contributors", + "subscribers_url": "https://api.github.com/repos/Codertocat/Hello-World/subscribers", + "subscription_url": "https://api.github.com/repos/Codertocat/Hello-World/subscription", + "commits_url": "https://api.github.com/repos/Codertocat/Hello-World/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/Codertocat/Hello-World/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/Codertocat/Hello-World/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/comments{/number}", + "contents_url": "https://api.github.com/repos/Codertocat/Hello-World/contents/{+path}", + "compare_url": "https://api.github.com/repos/Codertocat/Hello-World/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/Codertocat/Hello-World/merges", + "archive_url": "https://api.github.com/repos/Codertocat/Hello-World/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/Codertocat/Hello-World/downloads", + "issues_url": "https://api.github.com/repos/Codertocat/Hello-World/issues{/number}", + "pulls_url": "https://api.github.com/repos/Codertocat/Hello-World/pulls{/number}", + "milestones_url": "https://api.github.com/repos/Codertocat/Hello-World/milestones{/number}", + "notifications_url": "https://api.github.com/repos/Codertocat/Hello-World/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/Codertocat/Hello-World/labels{/name}", + "releases_url": "https://api.github.com/repos/Codertocat/Hello-World/releases{/id}", + "deployments_url": 
"https://api.github.com/repos/Codertocat/Hello-World/deployments", + "created_at": "2019-05-15T15:19:25Z", + "updated_at": "2019-05-15T15:19:27Z", + "pushed_at": "2019-05-15T15:20:32Z", + "git_url": "git://github.com/Codertocat/Hello-World.git", + "ssh_url": "git@github.com:Codertocat/Hello-World.git", + "clone_url": "https://github.com/Codertocat/Hello-World.git", + "svn_url": "https://github.com/Codertocat/Hello-World", + "homepage": null, + "size": 0, + "stargazers_count": 0, + "watchers_count": 0, + "language": null, + "has_issues": true, + "has_projects": true, + "has_downloads": true, + "has_wiki": true, + "has_pages": true, + "forks_count": 0, + "mirror_url": null, + "archived": false, + "disabled": false, + "open_issues_count": 2, + "license": null, + "forks": 0, + "open_issues": 2, + "watchers": 0, + "default_branch": "master", + "allow_squash_merge": true, + "allow_merge_commit": true, + "allow_rebase_merge": true, + "delete_branch_on_merge": false + } + }, + "base": { + "label": "Codertocat:master", + "ref": "master", + "sha": "f95f852bd8fca8fcc58a9a2d6c842781e32a215e", + "user": { + "login": "Codertocat", + "id": 21031067, + "node_id": "MDQ6VXNlcjIxMDMxMDY3", + "avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/Codertocat", + "html_url": "https://github.com/Codertocat", + "followers_url": "https://api.github.com/users/Codertocat/followers", + "following_url": "https://api.github.com/users/Codertocat/following{/other_user}", + "gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions", + "organizations_url": "https://api.github.com/users/Codertocat/orgs", + "repos_url": "https://api.github.com/users/Codertocat/repos", + "events_url": "https://api.github.com/users/Codertocat/events{/privacy}", + 
"received_events_url": "https://api.github.com/users/Codertocat/received_events", + "type": "User", + "site_admin": false + }, + "repo": { + "id": 186853002, + "node_id": "MDEwOlJlcG9zaXRvcnkxODY4NTMwMDI=", + "name": "Hello-World", + "full_name": "Codertocat/Hello-World", + "private": false, + "owner": { + "login": "Codertocat", + "id": 21031067, + "node_id": "MDQ6VXNlcjIxMDMxMDY3", + "avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/Codertocat", + "html_url": "https://github.com/Codertocat", + "followers_url": "https://api.github.com/users/Codertocat/followers", + "following_url": "https://api.github.com/users/Codertocat/following{/other_user}", + "gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions", + "organizations_url": "https://api.github.com/users/Codertocat/orgs", + "repos_url": "https://api.github.com/users/Codertocat/repos", + "events_url": "https://api.github.com/users/Codertocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/Codertocat/received_events", + "type": "User", + "site_admin": false + }, + "html_url": "https://github.com/Codertocat/Hello-World", + "description": null, + "fork": false, + "url": "https://api.github.com/repos/Codertocat/Hello-World", + "forks_url": "https://api.github.com/repos/Codertocat/Hello-World/forks", + "keys_url": "https://api.github.com/repos/Codertocat/Hello-World/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/Codertocat/Hello-World/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/Codertocat/Hello-World/teams", + "hooks_url": "https://api.github.com/repos/Codertocat/Hello-World/hooks", + "issue_events_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/events{/number}", + 
"events_url": "https://api.github.com/repos/Codertocat/Hello-World/events", + "assignees_url": "https://api.github.com/repos/Codertocat/Hello-World/assignees{/user}", + "branches_url": "https://api.github.com/repos/Codertocat/Hello-World/branches{/branch}", + "tags_url": "https://api.github.com/repos/Codertocat/Hello-World/tags", + "blobs_url": "https://api.github.com/repos/Codertocat/Hello-World/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/Codertocat/Hello-World/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/Codertocat/Hello-World/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/Codertocat/Hello-World/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/Codertocat/Hello-World/statuses/{sha}", + "languages_url": "https://api.github.com/repos/Codertocat/Hello-World/languages", + "stargazers_url": "https://api.github.com/repos/Codertocat/Hello-World/stargazers", + "contributors_url": "https://api.github.com/repos/Codertocat/Hello-World/contributors", + "subscribers_url": "https://api.github.com/repos/Codertocat/Hello-World/subscribers", + "subscription_url": "https://api.github.com/repos/Codertocat/Hello-World/subscription", + "commits_url": "https://api.github.com/repos/Codertocat/Hello-World/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/Codertocat/Hello-World/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/Codertocat/Hello-World/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/comments{/number}", + "contents_url": "https://api.github.com/repos/Codertocat/Hello-World/contents/{+path}", + "compare_url": "https://api.github.com/repos/Codertocat/Hello-World/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/Codertocat/Hello-World/merges", + "archive_url": "https://api.github.com/repos/Codertocat/Hello-World/{archive_format}{/ref}", + "downloads_url": 
"https://api.github.com/repos/Codertocat/Hello-World/downloads", + "issues_url": "https://api.github.com/repos/Codertocat/Hello-World/issues{/number}", + "pulls_url": "https://api.github.com/repos/Codertocat/Hello-World/pulls{/number}", + "milestones_url": "https://api.github.com/repos/Codertocat/Hello-World/milestones{/number}", + "notifications_url": "https://api.github.com/repos/Codertocat/Hello-World/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/Codertocat/Hello-World/labels{/name}", + "releases_url": "https://api.github.com/repos/Codertocat/Hello-World/releases{/id}", + "deployments_url": "https://api.github.com/repos/Codertocat/Hello-World/deployments", + "created_at": "2019-05-15T15:19:25Z", + "updated_at": "2019-05-15T15:19:27Z", + "pushed_at": "2019-05-15T15:20:32Z", + "git_url": "git://github.com/Codertocat/Hello-World.git", + "ssh_url": "git@github.com:Codertocat/Hello-World.git", + "clone_url": "https://github.com/Codertocat/Hello-World.git", + "svn_url": "https://github.com/Codertocat/Hello-World", + "homepage": null, + "size": 0, + "stargazers_count": 0, + "watchers_count": 0, + "language": null, + "has_issues": true, + "has_projects": true, + "has_downloads": true, + "has_wiki": true, + "has_pages": true, + "forks_count": 0, + "mirror_url": null, + "archived": false, + "disabled": false, + "open_issues_count": 2, + "license": null, + "forks": 0, + "open_issues": 2, + "watchers": 0, + "default_branch": "master", + "allow_squash_merge": true, + "allow_merge_commit": true, + "allow_rebase_merge": true, + "delete_branch_on_merge": false + } + }, + "_links": { + "self": { + "href": "https://api.github.com/repos/Codertocat/Hello-World/pulls/2" + }, + "html": { + "href": "https://github.com/Codertocat/Hello-World/pull/2" + }, + "issue": { + "href": "https://api.github.com/repos/Codertocat/Hello-World/issues/2" + }, + "comments": { + "href": "https://api.github.com/repos/Codertocat/Hello-World/issues/2/comments" + }, 
+ "review_comments": { + "href": "https://api.github.com/repos/Codertocat/Hello-World/pulls/2/comments" + }, + "review_comment": { + "href": "https://api.github.com/repos/Codertocat/Hello-World/pulls/comments{/number}" + }, + "commits": { + "href": "https://api.github.com/repos/Codertocat/Hello-World/pulls/2/commits" + }, + "statuses": { + "href": "https://api.github.com/repos/Codertocat/Hello-World/statuses/ec26c3e57ca3a959ca5aad62de7213c562f8c821" + } + }, + "author_association": "OWNER", + "draft": false, + "merged": false, + "mergeable": null, + "rebaseable": null, + "mergeable_state": "unknown", + "merged_by": null, + "comments": 0, + "review_comments": 0, + "maintainer_can_modify": false, + "commits": 1, + "additions": 1, + "deletions": 1, + "changed_files": 1 + }, + "repository": { + "id": 186853002, + "node_id": "MDEwOlJlcG9zaXRvcnkxODY4NTMwMDI=", + "name": "Hello-World", + "full_name": "Codertocat/Hello-World", + "private": false, + "owner": { + "login": "Codertocat", + "id": 21031067, + "node_id": "MDQ6VXNlcjIxMDMxMDY3", + "avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/Codertocat", + "html_url": "https://github.com/Codertocat", + "followers_url": "https://api.github.com/users/Codertocat/followers", + "following_url": "https://api.github.com/users/Codertocat/following{/other_user}", + "gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions", + "organizations_url": "https://api.github.com/users/Codertocat/orgs", + "repos_url": "https://api.github.com/users/Codertocat/repos", + "events_url": "https://api.github.com/users/Codertocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/Codertocat/received_events", + "type": "User", + "site_admin": false + }, + "html_url": 
"https://github.com/Codertocat/Hello-World", + "description": null, + "fork": false, + "url": "https://api.github.com/repos/Codertocat/Hello-World", + "forks_url": "https://api.github.com/repos/Codertocat/Hello-World/forks", + "keys_url": "https://api.github.com/repos/Codertocat/Hello-World/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/Codertocat/Hello-World/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/Codertocat/Hello-World/teams", + "hooks_url": "https://api.github.com/repos/Codertocat/Hello-World/hooks", + "issue_events_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/events{/number}", + "events_url": "https://api.github.com/repos/Codertocat/Hello-World/events", + "assignees_url": "https://api.github.com/repos/Codertocat/Hello-World/assignees{/user}", + "branches_url": "https://api.github.com/repos/Codertocat/Hello-World/branches{/branch}", + "tags_url": "https://api.github.com/repos/Codertocat/Hello-World/tags", + "blobs_url": "https://api.github.com/repos/Codertocat/Hello-World/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/Codertocat/Hello-World/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/Codertocat/Hello-World/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/Codertocat/Hello-World/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/Codertocat/Hello-World/statuses/{sha}", + "languages_url": "https://api.github.com/repos/Codertocat/Hello-World/languages", + "stargazers_url": "https://api.github.com/repos/Codertocat/Hello-World/stargazers", + "contributors_url": "https://api.github.com/repos/Codertocat/Hello-World/contributors", + "subscribers_url": "https://api.github.com/repos/Codertocat/Hello-World/subscribers", + "subscription_url": "https://api.github.com/repos/Codertocat/Hello-World/subscription", + "commits_url": "https://api.github.com/repos/Codertocat/Hello-World/commits{/sha}", + "git_commits_url": 
"https://api.github.com/repos/Codertocat/Hello-World/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/Codertocat/Hello-World/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/comments{/number}", + "contents_url": "https://api.github.com/repos/Codertocat/Hello-World/contents/{+path}", + "compare_url": "https://api.github.com/repos/Codertocat/Hello-World/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/Codertocat/Hello-World/merges", + "archive_url": "https://api.github.com/repos/Codertocat/Hello-World/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/Codertocat/Hello-World/downloads", + "issues_url": "https://api.github.com/repos/Codertocat/Hello-World/issues{/number}", + "pulls_url": "https://api.github.com/repos/Codertocat/Hello-World/pulls{/number}", + "milestones_url": "https://api.github.com/repos/Codertocat/Hello-World/milestones{/number}", + "notifications_url": "https://api.github.com/repos/Codertocat/Hello-World/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/Codertocat/Hello-World/labels{/name}", + "releases_url": "https://api.github.com/repos/Codertocat/Hello-World/releases{/id}", + "deployments_url": "https://api.github.com/repos/Codertocat/Hello-World/deployments", + "created_at": "2019-05-15T15:19:25Z", + "updated_at": "2019-05-15T15:19:27Z", + "pushed_at": "2019-05-15T15:20:32Z", + "git_url": "git://github.com/Codertocat/Hello-World.git", + "ssh_url": "git@github.com:Codertocat/Hello-World.git", + "clone_url": "https://github.com/Codertocat/Hello-World.git", + "svn_url": "https://github.com/Codertocat/Hello-World", + "homepage": null, + "size": 0, + "stargazers_count": 0, + "watchers_count": 0, + "language": null, + "has_issues": true, + "has_projects": true, + "has_downloads": true, + "has_wiki": true, + "has_pages": true, + "forks_count": 0, + "mirror_url": null, + "archived": false, + 
"disabled": false, + "open_issues_count": 2, + "license": null, + "forks": 0, + "open_issues": 2, + "watchers": 0, + "default_branch": "master" + }, + "sender": { + "login": "Codertocat", + "id": 21031067, + "node_id": "MDQ6VXNlcjIxMDMxMDY3", + "avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/Codertocat", + "html_url": "https://github.com/Codertocat", + "followers_url": "https://api.github.com/users/Codertocat/followers", + "following_url": "https://api.github.com/users/Codertocat/following{/other_user}", + "gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions", + "organizations_url": "https://api.github.com/users/Codertocat/orgs", + "repos_url": "https://api.github.com/users/Codertocat/repos", + "events_url": "https://api.github.com/users/Codertocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/Codertocat/received_events", + "type": "User", + "site_admin": false + } +} \ No newline at end of file diff --git a/applicationset/webhook/testdata/gitlab-event.json b/applicationset/webhook/testdata/gitlab-event.json new file mode 100644 index 0000000000000..83ac0b4fcb059 --- /dev/null +++ b/applicationset/webhook/testdata/gitlab-event.json @@ -0,0 +1,65 @@ +{ + "object_kind": "push", + "event_name": "push", + "before": "e5ba5f6c13b64670048daa88e4c053d60b0e115a", + "after": "bb0748feaa336d841c251017e4e374c22d0c8a98", + "ref": "refs/heads/master", + "checkout_sha": "bb0748feaa336d841c251017e4e374c22d0c8a98", + "message": null, + "user_id": 1, + "user_name": "name", + "user_username": "username", + "user_email": "", + "user_avatar": "", + "project_id": 1, + "project": { + "id": 1, + "name": "project", + "description": "", + "web_url": "https://gitlab/group/name", + "avatar_url": null, + 
"git_ssh_url": "ssh://git@gitlab:2222/group/name.git", + "git_http_url": "https://gitlab/group/name.git", + "namespace": "group", + "visibility_level": 1, + "path_with_namespace": "group/name", + "default_branch": "master", + "ci_config_path": null, + "homepage": "https://gitlab/group/name", + "url": "ssh://git@gitlab:2222/group/name.git", + "ssh_url": "ssh://git@gitlab:2222/group/name.git", + "http_url": "https://gitlab/group/name.git" + }, + "commits": [ + { + "id": "bb0748feaa336d841c251017e4e374c22d0c8a98", + "message": "Test commit message\n", + "timestamp": "2020-01-06T03:47:55Z", + "url": "https://gitlab/group/name/commit/bb0748feaa336d841c251017e4e374c22d0c8a98", + "author": { + "name": "User", + "email": "user@example.com" + }, + "added": [ + "file.yaml" + ], + "modified": [ + ], + "removed": [ + + ] + } + ], + "total_commits_count": 1, + "push_options": { + }, + "repository": { + "name": "name", + "url": "ssh://git@gitlab:2222/group/name.git", + "description": "", + "homepage": "https://gitlab/group/name", + "git_http_url": "https://gitlab/group/name.git", + "git_ssh_url": "ssh://git@gitlab:2222/group/name.git", + "visibility_level": 10 + } + } \ No newline at end of file diff --git a/applicationset/webhook/testdata/gitlab-merge-request-approval-event.json b/applicationset/webhook/testdata/gitlab-merge-request-approval-event.json new file mode 100644 index 0000000000000..bef2fee493f5e --- /dev/null +++ b/applicationset/webhook/testdata/gitlab-merge-request-approval-event.json @@ -0,0 +1,175 @@ +{ + "object_kind": "merge_request", + "event_type": "merge_request", + "user": { + "id": 1, + "name": "Administrator", + "username": "root", + "avatar_url": "http://www.gravatar.com/avatar/e64c7d89f26bd1972efa854d13d7dd61?s=40\u0026d=identicon", + "email": "admin@example.com" + }, + "project": { + "id": 100500, + "name": "project", + "description": "", + "web_url": "https://gitlab.com/group/name", + "avatar_url": null, + "git_ssh_url": 
"ssh://git@gitlab:2222/group/name.git", + "git_http_url": "https://gitlab.com/group/name.git", + "namespace": "group", + "visibility_level": 1, + "path_with_namespace": "group/name", + "default_branch": "master", + "ci_config_path": null, + "homepage": "https://gitlab.com/group/name", + "url": "ssh://git@gitlab:2222/group/name.git", + "ssh_url": "ssh://git@gitlab:2222/group/name.git", + "http_url": "https://gitlab.com/group/name.git" + }, + "repository": { + "name": "name", + "url": "ssh://git@gitlab:2222/group/name.git", + "description": "", + "homepage": "https://gitlab.com/group/name", + "git_http_url": "https://gitlab.com/group/name.git", + "git_ssh_url": "ssh://git@gitlab:2222/group/name.git", + "visibility_level": 10 + }, + "object_attributes": { + "id": 99, + "iid": 1, + "target_branch": "master", + "source_branch": "ms-viewport", + "source_project_id": 14, + "author_id": 51, + "assignee_id": 6, + "title": "MS-Viewport", + "created_at": "2013-12-03T17:23:34Z", + "updated_at": "2013-12-03T17:23:34Z", + "milestone_id": null, + "state": "opened", + "blocking_discussions_resolved": true, + "work_in_progress": false, + "first_contribution": true, + "merge_status": "unchecked", + "target_project_id": 100500, + "description": "", + "url": "https://gitlab.com/group/name/merge_requests/1", + "source": { + "name": "Awesome Project", + "description": "Aut reprehenderit ut est.", + "web_url": "https://gitlab.com/group/name", + "avatar_url": null, + "git_ssh_url": "ssh://git@gitlab:2222/group/name.git", + "git_http_url": "https://gitlab.com/group/name.git", + "namespace": "Awesome Space", + "visibility_level": 20, + "path_with_namespace": "group/name", + "default_branch": "master", + "homepage": "https://gitlab.com/group/name", + "url": "https://gitlab.com/group/name.git", + "ssh_url": "ssh://git@gitlab:2222/group/name.git", + "http_url": "https://gitlab.com/group/name.git" + }, + "target": { + "name": "Awesome Project", + "description": "Aut reprehenderit ut est.", + 
"web_url": "https://gitlab.com/group/name", + "avatar_url": null, + "git_ssh_url": "ssh://git@gitlab:2222/group/name.git", + "git_http_url": "https://gitlab.com/group/name.git", + "namespace": "Awesome Space", + "visibility_level": 20, + "path_with_namespace": "group/name", + "default_branch": "master", + "homepage": "https://gitlab.com/group/name", + "url": "https://gitlab.com/group/name.git", + "ssh_url": "ssh://git@gitlab:2222/group/name.git", + "http_url": "https://gitlab.com/group/name.git" + }, + "last_commit": { + "id": "da1560886d4f094c3e6c9ef40349f7d38b5d27d7", + "message": "fixed readme", + "timestamp": "2012-01-03T23:36:29+02:00", + "url": "https://gitlab.com/group/name/commits/da1560886d4f094c3e6c9ef40349f7d38b5d27d7", + "author": { + "name": "GitLab dev user", + "email": "gitlabdev@dv6700.(none)" + } + }, + "labels": [ + { + "id": 206, + "title": "API", + "color": "#ffffff", + "project_id": 14, + "created_at": "2013-12-03T17:15:43Z", + "updated_at": "2013-12-03T17:15:43Z", + "template": false, + "description": "API related issues", + "type": "ProjectLabel", + "group_id": 41 + } + ], + "action": "approval", + "assignee": { + "name": "User1", + "username": "user1", + "avatar_url": "http://www.gravatar.com/avatar/e64c7d89f26bd1972efa854d13d7dd61?s=40\u0026d=identicon" + } + }, + "labels": [ + { + "id": 206, + "title": "API", + "color": "#ffffff", + "project_id": 14, + "created_at": "2013-12-03T17:15:43Z", + "updated_at": "2013-12-03T17:15:43Z", + "template": false, + "description": "API related issues", + "type": "ProjectLabel", + "group_id": 41 + } + ], + "changes": { + "updated_by_id": { + "previous": null, + "current": 1 + }, + "updated_at": { + "previous": "2017-09-15 16:50:55 UTC", + "current": "2017-09-15 16:52:00 UTC" + }, + "labels": { + "previous": [ + { + "id": 206, + "title": "API", + "color": "#ffffff", + "project_id": 14, + "created_at": "2013-12-03T17:15:43Z", + "updated_at": "2013-12-03T17:15:43Z", + "template": false, + "description": "API 
related issues", + "type": "ProjectLabel", + "group_id": 41 + } + ], + "current": [ + { + "id": 205, + "title": "Platform", + "color": "#123123", + "project_id": 14, + "created_at": "2013-12-03T17:15:43Z", + "updated_at": "2013-12-03T17:15:43Z", + "template": false, + "description": "Platform related issues", + "type": "ProjectLabel", + "group_id": 41 + } + ] + } + } +} diff --git a/applicationset/webhook/testdata/gitlab-merge-request-open-event.json b/applicationset/webhook/testdata/gitlab-merge-request-open-event.json new file mode 100644 index 0000000000000..e4e226a46dad6 --- /dev/null +++ b/applicationset/webhook/testdata/gitlab-merge-request-open-event.json @@ -0,0 +1,175 @@ +{ + "object_kind": "merge_request", + "event_type": "merge_request", + "user": { + "id": 1, + "name": "Administrator", + "username": "root", + "avatar_url": "http://www.gravatar.com/avatar/e64c7d89f26bd1972efa854d13d7dd61?s=40\u0026d=identicon", + "email": "admin@example.com" + }, + "project": { + "id": 100500, + "name": "project", + "description": "", + "web_url": "https://gitlab.com/group/name", + "avatar_url": null, + "git_ssh_url": "ssh://git@gitlab:2222/group/name.git", + "git_http_url": "https://gitlab.com/group/name.git", + "namespace": "group", + "visibility_level": 1, + "path_with_namespace": "group/name", + "default_branch": "master", + "ci_config_path": null, + "homepage": "https://gitlab.com/group/name", + "url": "ssh://git@gitlab:2222/group/name.git", + "ssh_url": "ssh://git@gitlab:2222/group/name.git", + "http_url": "https://gitlab.com/group/name.git" + }, + "repository": { + "name": "name", + "url": "ssh://git@gitlab:2222/group/name.git", + "description": "", + "homepage": "https://gitlab.com/group/name", + "git_http_url": "https://gitlab.com/group/name.git", + "git_ssh_url": "ssh://git@gitlab:2222/group/name.git", + "visibility_level": 10 + }, + "object_attributes": { + "id": 99, + "iid": 1, + "target_branch": "master", + "source_branch": "ms-viewport", + 
"source_project_id": 14, + "author_id": 51, + "assignee_id": 6, + "title": "MS-Viewport", + "created_at": "2013-12-03T17:23:34Z", + "updated_at": "2013-12-03T17:23:34Z", + "milestone_id": null, + "state": "opened", + "blocking_discussions_resolved": true, + "work_in_progress": false, + "first_contribution": true, + "merge_status": "unchecked", + "target_project_id": 100500, + "description": "", + "url": "https://gitlab.com/group/name/merge_requests/1", + "source": { + "name": "Awesome Project", + "description": "Aut reprehenderit ut est.", + "web_url": "https://gitlab.com/group/name", + "avatar_url": null, + "git_ssh_url": "ssh://git@gitlab:2222/group/name.git", + "git_http_url": "https://gitlab.com/group/name.git", + "namespace": "Awesome Space", + "visibility_level": 20, + "path_with_namespace": "group/name", + "default_branch": "master", + "homepage": "https://gitlab.com/group/name", + "url": "https://gitlab.com/group/name.git", + "ssh_url": "ssh://git@gitlab:2222/group/name.git", + "http_url": "https://gitlab.com/group/name.git" + }, + "target": { + "name": "Awesome Project", + "description": "Aut reprehenderit ut est.", + "web_url": "https://gitlab.com/group/name", + "avatar_url": null, + "git_ssh_url": "ssh://git@gitlab:2222/group/name.git", + "git_http_url": "https://gitlab.com/group/name.git", + "namespace": "Awesome Space", + "visibility_level": 20, + "path_with_namespace": "group/name", + "default_branch": "master", + "homepage": "https://gitlab.com/group/name", + "url": "https://gitlab.com/group/name.git", + "ssh_url": "ssh://git@gitlab:2222/group/name.git", + "http_url": "https://gitlab.com/group/name.git" + }, + "last_commit": { + "id": "da1560886d4f094c3e6c9ef40349f7d38b5d27d7", + "message": "fixed readme", + "timestamp": "2012-01-03T23:36:29+02:00", + "url": "https://gitlab.com/group/name/commits/da1560886d4f094c3e6c9ef40349f7d38b5d27d7", + "author": { + "name": "GitLab dev user", + "email": "gitlabdev@dv6700.(none)" + } + }, + "labels": [ + { + 
"id": 206, + "title": "API", + "color": "#ffffff", + "project_id": 14, + "created_at": "2013-12-03T17:15:43Z", + "updated_at": "2013-12-03T17:15:43Z", + "template": false, + "description": "API related issues", + "type": "ProjectLabel", + "group_id": 41 + } + ], + "action": "open", + "assignee": { + "name": "User1", + "username": "user1", + "avatar_url": "http://www.gravatar.com/avatar/e64c7d89f26bd1972efa854d13d7dd61?s=40\u0026d=identicon" + } + }, + "labels": [ + { + "id": 206, + "title": "API", + "color": "#ffffff", + "project_id": 14, + "created_at": "2013-12-03T17:15:43Z", + "updated_at": "2013-12-03T17:15:43Z", + "template": false, + "description": "API related issues", + "type": "ProjectLabel", + "group_id": 41 + } + ], + "changes": { + "updated_by_id": { + "previous": null, + "current": 1 + }, + "updated_at": { + "previous": "2017-09-15 16:50:55 UTC", + "current": "2017-09-15 16:52:00 UTC" + }, + "labels": { + "previous": [ + { + "id": 206, + "title": "API", + "color": "#ffffff", + "project_id": 14, + "created_at": "2013-12-03T17:15:43Z", + "updated_at": "2013-12-03T17:15:43Z", + "template": false, + "description": "API related issues", + "type": "ProjectLabel", + "group_id": 41 + } + ], + "current": [ + { + "id": 205, + "title": "Platform", + "color": "#123123", + "project_id": 14, + "created_at": "2013-12-03T17:15:43Z", + "updated_at": "2013-12-03T17:15:43Z", + "template": false, + "description": "Platform related issues", + "type": "ProjectLabel", + "group_id": 41 + } + ] + } + } +} diff --git a/applicationset/webhook/testdata/invalid-event.json b/applicationset/webhook/testdata/invalid-event.json new file mode 100644 index 0000000000000..5106894fb8b96 --- /dev/null +++ b/applicationset/webhook/testdata/invalid-event.json @@ -0,0 +1,3 @@ +{ + "event":"invalid" +} \ No newline at end of file diff --git a/applicationset/webhook/webhook.go b/applicationset/webhook/webhook.go new file mode 100644 index 0000000000000..ce099df35ea35 --- /dev/null +++ 
b/applicationset/webhook/webhook.go @@ -0,0 +1,654 @@ +package webhook + +import ( + "context" + "errors" + "fmt" + "html" + "net/http" + "net/url" + "regexp" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/argoproj/argo-cd/v2/applicationset/generators" + "github.com/argoproj/argo-cd/v2/common" + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + argosettings "github.com/argoproj/argo-cd/v2/util/settings" + + "github.com/go-playground/webhooks/v6/azuredevops" + "github.com/go-playground/webhooks/v6/github" + "github.com/go-playground/webhooks/v6/gitlab" + log "github.com/sirupsen/logrus" +) + +var ( + errBasicAuthVerificationFailed = errors.New("basic auth verification failed") +) + +type WebhookHandler struct { + namespace string + github *github.Webhook + gitlab *gitlab.Webhook + azuredevops *azuredevops.Webhook + azuredevopsAuthHandler func(r *http.Request) error + client client.Client + generators map[string]generators.Generator +} + +type gitGeneratorInfo struct { + Revision string + TouchedHead bool + RepoRegexp *regexp.Regexp +} + +type prGeneratorInfo struct { + Azuredevops *prGeneratorAzuredevopsInfo + Github *prGeneratorGithubInfo + Gitlab *prGeneratorGitlabInfo +} + +type prGeneratorAzuredevopsInfo struct { + Repo string + Project string +} + +type prGeneratorGithubInfo struct { + Repo string + Owner string + APIRegexp *regexp.Regexp +} + +type prGeneratorGitlabInfo struct { + Project string + APIHostname string +} + +func NewWebhookHandler(namespace string, argocdSettingsMgr *argosettings.SettingsManager, client client.Client, generators map[string]generators.Generator) (*WebhookHandler, error) { + // register the webhook secrets stored under "argocd-secret" for verifying incoming payloads + argocdSettings, err := argocdSettingsMgr.GetSettings() + if err != nil { + return nil, fmt.Errorf("Failed to get argocd settings: %v", err) + } + 
githubHandler, err := github.New(github.Options.Secret(argocdSettings.WebhookGitHubSecret)) + if err != nil { + return nil, fmt.Errorf("Unable to init GitHub webhook: %v", err) + } + gitlabHandler, err := gitlab.New(gitlab.Options.Secret(argocdSettings.WebhookGitLabSecret)) + if err != nil { + return nil, fmt.Errorf("Unable to init GitLab webhook: %v", err) + } + azuredevopsHandler, err := azuredevops.New() + if err != nil { + return nil, fmt.Errorf("Unable to init Azure DevOps webhook: %v", err) + } + azuredevopsAuthHandler := func(r *http.Request) error { + if argocdSettings.WebhookAzureDevOpsUsername != "" && argocdSettings.WebhookAzureDevOpsPassword != "" { + username, password, ok := r.BasicAuth() + if !ok || username != argocdSettings.WebhookAzureDevOpsUsername || password != argocdSettings.WebhookAzureDevOpsPassword { + return errBasicAuthVerificationFailed + } + } + return nil + } + + return &WebhookHandler{ + namespace: namespace, + github: githubHandler, + gitlab: gitlabHandler, + azuredevops: azuredevopsHandler, + azuredevopsAuthHandler: azuredevopsAuthHandler, + client: client, + generators: generators, + }, nil +} + +func (h *WebhookHandler) HandleEvent(payload interface{}) { + gitGenInfo := getGitGeneratorInfo(payload) + prGenInfo := getPRGeneratorInfo(payload) + if gitGenInfo == nil && prGenInfo == nil { + return + } + + appSetList := &v1alpha1.ApplicationSetList{} + err := h.client.List(context.Background(), appSetList, &client.ListOptions{}) + if err != nil { + log.Errorf("Failed to list applicationsets: %v", err) + return + } + + for _, appSet := range appSetList.Items { + shouldRefresh := false + for _, gen := range appSet.Spec.Generators { + // check if the ApplicationSet uses any generator that is relevant to the payload + shouldRefresh = shouldRefreshGitGenerator(gen.Git, gitGenInfo) || + shouldRefreshPRGenerator(gen.PullRequest, prGenInfo) || + shouldRefreshPluginGenerator(gen.Plugin) || + h.shouldRefreshMatrixGenerator(gen.Matrix, &appSet, 
gitGenInfo, prGenInfo) || + h.shouldRefreshMergeGenerator(gen.Merge, &appSet, gitGenInfo, prGenInfo) + if shouldRefresh { + break + } + } + if shouldRefresh { + err := refreshApplicationSet(h.client, &appSet) + if err != nil { + log.Errorf("Failed to refresh ApplicationSet '%s' for controller reprocessing", appSet.Name) + continue + } + log.Infof("refresh ApplicationSet %v/%v from webhook", appSet.Namespace, appSet.Name) + } + } +} + +func (h *WebhookHandler) Handler(w http.ResponseWriter, r *http.Request) { + var payload interface{} + var err error + + switch { + case r.Header.Get("X-GitHub-Event") != "": + payload, err = h.github.Parse(r, github.PushEvent, github.PullRequestEvent, github.PingEvent) + case r.Header.Get("X-Gitlab-Event") != "": + payload, err = h.gitlab.Parse(r, gitlab.PushEvents, gitlab.TagEvents, gitlab.MergeRequestEvents) + case r.Header.Get("X-Vss-Activityid") != "": + if err = h.azuredevopsAuthHandler(r); err != nil { + if errors.Is(err, errBasicAuthVerificationFailed) { + log.WithField(common.SecurityField, common.SecurityHigh).Infof("Azure DevOps webhook basic auth verification failed") + } + } else { + payload, err = h.azuredevops.Parse(r, azuredevops.GitPushEventType, azuredevops.GitPullRequestCreatedEventType, azuredevops.GitPullRequestUpdatedEventType, azuredevops.GitPullRequestMergedEventType) + } + default: + log.Debug("Ignoring unknown webhook event") + http.Error(w, "Unknown webhook event", http.StatusBadRequest) + return + } + + if err != nil { + log.Infof("Webhook processing failed: %s", err) + status := http.StatusBadRequest + if r.Method != http.MethodPost { + status = http.StatusMethodNotAllowed + } + http.Error(w, fmt.Sprintf("Webhook processing failed: %s", html.EscapeString(err.Error())), status) + return + } + + h.HandleEvent(payload) +} + +func parseRevision(ref string) string { + refParts := strings.SplitN(ref, "/", 3) + return refParts[len(refParts)-1] +} + +func getGitGeneratorInfo(payload interface{}) *gitGeneratorInfo 
{ + var ( + webURL string + revision string + touchedHead bool + ) + switch payload := payload.(type) { + case github.PushPayload: + webURL = payload.Repository.HTMLURL + revision = parseRevision(payload.Ref) + touchedHead = payload.Repository.DefaultBranch == revision + case gitlab.PushEventPayload: + webURL = payload.Project.WebURL + revision = parseRevision(payload.Ref) + touchedHead = payload.Project.DefaultBranch == revision + case azuredevops.GitPushEvent: + // See: https://learn.microsoft.com/en-us/azure/devops/service-hooks/events?view=azure-devops#git.push + webURL = payload.Resource.Repository.RemoteURL + revision = parseRevision(payload.Resource.RefUpdates[0].Name) + touchedHead = payload.Resource.RefUpdates[0].Name == payload.Resource.Repository.DefaultBranch + // unfortunately, Azure DevOps doesn't provide a list of changed files + default: + return nil + } + + log.Infof("Received push event repo: %s, revision: %s, touchedHead: %v", webURL, revision, touchedHead) + urlObj, err := url.Parse(webURL) + if err != nil { + log.Errorf("Failed to parse repoURL '%s'", webURL) + return nil + } + regexpStr := `(?i)(http://|https://|\w+@|ssh://(\w+@)?)` + urlObj.Hostname() + "(:[0-9]+|)[:/]" + urlObj.Path[1:] + "(\\.git)?" 
+ repoRegexp, err := regexp.Compile(regexpStr) + if err != nil { + log.Errorf("Failed to compile regexp for repoURL '%s'", webURL) + return nil + } + + return &gitGeneratorInfo{ + RepoRegexp: repoRegexp, + TouchedHead: touchedHead, + Revision: revision, + } +} + +func getPRGeneratorInfo(payload interface{}) *prGeneratorInfo { + var info prGeneratorInfo + switch payload := payload.(type) { + case github.PullRequestPayload: + if !isAllowedGithubPullRequestAction(payload.Action) { + return nil + } + + apiURL := payload.Repository.URL + urlObj, err := url.Parse(apiURL) + if err != nil { + log.Errorf("Failed to parse repoURL '%s'", apiURL) + return nil + } + regexpStr := `(?i)(http://|https://|\w+@|ssh://(\w+@)?)` + urlObj.Hostname() + "(:[0-9]+|)[:/]" + apiRegexp, err := regexp.Compile(regexpStr) + if err != nil { + log.Errorf("Failed to compile regexp for repoURL '%s'", apiURL) + return nil + } + info.Github = &prGeneratorGithubInfo{ + Repo: payload.Repository.Name, + Owner: payload.Repository.Owner.Login, + APIRegexp: apiRegexp, + } + case gitlab.MergeRequestEventPayload: + if !isAllowedGitlabPullRequestAction(payload.ObjectAttributes.Action) { + return nil + } + + apiURL := payload.Project.WebURL + urlObj, err := url.Parse(apiURL) + if err != nil { + log.Errorf("Failed to parse repoURL '%s'", apiURL) + return nil + } + + info.Gitlab = &prGeneratorGitlabInfo{ + Project: strconv.FormatInt(payload.ObjectAttributes.TargetProjectID, 10), + APIHostname: urlObj.Hostname(), + } + case azuredevops.GitPullRequestEvent: + if !isAllowedAzureDevOpsPullRequestAction(string(payload.EventType)) { + return nil + } + + repo := payload.Resource.Repository.Name + project := payload.Resource.Repository.Project.Name + + info.Azuredevops = &prGeneratorAzuredevopsInfo{ + Repo: repo, + Project: project, + } + default: + return nil + } + + return &info +} + +// githubAllowedPullRequestActions is a list of github actions that allow refresh +var githubAllowedPullRequestActions = []string{ + 
	"opened",
	"closed",
	"synchronize",
	"labeled",
	"reopened",
	"unlabeled",
}

// gitlabAllowedPullRequestActions is a list of gitlab actions that allow refresh
// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#merge-request-events
var gitlabAllowedPullRequestActions = []string{
	"open",
	"close",
	"reopen",
	"update",
	"merge",
}

// azuredevopsAllowedPullRequestActions is a list of Azure DevOps actions that allow refresh
var azuredevopsAllowedPullRequestActions = []string{
	"git.pullrequest.created",
	"git.pullrequest.merged",
	"git.pullrequest.updated",
}

// isAllowedGithubPullRequestAction reports whether a GitHub PR action should
// trigger an ApplicationSet refresh.
func isAllowedGithubPullRequestAction(action string) bool {
	for _, allow := range githubAllowedPullRequestActions {
		if allow == action {
			return true
		}
	}
	return false
}

// isAllowedGitlabPullRequestAction reports whether a GitLab MR action should
// trigger an ApplicationSet refresh.
func isAllowedGitlabPullRequestAction(action string) bool {
	for _, allow := range gitlabAllowedPullRequestActions {
		if allow == action {
			return true
		}
	}
	return false
}

// isAllowedAzureDevOpsPullRequestAction reports whether an Azure DevOps PR
// event type should trigger an ApplicationSet refresh.
func isAllowedAzureDevOpsPullRequestAction(action string) bool {
	for _, allow := range azuredevopsAllowedPullRequestActions {
		if allow == action {
			return true
		}
	}
	return false
}

// shouldRefreshGitGenerator reports whether the push described by info affects
// the given Git generator: the generator's RepoURL must match the pushed repo
// and the generator's target revision must be the one that changed.
func shouldRefreshGitGenerator(gen *v1alpha1.GitGenerator, info *gitGeneratorInfo) bool {
	if gen == nil || info == nil {
		return false
	}

	if !gitGeneratorUsesURL(gen, info.Revision, info.RepoRegexp) {
		return false
	}
	if !genRevisionHasChanged(gen, info.Revision, info.TouchedHead) {
		return false
	}
	return true
}

// shouldRefreshPluginGenerator: a plugin generator is always refreshed — we
// cannot know which external inputs it depends on.
func shouldRefreshPluginGenerator(gen *v1alpha1.PluginGenerator) bool {
	return gen != nil
}

// genRevisionHasChanged reports whether the pushed revision matches the
// generator's target revision. An empty or "HEAD" target means the default
// branch, so it changed iff the push touched the default branch.
func genRevisionHasChanged(gen *v1alpha1.GitGenerator, revision string, touchedHead bool) bool {
	targetRev := parseRevision(gen.Revision)
	if targetRev == "HEAD" || targetRev == "" { // revision is head
		return touchedHead
	}

	return targetRev == revision
}

// gitGeneratorUsesURL reports whether the generator's RepoURL matches the
// pushed repository URL pattern. webURL is used only in the debug log line.
// NOTE(review): the caller above passes info.Revision as webURL, so the debug
// message logs the revision, not the URL — confirm whether intentional.
func gitGeneratorUsesURL(gen *v1alpha1.GitGenerator, webURL string, repoRegexp *regexp.Regexp) bool {
	if !repoRegexp.MatchString(gen.RepoURL) {
		log.Debugf("%s does not match %s", gen.RepoURL, repoRegexp.String())
		return false
	}

	log.Debugf("%s uses repoURL %s", gen.RepoURL, webURL)
	return true
}

// shouldRefreshPRGenerator reports whether the PR event described by info
// targets the given PullRequest generator (matching project/owner/repo and
// API endpoint per provider).
func shouldRefreshPRGenerator(gen *v1alpha1.PullRequestGenerator, info *prGeneratorInfo) bool {
	if gen == nil || info == nil {
		return false
	}

	if gen.GitLab != nil && info.Gitlab != nil {
		if gen.GitLab.Project != info.Gitlab.Project {
			return false
		}

		api := gen.GitLab.API
		if api == "" {
			// Default to the public GitLab API host when unset.
			api = "https://gitlab.com/"
		}

		urlObj, err := url.Parse(api)
		if err != nil {
			log.Errorf("Failed to parse repoURL '%s'", api)
			return false
		}

		if urlObj.Hostname() != info.Gitlab.APIHostname {
			log.Debugf("%s does not match %s", api, info.Gitlab.APIHostname)
			return false
		}

		return true
	}

	if gen.Github != nil && info.Github != nil {
		if gen.Github.Owner != info.Github.Owner {
			return false
		}
		if gen.Github.Repo != info.Github.Repo {
			return false
		}
		api := gen.Github.API
		if api == "" {
			// Default to the public GitHub API endpoint when unset.
			api = "https://api.github.com/"
		}
		if !info.Github.APIRegexp.MatchString(api) {
			log.Debugf("%s does not match %s", api, info.Github.APIRegexp.String())
			return false
		}

		return true
	}

	if gen.AzureDevOps != nil && info.Azuredevops != nil {
		if gen.AzureDevOps.Project != info.Azuredevops.Project {
			return false
		}
		if gen.AzureDevOps.Repo != info.Azuredevops.Repo {
			return false
		}
		return true
	}

	return false
}

// shouldRefreshMatrixGenerator reports whether a webhook event affects a
// Matrix generator: it checks both child generators directly, recurses into
// nested Matrix/Merge children, and interpolates the second child with the
// first child's params before checking it.
func (h *WebhookHandler) shouldRefreshMatrixGenerator(gen *v1alpha1.MatrixGenerator, appSet *v1alpha1.ApplicationSet, gitGenInfo *gitGeneratorInfo, prGenInfo *prGeneratorInfo) bool {
	if gen == nil {
		return false
	}

	// Silently ignore, the ApplicationSetReconciler will log the error as part of the reconcile
	if len(gen.Generators) < 2 || len(gen.Generators) > 2 {
		return false
	}

	g0 := gen.Generators[0]

	// Check first child generator for Git or Pull Request Generator
	if
shouldRefreshGitGenerator(g0.Git, gitGenInfo) || + shouldRefreshPRGenerator(g0.PullRequest, prGenInfo) { + return true + } + + // Check first child generator for nested Matrix generator + var matrixGenerator0 *v1alpha1.MatrixGenerator + if g0.Matrix != nil { + // Since nested matrix generator is represented as a JSON object in the CRD, we unmarshall it back to a Go struct here. + nestedMatrix, err := v1alpha1.ToNestedMatrixGenerator(g0.Matrix) + if err != nil { + log.Errorf("Failed to unmarshall nested matrix generator: %v", err) + return false + } + if nestedMatrix != nil { + matrixGenerator0 = nestedMatrix.ToMatrixGenerator() + if h.shouldRefreshMatrixGenerator(matrixGenerator0, appSet, gitGenInfo, prGenInfo) { + return true + } + } + } + + // Check first child generator for nested Merge generator + var mergeGenerator0 *v1alpha1.MergeGenerator + if g0.Merge != nil { + // Since nested merge generator is represented as a JSON object in the CRD, we unmarshall it back to a Go struct here. 
+ nestedMerge, err := v1alpha1.ToNestedMergeGenerator(g0.Merge) + if err != nil { + log.Errorf("Failed to unmarshall nested merge generator: %v", err) + return false + } + if nestedMerge != nil { + mergeGenerator0 = nestedMerge.ToMergeGenerator() + if h.shouldRefreshMergeGenerator(mergeGenerator0, appSet, gitGenInfo, prGenInfo) { + return true + } + } + } + + // Create ApplicationSetGenerator for first child generator from its ApplicationSetNestedGenerator + requestedGenerator0 := &v1alpha1.ApplicationSetGenerator{ + List: g0.List, + Clusters: g0.Clusters, + Git: g0.Git, + SCMProvider: g0.SCMProvider, + ClusterDecisionResource: g0.ClusterDecisionResource, + PullRequest: g0.PullRequest, + Plugin: g0.Plugin, + Matrix: matrixGenerator0, + Merge: mergeGenerator0, + } + + // Generate params for first child generator + relGenerators := generators.GetRelevantGenerators(requestedGenerator0, h.generators) + params := []map[string]interface{}{} + for _, g := range relGenerators { + p, err := g.GenerateParams(requestedGenerator0, appSet) + if err != nil { + log.Error(err) + return false + } + params = append(params, p...) + } + + g1 := gen.Generators[1] + + // Create Matrix generator for nested Matrix generator as second child generator + var matrixGenerator1 *v1alpha1.MatrixGenerator + if g1.Matrix != nil { + // Since nested matrix generator is represented as a JSON object in the CRD, we unmarshall it back to a Go struct here. + nestedMatrix, err := v1alpha1.ToNestedMatrixGenerator(g1.Matrix) + if err != nil { + log.Errorf("Failed to unmarshall nested matrix generator: %v", err) + return false + } + if nestedMatrix != nil { + matrixGenerator1 = nestedMatrix.ToMatrixGenerator() + } + } + + // Create Merge generator for nested Merge generator as second child generator + var mergeGenerator1 *v1alpha1.MergeGenerator + if g1.Merge != nil { + // Since nested merge generator is represented as a JSON object in the CRD, we unmarshall it back to a Go struct here. 
+ nestedMerge, err := v1alpha1.ToNestedMergeGenerator(g1.Merge) + if err != nil { + log.Errorf("Failed to unmarshall nested merge generator: %v", err) + return false + } + if nestedMerge != nil { + mergeGenerator1 = nestedMerge.ToMergeGenerator() + } + } + + // Create ApplicationSetGenerator for second child generator from its ApplicationSetNestedGenerator + requestedGenerator1 := &v1alpha1.ApplicationSetGenerator{ + List: g1.List, + Clusters: g1.Clusters, + Git: g1.Git, + SCMProvider: g1.SCMProvider, + ClusterDecisionResource: g1.ClusterDecisionResource, + PullRequest: g1.PullRequest, + Plugin: g1.Plugin, + Matrix: matrixGenerator1, + Merge: mergeGenerator1, + } + + // Interpolate second child generator with params from first child generator, if there are any params + if len(params) != 0 { + for _, p := range params { + tempInterpolatedGenerator, err := generators.InterpolateGenerator(requestedGenerator1, p, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions) + interpolatedGenerator := &tempInterpolatedGenerator + if err != nil { + log.Error(err) + return false + } + + // Check all interpolated child generators + if shouldRefreshGitGenerator(interpolatedGenerator.Git, gitGenInfo) || + shouldRefreshPRGenerator(interpolatedGenerator.PullRequest, prGenInfo) || + shouldRefreshPluginGenerator(interpolatedGenerator.Plugin) || + h.shouldRefreshMatrixGenerator(interpolatedGenerator.Matrix, appSet, gitGenInfo, prGenInfo) || + h.shouldRefreshMergeGenerator(requestedGenerator1.Merge, appSet, gitGenInfo, prGenInfo) { + return true + } + } + } + + // First child generator didn't return any params, just check the second child generator + return shouldRefreshGitGenerator(requestedGenerator1.Git, gitGenInfo) || + shouldRefreshPRGenerator(requestedGenerator1.PullRequest, prGenInfo) || + shouldRefreshPluginGenerator(requestedGenerator1.Plugin) || + h.shouldRefreshMatrixGenerator(requestedGenerator1.Matrix, appSet, gitGenInfo, prGenInfo) || + 
h.shouldRefreshMergeGenerator(requestedGenerator1.Merge, appSet, gitGenInfo, prGenInfo)
}

// shouldRefreshMergeGenerator reports whether a webhook event affects any
// child of a Merge generator, recursing into nested Matrix/Merge children.
func (h *WebhookHandler) shouldRefreshMergeGenerator(gen *v1alpha1.MergeGenerator, appSet *v1alpha1.ApplicationSet, gitGenInfo *gitGeneratorInfo, prGenInfo *prGeneratorInfo) bool {
	if gen == nil {
		return false
	}

	for _, g := range gen.Generators {
		// Check Git or Pull Request generator
		if shouldRefreshGitGenerator(g.Git, gitGenInfo) ||
			shouldRefreshPRGenerator(g.PullRequest, prGenInfo) {
			return true
		}

		// Check nested Matrix generator
		if g.Matrix != nil {
			// Since nested matrix generator is represented as a JSON object in the CRD, we unmarshall it back to a Go struct here.
			nestedMatrix, err := v1alpha1.ToNestedMatrixGenerator(g.Matrix)
			if err != nil {
				log.Errorf("Failed to unmarshall nested matrix generator: %v", err)
				return false
			}
			if nestedMatrix != nil {
				if h.shouldRefreshMatrixGenerator(nestedMatrix.ToMatrixGenerator(), appSet, gitGenInfo, prGenInfo) {
					return true
				}
			}
		}

		// Check nested Merge generator
		if g.Merge != nil {
			// Since nested merge generator is represented as a JSON object in the CRD, we unmarshall it back to a Go struct here.
			nestedMerge, err := v1alpha1.ToNestedMergeGenerator(g.Merge)
			if err != nil {
				log.Errorf("Failed to unmarshall nested merge generator: %v", err)
				return false
			}
			if nestedMerge != nil {
				if h.shouldRefreshMergeGenerator(nestedMerge.ToMergeGenerator(), appSet, gitGenInfo, prGenInfo) {
					return true
				}
			}
		}
	}

	return false
}

// refreshApplicationSet marks an ApplicationSet for reconciliation by setting
// the refresh annotation, retrying on optimistic-concurrency conflicts.
func refreshApplicationSet(c client.Client, appSet *v1alpha1.ApplicationSet) error {
	// patch the ApplicationSet with the refresh annotation to reconcile
	return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		// Re-fetch on each attempt so the patch applies to the latest version.
		err := c.Get(context.Background(), types.NamespacedName{Name: appSet.Name, Namespace: appSet.Namespace}, appSet)
		if err != nil {
			return fmt.Errorf("error getting ApplicationSet: %w", err)
		}
		if appSet.Annotations == nil {
			appSet.Annotations = map[string]string{}
		}
		appSet.Annotations[common.AnnotationApplicationSetRefresh] = "true"
		return c.Patch(context.Background(), appSet, client.Merge)
	})
}
diff --git a/applicationset/webhook/webhook_test.go b/applicationset/webhook/webhook_test.go
new file mode 100644
index 0000000000000..349d275948aee
--- /dev/null
+++ b/applicationset/webhook/webhook_test.go
@@ -0,0 +1,714 @@
package webhook

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	kubefake "k8s.io/client-go/kubernetes/fake"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"

	"github.com/argoproj/argo-cd/v2/applicationset/generators"
	"github.com/argoproj/argo-cd/v2/applicationset/services/scm_provider"
	"github.com/argoproj/argo-cd/v2/common"
	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	argosettings
	"github.com/argoproj/argo-cd/v2/util/settings"
)

// generatorMock is a minimal generators.Generator stub used to satisfy the
// webhook handler's generator map in tests.
type generatorMock struct {
	mock.Mock
}

// GetTemplate returns an empty template; template content is irrelevant to
// the webhook-matching behavior under test.
func (g *generatorMock) GetTemplate(appSetGenerator *v1alpha1.ApplicationSetGenerator) *v1alpha1.ApplicationSetTemplate {
	return &v1alpha1.ApplicationSetTemplate{}
}

// GenerateParams returns no params; these tests exercise webhook matching,
// not parameter generation.
func (g *generatorMock) GenerateParams(appSetGenerator *v1alpha1.ApplicationSetGenerator, _ *v1alpha1.ApplicationSet) ([]map[string]interface{}, error) {
	return []map[string]interface{}{}, nil
}

// GetRequeueAfter returns a fixed 10s requeue interval.
func (g *generatorMock) GetRequeueAfter(appSetGenerator *v1alpha1.ApplicationSetGenerator) time.Duration {
	d, _ := time.ParseDuration("10s")
	return d
}

// TestWebhookHandler drives the webhook handler with recorded provider
// payloads from testdata and verifies that exactly the expected
// ApplicationSets end up marked for refresh.
// NOTE(review): several test names below spell "pull_reqeuest" — typo in the
// description string only, no behavioral impact.
func TestWebhookHandler(t *testing.T) {
	tt := []struct {
		desc               string
		headerKey          string
		headerValue        string
		effectedAppSets    []string
		payloadFile        string
		expectedStatusCode int
		expectedRefresh    bool
	}{
		{
			desc:               "WebHook from a GitHub repository via Commit",
			headerKey:          "X-GitHub-Event",
			headerValue:        "push",
			payloadFile:        "github-commit-event.json",
			effectedAppSets:    []string{"git-github", "matrix-git-github", "merge-git-github", "matrix-scm-git-github", "matrix-nested-git-github", "merge-nested-git-github", "plugin", "matrix-pull-request-github-plugin"},
			expectedStatusCode: http.StatusOK,
			expectedRefresh:    true,
		},
		{
			desc:               "WebHook from a GitHub repository via Commit to branch",
			headerKey:          "X-GitHub-Event",
			headerValue:        "push",
			payloadFile:        "github-commit-branch-event.json",
			effectedAppSets:    []string{"git-github", "plugin", "matrix-pull-request-github-plugin"},
			expectedStatusCode: http.StatusOK,
			expectedRefresh:    true,
		},
		{
			desc:               "WebHook from a GitHub ping event",
			headerKey:          "X-GitHub-Event",
			headerValue:        "ping",
			payloadFile:        "github-ping-event.json",
			effectedAppSets:    []string{"git-github", "plugin"},
			expectedStatusCode: http.StatusOK,
			expectedRefresh:    false,
		},
		{
			desc:               "WebHook from a GitLab repository via Commit",
			headerKey:          "X-Gitlab-Event",
			headerValue:        "Push Hook",
			payloadFile:        "gitlab-event.json",
			effectedAppSets:    []string{"git-gitlab", "plugin", "matrix-pull-request-github-plugin"},
			expectedStatusCode: http.StatusOK,
			expectedRefresh:    true,
		},
		{
			desc:               "WebHook with an unknown event",
			headerKey:          "X-Random-Event",
			headerValue:        "Push Hook",
			payloadFile:        "gitlab-event.json",
			effectedAppSets:    []string{"git-gitlab", "plugin"},
			expectedStatusCode: http.StatusBadRequest,
			expectedRefresh:    false,
		},
		{
			desc:               "WebHook with an invalid event",
			headerKey:          "X-Random-Event",
			headerValue:        "Push Hook",
			payloadFile:        "invalid-event.json",
			effectedAppSets:    []string{"git-gitlab", "plugin"},
			expectedStatusCode: http.StatusBadRequest,
			expectedRefresh:    false,
		},
		{
			desc:               "WebHook from a GitHub repository via pull_reqeuest opened event",
			headerKey:          "X-GitHub-Event",
			headerValue:        "pull_request",
			payloadFile:        "github-pull-request-opened-event.json",
			effectedAppSets:    []string{"pull-request-github", "matrix-pull-request-github", "matrix-scm-pull-request-github", "merge-pull-request-github", "plugin", "matrix-pull-request-github-plugin"},
			expectedStatusCode: http.StatusOK,
			expectedRefresh:    true,
		},
		{
			desc:               "WebHook from a GitHub repository via pull_reqeuest assigned event",
			headerKey:          "X-GitHub-Event",
			headerValue:        "pull_request",
			payloadFile:        "github-pull-request-assigned-event.json",
			effectedAppSets:    []string{"pull-request-github", "matrix-pull-request-github", "matrix-scm-pull-request-github", "merge-pull-request-github", "plugin", "matrix-pull-request-github-plugin"},
			expectedStatusCode: http.StatusOK,
			expectedRefresh:    false,
		},
		{
			desc:               "WebHook from a GitLab repository via open merge request event",
			headerKey:          "X-Gitlab-Event",
			headerValue:        "Merge Request Hook",
			payloadFile:        "gitlab-merge-request-open-event.json",
			effectedAppSets:    []string{"pull-request-gitlab", "plugin", "matrix-pull-request-github-plugin"},
			expectedStatusCode: http.StatusOK,
			expectedRefresh:    true,
		},
		{
			desc:               "WebHook from a GitLab repository via approval merge request event",
			headerKey:          "X-Gitlab-Event",
			headerValue:        "Merge Request Hook",
			payloadFile:        "gitlab-merge-request-approval-event.json",
			effectedAppSets:    []string{"pull-request-gitlab", "plugin"},
			expectedStatusCode: http.StatusOK,
			expectedRefresh:    false,
		},
		{
			desc:               "WebHook from a Azure DevOps repository via Commit",
			headerKey:          "X-Vss-Activityid",
			headerValue:        "Push Hook",
			payloadFile:        "azuredevops-push.json",
			effectedAppSets:    []string{"git-azure-devops", "plugin", "matrix-pull-request-github-plugin"},
			expectedStatusCode: http.StatusOK,
			expectedRefresh:    true,
		},
		{
			desc:               "WebHook from a Azure DevOps repository via pull request event",
			headerKey:          "X-Vss-Activityid",
			headerValue:        "Pull Request Hook",
			payloadFile:        "azuredevops-pull-request.json",
			effectedAppSets:    []string{"pull-request-azure-devops", "plugin", "matrix-pull-request-github-plugin"},
			expectedStatusCode: http.StatusOK,
			expectedRefresh:    true,
		},
	}

	namespace := "test"
	fakeClient := newFakeClient(namespace)
	scheme := runtime.NewScheme()
	err := v1alpha1.AddToScheme(scheme)
	assert.Nil(t, err)
	// NOTE(review): AddToScheme is invoked twice on the same scheme — the
	// second call looks redundant; confirm and drop.
	err = v1alpha1.AddToScheme(scheme)
	assert.Nil(t, err)

	for _, test := range tt {
		t.Run(test.desc, func(t *testing.T) {
			// Build a fresh fake client per sub-test so refresh annotations do
			// not leak between cases.
			fc := fake.NewClientBuilder().WithScheme(scheme).WithObjects(
				fakeAppWithGitGenerator("git-github", namespace, "https://github.com/org/repo"),
				fakeAppWithGitGenerator("git-gitlab", namespace, "https://gitlab/group/name"),
				fakeAppWithGitGenerator("git-azure-devops", namespace, "https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git"),
				fakeAppWithGithubPullRequestGenerator("pull-request-github", namespace, "Codertocat", "Hello-World"),
				fakeAppWithGitlabPullRequestGenerator("pull-request-gitlab", namespace, "100500"),
				fakeAppWithAzureDevOpsPullRequestGenerator("pull-request-azure-devops", namespace, "DefaultCollection", "Fabrikam"),
				fakeAppWithPluginGenerator("plugin", namespace),
				fakeAppWithMatrixAndGitGenerator("matrix-git-github", namespace, "https://github.com/org/repo"),
				fakeAppWithMatrixAndPullRequestGenerator("matrix-pull-request-github", namespace, "Codertocat", "Hello-World"),
				fakeAppWithMatrixAndScmWithGitGenerator("matrix-scm-git-github", namespace, "org"),
				fakeAppWithMatrixAndScmWithPullRequestGenerator("matrix-scm-pull-request-github", namespace, "Codertocat"),
				fakeAppWithMatrixAndNestedGitGenerator("matrix-nested-git-github", namespace, "https://github.com/org/repo"),
				fakeAppWithMatrixAndPullRequestGeneratorWithPluginGenerator("matrix-pull-request-github-plugin", namespace, "Codertocat", "Hello-World", "plugin-cm"),
				fakeAppWithMergeAndGitGenerator("merge-git-github", namespace, "https://github.com/org/repo"),
				fakeAppWithMergeAndPullRequestGenerator("merge-pull-request-github", namespace, "Codertocat", "Hello-World"),
				fakeAppWithMergeAndNestedGitGenerator("merge-nested-git-github", namespace, "https://github.com/org/repo"),
			).Build()
			set := argosettings.NewSettingsManager(context.TODO(), fakeClient, namespace)
			h, err := NewWebhookHandler(namespace, set, fc, mockGenerators())
			assert.Nil(t, err)

			req := httptest.NewRequest(http.MethodPost, "/api/webhook", nil)
			req.Header.Set(test.headerKey, test.headerValue)
			eventJSON, err := os.ReadFile(filepath.Join("testdata", test.payloadFile))
			assert.NoError(t, err)
			req.Body = io.NopCloser(bytes.NewReader(eventJSON))
			w := httptest.NewRecorder()

			h.Handler(w, req)
			assert.Equal(t, w.Code, test.expectedStatusCode)

			// Every listed appset must have the expected refresh state; every
			// other appset must be untouched; every listed appset must exist.
			list := &v1alpha1.ApplicationSetList{}
			err = fc.List(context.TODO(), list)
			assert.Nil(t, err)
			effectedAppSetsAsExpected := make(map[string]bool)
			for _, appSetName := range test.effectedAppSets {
				effectedAppSetsAsExpected[appSetName] = false
			}
			for i := range list.Items {
				gotAppSet := &list.Items[i]
				if _, isEffected := effectedAppSetsAsExpected[gotAppSet.Name]; isEffected {
					if expected, got := test.expectedRefresh, gotAppSet.RefreshRequired(); expected != got {
						t.Errorf("unexpected RefreshRequired() for appset '%s' expect: %v got: %v", gotAppSet.Name, expected, got)
					}
					effectedAppSetsAsExpected[gotAppSet.Name] = true
				} else {
					assert.False(t, gotAppSet.RefreshRequired())
				}
			}
			for appSetName, checked := range effectedAppSetsAsExpected {
				assert.True(t, checked, "appset %s not found", appSetName)
			}
		})
	}
}

// mockGenerators wires up the generator map expected by the webhook handler:
// stub Git/PR/Plugin generators, a fixed-repo SCM provider, and real
// List/Matrix/Merge generators (Matrix/Merge nested one level deep).
func mockGenerators() map[string]generators.Generator {
	// generatorMockList := generatorMock{}
	generatorMockGit := &generatorMock{}
	generatorMockPR := &generatorMock{}
	generatorMockPlugin := &generatorMock{}
	mockSCMProvider := &scm_provider.MockProvider{
		Repos: []*scm_provider.Repository{
			{
				Organization: "myorg",
				Repository:   "repo1",
				URL:          "git@github.com:org/repo.git",
				Branch:       "main",
				SHA:          "0bc57212c3cbbec69d20b34c507284bd300def5b",
			},
			{
				Organization: "Codertocat",
				Repository:   "Hello-World",
				URL:          "git@github.com:Codertocat/Hello-World.git",
				Branch:       "main",
				SHA:          "59d0",
			},
		},
	}
	generatorMockSCM := generators.NewTestSCMProviderGenerator(mockSCMProvider)

	terminalMockGenerators := map[string]generators.Generator{
		"List":        generators.NewListGenerator(),
		"Git":         generatorMockGit,
		"SCMProvider": generatorMockSCM,
		"PullRequest": generatorMockPR,
		"Plugin":      generatorMockPlugin,
	}

	nestedGenerators := map[string]generators.Generator{
		"List":        terminalMockGenerators["List"],
		"Git":         terminalMockGenerators["Git"],
		"SCMProvider": terminalMockGenerators["SCMProvider"],
		"PullRequest": terminalMockGenerators["PullRequest"],
		"Plugin":      terminalMockGenerators["Plugin"],
		"Matrix":      generators.NewMatrixGenerator(terminalMockGenerators),
		"Merge":       generators.NewMergeGenerator(terminalMockGenerators),
	}

	return map[string]generators.Generator{
		"List": terminalMockGenerators["List"],
		"Git":
terminalMockGenerators["Git"],
		"SCMProvider": terminalMockGenerators["SCMProvider"],
		"PullRequest": terminalMockGenerators["PullRequest"],
		"Plugin":      terminalMockGenerators["Plugin"],
		"Matrix":      generators.NewMatrixGenerator(nestedGenerators),
		"Merge":       generators.NewMergeGenerator(nestedGenerators),
	}
}

// TestGenRevisionHasChanged covers default-branch targets (empty revision)
// and explicit revisions, including refs/heads/-prefixed ones.
func TestGenRevisionHasChanged(t *testing.T) {
	assert.True(t, genRevisionHasChanged(&v1alpha1.GitGenerator{}, "master", true))
	assert.False(t, genRevisionHasChanged(&v1alpha1.GitGenerator{}, "master", false))

	assert.True(t, genRevisionHasChanged(&v1alpha1.GitGenerator{Revision: "dev"}, "dev", true))
	assert.False(t, genRevisionHasChanged(&v1alpha1.GitGenerator{Revision: "dev"}, "master", false))

	assert.True(t, genRevisionHasChanged(&v1alpha1.GitGenerator{Revision: "refs/heads/dev"}, "dev", true))
	assert.False(t, genRevisionHasChanged(&v1alpha1.GitGenerator{Revision: "refs/heads/dev"}, "master", false))
}

// fakeAppWithGitGenerator builds an ApplicationSet with a single Git
// generator pinned to the "master" revision of repo.
func fakeAppWithGitGenerator(name, namespace, repo string) *v1alpha1.ApplicationSet {
	return &v1alpha1.ApplicationSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: v1alpha1.ApplicationSetSpec{
			Generators: []v1alpha1.ApplicationSetGenerator{
				{
					Git: &v1alpha1.GitGenerator{
						RepoURL:  repo,
						Revision: "master",
					},
				},
			},
		},
	}
}

// fakeAppWithGitlabPullRequestGenerator builds an ApplicationSet with a
// GitLab pull-request generator for the given project ID.
func fakeAppWithGitlabPullRequestGenerator(name, namespace, projectId string) *v1alpha1.ApplicationSet {
	return &v1alpha1.ApplicationSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: v1alpha1.ApplicationSetSpec{
			Generators: []v1alpha1.ApplicationSetGenerator{
				{
					PullRequest: &v1alpha1.PullRequestGenerator{
						GitLab: &v1alpha1.PullRequestGeneratorGitLab{
							Project: projectId,
						},
					},
				},
			},
		},
	}
}

// fakeAppWithGithubPullRequestGenerator builds an ApplicationSet with a
// GitHub pull-request generator for owner/repo.
func fakeAppWithGithubPullRequestGenerator(name, namespace, owner, repo string) *v1alpha1.ApplicationSet {
	return &v1alpha1.ApplicationSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: v1alpha1.ApplicationSetSpec{
			Generators: []v1alpha1.ApplicationSetGenerator{
				{
					PullRequest: &v1alpha1.PullRequestGenerator{
						Github: &v1alpha1.PullRequestGeneratorGithub{
							Owner: owner,
							Repo:  repo,
						},
					},
				},
			},
		},
	}
}

// fakeAppWithAzureDevOpsPullRequestGenerator builds an ApplicationSet with an
// Azure DevOps pull-request generator for project/repo.
func fakeAppWithAzureDevOpsPullRequestGenerator(name, namespace, project, repo string) *v1alpha1.ApplicationSet {
	return &v1alpha1.ApplicationSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: v1alpha1.ApplicationSetSpec{
			Generators: []v1alpha1.ApplicationSetGenerator{
				{
					PullRequest: &v1alpha1.PullRequestGenerator{
						AzureDevOps: &v1alpha1.PullRequestGeneratorAzureDevOps{
							Project: project,
							Repo:    repo,
						},
					},
				},
			},
		},
	}
}

// fakeAppWithMatrixAndGitGenerator: Matrix of (List, Git) children.
func fakeAppWithMatrixAndGitGenerator(name, namespace, repo string) *v1alpha1.ApplicationSet {
	return &v1alpha1.ApplicationSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: v1alpha1.ApplicationSetSpec{
			Generators: []v1alpha1.ApplicationSetGenerator{
				{
					Matrix: &v1alpha1.MatrixGenerator{
						Generators: []v1alpha1.ApplicationSetNestedGenerator{
							{
								List: &v1alpha1.ListGenerator{},
							},
							{
								Git: &v1alpha1.GitGenerator{
									RepoURL: repo,
								},
							},
						},
					},
				},
			},
		},
	}
}

// fakeAppWithMatrixAndPullRequestGenerator: Matrix of (List, GitHub PR).
func fakeAppWithMatrixAndPullRequestGenerator(name, namespace, owner, repo string) *v1alpha1.ApplicationSet {
	return &v1alpha1.ApplicationSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: v1alpha1.ApplicationSetSpec{
			Generators: []v1alpha1.ApplicationSetGenerator{
				{
					Matrix: &v1alpha1.MatrixGenerator{
						Generators: []v1alpha1.ApplicationSetNestedGenerator{
							{
								List: &v1alpha1.ListGenerator{},
							},
							{
								PullRequest: &v1alpha1.PullRequestGenerator{
									Github: &v1alpha1.PullRequestGeneratorGithub{
										Owner: owner,
										Repo:  repo,
									},
								},
							},
						},
					},
				},
			},
		},
	}
}

// fakeAppWithMatrixAndScmWithGitGenerator: Matrix of (SCM provider, Git with
// "{{ url }}" templated from the SCM provider's params).
func fakeAppWithMatrixAndScmWithGitGenerator(name, namespace, owner string) *v1alpha1.ApplicationSet {
	return &v1alpha1.ApplicationSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: v1alpha1.ApplicationSetSpec{
			Generators: []v1alpha1.ApplicationSetGenerator{
				{
					Matrix: &v1alpha1.MatrixGenerator{
						Generators: []v1alpha1.ApplicationSetNestedGenerator{
							{
								SCMProvider: &v1alpha1.SCMProviderGenerator{
									CloneProtocol: "ssh",
									Github: &v1alpha1.SCMProviderGeneratorGithub{
										Organization: owner,
									},
								},
							},
							{
								Git: &v1alpha1.GitGenerator{
									RepoURL: "{{ url }}",
								},
							},
						},
					},
				},
			},
		},
	}
}

// fakeAppWithMatrixAndScmWithPullRequestGenerator: Matrix of (SCM provider,
// GitHub PR templated from the SCM provider's params).
func fakeAppWithMatrixAndScmWithPullRequestGenerator(name, namespace, owner string) *v1alpha1.ApplicationSet {
	return &v1alpha1.ApplicationSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: v1alpha1.ApplicationSetSpec{
			Generators: []v1alpha1.ApplicationSetGenerator{
				{
					Matrix: &v1alpha1.MatrixGenerator{
						Generators: []v1alpha1.ApplicationSetNestedGenerator{
							{
								SCMProvider: &v1alpha1.SCMProviderGenerator{
									CloneProtocol: "https",
									Github: &v1alpha1.SCMProviderGeneratorGithub{
										Organization: owner,
									},
								},
							},
							{
								PullRequest: &v1alpha1.PullRequestGenerator{
									Github: &v1alpha1.PullRequestGeneratorGithub{
										Owner: "{{ organization }}",
										Repo:  "{{ repository }}",
									},
								},
							},
						},
					},
				},
			},
		},
	}
}

// fakeAppWithMatrixAndNestedGitGenerator: Matrix whose second child is itself
// a nested Matrix (raw JSON, as stored in the CRD) containing a Git generator.
func fakeAppWithMatrixAndNestedGitGenerator(name, namespace, repo string) *v1alpha1.ApplicationSet {
	return &v1alpha1.ApplicationSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: v1alpha1.ApplicationSetSpec{
			Generators: []v1alpha1.ApplicationSetGenerator{
				{
					Matrix: &v1alpha1.MatrixGenerator{
						Generators: []v1alpha1.ApplicationSetNestedGenerator{
							{
								List: &v1alpha1.ListGenerator{},
							},
							{
								Matrix: &apiextensionsv1.JSON{
									Raw: []byte(fmt.Sprintf(`{
										"Generators": [
											{
												"List": {
													"Elements": [
														{
															"repository": "%s"
														}
													]
												}
											},
											{
												"Git": {
													"RepoURL": "{{ repository }}"
												}
											}
										]
									}`, repo)),
								},
							},
						},
					},
				},
			},
		},
	}
}

// fakeAppWithMergeAndGitGenerator: Merge with a single Git child.
func fakeAppWithMergeAndGitGenerator(name, namespace, repo string) *v1alpha1.ApplicationSet {
	return &v1alpha1.ApplicationSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: v1alpha1.ApplicationSetSpec{
			Generators: []v1alpha1.ApplicationSetGenerator{
				{
					Merge: &v1alpha1.MergeGenerator{
						Generators: []v1alpha1.ApplicationSetNestedGenerator{
							{
								Git: &v1alpha1.GitGenerator{
									RepoURL: repo,
								},
							},
						},
					},
				},
			},
		},
	}
}

// fakeAppWithMergeAndPullRequestGenerator: Merge with a single GitHub PR child.
func fakeAppWithMergeAndPullRequestGenerator(name, namespace, owner, repo string) *v1alpha1.ApplicationSet {
	return &v1alpha1.ApplicationSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: v1alpha1.ApplicationSetSpec{
			Generators: []v1alpha1.ApplicationSetGenerator{
				{
					Merge: &v1alpha1.MergeGenerator{
						Generators: []v1alpha1.ApplicationSetNestedGenerator{
							{
								PullRequest: &v1alpha1.PullRequestGenerator{
									Github: &v1alpha1.PullRequestGeneratorGithub{
										Owner: owner,
										Repo:  repo,
									},
								},
							},
						},
					},
				},
			},
		},
	}
}

// fakeAppWithMergeAndNestedGitGenerator: Merge whose second child is a nested
// Merge (raw JSON, as stored in the CRD) containing a Git generator.
func fakeAppWithMergeAndNestedGitGenerator(name, namespace, repo string) *v1alpha1.ApplicationSet {
	return &v1alpha1.ApplicationSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: v1alpha1.ApplicationSetSpec{
			Generators: []v1alpha1.ApplicationSetGenerator{
				{
					Merge: &v1alpha1.MergeGenerator{
						MergeKeys: []string{
							"server",
						},
						Generators: []v1alpha1.ApplicationSetNestedGenerator{
							{
								List: &v1alpha1.ListGenerator{},
							},
							{
								Merge: &apiextensionsv1.JSON{
									Raw: []byte(fmt.Sprintf(`{
										"MergeKeys": ["server"],
										"Generators": [
											{
												"List": {}
											},
											{
												"Git": {
													"RepoURL": "%s"
												}
											}
										]
									}`, repo)),
								},
							},
						},
					},
				},
			},
		},
	}
}

// fakeAppWithPluginGenerator: single Plugin generator referencing a fixed
// ConfigMap name.
func fakeAppWithPluginGenerator(name, namespace string) *v1alpha1.ApplicationSet {
	return &v1alpha1.ApplicationSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: v1alpha1.ApplicationSetSpec{
			Generators: []v1alpha1.ApplicationSetGenerator{
				{
					Plugin: &v1alpha1.PluginGenerator{
						ConfigMapRef: v1alpha1.PluginConfigMapRef{
							Name: "test",
						},
					},
				},
			},
		},
	}
}

// fakeAppWithMatrixAndPullRequestGeneratorWithPluginGenerator: Matrix of
// (GitHub PR, Plugin) children.
func fakeAppWithMatrixAndPullRequestGeneratorWithPluginGenerator(name, namespace, owner, repo, configmapName string) *v1alpha1.ApplicationSet {
	return &v1alpha1.ApplicationSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: v1alpha1.ApplicationSetSpec{
			Generators: []v1alpha1.ApplicationSetGenerator{
				{
					Matrix: &v1alpha1.MatrixGenerator{
						Generators: []v1alpha1.ApplicationSetNestedGenerator{
							{
								PullRequest: &v1alpha1.PullRequestGenerator{
									Github: &v1alpha1.PullRequestGeneratorGithub{
										Owner: owner,
										Repo:  repo,
									},
								},
							},
							{
								Plugin: &v1alpha1.PluginGenerator{
									ConfigMapRef: v1alpha1.PluginConfigMapRef{
										Name: configmapName,
									},
								},
							},
						},
					},
				},
			},
		},
	}
}

// newFakeClient builds a fake Kubernetes clientset pre-loaded with the
// argocd-cm ConfigMap and argocd-secret Secret the settings manager reads.
func newFakeClient(ns string) *kubefake.Clientset {
	s := runtime.NewScheme()
	s.AddKnownTypes(v1alpha1.SchemeGroupVersion, &v1alpha1.ApplicationSet{})
	return kubefake.NewSimpleClientset(&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "argocd-cm", Namespace: ns, Labels: map[string]string{
		"app.kubernetes.io/part-of": "argocd",
	}}}, &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      common.ArgoCDSecretName,
			Namespace: ns,
			Labels: map[string]string{
				"app.kubernetes.io/part-of": "argocd",
			},
		},
		Data: map[string][]byte{
			"server.secretkey": nil,
		},
	})
}
diff --git a/assets/builtin-policy.csv b/assets/builtin-policy.csv
index 89f6efd249d34..9413b53d1cba5 100644
--- a/assets/builtin-policy.csv
+++ b/assets/builtin-policy.csv
@@ -1,7 +1,7 @@
# Built-in policy which defines two roles: role:readonly and role:admin,
# and additionally assigns the admin user to the role:admin role.
# There are two policy formats:
-# 1. 
Applications (which belong to a project): +# 1. Applications, logs, and exec (which belong to a project): # p, , , , / # 2. All other resources: # p, , , , @@ -13,6 +13,7 @@ p, role:readonly, repositories, get, *, allow p, role:readonly, projects, get, *, allow p, role:readonly, accounts, get, *, allow p, role:readonly, gpgkeys, get, *, allow +p, role:readonly, logs, get, */*, allow p, role:admin, applications, create, */*, allow p, role:admin, applications, update, */*, allow @@ -20,6 +21,10 @@ p, role:admin, applications, delete, */*, allow p, role:admin, applications, sync, */*, allow p, role:admin, applications, override, */*, allow p, role:admin, applications, action/*, */*, allow +p, role:admin, applicationsets, get, */*, allow +p, role:admin, applicationsets, create, */*, allow +p, role:admin, applicationsets, update, */*, allow +p, role:admin, applicationsets, delete, */*, allow p, role:admin, certificates, create, *, allow p, role:admin, certificates, update, *, allow p, role:admin, certificates, delete, *, allow @@ -35,6 +40,7 @@ p, role:admin, projects, delete, *, allow p, role:admin, accounts, update, *, allow p, role:admin, gpgkeys, create, *, allow p, role:admin, gpgkeys, delete, *, allow +p, role:admin, exec, create, */*, allow g, role:admin, role:readonly -g, admin, role:admin +g, admin, role:admin \ No newline at end of file diff --git a/assets/embed.go b/assets/embed.go new file mode 100644 index 0000000000000..ac148cafd3de6 --- /dev/null +++ b/assets/embed.go @@ -0,0 +1,8 @@ +package assets + +import "embed" + +// Embedded contains embedded assets +// +//go:embed * +var Embedded embed.FS diff --git a/assets/model.conf b/assets/model.conf index 087406b2023b5..e53d9fe89db55 100644 --- a/assets/model.conf +++ b/assets/model.conf @@ -11,4 +11,4 @@ g = _, _ e = some(where (p.eft == allow)) && !some(where (p.eft == deny)) [matchers] -m = g(r.sub, p.sub) && globMatch(r.res, p.res) && globMatch(r.act, p.act) && globMatch(r.obj, p.obj) +m = g(r.sub, 
p.sub) && globOrRegexMatch(r.res, p.res) && globOrRegexMatch(r.act, p.act) && globOrRegexMatch(r.obj, p.obj) diff --git a/assets/swagger.json b/assets/swagger.json index 1124209bf69a8..ae4688966dd0c 100644 --- a/assets/swagger.json +++ b/assets/swagger.json @@ -234,7 +234,7 @@ }, { "type": "string", - "description": "forces application reconciliation if set to true.", + "description": "forces application reconciliation if set to 'hard'.", "name": "refresh", "in": "query" }, @@ -245,7 +245,7 @@ }, "collectionFormat": "multi", "description": "the project names to restrict returned list applications.", - "name": "project", + "name": "projects", "in": "query" }, { @@ -256,7 +256,7 @@ }, { "type": "string", - "description": "the selector to to restrict returned list to applications only with matched labels.", + "description": "the selector to restrict returned list to applications only with matched labels.", "name": "selector", "in": "query" }, @@ -265,6 +265,22 @@ "description": "the repoURL to restrict returned list applications.", "name": "repo", "in": "query" + }, + { + "type": "string", + "description": "the application's namespace.", + "name": "appNamespace", + "in": "query" + }, + { + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi", + "description": "the project names to restrict returned list applications (legacy name for backwards-compatibility).", + "name": "project", + "in": "query" } ], "responses": { @@ -324,6 +340,40 @@ } } }, + "/api/v1/applications/manifestsWithFiles": { + "post": { + "tags": [ + "ApplicationService" + ], + "summary": "GetManifestsWithFiles returns application manifests using provided files to generate them", + "operationId": "ApplicationService_GetManifestsWithFiles", + "parameters": [ + { + "description": " (streaming inputs)", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/applicationApplicationManifestQueryWithFilesWrapper" + } + } + ], + "responses": { 
+ "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/repositoryManifestResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, "/api/v1/applications/{application.metadata.name}": { "put": { "tags": [ @@ -351,6 +401,11 @@ "type": "boolean", "name": "validate", "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" } ], "responses": { @@ -407,6 +462,16 @@ "type": "string", "name": "kind", "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" } ], "responses": { @@ -463,6 +528,16 @@ "type": "string", "name": "kind", "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" } ], "responses": { @@ -498,7 +573,7 @@ }, { "type": "string", - "description": "forces application reconciliation if set to true.", + "description": "forces application reconciliation if set to 'hard'.", "name": "refresh", "in": "query" }, @@ -509,7 +584,7 @@ }, "collectionFormat": "multi", "description": "the project names to restrict returned list applications.", - "name": "project", + "name": "projects", "in": "query" }, { @@ -520,7 +595,7 @@ }, { "type": "string", - "description": "the selector to to restrict returned list to applications only with matched labels.", + "description": "the selector to restrict returned list to applications only with matched labels.", "name": "selector", "in": "query" }, @@ -529,6 +604,22 @@ "description": "the repoURL to restrict returned list applications.", "name": "repo", "in": "query" + }, + { + "type": "string", + "description": "the application's namespace.", + "name": "appNamespace", + "in": "query" + }, + { + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi", + 
"description": "the project names to restrict returned list applications (legacy name for backwards-compatibility).", + "name": "project", + "in": "query" } ], "responses": { @@ -568,6 +659,16 @@ "type": "string", "name": "propagationPolicy", "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" } ], "responses": { @@ -651,6 +752,16 @@ "type": "string", "name": "resourceUID", "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" } ], "responses": { @@ -669,6 +780,47 @@ } } }, + "/api/v1/applications/{name}/links": { + "get": { + "tags": [ + "ApplicationService" + ], + "summary": "ListLinks returns the list of all application deep links", + "operationId": "ApplicationService_ListLinks", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "namespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/applicationLinksResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, "/api/v1/applications/{name}/logs": { "get": { "tags": [ @@ -753,6 +905,21 @@ "type": "string", "name": "resourceName", "in": "query" + }, + { + "type": "boolean", + "name": "previous", + "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" } ], "responses": { @@ -798,6 +965,16 @@ "type": "string", "name": "revision", "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" } ], "responses": { 
@@ -829,6 +1006,16 @@ "name": "name", "in": "path", "required": true + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" } ], "responses": { @@ -932,6 +1119,21 @@ "type": "string", "name": "resourceName", "in": "query" + }, + { + "type": "boolean", + "name": "previous", + "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" } ], "responses": { @@ -997,6 +1199,16 @@ "type": "string", "name": "kind", "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" } ], "responses": { @@ -1064,6 +1276,16 @@ "type": "string", "name": "patchType", "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" } ], "responses": { @@ -1128,6 +1350,16 @@ "type": "boolean", "name": "orphan", "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" } ], "responses": { @@ -1184,6 +1416,16 @@ "type": "string", "name": "kind", "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" } ], "responses": { @@ -1246,6 +1488,16 @@ "type": "string", "name": "kind", "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" } ], "responses": { @@ -1264,73 +1516,61 @@ } } }, - "/api/v1/applications/{name}/revisions/{revision}/metadata": { + "/api/v1/applications/{name}/resource/links": { "get": { "tags": [ "ApplicationService" ], - "summary": "Get the meta-data (author, date, tags, message) for a specific revision of the application", - "operationId": 
"ApplicationService_RevisionMetadata", + "summary": "ListResourceLinks returns the list of all resource deep links", + "operationId": "ApplicationService_ListResourceLinks", "parameters": [ { "type": "string", - "description": "the application's name", "name": "name", "in": "path", "required": true }, { "type": "string", - "description": "the revision of the app", - "name": "revision", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/v1alpha1RevisionMetadata" - } + "name": "namespace", + "in": "query" }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/api/v1/applications/{name}/rollback": { - "post": { - "tags": [ - "ApplicationService" - ], - "summary": "Rollback syncs an application to its target state", - "operationId": "ApplicationService_Rollback", - "parameters": [ { "type": "string", - "name": "name", - "in": "path", - "required": true + "name": "resourceName", + "in": "query" }, { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/applicationApplicationRollbackRequest" - } + "type": "string", + "name": "version", + "in": "query" + }, + { + "type": "string", + "name": "group", + "in": "query" + }, + { + "type": "string", + "name": "kind", + "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" } ], "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/v1alpha1Application" + "$ref": "#/definitions/applicationLinksResponse" } }, "default": { @@ -1342,31 +1582,37 @@ } } }, - "/api/v1/applications/{name}/spec": { - "put": { + "/api/v1/applications/{name}/revisions/{revision}/chartdetails": { + "get": { "tags": [ "ApplicationService" ], - "summary": "UpdateSpec updates an application 
spec", - "operationId": "ApplicationService_UpdateSpec", + "summary": "Get the chart metadata (description, maintainers, home) for a specific revision of the application", + "operationId": "ApplicationService_RevisionChartDetails", "parameters": [ { "type": "string", + "description": "the application's name", "name": "name", "in": "path", "required": true }, { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/v1alpha1ApplicationSpec" - } + "type": "string", + "description": "the revision of the app", + "name": "revision", + "in": "path", + "required": true }, { - "type": "boolean", - "name": "validate", + "type": "string", + "description": "the application's namespace.", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", "in": "query" } ], @@ -1374,7 +1620,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/v1alpha1ApplicationSpec" + "$ref": "#/definitions/v1alpha1ChartDetails" } }, "default": { @@ -1386,34 +1632,45 @@ } } }, - "/api/v1/applications/{name}/sync": { - "post": { + "/api/v1/applications/{name}/revisions/{revision}/metadata": { + "get": { "tags": [ "ApplicationService" ], - "summary": "Sync syncs an application to its target state", - "operationId": "ApplicationService_Sync", + "summary": "Get the meta-data (author, date, tags, message) for a specific revision of the application", + "operationId": "ApplicationService_RevisionMetadata", "parameters": [ { "type": "string", + "description": "the application's name", "name": "name", "in": "path", "required": true }, { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/applicationApplicationSyncRequest" - } + "type": "string", + "description": "the revision of the app", + "name": "revision", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "the application's namespace.", + "name": "appNamespace", + "in": "query" 
+ }, + { + "type": "string", + "name": "project", + "in": "query" } ], "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/v1alpha1Application" + "$ref": "#/definitions/v1alpha1RevisionMetadata" } }, "default": { @@ -1425,26 +1682,34 @@ } } }, - "/api/v1/applications/{name}/syncwindows": { - "get": { + "/api/v1/applications/{name}/rollback": { + "post": { "tags": [ "ApplicationService" ], - "summary": "Get returns sync windows of the application", - "operationId": "ApplicationService_GetApplicationSyncWindows", + "summary": "Rollback syncs an application to its target state", + "operationId": "ApplicationService_Rollback", "parameters": [ { "type": "string", "name": "name", "in": "path", "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/applicationApplicationRollbackRequest" + } } ], "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/applicationApplicationSyncWindowsResponse" + "$ref": "#/definitions/v1alpha1Application" } }, "default": { @@ -1456,30 +1721,41 @@ } } }, - "/api/v1/certificates": { - "get": { + "/api/v1/applications/{name}/spec": { + "put": { "tags": [ - "CertificateService" + "ApplicationService" ], - "summary": "List all available repository certificates", - "operationId": "CertificateService_ListCertificates", + "summary": "UpdateSpec updates an application spec", + "operationId": "ApplicationService_UpdateSpec", "parameters": [ { "type": "string", - "description": "A file-glob pattern (not regular expression) the host name has to match.", - "name": "hostNamePattern", + "name": "name", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1alpha1ApplicationSpec" + } + }, + { + "type": "boolean", + "name": "validate", "in": "query" }, { "type": "string", - "description": "The type of the certificate 
to match (ssh or https).", - "name": "certType", + "name": "appNamespace", "in": "query" }, { "type": "string", - "description": "The sub type of the certificate to match (protocol dependent, usually only used for ssh certs).", - "name": "certSubType", + "name": "project", "in": "query" } ], @@ -1487,7 +1763,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/v1alpha1RepositoryCertificateList" + "$ref": "#/definitions/v1alpha1ApplicationSpec" } }, "default": { @@ -1497,35 +1773,36 @@ } } } - }, + } + }, + "/api/v1/applications/{name}/sync": { "post": { "tags": [ - "CertificateService" + "ApplicationService" ], - "summary": "Creates repository certificates on the server", - "operationId": "CertificateService_CreateCertificate", + "summary": "Sync syncs an application to its target state", + "operationId": "ApplicationService_Sync", "parameters": [ { - "description": "List of certificates to be created", + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { "name": "body", "in": "body", "required": true, "schema": { - "$ref": "#/definitions/v1alpha1RepositoryCertificateList" + "$ref": "#/definitions/applicationApplicationSyncRequest" } - }, - { - "type": "boolean", - "description": "Whether to upsert already existing certificates.", - "name": "upsert", - "in": "query" } ], "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/v1alpha1RepositoryCertificateList" + "$ref": "#/definitions/v1alpha1Application" } }, "default": { @@ -1535,30 +1812,30 @@ } } } - }, - "delete": { + } + }, + "/api/v1/applications/{name}/syncwindows": { + "get": { "tags": [ - "CertificateService" + "ApplicationService" ], - "summary": "Delete the certificates that match the RepositoryCertificateQuery", - "operationId": "CertificateService_DeleteCertificate", + "summary": "Get returns sync windows of the application", + "operationId": "ApplicationService_GetApplicationSyncWindows", 
"parameters": [ { "type": "string", - "description": "A file-glob pattern (not regular expression) the host name has to match.", - "name": "hostNamePattern", - "in": "query" + "name": "name", + "in": "path", + "required": true }, { "type": "string", - "description": "The type of the certificate to match (ssh or https).", - "name": "certType", + "name": "appNamespace", "in": "query" }, { "type": "string", - "description": "The sub type of the certificate to match (protocol dependent, usually only used for ssh certs).", - "name": "certSubType", + "name": "project", "in": "query" } ], @@ -1566,7 +1843,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/v1alpha1RepositoryCertificateList" + "$ref": "#/definitions/applicationApplicationSyncWindowsResponse" } }, "default": { @@ -1578,22 +1855,34 @@ } } }, - "/api/v1/clusters": { + "/api/v1/applicationsets": { "get": { "tags": [ - "ClusterService" + "ApplicationSetService" ], - "summary": "List returns list of clusters", - "operationId": "ClusterService_List", + "summary": "List returns list of applicationset", + "operationId": "ApplicationSetService_List", "parameters": [ + { + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi", + "description": "the project names to restrict returned list applicationsets.", + "name": "projects", + "in": "query" + }, { "type": "string", - "name": "server", + "description": "the selector to restrict returned list to applications only with matched labels.", + "name": "selector", "in": "query" }, { "type": "string", - "name": "name", + "description": "The application set namespace. 
Default empty is argocd control plane namespace.", + "name": "appsetNamespace", "in": "query" } ], @@ -1601,7 +1890,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/v1alpha1ClusterList" + "$ref": "#/definitions/v1alpha1ApplicationSetList" } }, "default": { @@ -1614,17 +1903,17 @@ }, "post": { "tags": [ - "ClusterService" + "ApplicationSetService" ], - "summary": "Create creates a cluster", - "operationId": "ClusterService_Create", + "summary": "Create creates an applicationset", + "operationId": "ApplicationSetService_Create", "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { - "$ref": "#/definitions/v1alpha1Cluster" + "$ref": "#/definitions/v1alpha1ApplicationSet" } }, { @@ -1637,7 +1926,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/v1alpha1Cluster" + "$ref": "#/definitions/v1alpha1ApplicationSet" } }, "default": { @@ -1649,36 +1938,25 @@ } } }, - "/api/v1/clusters/{cluster.server}": { - "put": { + "/api/v1/applicationsets/{name}": { + "get": { "tags": [ - "ClusterService" + "ApplicationSetService" ], - "summary": "Update updates a cluster", - "operationId": "ClusterService_Update", + "summary": "Get returns an applicationset by name", + "operationId": "ApplicationSetService_Get", "parameters": [ { "type": "string", - "description": "Server is the API server URL of the Kubernetes cluster", - "name": "cluster.server", + "description": "the applicationset's name", + "name": "name", "in": "path", "required": true }, { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/v1alpha1Cluster" - } - }, - { - "type": "array", - "items": { - "type": "string" - }, - "collectionFormat": "multi", - "name": "updatedFields", + "type": "string", + "description": "The application set namespace. 
Default empty is argocd control plane namespace.", + "name": "appsetNamespace", "in": "query" } ], @@ -1686,7 +1964,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/v1alpha1Cluster" + "$ref": "#/definitions/v1alpha1ApplicationSet" } }, "default": { @@ -1696,25 +1974,24 @@ } } } - } - }, - "/api/v1/clusters/{server}": { - "get": { + }, + "delete": { "tags": [ - "ClusterService" + "ApplicationSetService" ], - "summary": "Get returns a cluster by server address", - "operationId": "ClusterService_Get", + "summary": "Delete deletes an application set", + "operationId": "ApplicationSetService_Delete", "parameters": [ { "type": "string", - "name": "server", + "name": "name", "in": "path", "required": true }, { "type": "string", - "name": "name", + "description": "The application set namespace. Default empty is argocd control plane namespace.", + "name": "appsetNamespace", "in": "query" } ], @@ -1722,7 +1999,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/v1alpha1Cluster" + "$ref": "#/definitions/applicationsetApplicationSetResponse" } }, "default": { @@ -1732,23 +2009,32 @@ } } } - }, - "delete": { - "tags": [ - "ClusterService" + } + }, + "/api/v1/certificates": { + "get": { + "tags": [ + "CertificateService" ], - "summary": "Delete deletes a cluster", - "operationId": "ClusterService_Delete", + "summary": "List all available repository certificates", + "operationId": "CertificateService_ListCertificates", "parameters": [ { "type": "string", - "name": "server", - "in": "path", - "required": true + "description": "A file-glob pattern (not regular expression) the host name has to match.", + "name": "hostNamePattern", + "in": "query" }, { "type": "string", - "name": "name", + "description": "The type of the certificate to match (ssh or https).", + "name": "certType", + "in": "query" + }, + { + "type": "string", + "description": "The sub type of the certificate to match (protocol 
dependent, usually only used for ssh certs).", + "name": "certSubType", "in": "query" } ], @@ -1756,7 +2042,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/clusterClusterResponse" + "$ref": "#/definitions/v1alpha1RepositoryCertificateList" } }, "default": { @@ -1766,28 +2052,35 @@ } } } - } - }, - "/api/v1/clusters/{server}/invalidate-cache": { + }, "post": { "tags": [ - "ClusterService" + "CertificateService" ], - "summary": "InvalidateCache invalidates cluster cache", - "operationId": "ClusterService_InvalidateCache", + "summary": "Creates repository certificates on the server", + "operationId": "CertificateService_CreateCertificate", "parameters": [ { - "type": "string", - "name": "server", - "in": "path", - "required": true + "description": "List of certificates to be created", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1alpha1RepositoryCertificateList" + } + }, + { + "type": "boolean", + "description": "Whether to upsert already existing certificates.", + "name": "upsert", + "in": "query" } ], "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/v1alpha1Cluster" + "$ref": "#/definitions/v1alpha1RepositoryCertificateList" } }, "default": { @@ -1797,28 +2090,38 @@ } } } - } - }, - "/api/v1/clusters/{server}/rotate-auth": { - "post": { + }, + "delete": { "tags": [ - "ClusterService" + "CertificateService" ], - "summary": "RotateAuth rotates the bearer token used for a cluster", - "operationId": "ClusterService_RotateAuth", + "summary": "Delete the certificates that match the RepositoryCertificateQuery", + "operationId": "CertificateService_DeleteCertificate", "parameters": [ { "type": "string", - "name": "server", - "in": "path", - "required": true + "description": "A file-glob pattern (not regular expression) the host name has to match.", + "name": "hostNamePattern", + "in": "query" + }, + { + "type": "string", + 
"description": "The type of the certificate to match (ssh or https).", + "name": "certType", + "in": "query" + }, + { + "type": "string", + "description": "The sub type of the certificate to match (protocol dependent, usually only used for ssh certs).", + "name": "certSubType", + "in": "query" } ], "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/clusterClusterResponse" + "$ref": "#/definitions/v1alpha1RepositoryCertificateList" } }, "default": { @@ -1830,18 +2133,34 @@ } } }, - "/api/v1/gpgkeys": { + "/api/v1/clusters": { "get": { "tags": [ - "GPGKeyService" + "ClusterService" ], - "summary": "List all available repository certificates", - "operationId": "GPGKeyService_List", + "summary": "List returns list of clusters", + "operationId": "ClusterService_List", "parameters": [ { "type": "string", - "description": "The GPG key ID to query for.", - "name": "keyID", + "name": "server", + "in": "query" + }, + { + "type": "string", + "name": "name", + "in": "query" + }, + { + "type": "string", + "description": "type is the type of the specified cluster identifier ( \"server\" - default, \"name\" ).", + "name": "id.type", + "in": "query" + }, + { + "type": "string", + "description": "value holds the cluster server URL or cluster name.", + "name": "id.value", "in": "query" } ], @@ -1849,7 +2168,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/v1alpha1GnuPGPublicKeyList" + "$ref": "#/definitions/v1alpha1ClusterList" } }, "default": { @@ -1862,23 +2181,21 @@ }, "post": { "tags": [ - "GPGKeyService" + "ClusterService" ], - "summary": "Create one or more GPG public keys in the server's configuration", - "operationId": "GPGKeyService_Create", + "summary": "Create creates a cluster", + "operationId": "ClusterService_Create", "parameters": [ { - "description": "Raw key data of the GPG key(s) to create", "name": "body", "in": "body", "required": true, "schema": { - "$ref": 
"#/definitions/v1alpha1GnuPGPublicKey" + "$ref": "#/definitions/v1alpha1Cluster" } }, { "type": "boolean", - "description": "Whether to upsert already existing public keys.", "name": "upsert", "in": "query" } @@ -1887,7 +2204,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/gpgkeyGnuPGPublicKeyCreateResponse" + "$ref": "#/definitions/v1alpha1Cluster" } }, "default": { @@ -1897,18 +2214,37 @@ } } } - }, - "delete": { + } + }, + "/api/v1/clusters/{id.value}": { + "get": { "tags": [ - "GPGKeyService" + "ClusterService" ], - "summary": "Delete specified GPG public key from the server's configuration", - "operationId": "GPGKeyService_Delete", + "summary": "Get returns a cluster by server address", + "operationId": "ClusterService_Get", "parameters": [ { "type": "string", - "description": "The GPG key ID to query for.", - "name": "keyID", + "description": "value holds the cluster server URL or cluster name", + "name": "id.value", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "server", + "in": "query" + }, + { + "type": "string", + "name": "name", + "in": "query" + }, + { + "type": "string", + "description": "type is the type of the specified cluster identifier ( \"server\" - default, \"name\" ).", + "name": "id.type", "in": "query" } ], @@ -1916,7 +2252,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/gpgkeyGnuPGPublicKeyResponse" + "$ref": "#/definitions/v1alpha1Cluster" } }, "default": { @@ -1926,29 +2262,50 @@ } } } - } - }, - "/api/v1/gpgkeys/{keyID}": { - "get": { + }, + "put": { "tags": [ - "GPGKeyService" + "ClusterService" ], - "summary": "Get information about specified GPG public key from the server", - "operationId": "GPGKeyService_Get", + "summary": "Update updates a cluster", + "operationId": "ClusterService_Update", "parameters": [ { "type": "string", - "description": "The GPG key ID to query for", - "name": "keyID", + "description": "value 
holds the cluster server URL or cluster name", + "name": "id.value", "in": "path", "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1alpha1Cluster" + } + }, + { + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi", + "name": "updatedFields", + "in": "query" + }, + { + "type": "string", + "description": "type is the type of the specified cluster identifier ( \"server\" - default, \"name\" ).", + "name": "id.type", + "in": "query" } ], "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/v1alpha1GnuPGPublicKey" + "$ref": "#/definitions/v1alpha1Cluster" } }, "default": { @@ -1958,27 +2315,43 @@ } } } - } - }, - "/api/v1/projects": { - "get": { + }, + "delete": { "tags": [ - "ProjectService" + "ClusterService" ], - "summary": "List returns list of projects", - "operationId": "ProjectService_List", + "summary": "Delete deletes a cluster", + "operationId": "ClusterService_Delete", "parameters": [ + { + "type": "string", + "description": "value holds the cluster server URL or cluster name", + "name": "id.value", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "server", + "in": "query" + }, { "type": "string", "name": "name", "in": "query" + }, + { + "type": "string", + "description": "type is the type of the specified cluster identifier ( \"server\" - default, \"name\" ).", + "name": "id.type", + "in": "query" } ], "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/v1alpha1AppProjectList" + "$ref": "#/definitions/clusterClusterResponse" } }, "default": { @@ -1988,28 +2361,29 @@ } } } - }, + } + }, + "/api/v1/clusters/{id.value}/invalidate-cache": { "post": { "tags": [ - "ProjectService" + "ClusterService" ], - "summary": "Create a new project", - "operationId": "ProjectService_Create", + "summary": "InvalidateCache invalidates cluster cache", + 
"operationId": "ClusterService_InvalidateCache", "parameters": [ { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/projectProjectCreateRequest" - } + "type": "string", + "description": "value holds the cluster server URL or cluster name", + "name": "id.value", + "in": "path", + "required": true } ], "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/v1alpha1AppProject" + "$ref": "#/definitions/v1alpha1Cluster" } }, "default": { @@ -2021,17 +2395,18 @@ } } }, - "/api/v1/projects/{name}": { - "get": { + "/api/v1/clusters/{id.value}/rotate-auth": { + "post": { "tags": [ - "ProjectService" + "ClusterService" ], - "summary": "Get returns a project by name", - "operationId": "ProjectService_Get", + "summary": "RotateAuth rotates the bearer token used for a cluster", + "operationId": "ClusterService_RotateAuth", "parameters": [ { "type": "string", - "name": "name", + "description": "value holds the cluster server URL or cluster name", + "name": "id.value", "in": "path", "required": true } @@ -2040,7 +2415,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/v1alpha1AppProject" + "$ref": "#/definitions/clusterClusterResponse" } }, "default": { @@ -2050,26 +2425,28 @@ } } } - }, - "delete": { - "tags": [ - "ProjectService" + } + }, + "/api/v1/gpgkeys": { + "get": { + "tags": [ + "GPGKeyService" ], - "summary": "Delete deletes a project", - "operationId": "ProjectService_Delete", + "summary": "List all available repository certificates", + "operationId": "GPGKeyService_List", "parameters": [ { "type": "string", - "name": "name", - "in": "path", - "required": true + "description": "The GPG key ID to query for.", + "name": "keyID", + "in": "query" } ], "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/projectEmptyResponse" + "$ref": "#/definitions/v1alpha1GnuPGPublicKeyList" } }, "default": { 
@@ -2079,28 +2456,35 @@ } } } - } - }, - "/api/v1/projects/{name}/events": { - "get": { + }, + "post": { "tags": [ - "ProjectService" + "GPGKeyService" ], - "summary": "ListEvents returns a list of project events", - "operationId": "ProjectService_ListEvents", + "summary": "Create one or more GPG public keys in the server's configuration", + "operationId": "GPGKeyService_Create", "parameters": [ { - "type": "string", - "name": "name", - "in": "path", - "required": true + "description": "Raw key data of the GPG key(s) to create", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1alpha1GnuPGPublicKey" + } + }, + { + "type": "boolean", + "description": "Whether to upsert already existing public keys.", + "name": "upsert", + "in": "query" } ], "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/v1EventList" + "$ref": "#/definitions/gpgkeyGnuPGPublicKeyCreateResponse" } }, "default": { @@ -2110,28 +2494,26 @@ } } } - } - }, - "/api/v1/projects/{name}/globalprojects": { - "get": { + }, + "delete": { "tags": [ - "ProjectService" + "GPGKeyService" ], - "summary": "Get returns a virtual project by name", - "operationId": "ProjectService_GetGlobalProjects", + "summary": "Delete specified GPG public key from the server's configuration", + "operationId": "GPGKeyService_Delete", "parameters": [ { "type": "string", - "name": "name", - "in": "path", - "required": true + "description": "The GPG key ID to query for.", + "name": "keyID", + "in": "query" } ], "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/projectGlobalProjectsResponse" + "$ref": "#/definitions/gpgkeyGnuPGPublicKeyResponse" } }, "default": { @@ -2143,17 +2525,18 @@ } } }, - "/api/v1/projects/{name}/syncwindows": { + "/api/v1/gpgkeys/{keyID}": { "get": { "tags": [ - "ProjectService" + "GPGKeyService" ], - "summary": "GetSchedulesState returns true if there are any active 
sync syncWindows", - "operationId": "ProjectService_GetSyncWindowsState", + "summary": "Get information about specified GPG public key from the server", + "operationId": "GPGKeyService_Get", "parameters": [ { "type": "string", - "name": "name", + "description": "The GPG key ID to query for", + "name": "keyID", "in": "path", "required": true } @@ -2162,7 +2545,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/projectSyncWindowsResponse" + "$ref": "#/definitions/v1alpha1GnuPGPublicKey" } }, "default": { @@ -2174,35 +2557,18 @@ } } }, - "/api/v1/projects/{project.metadata.name}": { - "put": { + "/api/v1/notifications/services": { + "get": { "tags": [ - "ProjectService" - ], - "summary": "Update updates a project", - "operationId": "ProjectService_Update", - "parameters": [ - { - "type": "string", - "description": "Name must be unique within a namespace. Is required when creating resources, although\nsome resources may allow a client to request the generation of an appropriate name\nautomatically. 
Name is primarily intended for creation idempotence and configuration\ndefinition.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#names\n+optional", - "name": "project.metadata.name", - "in": "path", - "required": true - }, - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/projectProjectUpdateRequest" - } - } + "NotificationService" ], + "summary": "List returns list of services", + "operationId": "NotificationService_ListServices", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/v1alpha1AppProject" + "$ref": "#/definitions/notificationServiceList" } }, "default": { @@ -2214,40 +2580,18 @@ } } }, - "/api/v1/projects/{project}/roles/{role}/token": { - "post": { + "/api/v1/notifications/templates": { + "get": { "tags": [ - "ProjectService" - ], - "summary": "Create a new project token", - "operationId": "ProjectService_CreateToken", - "parameters": [ - { - "type": "string", - "name": "project", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "role", - "in": "path", - "required": true - }, - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/projectProjectTokenCreateRequest" - } - } + "NotificationService" ], + "summary": "List returns list of templates", + "operationId": "NotificationService_ListTemplates", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/projectProjectTokenResponse" + "$ref": "#/definitions/notificationTemplateList" } }, "default": { @@ -2259,44 +2603,18 @@ } } }, - "/api/v1/projects/{project}/roles/{role}/token/{iat}": { - "delete": { + "/api/v1/notifications/triggers": { + "get": { "tags": [ - "ProjectService" - ], - "summary": "Delete a new project token", - "operationId": "ProjectService_DeleteToken", - "parameters": [ - { - "type": "string", - "name": "project", - "in": "path", - "required": 
true - }, - { - "type": "string", - "name": "role", - "in": "path", - "required": true - }, - { - "type": "string", - "format": "int64", - "name": "iat", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "id", - "in": "query" - } + "NotificationService" ], + "summary": "List returns list of triggers", + "operationId": "NotificationService_ListTriggers", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/projectEmptyResponse" + "$ref": "#/definitions/notificationTriggerList" } }, "default": { @@ -2308,18 +2626,17 @@ } } }, - "/api/v1/repocreds": { + "/api/v1/projects": { "get": { "tags": [ - "RepoCredsService" + "ProjectService" ], - "summary": "ListRepositoryCredentials gets a list of all configured repository credential sets", - "operationId": "RepoCredsService_ListRepositoryCredentials", + "summary": "List returns list of projects", + "operationId": "ProjectService_List", "parameters": [ { "type": "string", - "description": "Repo URL for query.", - "name": "url", + "name": "name", "in": "query" } ], @@ -2327,7 +2644,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/v1alpha1RepoCredsList" + "$ref": "#/definitions/v1alpha1AppProjectList" } }, "default": { @@ -2340,32 +2657,25 @@ }, "post": { "tags": [ - "RepoCredsService" + "ProjectService" ], - "summary": "CreateRepositoryCredentials creates a new repository credential set", - "operationId": "RepoCredsService_CreateRepositoryCredentials", + "summary": "Create a new project", + "operationId": "ProjectService_Create", "parameters": [ { - "description": "Repository definition", "name": "body", "in": "body", "required": true, "schema": { - "$ref": "#/definitions/v1alpha1RepoCreds" + "$ref": "#/definitions/projectProjectCreateRequest" } - }, - { - "type": "boolean", - "description": "Whether to create in upsert mode.", - "name": "upsert", - "in": "query" } ], "responses": { "200": { "description": "A 
successful response.", "schema": { - "$ref": "#/definitions/v1alpha1RepoCreds" + "$ref": "#/definitions/v1alpha1AppProject" } }, "default": { @@ -2377,35 +2687,26 @@ } } }, - "/api/v1/repocreds/{creds.url}": { - "put": { + "/api/v1/projects/{name}": { + "get": { "tags": [ - "RepoCredsService" + "ProjectService" ], - "summary": "UpdateRepositoryCredentials updates a repository credential set", - "operationId": "RepoCredsService_UpdateRepositoryCredentials", + "summary": "Get returns a project by name", + "operationId": "ProjectService_Get", "parameters": [ { "type": "string", - "description": "URL is the URL that this credentials matches to", - "name": "creds.url", + "name": "name", "in": "path", "required": true - }, - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/v1alpha1RepoCreds" - } } ], "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/v1alpha1RepoCreds" + "$ref": "#/definitions/v1alpha1AppProject" } }, "default": { @@ -2415,19 +2716,17 @@ } } } - } - }, - "/api/v1/repocreds/{url}": { + }, "delete": { "tags": [ - "RepoCredsService" + "ProjectService" ], - "summary": "DeleteRepositoryCredentials deletes a repository credential set from the configuration", - "operationId": "RepoCredsService_DeleteRepositoryCredentials", + "summary": "Delete deletes a project", + "operationId": "ProjectService_Delete", "parameters": [ { "type": "string", - "name": "url", + "name": "name", "in": "path", "required": true } @@ -2436,7 +2735,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/repocredsRepoCredsResponse" + "$ref": "#/definitions/projectEmptyResponse" } }, "default": { @@ -2448,32 +2747,26 @@ } } }, - "/api/v1/repositories": { + "/api/v1/projects/{name}/detailed": { "get": { "tags": [ - "RepositoryService" + "ProjectService" ], - "summary": "ListRepositories gets a list of all configured repositories", - "operationId": 
"RepositoryService_ListRepositories", + "summary": "GetDetailedProject returns a project that include project, global project and scoped resources by name", + "operationId": "ProjectService_GetDetailedProject", "parameters": [ { "type": "string", - "description": "Repo URL for query.", - "name": "repo", - "in": "query" - }, - { - "type": "boolean", - "description": "Whether to force a cache refresh on repo's connection state.", - "name": "forceRefresh", - "in": "query" + "name": "name", + "in": "path", + "required": true } ], "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/v1alpha1RepositoryList" + "$ref": "#/definitions/projectDetailedProjectsResponse" } }, "default": { @@ -2483,41 +2776,59 @@ } } } - }, - "post": { - "tags": [ - "RepositoryService" + } + }, + "/api/v1/projects/{name}/events": { + "get": { + "tags": [ + "ProjectService" ], - "summary": "CreateRepository creates a new repository configuration", - "operationId": "RepositoryService_CreateRepository", + "summary": "ListEvents returns a list of project events", + "operationId": "ProjectService_ListEvents", "parameters": [ { - "description": "Repository definition", - "name": "body", - "in": "body", - "required": true, + "type": "string", + "name": "name", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "A successful response.", "schema": { - "$ref": "#/definitions/v1alpha1Repository" + "$ref": "#/definitions/v1EventList" } }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/projects/{name}/globalprojects": { + "get": { + "tags": [ + "ProjectService" + ], + "summary": "Get returns a virtual project by name", + "operationId": "ProjectService_GetGlobalProjects", + "parameters": [ { - "type": "boolean", - "description": "Whether to create in upsert mode.", - "name": "upsert", - "in": "query" - }, - { - "type": 
"boolean", - "description": "Whether to operate on credential set instead of repository.", - "name": "credsOnly", - "in": "query" + "type": "string", + "name": "name", + "in": "path", + "required": true } ], "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/v1alpha1Repository" + "$ref": "#/definitions/projectGlobalProjectsResponse" } }, "default": { @@ -2529,35 +2840,26 @@ } } }, - "/api/v1/repositories/{repo.repo}": { - "put": { + "/api/v1/projects/{name}/links": { + "get": { "tags": [ - "RepositoryService" + "ProjectService" ], - "summary": "UpdateRepository updates a repository configuration", - "operationId": "RepositoryService_UpdateRepository", + "summary": "ListLinks returns all deep links for the particular project", + "operationId": "ProjectService_ListLinks", "parameters": [ { "type": "string", - "description": "Repo contains the URL to the remote repository", - "name": "repo.repo", + "name": "name", "in": "path", "required": true - }, - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/v1alpha1Repository" - } } ], "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/v1alpha1Repository" + "$ref": "#/definitions/applicationLinksResponse" } }, "default": { @@ -2569,33 +2871,26 @@ } } }, - "/api/v1/repositories/{repo}": { + "/api/v1/projects/{name}/syncwindows": { "get": { "tags": [ - "RepositoryService" + "ProjectService" ], - "summary": "Get returns a repository or its credentials", - "operationId": "RepositoryService_Get", + "summary": "GetSchedulesState returns true if there are any active sync syncWindows", + "operationId": "ProjectService_GetSyncWindowsState", "parameters": [ { "type": "string", - "description": "Repo URL for query", - "name": "repo", + "name": "name", "in": "path", "required": true - }, - { - "type": "boolean", - "description": "Whether to force a cache refresh on repo's connection state.", 
- "name": "forceRefresh", - "in": "query" } ], "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/v1alpha1Repository" + "$ref": "#/definitions/projectSyncWindowsResponse" } }, "default": { @@ -2605,33 +2900,37 @@ } } } - }, - "delete": { + } + }, + "/api/v1/projects/{project.metadata.name}": { + "put": { "tags": [ - "RepositoryService" + "ProjectService" ], - "summary": "DeleteRepository deletes a repository from the configuration", - "operationId": "RepositoryService_DeleteRepository", + "summary": "Update updates a project", + "operationId": "ProjectService_Update", "parameters": [ { "type": "string", - "description": "Repo URL for query", - "name": "repo", + "description": "Name must be unique within a namespace. Is required when creating resources, although\nsome resources may allow a client to request the generation of an appropriate name\nautomatically. Name is primarily intended for creation idempotence and configuration\ndefinition.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#names\n+optional", + "name": "project.metadata.name", "in": "path", "required": true }, { - "type": "boolean", - "description": "Whether to force a cache refresh on repo's connection state.", - "name": "forceRefresh", - "in": "query" + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/projectProjectUpdateRequest" + } } ], "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/repositoryRepoResponse" + "$ref": "#/definitions/v1alpha1AppProject" } }, "default": { @@ -2643,31 +2942,40 @@ } } }, - "/api/v1/repositories/{repo}/apps": { - "get": { + "/api/v1/projects/{project}/roles/{role}/token": { + "post": { "tags": [ - "RepositoryService" + "ProjectService" ], - "summary": "ListApps returns list of apps in the repe", - "operationId": "RepositoryService_ListApps", + "summary": "Create a new project token", + 
"operationId": "ProjectService_CreateToken", "parameters": [ { "type": "string", - "name": "repo", + "name": "project", "in": "path", "required": true }, { "type": "string", - "name": "revision", - "in": "query" + "name": "role", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/projectProjectTokenCreateRequest" + } } ], "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/repositoryRepoAppsResponse" + "$ref": "#/definitions/projectProjectTokenResponse" } }, "default": { @@ -2679,25 +2987,36 @@ } } }, - "/api/v1/repositories/{repo}/helmcharts": { - "get": { + "/api/v1/projects/{project}/roles/{role}/token/{iat}": { + "delete": { "tags": [ - "RepositoryService" + "ProjectService" ], - "summary": "GetHelmCharts returns list of helm charts in the specified repository", - "operationId": "RepositoryService_GetHelmCharts", + "summary": "Delete a new project token", + "operationId": "ProjectService_DeleteToken", "parameters": [ { "type": "string", - "description": "Repo URL for query", - "name": "repo", + "name": "project", "in": "path", "required": true }, { - "type": "boolean", - "description": "Whether to force a cache refresh on repo's connection state.", - "name": "forceRefresh", + "type": "string", + "name": "role", + "in": "path", + "required": true + }, + { + "type": "string", + "format": "int64", + "name": "iat", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "id", "in": "query" } ], @@ -2705,7 +3024,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/repositoryHelmChartsResponse" + "$ref": "#/definitions/projectEmptyResponse" } }, "default": { @@ -2717,24 +3036,56 @@ } } }, - "/api/v1/repositories/{repo}/refs": { + "/api/v1/repocreds": { "get": { "tags": [ - "RepositoryService" + "RepoCredsService" ], - "operationId": "RepositoryService_ListRefs", + "summary": 
"ListRepositoryCredentials gets a list of all configured repository credential sets", + "operationId": "RepoCredsService_ListRepositoryCredentials", "parameters": [ { "type": "string", - "description": "Repo URL for query", - "name": "repo", - "in": "path", - "required": true + "description": "Repo URL for query.", + "name": "url", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1RepoCredsList" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "post": { + "tags": [ + "RepoCredsService" + ], + "summary": "CreateRepositoryCredentials creates a new repository credential set", + "operationId": "RepoCredsService_CreateRepositoryCredentials", + "parameters": [ + { + "description": "Repository definition", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1alpha1RepoCreds" + } }, { "type": "boolean", - "description": "Whether to force a cache refresh on repo's connection state.", - "name": "forceRefresh", + "description": "Whether to create in upsert mode.", + "name": "upsert", "in": "query" } ], @@ -2742,7 +3093,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/repositoryRefs" + "$ref": "#/definitions/v1alpha1RepoCreds" } }, "default": { @@ -2754,108 +3105,139 @@ } } }, - "/api/v1/repositories/{repo}/validate": { - "post": { + "/api/v1/repocreds/{creds.url}": { + "put": { "tags": [ - "RepositoryService" + "RepoCredsService" ], - "summary": "ValidateAccess validates access to a repository with given parameters", - "operationId": "RepositoryService_ValidateAccess", + "summary": "UpdateRepositoryCredentials updates a repository credential set", + "operationId": "RepoCredsService_UpdateRepositoryCredentials", "parameters": [ { "type": "string", - "description": "The URL to the repo", - "name": "repo", + 
"description": "URL is the URL that this credentials matches to", + "name": "creds.url", "in": "path", "required": true }, { - "description": "The URL to the repo", "name": "body", "in": "body", "required": true, "schema": { - "type": "string" + "$ref": "#/definitions/v1alpha1RepoCreds" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1RepoCreds" } }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/repocreds/{url}": { + "delete": { + "tags": [ + "RepoCredsService" + ], + "summary": "DeleteRepositoryCredentials deletes a repository credential set from the configuration", + "operationId": "RepoCredsService_DeleteRepositoryCredentials", + "parameters": [ { "type": "string", - "description": "Username for accessing repo.", - "name": "username", - "in": "query" - }, - { - "type": "string", - "description": "Password for accessing repo.", - "name": "password", - "in": "query" + "name": "url", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/repocredsRepoCredsResponse" + } }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/repositories": { + "get": { + "tags": [ + "RepositoryService" + ], + "summary": "ListRepositories gets a list of all configured repositories", + "operationId": "RepositoryService_ListRepositories", + "parameters": [ { "type": "string", - "description": "Private key data for accessing SSH repository.", - "name": "sshPrivateKey", + "description": "Repo URL for query.", + "name": "repo", "in": "query" }, { "type": "boolean", - "description": "Whether to skip certificate or host key validation.", - "name": "insecure", - "in": "query" - }, - { - "type": "string", - 
"description": "TLS client cert data for accessing HTTPS repository.", - "name": "tlsClientCertData", - "in": "query" - }, - { - "type": "string", - "description": "TLS client cert key for accessing HTTPS repository.", - "name": "tlsClientCertKey", - "in": "query" - }, - { - "type": "string", - "description": "The type of the repo.", - "name": "type", + "description": "Whether to force a cache refresh on repo's connection state.", + "name": "forceRefresh", "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1RepositoryList" + } }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "post": { + "tags": [ + "RepositoryService" + ], + "summary": "CreateRepository creates a new repository configuration", + "operationId": "RepositoryService_CreateRepository", + "parameters": [ { - "type": "string", - "description": "The name of the repo.", - "name": "name", - "in": "query" + "description": "Repository definition", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1alpha1Repository" + } }, { "type": "boolean", - "description": "Whether helm-oci support should be enabled for this repo.", - "name": "enableOci", - "in": "query" - }, - { - "type": "string", - "description": "Github App Private Key PEM data.", - "name": "githubAppPrivateKey", - "in": "query" - }, - { - "type": "string", - "format": "int64", - "description": "Github App ID of the app used to access the repo.", - "name": "githubAppID", - "in": "query" - }, - { - "type": "string", - "format": "int64", - "description": "Github App Installation ID of the installed GitHub App.", - "name": "githubAppInstallationID", + "description": "Whether to create in upsert mode.", + "name": "upsert", "in": "query" }, { - "type": "string", - "description": "Github App Enterprise base url if empty will default to 
https://api.github.com.", - "name": "githubAppEnterpriseBaseUrl", + "type": "boolean", + "description": "Whether to operate on credential set instead of repository.", + "name": "credsOnly", "in": "query" } ], @@ -2863,7 +3245,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/repositoryRepoResponse" + "$ref": "#/definitions/v1alpha1Repository" } }, "default": { @@ -2875,18 +3257,18 @@ } } }, - "/api/v1/repositories/{source.repoURL}/appdetails": { - "post": { + "/api/v1/repositories/{repo.repo}": { + "put": { "tags": [ "RepositoryService" ], - "summary": "GetAppDetails returns application details by given path", - "operationId": "RepositoryService_GetAppDetails", + "summary": "UpdateRepository updates a repository configuration", + "operationId": "RepositoryService_UpdateRepository", "parameters": [ { "type": "string", - "description": "RepoURL is the URL to the repository (Git or Helm) that contains the application manifests", - "name": "source.repoURL", + "description": "Repo contains the URL to the remote repository", + "name": "repo.repo", "in": "path", "required": true }, @@ -2895,7 +3277,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/repositoryRepoAppDetailsQuery" + "$ref": "#/definitions/v1alpha1Repository" } } ], @@ -2903,7 +3285,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/repositoryRepoAppDetailsResponse" + "$ref": "#/definitions/v1alpha1Repository" } }, "default": { @@ -2915,28 +3297,33 @@ } } }, - "/api/v1/session": { - "post": { + "/api/v1/repositories/{repo}": { + "get": { "tags": [ - "SessionService" + "RepositoryService" ], - "summary": "Create a new JWT for authentication and set a cookie if using HTTP", - "operationId": "SessionService_Create", + "summary": "Get returns a repository or its credentials", + "operationId": "RepositoryService_Get", "parameters": [ { - "name": "body", - "in": "body", - "required": true, - "schema": { - 
"$ref": "#/definitions/sessionSessionCreateRequest" - } + "type": "string", + "description": "Repo URL for query", + "name": "repo", + "in": "path", + "required": true + }, + { + "type": "boolean", + "description": "Whether to force a cache refresh on repo's connection state.", + "name": "forceRefresh", + "in": "query" } ], "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/sessionSessionResponse" + "$ref": "#/definitions/v1alpha1Repository" } }, "default": { @@ -2949,15 +3336,30 @@ }, "delete": { "tags": [ - "SessionService" + "RepositoryService" + ], + "summary": "DeleteRepository deletes a repository from the configuration", + "operationId": "RepositoryService_DeleteRepository", + "parameters": [ + { + "type": "string", + "description": "Repo URL for query", + "name": "repo", + "in": "path", + "required": true + }, + { + "type": "boolean", + "description": "Whether to force a cache refresh on repo's connection state.", + "name": "forceRefresh", + "in": "query" + } ], - "summary": "Delete an existing JWT cookie if using HTTP", - "operationId": "SessionService_Delete", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/sessionSessionResponse" + "$ref": "#/definitions/repositoryRepoResponse" } }, "default": { @@ -2969,18 +3371,41 @@ } } }, - "/api/v1/session/userinfo": { + "/api/v1/repositories/{repo}/apps": { "get": { "tags": [ - "SessionService" + "RepositoryService" + ], + "summary": "ListApps returns list of apps in the repo", + "operationId": "RepositoryService_ListApps", + "parameters": [ + { + "type": "string", + "name": "repo", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "revision", + "in": "query" + }, + { + "type": "string", + "name": "appName", + "in": "query" + }, + { + "type": "string", + "name": "appProject", + "in": "query" + } ], - "summary": "Get the current user's info", - "operationId": "SessionService_GetUserInfo", 
"responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/sessionGetUserInfoResponse" + "$ref": "#/definitions/repositoryRepoAppsResponse" } }, "default": { @@ -2992,18 +3417,33 @@ } } }, - "/api/v1/settings": { + "/api/v1/repositories/{repo}/helmcharts": { "get": { "tags": [ - "SettingsService" + "RepositoryService" ], - "summary": "Get returns Argo CD settings", - "operationId": "SettingsService_Get", - "responses": { + "summary": "GetHelmCharts returns list of helm charts in the specified repository", + "operationId": "RepositoryService_GetHelmCharts", + "parameters": [ + { + "type": "string", + "description": "Repo URL for query", + "name": "repo", + "in": "path", + "required": true + }, + { + "type": "boolean", + "description": "Whether to force a cache refresh on repo's connection state.", + "name": "forceRefresh", + "in": "query" + } + ], + "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/clusterSettings" + "$ref": "#/definitions/repositoryHelmChartsResponse" } }, "default": { @@ -3015,69 +3455,32 @@ } } }, - "/api/v1/stream/applications": { + "/api/v1/repositories/{repo}/refs": { "get": { "tags": [ - "ApplicationService" + "RepositoryService" ], - "summary": "Watch returns stream of application change events", - "operationId": "ApplicationService_Watch", + "operationId": "RepositoryService_ListRefs", "parameters": [ { "type": "string", - "description": "the application's name.", - "name": "name", - "in": "query" - }, - { - "type": "string", - "description": "forces application reconciliation if set to true.", - "name": "refresh", - "in": "query" - }, - { - "type": "array", - "items": { - "type": "string" - }, - "collectionFormat": "multi", - "description": "the project names to restrict returned list applications.", - "name": "project", - "in": "query" - }, - { - "type": "string", - "description": "when specified with a watch call, shows changes that occur after 
that particular version of a resource.", - "name": "resourceVersion", - "in": "query" - }, - { - "type": "string", - "description": "the selector to to restrict returned list to applications only with matched labels.", - "name": "selector", - "in": "query" + "description": "Repo URL for query", + "name": "repo", + "in": "path", + "required": true }, { - "type": "string", - "description": "the repoURL to restrict returned list applications.", - "name": "repo", + "type": "boolean", + "description": "Whether to force a cache refresh on repo's connection state.", + "name": "forceRefresh", "in": "query" } ], "responses": { "200": { - "description": "A successful response.(streaming responses)", + "description": "A successful response.", "schema": { - "type": "object", - "title": "Stream result of v1alpha1ApplicationWatchEvent", - "properties": { - "error": { - "$ref": "#/definitions/runtimeStreamError" - }, - "result": { - "$ref": "#/definitions/v1alpha1ApplicationWatchEvent" - } - } + "$ref": "#/definitions/repositoryRefs" } }, "default": { @@ -3089,60 +3492,140 @@ } } }, - "/api/v1/stream/applications/{applicationName}/resource-tree": { - "get": { + "/api/v1/repositories/{repo}/validate": { + "post": { "tags": [ - "ApplicationService" + "RepositoryService" ], - "summary": "Watch returns stream of application resource tree", - "operationId": "ApplicationService_WatchResourceTree", + "summary": "ValidateAccess validates access to a repository with given parameters", + "operationId": "RepositoryService_ValidateAccess", "parameters": [ { "type": "string", - "name": "applicationName", + "description": "The URL to the repo", + "name": "repo", "in": "path", "required": true }, + { + "description": "The URL to the repo", + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "string" + } + }, { "type": "string", - "name": "namespace", + "description": "Username for accessing repo.", + "name": "username", + "in": "query" + }, + { + "type": "string", + 
"description": "Password for accessing repo.", + "name": "password", + "in": "query" + }, + { + "type": "string", + "description": "Private key data for accessing SSH repository.", + "name": "sshPrivateKey", + "in": "query" + }, + { + "type": "boolean", + "description": "Whether to skip certificate or host key validation.", + "name": "insecure", + "in": "query" + }, + { + "type": "string", + "description": "TLS client cert data for accessing HTTPS repository.", + "name": "tlsClientCertData", "in": "query" }, { "type": "string", + "description": "TLS client cert key for accessing HTTPS repository.", + "name": "tlsClientCertKey", + "in": "query" + }, + { + "type": "string", + "description": "The type of the repo.", + "name": "type", + "in": "query" + }, + { + "type": "string", + "description": "The name of the repo.", "name": "name", "in": "query" }, + { + "type": "boolean", + "description": "Whether helm-oci support should be enabled for this repo.", + "name": "enableOci", + "in": "query" + }, { "type": "string", - "name": "version", + "description": "Github App Private Key PEM data.", + "name": "githubAppPrivateKey", "in": "query" }, { "type": "string", - "name": "group", + "format": "int64", + "description": "Github App ID of the app used to access the repo.", + "name": "githubAppID", "in": "query" }, { "type": "string", - "name": "kind", + "format": "int64", + "description": "Github App Installation ID of the installed GitHub App.", + "name": "githubAppInstallationID", + "in": "query" + }, + { + "type": "string", + "description": "Github App Enterprise base url if empty will default to https://api.github.com.", + "name": "githubAppEnterpriseBaseUrl", + "in": "query" + }, + { + "type": "string", + "description": "HTTP/HTTPS proxy to access the repository.", + "name": "proxy", + "in": "query" + }, + { + "type": "string", + "description": "Reference between project and repository that allow you automatically to be added as item inside SourceRepos project entity.", + 
"name": "project", + "in": "query" + }, + { + "type": "string", + "description": "Google Cloud Platform service account key.", + "name": "gcpServiceAccountKey", + "in": "query" + }, + { + "type": "boolean", + "description": "Whether to force HTTP basic auth.", + "name": "forceHttpBasicAuth", "in": "query" } ], "responses": { "200": { - "description": "A successful response.(streaming responses)", + "description": "A successful response.", "schema": { - "type": "object", - "title": "Stream result of v1alpha1ApplicationTree", - "properties": { - "error": { - "$ref": "#/definitions/runtimeStreamError" - }, - "result": { - "$ref": "#/definitions/v1alpha1ApplicationTree" - } - } + "$ref": "#/definitions/repositoryRepoResponse" } }, "default": { @@ -3154,18 +3637,35 @@ } } }, - "/api/version": { - "get": { + "/api/v1/repositories/{source.repoURL}/appdetails": { + "post": { "tags": [ - "VersionService" + "RepositoryService" + ], + "summary": "GetAppDetails returns application details by given path", + "operationId": "RepositoryService_GetAppDetails", + "parameters": [ + { + "type": "string", + "description": "RepoURL is the URL to the repository (Git or Helm) that contains the application manifests", + "name": "source.repoURL", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/repositoryRepoAppDetailsQuery" + } + } ], - "summary": "Version returns version information of the API server", - "operationId": "VersionService_Version", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/versionVersionMessage" + "$ref": "#/definitions/repositoryRepoAppDetailsResponse" } }, "default": { @@ -3176,314 +3676,1159 @@ } } } - } - }, - "definitions": { - "accountAccount": { - "type": "object", - "properties": { - "capabilities": { - "type": "array", - "items": { - "type": "string" - } - }, - "enabled": { - "type": "boolean" - }, - "name": { - "type": 
"string" - }, - "tokens": { - "type": "array", - "items": { - "$ref": "#/definitions/accountToken" - } - } - } }, - "accountAccountsList": { - "type": "object", - "properties": { - "items": { - "type": "array", - "items": { - "$ref": "#/definitions/accountAccount" - } + "/api/v1/session": { + "post": { + "tags": [ + "SessionService" + ], + "summary": "Create a new JWT for authentication and set a cookie if using HTTP", + "operationId": "SessionService_Create", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/sessionSessionCreateRequest" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/sessionSessionResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "delete": { + "tags": [ + "SessionService" + ], + "summary": "Delete an existing JWT cookie if using HTTP", + "operationId": "SessionService_Delete", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/sessionSessionResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/session/userinfo": { + "get": { + "tags": [ + "SessionService" + ], + "summary": "Get the current user's info", + "operationId": "SessionService_GetUserInfo", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/sessionGetUserInfoResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/settings": { + "get": { + "tags": [ + "SettingsService" + ], + "summary": "Get returns Argo CD settings", + "operationId": "SettingsService_Get", + "responses": { + "200": { + "description": "A 
successful response.", + "schema": { + "$ref": "#/definitions/clusterSettings" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/settings/plugins": { + "get": { + "tags": [ + "SettingsService" + ], + "summary": "Get returns Argo CD plugins", + "operationId": "SettingsService_GetPlugins", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/clusterSettingsPluginsResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/stream/applications": { + "get": { + "tags": [ + "ApplicationService" + ], + "summary": "Watch returns stream of application change events", + "operationId": "ApplicationService_Watch", + "parameters": [ + { + "type": "string", + "description": "the application's name.", + "name": "name", + "in": "query" + }, + { + "type": "string", + "description": "forces application reconciliation if set to 'hard'.", + "name": "refresh", + "in": "query" + }, + { + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi", + "description": "the project names to restrict returned list applications.", + "name": "projects", + "in": "query" + }, + { + "type": "string", + "description": "when specified with a watch call, shows changes that occur after that particular version of a resource.", + "name": "resourceVersion", + "in": "query" + }, + { + "type": "string", + "description": "the selector to restrict returned list to applications only with matched labels.", + "name": "selector", + "in": "query" + }, + { + "type": "string", + "description": "the repoURL to restrict returned list applications.", + "name": "repo", + "in": "query" + }, + { + "type": "string", + "description": "the application's namespace.", + "name": "appNamespace", + "in": "query" + }, + { + "type": 
"array", + "items": { + "type": "string" + }, + "collectionFormat": "multi", + "description": "the project names to restrict returned list applications (legacy name for backwards-compatibility).", + "name": "project", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.(streaming responses)", + "schema": { + "type": "object", + "title": "Stream result of v1alpha1ApplicationWatchEvent", + "properties": { + "error": { + "$ref": "#/definitions/runtimeStreamError" + }, + "result": { + "$ref": "#/definitions/v1alpha1ApplicationWatchEvent" + } + } + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/stream/applications/{applicationName}/resource-tree": { + "get": { + "tags": [ + "ApplicationService" + ], + "summary": "Watch returns stream of application resource tree", + "operationId": "ApplicationService_WatchResourceTree", + "parameters": [ + { + "type": "string", + "name": "applicationName", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "namespace", + "in": "query" + }, + { + "type": "string", + "name": "name", + "in": "query" + }, + { + "type": "string", + "name": "version", + "in": "query" + }, + { + "type": "string", + "name": "group", + "in": "query" + }, + { + "type": "string", + "name": "kind", + "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.(streaming responses)", + "schema": { + "type": "object", + "title": "Stream result of v1alpha1ApplicationTree", + "properties": { + "error": { + "$ref": "#/definitions/runtimeStreamError" + }, + "result": { + "$ref": "#/definitions/v1alpha1ApplicationTree" + } + } + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": 
"#/definitions/runtimeError" + } + } + } + } + }, + "/api/version": { + "get": { + "tags": [ + "VersionService" + ], + "summary": "Version returns version information of the API server", + "operationId": "VersionService_Version", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/versionVersionMessage" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + } + }, + "definitions": { + "accountAccount": { + "type": "object", + "properties": { + "capabilities": { + "type": "array", + "items": { + "type": "string" + } + }, + "enabled": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "tokens": { + "type": "array", + "items": { + "$ref": "#/definitions/accountToken" + } + } + } + }, + "accountAccountsList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/accountAccount" + } + } + } + }, + "accountCanIResponse": { + "type": "object", + "properties": { + "value": { + "type": "string" + } + } + }, + "accountCreateTokenRequest": { + "type": "object", + "properties": { + "expiresIn": { + "type": "integer", + "format": "int64", + "title": "expiresIn represents a duration in seconds" + }, + "id": { + "type": "string" + }, + "name": { + "type": "string" + } + } + }, + "accountCreateTokenResponse": { + "type": "object", + "properties": { + "token": { + "type": "string" + } + } + }, + "accountEmptyResponse": { + "type": "object" + }, + "accountToken": { + "type": "object", + "properties": { + "expiresAt": { + "type": "integer", + "format": "int64" + }, + "id": { + "type": "string" + }, + "issuedAt": { + "type": "integer", + "format": "int64" + } + } + }, + "accountUpdatePasswordRequest": { + "type": "object", + "properties": { + "currentPassword": { + "type": "string" + }, + "name": { + "type": "string" + }, + "newPassword": { + "type": "string" + } + } + 
}, + "accountUpdatePasswordResponse": { + "type": "object" + }, + "applicationApplicationManifestQueryWithFiles": { + "type": "object", + "properties": { + "appNamespace": { + "type": "string" + }, + "checksum": { + "type": "string" + }, + "name": { + "type": "string" + }, + "project": { + "type": "string" + } + } + }, + "applicationApplicationManifestQueryWithFilesWrapper": { + "type": "object", + "properties": { + "chunk": { + "$ref": "#/definitions/applicationFileChunk" + }, + "query": { + "$ref": "#/definitions/applicationApplicationManifestQueryWithFiles" + } + } + }, + "applicationApplicationPatchRequest": { + "type": "object", + "title": "ApplicationPatchRequest is a request to patch an application", + "properties": { + "appNamespace": { + "type": "string" + }, + "name": { + "type": "string" + }, + "patch": { + "type": "string" + }, + "patchType": { + "type": "string" + }, + "project": { + "type": "string" + } + } + }, + "applicationApplicationResourceResponse": { + "type": "object", + "properties": { + "manifest": { + "type": "string" + } + } + }, + "applicationApplicationResponse": { + "type": "object" + }, + "applicationApplicationRollbackRequest": { + "type": "object", + "properties": { + "appNamespace": { + "type": "string" + }, + "dryRun": { + "type": "boolean" + }, + "id": { + "type": "integer", + "format": "int64" + }, + "name": { + "type": "string" + }, + "project": { + "type": "string" + }, + "prune": { + "type": "boolean" + } + } + }, + "applicationApplicationSyncRequest": { + "type": "object", + "title": "ApplicationSyncRequest is a request to apply the config state to live state", + "properties": { + "appNamespace": { + "type": "string" + }, + "dryRun": { + "type": "boolean" + }, + "infos": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1Info" + } + }, + "manifests": { + "type": "array", + "items": { + "type": "string" + } + }, + "name": { + "type": "string" + }, + "project": { + "type": "string" + }, + "prune": { + "type": 
"boolean" + }, + "resources": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1SyncOperationResource" + } + }, + "retryStrategy": { + "$ref": "#/definitions/v1alpha1RetryStrategy" + }, + "revision": { + "type": "string" + }, + "strategy": { + "$ref": "#/definitions/v1alpha1SyncStrategy" + }, + "syncOptions": { + "$ref": "#/definitions/applicationSyncOptions" + } + } + }, + "applicationApplicationSyncWindow": { + "type": "object", + "properties": { + "duration": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "manualSync": { + "type": "boolean" + }, + "schedule": { + "type": "string" + } + } + }, + "applicationApplicationSyncWindowsResponse": { + "type": "object", + "properties": { + "activeWindows": { + "type": "array", + "items": { + "$ref": "#/definitions/applicationApplicationSyncWindow" + } + }, + "assignedWindows": { + "type": "array", + "items": { + "$ref": "#/definitions/applicationApplicationSyncWindow" + } + }, + "canSync": { + "type": "boolean" + } + } + }, + "applicationFileChunk": { + "type": "object", + "properties": { + "chunk": { + "type": "string", + "format": "byte" + } + } + }, + "applicationLinkInfo": { + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "iconClass": { + "type": "string" + }, + "title": { + "type": "string" + }, + "url": { + "type": "string" + } + } + }, + "applicationLinksResponse": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/applicationLinkInfo" + } + } + } + }, + "applicationLogEntry": { + "type": "object", + "properties": { + "content": { + "type": "string" + }, + "last": { + "type": "boolean" + }, + "podName": { + "type": "string" + }, + "timeStamp": { + "$ref": "#/definitions/v1Time" + }, + "timeStampStr": { + "type": "string" + } + } + }, + "applicationManagedResourcesResponse": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": 
"#/definitions/v1alpha1ResourceDiff" + } + } + } + }, + "applicationOperationTerminateResponse": { + "type": "object" + }, + "applicationResourceActionsListResponse": { + "type": "object", + "properties": { + "actions": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1ResourceAction" + } } } }, - "accountCanIResponse": { + "applicationSyncOptions": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "applicationsetApplicationSetResponse": { + "type": "object", + "properties": { + "applicationset": { + "$ref": "#/definitions/v1alpha1ApplicationSet" + }, + "project": { + "type": "string" + } + } + }, + "applicationv1alpha1EnvEntry": { + "type": "object", + "title": "EnvEntry represents an entry in the application's environment", + "properties": { + "name": { + "type": "string", + "title": "Name is the name of the variable, usually expressed in uppercase" + }, + "value": { + "type": "string", + "title": "Value is the value of the variable" + } + } + }, + "clusterClusterID": { "type": "object", + "title": "ClusterID holds a cluster server URL or cluster name", "properties": { + "type": { + "type": "string", + "title": "type is the type of the specified cluster identifier ( \"server\" - default, \"name\" )" + }, "value": { + "type": "string", + "title": "value holds the cluster server URL or cluster name" + } + } + }, + "clusterClusterResponse": { + "type": "object" + }, + "clusterConnector": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "type": { "type": "string" } } }, - "accountCreateTokenRequest": { + "clusterDexConfig": { "type": "object", "properties": { - "expiresIn": { + "connectors": { + "type": "array", + "items": { + "$ref": "#/definitions/clusterConnector" + } + } + } + }, + "clusterGoogleAnalyticsConfig": { + "type": "object", + "properties": { + "anonymizeUsers": { + "type": "boolean" + }, + "trackingID": { + "type": "string" + } + } + 
}, + "clusterHelp": { + "type": "object", + "title": "Help settings", + "properties": { + "binaryUrls": { + "type": "object", + "title": "the URLs for downloading argocd binaries", + "additionalProperties": { + "type": "string" + } + }, + "chatText": { "type": "string", - "format": "int64", - "title": "expiresIn represents a duration in seconds" + "title": "the text for getting chat help, defaults to \"Chat now!\"" + }, + "chatUrl": { + "type": "string", + "title": "the URL for getting chat help, this will typically be your Slack channel for support" + } + } + }, + "clusterOIDCConfig": { + "type": "object", + "properties": { + "cliClientID": { + "type": "string" + }, + "clientID": { + "type": "string" + }, + "idTokenClaims": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/oidcClaim" + } + }, + "issuer": { + "type": "string" + }, + "name": { + "type": "string" + }, + "scopes": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "clusterPlugin": { + "type": "object", + "title": "Plugin settings", + "properties": { + "name": { + "type": "string", + "title": "the name of the plugin, e.g. 
\"kasane\"" + } + } + }, + "clusterSettings": { + "type": "object", + "properties": { + "appLabelKey": { + "type": "string" + }, + "appsInAnyNamespaceEnabled": { + "type": "boolean" + }, + "configManagementPlugins": { + "description": "Deprecated: use sidecar plugins instead.", + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1ConfigManagementPlugin" + } + }, + "controllerNamespace": { + "type": "string" + }, + "dexConfig": { + "$ref": "#/definitions/clusterDexConfig" + }, + "execEnabled": { + "type": "boolean" + }, + "googleAnalytics": { + "$ref": "#/definitions/clusterGoogleAnalyticsConfig" + }, + "help": { + "$ref": "#/definitions/clusterHelp" + }, + "kustomizeOptions": { + "$ref": "#/definitions/v1alpha1KustomizeOptions" + }, + "kustomizeVersions": { + "type": "array", + "items": { + "type": "string" + } + }, + "oidcConfig": { + "$ref": "#/definitions/clusterOIDCConfig" + }, + "passwordPattern": { + "type": "string" + }, + "plugins": { + "type": "array", + "items": { + "$ref": "#/definitions/clusterPlugin" + } + }, + "resourceOverrides": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/v1alpha1ResourceOverride" + } + }, + "statusBadgeEnabled": { + "type": "boolean" + }, + "statusBadgeRootUrl": { + "type": "string" + }, + "trackingMethod": { + "type": "string" + }, + "uiBannerContent": { + "type": "string" + }, + "uiBannerPermanent": { + "type": "boolean" + }, + "uiBannerPosition": { + "type": "string" + }, + "uiBannerURL": { + "type": "string" + }, + "uiCssURL": { + "type": "string" + }, + "url": { + "type": "string" + }, + "userLoginsDisabled": { + "type": "boolean" + } + } + }, + "clusterSettingsPluginsResponse": { + "type": "object", + "properties": { + "plugins": { + "type": "array", + "items": { + "$ref": "#/definitions/clusterPlugin" + } + } + } + }, + "gpgkeyGnuPGPublicKeyCreateResponse": { + "type": "object", + "title": "Response to a public key creation request", + "properties": { + "created": { + "$ref": 
"#/definitions/v1alpha1GnuPGPublicKeyList" + }, + "skipped": { + "type": "array", + "title": "List of key IDs that haven been skipped because they already exist on the server", + "items": { + "type": "string" + } + } + } + }, + "gpgkeyGnuPGPublicKeyResponse": { + "type": "object", + "title": "Generic (empty) response for GPG public key CRUD requests" + }, + "intstrIntOrString": { + "description": "+protobuf=true\n+protobuf.options.(gogoproto.goproto_stringer)=false\n+k8s:openapi-gen=true", + "type": "object", + "title": "IntOrString is a type that can hold an int32 or a string. When used in\nJSON or YAML marshalling and unmarshalling, it produces or consumes the\ninner type. This allows you to have, for example, a JSON field that can\naccept a name or number.\nTODO: Rename to Int32OrString", + "properties": { + "intVal": { + "type": "integer", + "format": "int32" }, - "id": { + "strVal": { "type": "string" }, - "name": { - "type": "string" + "type": { + "type": "integer", + "format": "int64" } } }, - "accountCreateTokenResponse": { + "notificationService": { "type": "object", "properties": { - "token": { + "name": { "type": "string" } } }, - "accountEmptyResponse": { - "type": "object" - }, - "accountToken": { + "notificationServiceList": { "type": "object", "properties": { - "expiresAt": { - "type": "string", - "format": "int64" - }, - "id": { - "type": "string" - }, - "issuedAt": { - "type": "string", - "format": "int64" + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/notificationService" + } } } }, - "accountUpdatePasswordRequest": { + "notificationTemplate": { "type": "object", "properties": { - "currentPassword": { - "type": "string" - }, "name": { "type": "string" - }, - "newPassword": { - "type": "string" } } }, - "accountUpdatePasswordResponse": { - "type": "object" + "notificationTemplateList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/notificationTemplate" + } + } + 
} }, - "applicationApplicationPatchRequest": { + "notificationTrigger": { "type": "object", - "title": "ApplicationPatchRequest is a request to patch an application", "properties": { "name": { "type": "string" - }, - "patch": { - "type": "string" - }, - "patchType": { - "type": "string" } } }, - "applicationApplicationResourceResponse": { + "notificationTriggerList": { "type": "object", "properties": { - "manifest": { - "type": "string" + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/notificationTrigger" + } } } }, - "applicationApplicationResponse": { - "type": "object" - }, - "applicationApplicationRollbackRequest": { + "oidcClaim": { "type": "object", "properties": { - "dryRun": { + "essential": { "type": "boolean" }, - "id": { - "type": "string", - "format": "int64" - }, - "name": { + "value": { "type": "string" }, - "prune": { - "type": "boolean" + "values": { + "type": "array", + "items": { + "type": "string" + } } } }, - "applicationApplicationSyncRequest": { + "projectDetailedProjectsResponse": { "type": "object", - "title": "ApplicationSyncRequest is a request to apply the config state to live state", "properties": { - "dryRun": { - "type": "boolean" - }, - "infos": { + "clusters": { "type": "array", "items": { - "$ref": "#/definitions/v1alpha1Info" + "$ref": "#/definitions/v1alpha1Cluster" } }, - "manifests": { + "globalProjects": { "type": "array", "items": { - "type": "string" + "$ref": "#/definitions/v1alpha1AppProject" } }, - "name": { - "type": "string" - }, - "prune": { - "type": "boolean" + "project": { + "$ref": "#/definitions/v1alpha1AppProject" }, - "resources": { + "repositories": { "type": "array", "items": { - "$ref": "#/definitions/v1alpha1SyncOperationResource" + "$ref": "#/definitions/v1alpha1Repository" } - }, - "retryStrategy": { - "$ref": "#/definitions/v1alpha1RetryStrategy" - }, - "revision": { - "type": "string" - }, - "strategy": { - "$ref": "#/definitions/v1alpha1SyncStrategy" - }, - "syncOptions": { - "$ref": 
"#/definitions/applicationSyncOptions" } } }, - "applicationApplicationSyncWindow": { + "projectEmptyResponse": { + "type": "object" + }, + "projectGlobalProjectsResponse": { "type": "object", "properties": { - "duration": { - "type": "string" - }, - "kind": { - "type": "string" - }, - "manualSync": { - "type": "boolean" - }, - "schedule": { - "type": "string" + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1AppProject" + } } } }, - "applicationApplicationSyncWindowsResponse": { + "projectProjectCreateRequest": { + "description": "ProjectCreateRequest defines project creation parameters.", "type": "object", "properties": { - "activeWindows": { - "type": "array", - "items": { - "$ref": "#/definitions/applicationApplicationSyncWindow" - } - }, - "assignedWindows": { - "type": "array", - "items": { - "$ref": "#/definitions/applicationApplicationSyncWindow" - } + "project": { + "$ref": "#/definitions/v1alpha1AppProject" }, - "canSync": { + "upsert": { "type": "boolean" } } }, - "applicationLogEntry": { + "projectProjectTokenCreateRequest": { + "description": "ProjectTokenCreateRequest defines project token creation parameters.", "type": "object", "properties": { - "content": { + "description": { "type": "string" }, - "last": { - "type": "boolean" + "expiresIn": { + "type": "integer", + "format": "int64", + "title": "expiresIn represents a duration in seconds" }, - "podName": { + "id": { "type": "string" }, - "timeStamp": { - "$ref": "#/definitions/v1Time" + "project": { + "type": "string" }, - "timeStampStr": { + "role": { "type": "string" } } }, - "applicationManagedResourcesResponse": { + "projectProjectTokenResponse": { + "description": "ProjectTokenResponse wraps the created token or returns an empty string if deleted.", "type": "object", "properties": { - "items": { - "type": "array", - "items": { - "$ref": "#/definitions/v1alpha1ResourceDiff" - } + "token": { + "type": "string" } } }, - "applicationOperationTerminateResponse": { - 
"type": "object" - }, - "applicationResourceActionsListResponse": { + "projectProjectUpdateRequest": { "type": "object", "properties": { - "actions": { - "type": "array", - "items": { - "$ref": "#/definitions/v1alpha1ResourceAction" - } + "project": { + "$ref": "#/definitions/v1alpha1AppProject" } } }, - "applicationSyncOptions": { + "projectSyncWindowsResponse": { "type": "object", "properties": { - "items": { + "windows": { "type": "array", "items": { - "type": "string" + "$ref": "#/definitions/v1alpha1SyncWindow" } } } }, - "applicationv1alpha1EnvEntry": { + "protobufAny": { "type": "object", - "title": "EnvEntry represents an entry in the application's environment", "properties": { - "name": { - "type": "string", - "title": "Name is the name of the variable, usually expressed in uppercase" + "type_url": { + "type": "string" }, "value": { "type": "string", - "title": "Value is the value of the variable" + "format": "byte" } } }, - "clusterClusterResponse": { - "type": "object" + "repocredsRepoCredsResponse": { + "type": "object", + "title": "RepoCredsResponse is a response to most repository credentials requests" }, - "clusterConnector": { + "repositoryAppInfo": { "type": "object", + "title": "AppInfo contains application type and app file path", "properties": { - "name": { + "path": { "type": "string" }, "type": { @@ -3491,176 +4836,179 @@ } } }, - "clusterDexConfig": { + "repositoryDirectoryAppSpec": { + "type": "object", + "title": "DirectoryAppSpec contains directory" + }, + "repositoryHelmAppSpec": { "type": "object", + "title": "HelmAppSpec contains helm app name in source repo", "properties": { - "connectors": { + "fileParameters": { "type": "array", + "title": "helm file parameters", "items": { - "$ref": "#/definitions/clusterConnector" + "$ref": "#/definitions/v1alpha1HelmFileParameter" + } + }, + "name": { + "type": "string" + }, + "parameters": { + "type": "array", + "title": "the output of `helm inspect values`", + "items": { + "$ref": 
"#/definitions/v1alpha1HelmParameter" + } + }, + "valueFiles": { + "type": "array", + "items": { + "type": "string" } - } - } - }, - "clusterGoogleAnalyticsConfig": { - "type": "object", - "properties": { - "anonymizeUsers": { - "type": "boolean" }, - "trackingID": { - "type": "string" + "values": { + "type": "string", + "title": "the contents of values.yaml" } } }, - "clusterHelp": { + "repositoryHelmChart": { "type": "object", - "title": "Help settings", "properties": { - "chatText": { - "type": "string", - "title": "the text for getting chat help, defaults to \"Chat now!\"" + "name": { + "type": "string" }, - "chatUrl": { - "type": "string", - "title": "the URL for getting chat help, this will typically be your Slack channel for support" + "versions": { + "type": "array", + "items": { + "type": "string" + } } } }, - "clusterOIDCConfig": { + "repositoryHelmChartsResponse": { "type": "object", "properties": { - "cliClientID": { - "type": "string" - }, - "clientID": { - "type": "string" - }, - "idTokenClaims": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/oidcClaim" - } - }, - "issuer": { - "type": "string" - }, - "name": { - "type": "string" - }, - "scopes": { + "items": { "type": "array", "items": { - "type": "string" + "$ref": "#/definitions/repositoryHelmChart" } } } }, - "clusterPlugin": { + "repositoryKustomizeAppSpec": { "type": "object", - "title": "Plugin settings", + "title": "KustomizeAppSpec contains kustomize images", "properties": { - "name": { - "type": "string", - "title": "the name of the plugin, e.g. 
\"kasane\"" + "images": { + "description": "images is a list of available images.", + "type": "array", + "items": { + "type": "string" + } } } }, - "clusterSettings": { + "repositoryManifestResponse": { "type": "object", "properties": { - "appLabelKey": { - "type": "string" - }, - "configManagementPlugins": { + "manifests": { "type": "array", "items": { - "$ref": "#/definitions/v1alpha1ConfigManagementPlugin" + "type": "string" } }, - "dexConfig": { - "$ref": "#/definitions/clusterDexConfig" + "namespace": { + "type": "string" }, - "googleAnalytics": { - "$ref": "#/definitions/clusterGoogleAnalyticsConfig" + "revision": { + "type": "string", + "title": "resolved revision" }, - "help": { - "$ref": "#/definitions/clusterHelp" + "server": { + "type": "string" }, - "kustomizeOptions": { - "$ref": "#/definitions/v1alpha1KustomizeOptions" + "sourceType": { + "type": "string" }, - "kustomizeVersions": { + "verifyResult": { + "type": "string", + "title": "Raw response of git verify-commit operation (always the empty string for Helm)" + } + } + }, + "repositoryParameterAnnouncement": { + "type": "object", + "properties": { + "array": { + "description": "array is the default value of the parameter if the parameter is an array.", "type": "array", "items": { "type": "string" } }, - "oidcConfig": { - "$ref": "#/definitions/clusterOIDCConfig" + "collectionType": { + "description": "collectionType is the type of value this parameter holds - either a single value (a string) or a collection\n(array or map). If collectionType is set, only the field with that type will be used. If collectionType is not\nset, `string` is the default. If collectionType is set to an invalid value, a validation error is thrown.", + "type": "string" }, - "plugins": { - "type": "array", - "items": { - "$ref": "#/definitions/clusterPlugin" - } + "itemType": { + "description": "itemType determines the primitive data type represented by the parameter. 
Parameters are always encoded as\nstrings, but this field lets them be interpreted as other primitive types.", + "type": "string" }, - "resourceOverrides": { + "map": { + "description": "map is the default value of the parameter if the parameter is a map.", "type": "object", "additionalProperties": { - "$ref": "#/definitions/v1alpha1ResourceOverride" + "type": "string" } }, - "statusBadgeEnabled": { - "type": "boolean" - }, - "uiBannerContent": { + "name": { + "description": "name is the name identifying a parameter.", "type": "string" }, - "uiBannerURL": { - "type": "string" + "required": { + "description": "required defines if this given parameter is mandatory.", + "type": "boolean" }, - "uiCssURL": { + "string": { + "description": "string is the default value of the parameter if the parameter is a string.", "type": "string" }, - "url": { + "title": { + "description": "title is a human-readable text of the parameter name.", "type": "string" }, - "userLoginsDisabled": { - "type": "boolean" + "tooltip": { + "description": "tooltip is a human-readable description of the parameter.", + "type": "string" } } }, - "gpgkeyGnuPGPublicKeyCreateResponse": { + "repositoryPluginAppSpec": { "type": "object", - "title": "Response to a public key creation request", + "title": "PluginAppSpec contains details about a plugin-type Application", "properties": { - "created": { - "$ref": "#/definitions/v1alpha1GnuPGPublicKeyList" - }, - "skipped": { + "parametersAnnouncement": { "type": "array", - "title": "List of key IDs that haven been skipped because they already exist on the server", "items": { - "type": "string" + "$ref": "#/definitions/repositoryParameterAnnouncement" } } } }, - "gpgkeyGnuPGPublicKeyResponse": { - "type": "object", - "title": "Generic (empty) response for GPG public key CRUD requests" - }, - "oidcClaim": { + "repositoryRefs": { "type": "object", + "title": "A subset of the repository's named refs", "properties": { - "essential": { - "type": "boolean" - }, - 
"value": { - "type": "string" + "branches": { + "type": "array", + "items": { + "type": "string" + } }, - "values": { + "tags": { "type": "array", "items": { "type": "string" @@ -3668,1063 +5016,1221 @@ } } }, - "projectEmptyResponse": { - "type": "object" - }, - "projectGlobalProjectsResponse": { + "repositoryRepoAppDetailsQuery": { "type": "object", + "title": "RepoAppDetailsQuery contains query information for app details request", "properties": { - "items": { - "type": "array", - "items": { - "$ref": "#/definitions/v1alpha1AppProject" - } + "appName": { + "type": "string" + }, + "appProject": { + "type": "string" + }, + "source": { + "$ref": "#/definitions/v1alpha1ApplicationSource" } } }, - "projectProjectCreateRequest": { - "description": "ProjectCreateRequest defines project creation parameters.", + "repositoryRepoAppDetailsResponse": { "type": "object", + "title": "RepoAppDetailsResponse application details", "properties": { - "project": { - "$ref": "#/definitions/v1alpha1AppProject" + "directory": { + "$ref": "#/definitions/repositoryDirectoryAppSpec" }, - "upsert": { - "type": "boolean" + "helm": { + "$ref": "#/definitions/repositoryHelmAppSpec" + }, + "kustomize": { + "$ref": "#/definitions/repositoryKustomizeAppSpec" + }, + "plugin": { + "$ref": "#/definitions/repositoryPluginAppSpec" + }, + "type": { + "type": "string" } } }, - "projectProjectTokenCreateRequest": { - "description": "ProjectTokenCreateRequest defines project token creation parameters.", + "repositoryRepoAppsResponse": { "type": "object", + "title": "RepoAppsResponse contains applications of specified repository", "properties": { - "description": { - "type": "string" - }, - "expiresIn": { - "type": "string", - "format": "int64", - "title": "expiresIn represents a duration in seconds" + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/repositoryAppInfo" + } + } + } + }, + "repositoryRepoResponse": { + "type": "object" + }, + "runtimeError": { + "type": "object", + 
"properties": { + "code": { + "type": "integer", + "format": "int32" }, - "id": { - "type": "string" + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } }, - "project": { + "error": { "type": "string" }, - "role": { + "message": { "type": "string" } } }, - "projectProjectTokenResponse": { - "description": "ProjectTokenResponse wraps the created token or returns an empty string if deleted.", + "runtimeRawExtension": { + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned\nstruct, and Object in your internal struct. You also need to register your\nvarious plugin types.\n\n// Internal package:\ntype MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n}\ntype PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package:\ntype MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n}\ntype PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this:\n{\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into\nyour external MyAPIObject. That causes the raw JSON to be stored, but not unpacked.\nThe next step is to copy (using pkg/conversion) into the internal struct. The runtime\npackage's DefaultScheme has conversion functions installed which will unpack the\nJSON stored in RawExtension, turning it into the correct object type, and storing it\nin the Object. 
(TODO: In the case where the object is of an unknown type, a\nruntime.Unknown object will be created and stored.)\n\n+k8s:deepcopy-gen=true\n+protobuf=true\n+k8s:openapi-gen=true", "type": "object", "properties": { - "token": { - "type": "string" + "raw": { + "description": "Raw is the underlying serialization of this object.\n\nTODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data.", + "type": "string", + "format": "byte" } } }, - "projectProjectUpdateRequest": { + "runtimeStreamError": { "type": "object", "properties": { - "project": { - "$ref": "#/definitions/v1alpha1AppProject" + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + }, + "grpc_code": { + "type": "integer", + "format": "int32" + }, + "http_code": { + "type": "integer", + "format": "int32" + }, + "http_status": { + "type": "string" + }, + "message": { + "type": "string" } } }, - "projectSyncWindowsResponse": { + "sessionGetUserInfoResponse": { "type": "object", + "title": "The current user's userInfo info", "properties": { - "windows": { + "groups": { "type": "array", "items": { - "$ref": "#/definitions/v1alpha1SyncWindow" + "type": "string" } + }, + "iss": { + "type": "string" + }, + "loggedIn": { + "type": "boolean" + }, + "username": { + "type": "string" } } }, - "protobufAny": { + "sessionSessionCreateRequest": { + "description": "SessionCreateRequest is for logging in.", "type": "object", "properties": { - "type_url": { + "password": { "type": "string" }, - "value": { - "type": "string", - "format": "byte" + "token": { + "type": "string" + }, + "username": { + "type": "string" } } }, - "repocredsRepoCredsResponse": { - "type": "object", - "title": "RepoCredsResponse is a response to most repository credentials requests" - }, - "repositoryAppInfo": { + "sessionSessionResponse": { + "description": "SessionResponse wraps the created token or returns an empty string if deleted.", "type": "object", - "title": "AppInfo contains application 
type and app file path", "properties": { - "path": { - "type": "string" - }, - "type": { + "token": { "type": "string" } } }, - "repositoryDirectoryAppSpec": { - "type": "object", - "title": "DirectoryAppSpec contains directory" - }, - "repositoryHelmAppSpec": { + "v1Event": { + "description": "Event is a report of an event somewhere in the cluster. Events\nhave a limited retention time and triggers and messages may evolve\nwith time. Event consumers should not rely on the timing of an event\nwith a given Reason reflecting a consistent underlying trigger, or the\ncontinued existence of events with that Reason. Events should be\ntreated as informative, best-effort, supplemental data.", "type": "object", - "title": "HelmAppSpec contains helm app name in source repo", "properties": { - "fileParameters": { - "type": "array", - "title": "helm file parameters", - "items": { - "$ref": "#/definitions/v1alpha1HelmFileParameter" - } + "action": { + "type": "string", + "title": "What action was taken/failed regarding to the Regarding object.\n+optional" }, - "name": { - "type": "string" + "count": { + "type": "integer", + "format": "int32", + "title": "The number of times this event has occurred.\n+optional" }, - "parameters": { - "type": "array", - "title": "the output of `helm inspect values`", - "items": { - "$ref": "#/definitions/v1alpha1HelmParameter" - } + "eventTime": { + "$ref": "#/definitions/v1MicroTime" }, - "valueFiles": { - "type": "array", - "items": { - "type": "string" - } + "firstTimestamp": { + "$ref": "#/definitions/v1Time" }, - "values": { + "involvedObject": { + "$ref": "#/definitions/v1ObjectReference" + }, + "lastTimestamp": { + "$ref": "#/definitions/v1Time" + }, + "message": { "type": "string", - "title": "the contents of values.yaml" - } - } - }, - "repositoryHelmChart": { - "type": "object", - "properties": { - "name": { - "type": "string" + "title": "A human-readable description of the status of this operation.\nTODO: decide on maximum 
length.\n+optional" }, - "versions": { - "type": "array", - "items": { - "type": "string" - } + "metadata": { + "$ref": "#/definitions/v1ObjectMeta" + }, + "reason": { + "type": "string", + "title": "This should be a short, machine understandable string that gives the reason\nfor the transition into the object's current status.\nTODO: provide exact specification for format.\n+optional" + }, + "related": { + "$ref": "#/definitions/v1ObjectReference" + }, + "reportingComponent": { + "type": "string", + "title": "Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.\n+optional" + }, + "reportingInstance": { + "type": "string", + "title": "ID of the controller instance, e.g. `kubelet-xyzf`.\n+optional" + }, + "series": { + "$ref": "#/definitions/v1EventSeries" + }, + "source": { + "$ref": "#/definitions/v1EventSource" + }, + "type": { + "type": "string", + "title": "Type of this event (Normal, Warning), new types could be added in the future\n+optional" } } }, - "repositoryHelmChartsResponse": { + "v1EventList": { + "description": "EventList is a list of events.", "type": "object", "properties": { "items": { "type": "array", + "title": "List of events", "items": { - "$ref": "#/definitions/repositoryHelmChart" + "$ref": "#/definitions/v1Event" } + }, + "metadata": { + "$ref": "#/definitions/v1ListMeta" } } }, - "repositoryKsonnetAppSpec": { + "v1EventSeries": { + "description": "EventSeries contain information on series of events, i.e. 
thing that was/is happening\ncontinuously for some time.", "type": "object", - "title": "KsonnetAppSpec contains Ksonnet app response\nThis roughly reflects: ksonnet/ksonnet/metadata/app/schema.go", "properties": { - "environments": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/repositoryKsonnetEnvironment" - } - }, - "name": { - "type": "string" + "count": { + "type": "integer", + "format": "int32", + "title": "Number of occurrences in this series up to the last heartbeat time" }, - "parameters": { - "type": "array", - "items": { - "$ref": "#/definitions/v1alpha1KsonnetParameter" - } + "lastObservedTime": { + "$ref": "#/definitions/v1MicroTime" } } }, - "repositoryKsonnetEnvironment": { + "v1EventSource": { + "description": "EventSource contains information for an event.", "type": "object", "properties": { - "destination": { - "$ref": "#/definitions/repositoryKsonnetEnvironmentDestination" - }, - "k8sVersion": { - "description": "KubernetesVersion is the kubernetes version the targeted cluster is running on.", - "type": "string" + "component": { + "type": "string", + "title": "Component from which the event is generated.\n+optional" }, - "name": { + "host": { "type": "string", - "title": "Name is the user defined name of an environment" + "title": "Node name on which the event is generated.\n+optional" } } }, - "repositoryKsonnetEnvironmentDestination": { + "v1FieldsV1": { + "description": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set,\nor a string representing a sub-field or item. 
The string will follow one of these four formats:\n'f:', where is the name of a field in a struct, or key in a map\n'v:', where is the exact json formatted value of a list item\n'i:', where is position of a item in a list\n'k:', where is a map of a list item's key fields to their unique values\nIf a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff\n+protobuf.options.(gogoproto.goproto_stringer)=false", "type": "object", "properties": { - "namespace": { + "Raw": { + "description": "Raw is the underlying serialization of this object.", "type": "string", - "title": "Namespace is the namespace of the Kubernetes server that targets should be deployed to" - }, - "server": { - "description": "Server is the Kubernetes server that the cluster is running on.", - "type": "string" + "format": "byte" } } }, - "repositoryKustomizeAppSpec": { + "v1GroupKind": { + "description": "+protobuf.options.(gogoproto.goproto_stringer)=false", "type": "object", - "title": "KustomizeAppSpec contains kustomize images", + "title": "GroupKind specifies a Group and a Kind, but does not force a version. 
This is useful for identifying\nconcepts during lookup stages without having partially valid types", "properties": { - "images": { - "description": "images is a list of available images.", - "type": "array", - "items": { - "type": "string" - } + "group": { + "type": "string" + }, + "kind": { + "type": "string" } } }, - "repositoryManifestResponse": { + "v1JSON": { + "description": "JSON represents any valid JSON value.\nThese types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil.", "type": "object", "properties": { - "manifests": { - "type": "array", - "items": { - "type": "string" - } - }, - "namespace": { - "type": "string" - }, - "revision": { - "type": "string", - "title": "resolved revision" - }, - "server": { - "type": "string" - }, - "sourceType": { - "type": "string" - }, - "verifyResult": { + "raw": { "type": "string", - "title": "Raw response of git verify-commit operation (always the empty string for Helm)" + "format": "byte" } } }, - "repositoryRefs": { + "v1LabelSelector": { "type": "object", - "title": "A subset of the repository's named refs", + "title": "A label selector is a label query over a set of resources. The result of matchLabels and\nmatchExpressions are ANDed. An empty label selector matches all objects. A null\nlabel selector matches no objects.\n+structType=atomic", "properties": { - "branches": { + "matchExpressions": { "type": "array", + "title": "matchExpressions is a list of label selector requirements. The requirements are ANDed.\n+optional", "items": { - "type": "string" + "$ref": "#/definitions/v1LabelSelectorRequirement" } }, - "tags": { - "type": "array", - "items": { + "matchLabels": { + "type": "object", + "title": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". 
The requirements are ANDed.\n+optional", + "additionalProperties": { "type": "string" } } } }, - "repositoryRepoAppDetailsQuery": { + "v1LabelSelectorRequirement": { + "description": "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values.", "type": "object", - "title": "RepoAppDetailsQuery contains query information for app details request", "properties": { - "appName": { + "key": { + "type": "string", + "title": "key is the label key that the selector applies to.\n+patchMergeKey=key\n+patchStrategy=merge" + }, + "operator": { + "description": "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist.", "type": "string" }, - "source": { - "$ref": "#/definitions/v1alpha1ApplicationSource" + "values": { + "type": "array", + "title": "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch.\n+optional", + "items": { + "type": "string" + } } } }, - "repositoryRepoAppDetailsResponse": { + "v1ListMeta": { + "description": "ListMeta describes metadata that synthetic resources must have, including lists and\nvarious status objects. A resource may have only one of {ObjectMeta, ListMeta}.", "type": "object", - "title": "RepoAppDetailsResponse application details", "properties": { - "directory": { - "$ref": "#/definitions/repositoryDirectoryAppSpec" - }, - "helm": { - "$ref": "#/definitions/repositoryHelmAppSpec" - }, - "ksonnet": { - "$ref": "#/definitions/repositoryKsonnetAppSpec" + "continue": { + "description": "continue may be set if the user set a limit on the number of items returned, and indicates that\nthe server has more data available. 
The value is opaque and may be used to issue another request\nto the endpoint that served this list to retrieve the next set of available objects. Continuing a\nconsistent list may not be possible if the server configuration has changed or more than a few\nminutes have passed. The resourceVersion field returned when using this continue value will be\nidentical to the value in the first response, unless you have received this token from an error\nmessage.", + "type": "string" }, - "kustomize": { - "$ref": "#/definitions/repositoryKustomizeAppSpec" + "remainingItemCount": { + "type": "integer", + "format": "int64", + "title": "remainingItemCount is the number of subsequent items in the list which are not included in this\nlist response. If the list request contained label or field selectors, then the number of\nremaining items is unknown and the field will be left unset and omitted during serialization.\nIf the list is complete (either because it is not chunking or because this is the last chunk),\nthen there are no more remaining items and this field will be left unset and omitted during\nserialization.\nServers older than v1.15 do not set this field.\nThe intended use of the remainingItemCount is *estimating* the size of a collection. 
Clients\nshould not rely on the remainingItemCount to be set or to be exact.\n+optional" }, - "type": { - "type": "string" + "resourceVersion": { + "type": "string", + "title": "String that identifies the server's internal version of this object that\ncan be used by clients to determine when objects have changed.\nValue must be treated as opaque by clients and passed unmodified back to the server.\nPopulated by the system.\nRead-only.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency\n+optional" + }, + "selfLink": { + "type": "string", + "title": "Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.\n+optional" } } }, - "repositoryRepoAppsResponse": { + "v1LoadBalancerIngress": { + "description": "LoadBalancerIngress represents the status of a load-balancer ingress point:\ntraffic intended for the service should be sent to an ingress point.", "type": "object", - "title": "RepoAppsResponse contains applications of specified repository", "properties": { - "items": { + "hostname": { + "type": "string", + "title": "Hostname is set for load-balancer ingress points that are DNS based\n(typically AWS load-balancers)\n+optional" + }, + "ip": { + "type": "string", + "title": "IP is set for load-balancer ingress points that are IP based\n(typically GCE or OpenStack load-balancers)\n+optional" + }, + "ports": { "type": "array", + "title": "Ports is a list of records of service ports\nIf used, every port defined in the service should have an entry in it\n+listType=atomic\n+optional", "items": { - "$ref": "#/definitions/repositoryAppInfo" + "$ref": "#/definitions/v1PortStatus" } } } }, - "repositoryRepoResponse": { - "type": "object" - }, - "runtimeError": { + "v1ManagedFieldsEntry": { + "description": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource\nthat the fieldset applies to.", "type": "object", "properties": { - 
"code": { - "type": "integer", - "format": "int32" + "apiVersion": { + "description": "APIVersion defines the version of this resource that this field set\napplies to. The format is \"group/version\" just like the top-level\nAPIVersion field. It is necessary to track the version of a field\nset because it cannot be automatically converted.", + "type": "string" }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } + "fieldsType": { + "type": "string", + "title": "FieldsType is the discriminator for the different fields format and version.\nThere is currently only one possible value: \"FieldsV1\"" }, - "error": { + "fieldsV1": { + "$ref": "#/definitions/v1FieldsV1" + }, + "manager": { + "description": "Manager is an identifier of the workflow managing these fields.", "type": "string" }, - "message": { + "operation": { + "description": "Operation is the type of operation which lead to this ManagedFieldsEntry being created.\nThe only valid values for this field are 'Apply' and 'Update'.", + "type": "string" + }, + "subresource": { + "description": "Subresource is the name of the subresource used to update that object, or\nempty string if the object was updated through the main resource. The\nvalue of this field is used to distinguish between managers, even if they\nshare the same name. 
For example, a status update will be distinct from a\nregular update using the same manager name.\nNote that the APIVersion field is not related to the Subresource field and\nit always corresponds to the version of the main resource.", "type": "string" + }, + "time": { + "$ref": "#/definitions/v1Time" } } }, - "runtimeStreamError": { + "v1MicroTime": { + "description": "MicroTime is version of Time with microsecond level precision.\n\n+protobuf.options.marshal=false\n+protobuf.as=Timestamp\n+protobuf.options.(gogoproto.goproto_stringer)=false", "type": "object", "properties": { - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - }, - "grpc_code": { + "nanos": { + "description": "Non-negative fractions of a second at nanosecond resolution. Negative\nsecond values with fractions must still have non-negative nanos values\nthat count forward in time. Must be from 0 to 999,999,999\ninclusive. This field may be limited in precision depending on context.", "type": "integer", "format": "int32" }, - "http_code": { + "seconds": { + "description": "Represents seconds of UTC time since Unix epoch\n1970-01-01T00:00:00Z. 
Must be from 0001-01-01T00:00:00Z to\n9999-12-31T23:59:59Z inclusive.", "type": "integer", - "format": "int32" - }, - "http_status": { - "type": "string" - }, - "message": { - "type": "string" + "format": "int64" } } }, - "sessionGetUserInfoResponse": { + "v1NodeSystemInfo": { + "description": "NodeSystemInfo is a set of ids/uuids to uniquely identify the node.", "type": "object", - "title": "The current user's userInfo info", "properties": { - "groups": { - "type": "array", - "items": { - "type": "string" - } + "architecture": { + "type": "string", + "title": "The Architecture reported by the node" }, - "iss": { + "bootID": { + "description": "Boot ID reported by the node.", "type": "string" }, - "loggedIn": { - "type": "boolean" + "containerRuntimeVersion": { + "description": "ContainerRuntime Version reported by the node through runtime remote API (e.g. containerd://1.4.2).", + "type": "string" }, - "username": { + "kernelVersion": { + "description": "Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).", "type": "string" - } - } - }, - "sessionSessionCreateRequest": { - "description": "SessionCreateRequest is for logging in.", - "type": "object", - "properties": { - "password": { + }, + "kubeProxyVersion": { + "description": "KubeProxy Version reported by the node.", "type": "string" }, - "token": { + "kubeletVersion": { + "description": "Kubelet Version reported by the node.", "type": "string" }, - "username": { + "machineID": { + "type": "string", + "title": "MachineID reported by the node. For unique machine identification\nin the cluster this field is preferred. Learn more from man(5)\nmachine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html" + }, + "operatingSystem": { + "type": "string", + "title": "The Operating System reported by the node" + }, + "osImage": { + "description": "OS Image reported by the node from /etc/os-release (e.g. 
Debian GNU/Linux 7 (wheezy)).", "type": "string" + }, + "systemUUID": { + "type": "string", + "title": "SystemUUID reported by the node. For unique machine identification\nMachineID is preferred. This field is specific to Red Hat hosts\nhttps://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid" } } }, - "sessionSessionResponse": { - "description": "SessionResponse wraps the created token or returns an empty string if deleted.", + "v1ObjectMeta": { + "description": "ObjectMeta is metadata that all persisted resources must have, which includes all objects\nusers must create.", "type": "object", "properties": { - "token": { + "annotations": { + "type": "object", + "title": "Annotations is an unstructured key value map stored with a resource that may be\nset by external tools to store and retrieve arbitrary metadata. They are not\nqueryable and should be preserved when modifying objects.\nMore info: http://kubernetes.io/docs/user-guide/annotations\n+optional", + "additionalProperties": { + "type": "string" + } + }, + "clusterName": { + "description": "Deprecated: ClusterName is a legacy field that was always cleared by\nthe system and never used; it will be removed completely in 1.25.\n\nThe name in the go struct is changed to help clients detect\naccidental use.\n\n+optional", "type": "string" - } - } - }, - "v1Event": { - "description": "Event is a report of an event somewhere in the cluster. Events\nhave a limited retention time and triggers and messages may evolve\nwith time. Event consumers should not rely on the timing of an event\nwith a given Reason reflecting a consistent underlying trigger, or the\ncontinued existence of events with that Reason. 
Events should be\ntreated as informative, best-effort, supplemental data.", - "type": "object", - "properties": { - "action": { - "type": "string", - "title": "What action was taken/failed regarding to the Regarding object.\n+optional" }, - "count": { - "type": "integer", - "format": "int32", - "title": "The number of times this event has occurred.\n+optional" + "creationTimestamp": { + "$ref": "#/definitions/v1Time" }, - "eventTime": { - "$ref": "#/definitions/v1MicroTime" + "deletionGracePeriodSeconds": { + "type": "integer", + "format": "int64", + "title": "Number of seconds allowed for this object to gracefully terminate before\nit will be removed from the system. Only set when deletionTimestamp is also set.\nMay only be shortened.\nRead-only.\n+optional" }, - "firstTimestamp": { + "deletionTimestamp": { "$ref": "#/definitions/v1Time" }, - "involvedObject": { - "$ref": "#/definitions/v1ObjectReference" + "finalizers": { + "type": "array", + "title": "Must be empty before the object is deleted from the registry. Each entry\nis an identifier for the responsible component that will remove the entry\nfrom the list. If the deletionTimestamp of the object is non-nil, entries\nin this list can only be removed.\nFinalizers may be processed and removed in any order. 
Order is NOT enforced\nbecause it introduces significant risk of stuck finalizers.\nfinalizers is a shared field, any actor with permission can reorder it.\nIf the finalizer list is processed in order, then this can lead to a situation\nin which the component responsible for the first finalizer in the list is\nwaiting for a signal (field value, external system, or other) produced by a\ncomponent responsible for a finalizer later in the list, resulting in a deadlock.\nWithout enforced ordering finalizers are free to order amongst themselves and\nare not vulnerable to ordering changes in the list.\n+optional\n+patchStrategy=merge", + "items": { + "type": "string" + } }, - "lastTimestamp": { - "$ref": "#/definitions/v1Time" + "generateName": { + "description": "GenerateName is an optional prefix, used by the server, to generate a unique\nname ONLY IF the Name field has not been provided.\nIf this field is used, the name returned to the client will be different\nthan the name passed. This value will also be combined with a unique suffix.\nThe provided value has the same validation rules as the Name field,\nand may be truncated by the length of the suffix required to make the value\nunique on the server.\n\nIf this field is specified and the generated name exists, the server will return a 409.\n\nApplied only if Name is not specified.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency\n+optional", + "type": "string" }, - "message": { + "generation": { + "type": "integer", + "format": "int64", + "title": "A sequence number representing a specific generation of the desired state.\nPopulated by the system. Read-only.\n+optional" + }, + "labels": { + "type": "object", + "title": "Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. 
May match selectors of replication controllers\nand services.\nMore info: http://kubernetes.io/docs/user-guide/labels\n+optional", + "additionalProperties": { + "type": "string" + } + }, + "managedFields": { + "description": "ManagedFields maps workflow-id and version to the set of fields\nthat are managed by that workflow. This is mostly for internal\nhousekeeping, and users typically shouldn't need to set or\nunderstand this field. A workflow can be the user's name, a\ncontroller's name, or the name of a specific apply path like\n\"ci-cd\". The set of fields is always in the version that the\nworkflow used when modifying the object.\n\n+optional", + "type": "array", + "items": { + "$ref": "#/definitions/v1ManagedFieldsEntry" + } + }, + "name": { "type": "string", - "title": "A human-readable description of the status of this operation.\nTODO: decide on maximum length.\n+optional" + "title": "Name must be unique within a namespace. Is required when creating resources, although\nsome resources may allow a client to request the generation of an appropriate name\nautomatically. Name is primarily intended for creation idempotence and configuration\ndefinition.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#names\n+optional" }, - "metadata": { - "$ref": "#/definitions/v1ObjectMeta" + "namespace": { + "description": "Namespace defines the space within which each name must be unique. An empty namespace is\nequivalent to the \"default\" namespace, but \"default\" is the canonical representation.\nNot all objects are required to be scoped to a namespace - the value of this field for\nthose objects will be empty.\n\nMust be a DNS_LABEL.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/namespaces\n+optional", + "type": "string" }, - "reason": { + "ownerReferences": { + "type": "array", + "title": "List of objects depended by this object. If ALL objects in the list have\nbeen deleted, this object will be garbage collected. 
If this object is managed by a controller,\nthen an entry in this list will point to this controller, with the controller field set to true.\nThere cannot be more than one managing controller.\n+optional\n+patchMergeKey=uid\n+patchStrategy=merge", + "items": { + "$ref": "#/definitions/v1OwnerReference" + } + }, + "resourceVersion": { + "description": "An opaque value that represents the internal version of this object that can\nbe used by clients to determine when objects have changed. May be used for optimistic\nconcurrency, change detection, and the watch operation on a resource or set of resources.\nClients must treat these values as opaque and passed unmodified back to the server.\nThey may only be valid for a particular resource or set of resources.\n\nPopulated by the system.\nRead-only.\nValue must be treated as opaque by clients and .\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency\n+optional", + "type": "string" + }, + "selfLink": { "type": "string", - "title": "This should be a short, machine understandable string that gives the reason\nfor the transition into the object's current status.\nTODO: provide exact specification for format.\n+optional" + "title": "Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.\n+optional" }, - "related": { - "$ref": "#/definitions/v1ObjectReference" + "uid": { + "description": "UID is the unique in time and space value for this object. 
It is typically generated by\nthe server on successful creation of a resource and is not allowed to change on PUT\noperations.\n\nPopulated by the system.\nRead-only.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#uids\n+optional", + "type": "string" + } + } + }, + "v1ObjectReference": { + "type": "object", + "title": "ObjectReference contains enough information to let you inspect or modify the referred object.\n---\nNew uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.\n 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage.\n 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular\n restrictions like, \"must refer only to types A and B\" or \"UID not honored\" or \"name must be restricted\".\n Those cannot be well described when embedded.\n 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.\n 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity\n during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple\n and the version of the actual struct is irrelevant.\n 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type\n will affect numerous schemas. 
Don't make new APIs embed an underspecified API type they do not control.\nInstead of using this type, create a locally provided and used type that is well-focused on your reference.\nFor example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+structType=atomic", + "properties": { + "apiVersion": { + "type": "string", + "title": "API version of the referent.\n+optional" }, - "reportingComponent": { + "fieldPath": { "type": "string", - "title": "Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.\n+optional" + "title": "If referring to a piece of an object instead of an entire object, this string\nshould contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].\nFor example, if the object reference is to a container within a pod, this would take on a value like:\n\"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered\nthe event) or if no container name is specified \"spec.containers[2]\" (container with\nindex 2 in this pod). This syntax is chosen only to have some well-defined way of\nreferencing a part of an object.\nTODO: this design is not final and this field is subject to change in the future.\n+optional" }, - "reportingInstance": { + "kind": { "type": "string", - "title": "ID of the controller instance, e.g. 
`kubelet-xyzf`.\n+optional" + "title": "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n+optional" }, - "series": { - "$ref": "#/definitions/v1EventSeries" + "name": { + "type": "string", + "title": "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\n+optional" }, - "source": { - "$ref": "#/definitions/v1EventSource" + "namespace": { + "type": "string", + "title": "Namespace of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/\n+optional" }, - "type": { + "resourceVersion": { + "type": "string", + "title": "Specific resourceVersion to which this reference is made, if any.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency\n+optional" + }, + "uid": { "type": "string", - "title": "Type of this event (Normal, Warning), new types could be added in the future\n+optional" + "title": "UID of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids\n+optional" } } }, - "v1EventList": { - "description": "EventList is a list of events.", + "v1OwnerReference": { "type": "object", + "title": "OwnerReference contains enough information to let you identify an owning\nobject. 
An owning object must be in the same namespace as the dependent, or\nbe cluster-scoped, so there is no namespace field.\n+structType=atomic", "properties": { - "items": { - "type": "array", - "title": "List of events", - "items": { - "$ref": "#/definitions/v1Event" - } + "apiVersion": { + "description": "API version of the referent.", + "type": "string" }, - "metadata": { - "$ref": "#/definitions/v1ListMeta" + "blockOwnerDeletion": { + "type": "boolean", + "title": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then\nthe owner cannot be deleted from the key-value store until this\nreference is removed.\nSee https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion\nfor how the garbage collector interacts with this field and enforces the foreground deletion.\nDefaults to false.\nTo set this field, a user needs \"delete\" permission of the owner,\notherwise 422 (Unprocessable Entity) will be returned.\n+optional" + }, + "controller": { + "type": "boolean", + "title": "If true, this reference points to the managing controller.\n+optional" + }, + "kind": { + "type": "string", + "title": "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + }, + "name": { + "type": "string", + "title": "Name of the referent.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#names" + }, + "uid": { + "type": "string", + "title": "UID of the referent.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#uids" } } }, - "v1EventSeries": { - "description": "EventSeries contain information on series of events, i.e. 
thing that was/is happening\ncontinuously for some time.", + "v1PortStatus": { "type": "object", "properties": { - "count": { + "error": { + "type": "string", + "title": "Error is to record the problem with the service port\nThe format of the error shall comply with the following rules:\n- built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.\n---\nThe regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)\n+optional\n+kubebuilder:validation:Required\n+kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`\n+kubebuilder:validation:MaxLength=316" + }, + "port": { "type": "integer", "format": "int32", - "title": "Number of occurrences in this series up to the last heartbeat time" + "title": "Port is the port number of the service port of which status is recorded here" }, - "lastObservedTime": { - "$ref": "#/definitions/v1MicroTime" + "protocol": { + "type": "string", + "title": "Protocol is the protocol of the service port of which status is recorded here\nThe supported values are: \"TCP\", \"UDP\", \"SCTP\"" } } }, - "v1EventSource": { - "description": "EventSource contains information for an event.", + "v1Time": { + "description": "Time is a wrapper around time.Time which supports correct\nmarshaling to YAML and JSON. 
Wrappers are provided for many\nof the factory methods that the time package offers.\n\n+protobuf.options.marshal=false\n+protobuf.as=Timestamp\n+protobuf.options.(gogoproto.goproto_stringer)=false", + "type": "string", + "format": "date-time" + }, + "v1alpha1AWSAuthConfig": { "type": "object", + "title": "AWSAuthConfig is an AWS IAM authentication configuration", "properties": { - "component": { + "clusterName": { "type": "string", - "title": "Component from which the event is generated.\n+optional" + "title": "ClusterName contains AWS cluster name" }, - "host": { - "type": "string", - "title": "Node name on which the event is generated.\n+optional" + "roleARN": { + "description": "RoleARN contains optional role ARN. If set then AWS IAM Authenticator assume a role to perform cluster operations instead of the default AWS credential provider chain.", + "type": "string" } } }, - "v1FieldsV1": { - "description": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set,\nor a string representing a sub-field or item. 
The string will follow one of these four formats:\n'f:', where is the name of a field in a struct, or key in a map\n'v:', where is the exact json formatted value of a list item\n'i:', where is position of a item in a list\n'k:', where is a map of a list item's key fields to their unique values\nIf a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff\n+protobuf.options.(gogoproto.goproto_stringer)=false", + "v1alpha1AppProject": { "type": "object", + "title": "AppProject provides a logical grouping of applications, providing controls for:\n* where the apps may deploy to (cluster whitelist)\n* what may be deployed (repository whitelist, resource whitelist/blacklist)\n* who can access these applications (roles, OIDC group claims bindings)\n* and what they can do (RBAC policies)\n* automation access to these roles (JWT tokens)\n+genclient\n+genclient:noStatus\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:resource:path=appprojects,shortName=appproj;appprojs", "properties": { - "Raw": { - "description": "Raw is the underlying serialization of this object.", - "type": "string", - "format": "byte" + "metadata": { + "$ref": "#/definitions/v1ObjectMeta" + }, + "spec": { + "$ref": "#/definitions/v1alpha1AppProjectSpec" + }, + "status": { + "$ref": "#/definitions/v1alpha1AppProjectStatus" } } }, - "v1GroupKind": { - "description": "+protobuf.options.(gogoproto.goproto_stringer)=false", + "v1alpha1AppProjectList": { "type": "object", - "title": "GroupKind specifies a Group and a Kind, but does not force a version. 
This is useful for identifying\nconcepts during lookup stages without having partially valid types", + "title": "AppProjectList is list of AppProject resources\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object", "properties": { - "group": { - "type": "string" + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1AppProject" + } }, - "kind": { - "type": "string" + "metadata": { + "$ref": "#/definitions/v1ListMeta" } } }, - "v1ListMeta": { - "description": "ListMeta describes metadata that synthetic resources must have, including lists and\nvarious status objects. A resource may have only one of {ObjectMeta, ListMeta}.", + "v1alpha1AppProjectSpec": { "type": "object", + "title": "AppProjectSpec is the specification of an AppProject", "properties": { - "continue": { - "description": "continue may be set if the user set a limit on the number of items returned, and indicates that\nthe server has more data available. The value is opaque and may be used to issue another request\nto the endpoint that served this list to retrieve the next set of available objects. Continuing a\nconsistent list may not be possible if the server configuration has changed or more than a few\nminutes have passed. The resourceVersion field returned when using this continue value will be\nidentical to the value in the first response, unless you have received this token from an error\nmessage.", - "type": "string" + "clusterResourceBlacklist": { + "type": "array", + "title": "ClusterResourceBlacklist contains list of blacklisted cluster level resources", + "items": { + "$ref": "#/definitions/v1GroupKind" + } }, - "remainingItemCount": { - "type": "string", - "format": "int64", - "title": "remainingItemCount is the number of subsequent items in the list which are not included in this\nlist response. 
If the list request contained label or field selectors, then the number of\nremaining items is unknown and the field will be left unset and omitted during serialization.\nIf the list is complete (either because it is not chunking or because this is the last chunk),\nthen there are no more remaining items and this field will be left unset and omitted during\nserialization.\nServers older than v1.15 do not set this field.\nThe intended use of the remainingItemCount is *estimating* the size of a collection. Clients\nshould not rely on the remainingItemCount to be set or to be exact.\n+optional" + "clusterResourceWhitelist": { + "type": "array", + "title": "ClusterResourceWhitelist contains list of whitelisted cluster level resources", + "items": { + "$ref": "#/definitions/v1GroupKind" + } }, - "resourceVersion": { + "description": { "type": "string", - "title": "String that identifies the server's internal version of this object that\ncan be used by clients to determine when objects have changed.\nValue must be treated as opaque by clients and passed unmodified back to the server.\nPopulated by the system.\nRead-only.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency\n+optional" + "title": "Description contains optional project description" }, - "selfLink": { - "description": "selfLink is a URL representing this object.\nPopulated by the system.\nRead-only.\n\nDEPRECATED\nKubernetes will stop propagating this field in 1.20 release and the field is planned\nto be removed in 1.21 release.\n+optional", - "type": "string" + "destinations": { + "type": "array", + "title": "Destinations contains list of destinations available for deployment", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationDestination" + } + }, + "namespaceResourceBlacklist": { + "type": "array", + "title": "NamespaceResourceBlacklist contains list of blacklisted namespace level resources", + "items": { + "$ref": 
"#/definitions/v1GroupKind" + } + }, + "namespaceResourceWhitelist": { + "type": "array", + "title": "NamespaceResourceWhitelist contains list of whitelisted namespace level resources", + "items": { + "$ref": "#/definitions/v1GroupKind" + } + }, + "orphanedResources": { + "$ref": "#/definitions/v1alpha1OrphanedResourcesMonitorSettings" + }, + "permitOnlyProjectScopedClusters": { + "type": "boolean", + "title": "PermitOnlyProjectScopedClusters determines whether destinations can only reference clusters which are project-scoped" + }, + "roles": { + "type": "array", + "title": "Roles are user defined RBAC roles associated with this project", + "items": { + "$ref": "#/definitions/v1alpha1ProjectRole" + } + }, + "signatureKeys": { + "type": "array", + "title": "SignatureKeys contains a list of PGP key IDs that commits in Git must be signed with in order to be allowed for sync", + "items": { + "$ref": "#/definitions/v1alpha1SignatureKey" + } + }, + "sourceNamespaces": { + "type": "array", + "title": "SourceNamespaces defines the namespaces application resources are allowed to be created in", + "items": { + "type": "string" + } + }, + "sourceRepos": { + "type": "array", + "title": "SourceRepos contains list of repository URLs which can be used for deployment", + "items": { + "type": "string" + } + }, + "syncWindows": { + "type": "array", + "title": "SyncWindows controls when syncs can be run for apps in this project", + "items": { + "$ref": "#/definitions/v1alpha1SyncWindow" + } } } }, - "v1LoadBalancerIngress": { - "description": "LoadBalancerIngress represents the status of a load-balancer ingress point:\ntraffic intended for the service should be sent to an ingress point.", + "v1alpha1AppProjectStatus": { "type": "object", + "title": "AppProjectStatus contains status information for AppProject CRs", "properties": { - "hostname": { - "type": "string", - "title": "Hostname is set for load-balancer ingress points that are DNS based\n(typically AWS 
load-balancers)\n+optional" - }, - "ip": { - "type": "string", - "title": "IP is set for load-balancer ingress points that are IP based\n(typically GCE or OpenStack load-balancers)\n+optional" - }, - "ports": { - "type": "array", - "title": "Ports is a list of records of service ports\nIf used, every port defined in the service should have an entry in it\n+listType=atomic\n+optional", - "items": { - "$ref": "#/definitions/v1PortStatus" + "jwtTokensByRole": { + "type": "object", + "title": "JWTTokensByRole contains a list of JWT tokens issued for a given role", + "additionalProperties": { + "$ref": "#/definitions/v1alpha1JWTTokens" } } } }, - "v1ManagedFieldsEntry": { - "description": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource\nthat the fieldset applies to.", + "v1alpha1Application": { "type": "object", + "title": "Application is a definition of Application resource.\n+genclient\n+genclient:noStatus\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:resource:path=applications,shortName=app;apps\n+kubebuilder:printcolumn:name=\"Sync Status\",type=string,JSONPath=`.status.sync.status`\n+kubebuilder:printcolumn:name=\"Health Status\",type=string,JSONPath=`.status.health.status`\n+kubebuilder:printcolumn:name=\"Revision\",type=string,JSONPath=`.status.sync.revision`,priority=10", "properties": { - "apiVersion": { - "description": "APIVersion defines the version of this resource that this field set\napplies to. The format is \"group/version\" just like the top-level\nAPIVersion field. 
It is necessary to track the version of a field\nset because it cannot be automatically converted.", - "type": "string" - }, - "fieldsType": { - "type": "string", - "title": "FieldsType is the discriminator for the different fields format and version.\nThere is currently only one possible value: \"FieldsV1\"" - }, - "fieldsV1": { - "$ref": "#/definitions/v1FieldsV1" - }, - "manager": { - "description": "Manager is an identifier of the workflow managing these fields.", - "type": "string" + "metadata": { + "$ref": "#/definitions/v1ObjectMeta" }, "operation": { - "description": "Operation is the type of operation which lead to this ManagedFieldsEntry being created.\nThe only valid values for this field are 'Apply' and 'Update'.", - "type": "string" + "$ref": "#/definitions/v1alpha1Operation" }, - "time": { - "$ref": "#/definitions/v1Time" + "spec": { + "$ref": "#/definitions/v1alpha1ApplicationSpec" + }, + "status": { + "$ref": "#/definitions/v1alpha1ApplicationStatus" } } }, - "v1MicroTime": { - "description": "MicroTime is version of Time with microsecond level precision.\n\n+protobuf.options.marshal=false\n+protobuf.as=Timestamp\n+protobuf.options.(gogoproto.goproto_stringer)=false", + "v1alpha1ApplicationCondition": { "type": "object", + "title": "ApplicationCondition contains details about an application condition, which is usually an error or warning", "properties": { - "nanos": { - "description": "Non-negative fractions of a second at nanosecond resolution. Negative\nsecond values with fractions must still have non-negative nanos values\nthat count forward in time. Must be from 0 to 999,999,999\ninclusive. This field may be limited in precision depending on context.", - "type": "integer", - "format": "int32" + "lastTransitionTime": { + "$ref": "#/definitions/v1Time" }, - "seconds": { - "description": "Represents seconds of UTC time since Unix epoch\n1970-01-01T00:00:00Z. 
Must be from 0001-01-01T00:00:00Z to\n9999-12-31T23:59:59Z inclusive.", + "message": { "type": "string", - "format": "int64" + "title": "Message contains human-readable message indicating details about condition" + }, + "type": { + "type": "string", + "title": "Type is an application condition type" } } }, - "v1NodeSystemInfo": { - "description": "NodeSystemInfo is a set of ids/uuids to uniquely identify the node.", + "v1alpha1ApplicationDestination": { "type": "object", + "title": "ApplicationDestination holds information about the application's destination", "properties": { - "architecture": { - "type": "string", - "title": "The Architecture reported by the node" - }, - "bootID": { - "description": "Boot ID reported by the node.", - "type": "string" - }, - "containerRuntimeVersion": { - "description": "ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).", - "type": "string" - }, - "kernelVersion": { - "description": "Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).", - "type": "string" - }, - "kubeProxyVersion": { - "description": "KubeProxy Version reported by the node.", - "type": "string" - }, - "kubeletVersion": { - "description": "Kubelet Version reported by the node.", + "name": { + "description": "Name is an alternate way of specifying the target cluster by its symbolic name. This must be set if Server is not set.", "type": "string" }, - "machineID": { - "type": "string", - "title": "MachineID reported by the node. For unique machine identification\nin the cluster this field is preferred. 
Learn more from man(5)\nmachine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html" - }, - "operatingSystem": { + "namespace": { "type": "string", - "title": "The Operating System reported by the node" + "title": "Namespace specifies the target namespace for the application's resources.\nThe namespace will only be set for namespace-scoped resources that have not set a value for .metadata.namespace" }, - "osImage": { - "description": "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).", + "server": { + "description": "Server specifies the URL of the target cluster's Kubernetes control plane API. This must be set if Name is not set.", "type": "string" - }, - "systemUUID": { - "type": "string", - "title": "SystemUUID reported by the node. For unique machine identification\nMachineID is preferred. This field is specific to Red Hat hosts\nhttps://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid" } } }, - "v1ObjectMeta": { - "description": "ObjectMeta is metadata that all persisted resources must have, which includes all objects\nusers must create.", + "v1alpha1ApplicationList": { "type": "object", + "title": "ApplicationList is list of Application resources\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object", "properties": { - "annotations": { - "type": "object", - "title": "Annotations is an unstructured key value map stored with a resource that may be\nset by external tools to store and retrieve arbitrary metadata. 
They are not\nqueryable and should be preserved when modifying objects.\nMore info: http://kubernetes.io/docs/user-guide/annotations\n+optional", - "additionalProperties": { - "type": "string" - } - }, - "clusterName": { - "type": "string", - "title": "The name of the cluster which the object belongs to.\nThis is used to distinguish resources with same name and namespace in different clusters.\nThis field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.\n+optional" - }, - "creationTimestamp": { - "$ref": "#/definitions/v1Time" - }, - "deletionGracePeriodSeconds": { - "type": "string", - "format": "int64", - "title": "Number of seconds allowed for this object to gracefully terminate before\nit will be removed from the system. Only set when deletionTimestamp is also set.\nMay only be shortened.\nRead-only.\n+optional" - }, - "deletionTimestamp": { - "$ref": "#/definitions/v1Time" - }, - "finalizers": { + "items": { "type": "array", - "title": "Must be empty before the object is deleted from the registry. Each entry\nis an identifier for the responsible component that will remove the entry\nfrom the list. If the deletionTimestamp of the object is non-nil, entries\nin this list can only be removed.\nFinalizers may be processed and removed in any order. 
Order is NOT enforced\nbecause it introduces significant risk of stuck finalizers.\nfinalizers is a shared field, any actor with permission can reorder it.\nIf the finalizer list is processed in order, then this can lead to a situation\nin which the component responsible for the first finalizer in the list is\nwaiting for a signal (field value, external system, or other) produced by a\ncomponent responsible for a finalizer later in the list, resulting in a deadlock.\nWithout enforced ordering finalizers are free to order amongst themselves and\nare not vulnerable to ordering changes in the list.\n+optional\n+patchStrategy=merge", "items": { - "type": "string" + "$ref": "#/definitions/v1alpha1Application" } }, - "generateName": { - "description": "GenerateName is an optional prefix, used by the server, to generate a unique\nname ONLY IF the Name field has not been provided.\nIf this field is used, the name returned to the client will be different\nthan the name passed. This value will also be combined with a unique suffix.\nThe provided value has the same validation rules as the Name field,\nand may be truncated by the length of the suffix required to make the value\nunique on the server.\n\nIf this field is specified and the generated name exists, the server will\nNOT return a 409 - instead, it will either return 201 Created or 500 with Reason\nServerTimeout indicating a unique name could not be found in the time allotted, and the client\nshould retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency\n+optional", + "metadata": { + "$ref": "#/definitions/v1ListMeta" + } + } + }, + "v1alpha1ApplicationMatchExpression": { + "type": "object", + "properties": { + "key": { "type": "string" }, - "generation": { - "type": "string", - "format": "int64", - "title": "A sequence number representing a specific 
generation of the desired state.\nPopulated by the system. Read-only.\n+optional" + "operator": { + "type": "string" }, - "labels": { - "type": "object", - "title": "Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. May match selectors of replication controllers\nand services.\nMore info: http://kubernetes.io/docs/user-guide/labels\n+optional", - "additionalProperties": { + "values": { + "type": "array", + "items": { "type": "string" } - }, - "managedFields": { - "description": "ManagedFields maps workflow-id and version to the set of fields\nthat are managed by that workflow. This is mostly for internal\nhousekeeping, and users typically shouldn't need to set or\nunderstand this field. A workflow can be the user's name, a\ncontroller's name, or the name of a specific apply path like\n\"ci-cd\". The set of fields is always in the version that the\nworkflow used when modifying the object.\n\n+optional", + } + } + }, + "v1alpha1ApplicationPreservedFields": { + "type": "object", + "properties": { + "annotations": { "type": "array", "items": { - "$ref": "#/definitions/v1ManagedFieldsEntry" + "type": "string" } }, - "name": { - "type": "string", - "title": "Name must be unique within a namespace. Is required when creating resources, although\nsome resources may allow a client to request the generation of an appropriate name\nautomatically. Name is primarily intended for creation idempotence and configuration\ndefinition.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#names\n+optional" - }, - "namespace": { - "description": "Namespace defines the space within which each name must be unique. 
An empty namespace is\nequivalent to the \"default\" namespace, but \"default\" is the canonical representation.\nNot all objects are required to be scoped to a namespace - the value of this field for\nthose objects will be empty.\n\nMust be a DNS_LABEL.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/namespaces\n+optional", - "type": "string" - }, - "ownerReferences": { + "labels": { "type": "array", - "title": "List of objects depended by this object. If ALL objects in the list have\nbeen deleted, this object will be garbage collected. If this object is managed by a controller,\nthen an entry in this list will point to this controller, with the controller field set to true.\nThere cannot be more than one managing controller.\n+optional\n+patchMergeKey=uid\n+patchStrategy=merge", "items": { - "$ref": "#/definitions/v1OwnerReference" + "type": "string" } - }, - "resourceVersion": { - "description": "An opaque value that represents the internal version of this object that can\nbe used by clients to determine when objects have changed. May be used for optimistic\nconcurrency, change detection, and the watch operation on a resource or set of resources.\nClients must treat these values as opaque and passed unmodified back to the server.\nThey may only be valid for a particular resource or set of resources.\n\nPopulated by the system.\nRead-only.\nValue must be treated as opaque by clients and .\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency\n+optional", - "type": "string" - }, - "selfLink": { - "description": "SelfLink is a URL representing this object.\nPopulated by the system.\nRead-only.\n\nDEPRECATED\nKubernetes will stop propagating this field in 1.20 release and the field is planned\nto be removed in 1.21 release.\n+optional", - "type": "string" - }, - "uid": { - "description": "UID is the unique in time and space value for this object. 
It is typically generated by\nthe server on successful creation of a resource and is not allowed to change on PUT\noperations.\n\nPopulated by the system.\nRead-only.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#uids\n+optional", - "type": "string" } } }, - "v1ObjectReference": { + "v1alpha1ApplicationSet": { "type": "object", - "title": "ObjectReference contains enough information to let you inspect or modify the referred object.\n---\nNew uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.\n 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage.\n 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular\n restrictions like, \"must refer only to types A and B\" or \"UID not honored\" or \"name must be restricted\".\n Those cannot be well described when embedded.\n 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.\n 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity\n during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple\n and the version of the actual struct is irrelevant.\n 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type\n will affect numerous schemas. 
Don't make new APIs embed an underspecified API type they do not control.\nInstead of using this type, create a locally provided and used type that is well-focused on your reference.\nFor example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object", + "title": "ApplicationSet is a set of Application resources\n+genclient\n+genclient:noStatus\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:resource:path=applicationsets,shortName=appset;appsets\n+kubebuilder:subresource:status", "properties": { - "apiVersion": { - "type": "string", - "title": "API version of the referent.\n+optional" + "metadata": { + "$ref": "#/definitions/v1ObjectMeta" }, - "fieldPath": { - "type": "string", - "title": "If referring to a piece of an object instead of an entire object, this string\nshould contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].\nFor example, if the object reference is to a container within a pod, this would take on a value like:\n\"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered\nthe event) or if no container name is specified \"spec.containers[2]\" (container with\nindex 2 in this pod). 
This syntax is chosen only to have some well-defined way of\nreferencing a part of an object.\nTODO: this design is not final and this field is subject to change in the future.\n+optional" + "spec": { + "$ref": "#/definitions/v1alpha1ApplicationSetSpec" }, - "kind": { + "status": { + "$ref": "#/definitions/v1alpha1ApplicationSetStatus" + } + } + }, + "v1alpha1ApplicationSetApplicationStatus": { + "type": "object", + "title": "ApplicationSetApplicationStatus contains details about each Application managed by the ApplicationSet", + "properties": { + "application": { "type": "string", - "title": "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n+optional" + "title": "Application contains the name of the Application resource" }, - "name": { - "type": "string", - "title": "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\n+optional" + "lastTransitionTime": { + "$ref": "#/definitions/v1Time" }, - "namespace": { + "message": { "type": "string", - "title": "Namespace of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/\n+optional" + "title": "Message contains human-readable message indicating details about the status" }, - "resourceVersion": { + "status": { "type": "string", - "title": "Specific resourceVersion to which this reference is made, if any.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency\n+optional" + "title": "Status contains the AppSet's perceived status of the managed Application resource: (Waiting, Pending, Progressing, Healthy)" }, - "uid": { + "step": { "type": "string", - "title": "UID of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids\n+optional" + "title": "Step tracks which step this Application should be updated in" } 
} }, - "v1OwnerReference": { - "description": "OwnerReference contains enough information to let you identify an owning\nobject. An owning object must be in the same namespace as the dependent, or\nbe cluster-scoped, so there is no namespace field.", + "v1alpha1ApplicationSetCondition": { "type": "object", + "title": "ApplicationSetCondition contains details about an applicationset condition, which is usally an error or warning", "properties": { - "apiVersion": { - "description": "API version of the referent.", - "type": "string" - }, - "blockOwnerDeletion": { - "type": "boolean", - "title": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then\nthe owner cannot be deleted from the key-value store until this\nreference is removed.\nDefaults to false.\nTo set this field, a user needs \"delete\" permission of the owner,\notherwise 422 (Unprocessable Entity) will be returned.\n+optional" + "lastTransitionTime": { + "$ref": "#/definitions/v1Time" }, - "controller": { - "type": "boolean", - "title": "If true, this reference points to the managing controller.\n+optional" + "message": { + "type": "string", + "title": "Message contains human-readable message indicating details about condition" }, - "kind": { + "reason": { "type": "string", - "title": "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + "title": "Single word camelcase representing the reason for the status eg ErrorOccurred" }, - "name": { + "status": { "type": "string", - "title": "Name of the referent.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#names" + "title": "True/False/Unknown" }, - "uid": { + "type": { "type": "string", - "title": "UID of the referent.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#uids" + "title": "Type is an applicationset condition type" } } }, - "v1PortStatus": { + "v1alpha1ApplicationSetGenerator": { + "description": "ApplicationSetGenerator 
represents a generator at the top level of an ApplicationSet.", "type": "object", "properties": { - "error": { - "type": "string", - "title": "Error is to record the problem with the service port\nThe format of the error shall comply with the following rules:\n- built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.\n---\nThe regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)\n+optional\n+kubebuilder:validation:Required\n+kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`\n+kubebuilder:validation:MaxLength=316" + "clusterDecisionResource": { + "$ref": "#/definitions/v1alpha1DuckTypeGenerator" }, - "port": { - "type": "integer", - "format": "int32", - "title": "Port is the port number of the service port of which status is recorded here" + "clusters": { + "$ref": "#/definitions/v1alpha1ClusterGenerator" }, - "protocol": { - "type": "string", - "title": "Protocol is the protocol of the service port of which status is recorded here\nThe supported values are: \"TCP\", \"UDP\", \"SCTP\"" + "git": { + "$ref": "#/definitions/v1alpha1GitGenerator" + }, + "list": { + "$ref": "#/definitions/v1alpha1ListGenerator" + }, + "matrix": { + "$ref": "#/definitions/v1alpha1MatrixGenerator" + }, + "merge": { + "$ref": "#/definitions/v1alpha1MergeGenerator" + }, + "plugin": { + "$ref": "#/definitions/v1alpha1PluginGenerator" + }, + "pullRequest": { + "$ref": "#/definitions/v1alpha1PullRequestGenerator" + }, + "scmProvider": { + "$ref": "#/definitions/v1alpha1SCMProviderGenerator" + }, + "selector": { + "$ref": "#/definitions/v1LabelSelector" } } }, - "v1Time": { - "description": "Time is a wrapper around time.Time which supports correct\nmarshaling to YAML and JSON. 
Wrappers are provided for many\nof the factory methods that the time package offers.\n\n+protobuf.options.marshal=false\n+protobuf.as=Timestamp\n+protobuf.options.(gogoproto.goproto_stringer)=false", + "v1alpha1ApplicationSetList": { "type": "object", + "title": "ApplicationSetList contains a list of ApplicationSet\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:object:root=true", "properties": { - "nanos": { - "description": "Non-negative fractions of a second at nanosecond resolution. Negative\nsecond values with fractions must still have non-negative nanos values\nthat count forward in time. Must be from 0 to 999,999,999\ninclusive. This field may be limited in precision depending on context.", - "type": "integer", - "format": "int32" + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationSet" + } }, - "seconds": { - "description": "Represents seconds of UTC time since Unix epoch\n1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to\n9999-12-31T23:59:59Z inclusive.", - "type": "string", - "format": "int64" + "metadata": { + "$ref": "#/definitions/v1ListMeta" } } }, - "v1alpha1AWSAuthConfig": { + "v1alpha1ApplicationSetNestedGenerator": { + "description": "ApplicationSetNestedGenerator represents a generator nested within a combination-type generator (MatrixGenerator or\nMergeGenerator).", "type": "object", - "title": "AWSAuthConfig is an AWS IAM authentication configuration", "properties": { - "clusterName": { - "type": "string", - "title": "ClusterName contains AWS cluster name" + "clusterDecisionResource": { + "$ref": "#/definitions/v1alpha1DuckTypeGenerator" }, - "roleARN": { - "description": "RoleARN contains optional role ARN. 
If set then AWS IAM Authenticator assume a role to perform cluster operations instead of the default AWS credential provider chain.", - "type": "string" + "clusters": { + "$ref": "#/definitions/v1alpha1ClusterGenerator" + }, + "git": { + "$ref": "#/definitions/v1alpha1GitGenerator" + }, + "list": { + "$ref": "#/definitions/v1alpha1ListGenerator" + }, + "matrix": { + "$ref": "#/definitions/v1JSON" + }, + "merge": { + "$ref": "#/definitions/v1JSON" + }, + "plugin": { + "$ref": "#/definitions/v1alpha1PluginGenerator" + }, + "pullRequest": { + "$ref": "#/definitions/v1alpha1PullRequestGenerator" + }, + "scmProvider": { + "$ref": "#/definitions/v1alpha1SCMProviderGenerator" + }, + "selector": { + "$ref": "#/definitions/v1LabelSelector" } } }, - "v1alpha1AppProject": { + "v1alpha1ApplicationSetResourceIgnoreDifferences": { + "description": "ApplicationSetResourceIgnoreDifferences configures how the ApplicationSet controller will ignore differences in live\napplications when applying changes from generated applications.", "type": "object", - "title": "AppProject provides a logical grouping of applications, providing controls for:\n* where the apps may deploy to (cluster whitelist)\n* what may be deployed (repository whitelist, resource whitelist/blacklist)\n* who can access these applications (roles, OIDC group claims bindings)\n* and what they can do (RBAC policies)\n* automation access to these roles (JWT tokens)\n+genclient\n+genclient:noStatus\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:resource:path=appprojects,shortName=appproj;appprojs", "properties": { - "metadata": { - "$ref": "#/definitions/v1ObjectMeta" + "jqPathExpressions": { + "description": "JQPathExpressions is a list of JQ path expressions to fields to ignore differences for.", + "type": "array", + "items": { + "type": "string" + } }, - "spec": { - "$ref": "#/definitions/v1alpha1AppProjectSpec" + "jsonPointers": { + "description": "JSONPointers is a list of JSON 
pointers to fields to ignore differences for.", + "type": "array", + "items": { + "type": "string" + } }, - "status": { - "$ref": "#/definitions/v1alpha1AppProjectStatus" + "name": { + "description": "Name is the name of the application to ignore differences for. If not specified, the rule applies to all applications.", + "type": "string" } } }, - "v1alpha1AppProjectList": { + "v1alpha1ApplicationSetRolloutStep": { "type": "object", - "title": "AppProjectList is list of AppProject resources\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object", "properties": { - "items": { + "matchExpressions": { "type": "array", "items": { - "$ref": "#/definitions/v1alpha1AppProject" + "$ref": "#/definitions/v1alpha1ApplicationMatchExpression" } }, - "metadata": { - "$ref": "#/definitions/v1ListMeta" + "maxUpdate": { + "$ref": "#/definitions/intstrIntOrString" } } }, - "v1alpha1AppProjectSpec": { + "v1alpha1ApplicationSetRolloutStrategy": { "type": "object", - "title": "AppProjectSpec is the specification of an AppProject", "properties": { - "clusterResourceBlacklist": { + "steps": { "type": "array", - "title": "ClusterResourceBlacklist contains list of blacklisted cluster level resources", "items": { - "$ref": "#/definitions/v1GroupKind" + "$ref": "#/definitions/v1alpha1ApplicationSetRolloutStep" } + } + } + }, + "v1alpha1ApplicationSetSpec": { + "description": "ApplicationSetSpec represents a class of application set state.", + "type": "object", + "properties": { + "applyNestedSelectors": { + "type": "boolean", + "title": "ApplyNestedSelectors enables selectors defined within the generators of two level-nested matrix or merge generators" }, - "clusterResourceWhitelist": { + "generators": { "type": "array", - "title": "ClusterResourceWhitelist contains list of whitelisted cluster level resources", "items": { - "$ref": "#/definitions/v1GroupKind" + "$ref": "#/definitions/v1alpha1ApplicationSetGenerator" } }, - "description": { - "type": "string", - "title": 
"Description contains optional project description" - }, - "destinations": { - "type": "array", - "title": "Destinations contains list of destinations available for deployment", - "items": { - "$ref": "#/definitions/v1alpha1ApplicationDestination" - } + "goTemplate": { + "type": "boolean" }, - "namespaceResourceBlacklist": { + "goTemplateOptions": { "type": "array", - "title": "NamespaceResourceBlacklist contains list of blacklisted namespace level resources", "items": { - "$ref": "#/definitions/v1GroupKind" + "type": "string" } }, - "namespaceResourceWhitelist": { + "ignoreApplicationDifferences": { "type": "array", - "title": "NamespaceResourceWhitelist contains list of whitelisted namespace level resources", "items": { - "$ref": "#/definitions/v1GroupKind" + "$ref": "#/definitions/v1alpha1ApplicationSetResourceIgnoreDifferences" } }, - "orphanedResources": { - "$ref": "#/definitions/v1alpha1OrphanedResourcesMonitorSettings" + "preservedFields": { + "$ref": "#/definitions/v1alpha1ApplicationPreservedFields" }, - "roles": { - "type": "array", - "title": "Roles are user defined RBAC roles associated with this project", - "items": { - "$ref": "#/definitions/v1alpha1ProjectRole" - } + "strategy": { + "$ref": "#/definitions/v1alpha1ApplicationSetStrategy" }, - "signatureKeys": { - "type": "array", - "title": "SignatureKeys contains a list of PGP key IDs that commits in Git must be signed with in order to be allowed for sync", - "items": { - "$ref": "#/definitions/v1alpha1SignatureKey" - } + "syncPolicy": { + "$ref": "#/definitions/v1alpha1ApplicationSetSyncPolicy" }, - "sourceRepos": { + "template": { + "$ref": "#/definitions/v1alpha1ApplicationSetTemplate" + } + } + }, + "v1alpha1ApplicationSetStatus": { + "type": "object", + "title": "ApplicationSetStatus defines the observed state of ApplicationSet", + "properties": { + "applicationStatus": { "type": "array", - "title": "SourceRepos contains list of repository URLs which can be used for deployment", "items": { - 
"type": "string" + "$ref": "#/definitions/v1alpha1ApplicationSetApplicationStatus" } }, - "syncWindows": { + "conditions": { "type": "array", - "title": "SyncWindows controls when syncs can be run for apps in this project", + "title": "INSERT ADDITIONAL STATUS FIELD - define observed state of cluster\nImportant: Run \"make\" to regenerate code after modifying this file", "items": { - "$ref": "#/definitions/v1alpha1SyncWindow" - } - } - } - }, - "v1alpha1AppProjectStatus": { - "type": "object", - "title": "AppProjectStatus contains status information for AppProject CRs", - "properties": { - "jwtTokensByRole": { - "type": "object", - "title": "JWTTokensByRole contains a list of JWT tokens issued for a given role", - "additionalProperties": { - "$ref": "#/definitions/v1alpha1JWTTokens" + "$ref": "#/definitions/v1alpha1ApplicationSetCondition" } } } }, - "v1alpha1Application": { + "v1alpha1ApplicationSetStrategy": { + "description": "ApplicationSetStrategy configures how generated Applications are updated in sequence.", "type": "object", - "title": "Application is a definition of Application resource.\n+genclient\n+genclient:noStatus\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:resource:path=applications,shortName=app;apps\n+kubebuilder:printcolumn:name=\"Sync Status\",type=string,JSONPath=`.status.sync.status`\n+kubebuilder:printcolumn:name=\"Health Status\",type=string,JSONPath=`.status.health.status`\n+kubebuilder:printcolumn:name=\"Revision\",type=string,JSONPath=`.status.sync.revision`,priority=10", "properties": { - "metadata": { - "$ref": "#/definitions/v1ObjectMeta" - }, - "operation": { - "$ref": "#/definitions/v1alpha1Operation" - }, - "spec": { - "$ref": "#/definitions/v1alpha1ApplicationSpec" + "rollingSync": { + "$ref": "#/definitions/v1alpha1ApplicationSetRolloutStrategy" }, - "status": { - "$ref": "#/definitions/v1alpha1ApplicationStatus" + "type": { + "type": "string" } } }, - "v1alpha1ApplicationCondition": { + 
"v1alpha1ApplicationSetSyncPolicy": { + "description": "ApplicationSetSyncPolicy configures how generated Applications will relate to their\nApplicationSet.", "type": "object", - "title": "ApplicationCondition contains details about an application condition, which is usally an error or warning", "properties": { - "lastTransitionTime": { - "$ref": "#/definitions/v1Time" - }, - "message": { + "applicationsSync": { "type": "string", - "title": "Message contains human-readable message indicating details about condition" + "title": "ApplicationsSync represents the policy applied on the generated applications. Possible values are create-only, create-update, create-delete, sync\n+kubebuilder:validation:Optional\n+kubebuilder:validation:Enum=create-only;create-update;create-delete;sync" }, - "type": { - "type": "string", - "title": "Type is an application condition type" + "preserveResourcesOnDeletion": { + "description": "PreserveResourcesOnDeletion will preserve resources on deletion. If PreserveResourcesOnDeletion is set to true, these Applications will not be deleted.", + "type": "boolean" } } }, - "v1alpha1ApplicationDestination": { + "v1alpha1ApplicationSetTemplate": { "type": "object", - "title": "ApplicationDestination holds information about the application's destination", + "title": "ApplicationSetTemplate represents argocd ApplicationSpec", "properties": { - "name": { - "type": "string", - "title": "Name is an alternate way of specifying the target cluster by its symbolic name" - }, - "namespace": { - "type": "string", - "title": "Namespace specifies the target namespace for the application's resources.\nThe namespace will only be set for namespace-scoped resources that have not set a value for .metadata.namespace" + "metadata": { + "$ref": "#/definitions/v1alpha1ApplicationSetTemplateMeta" }, - "server": { - "type": "string", - "title": "Server specifies the URL of the target cluster and must be set to the Kubernetes control plane API" + "spec": { + "$ref": 
"#/definitions/v1alpha1ApplicationSpec" } } }, - "v1alpha1ApplicationList": { + "v1alpha1ApplicationSetTemplateMeta": { "type": "object", - "title": "ApplicationList is list of Application resources\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object", + "title": "ApplicationSetTemplateMeta represents the Argo CD application fields that may\nbe used for Applications generated from the ApplicationSet (based on metav1.ObjectMeta)", "properties": { - "items": { + "annotations": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "finalizers": { "type": "array", "items": { - "$ref": "#/definitions/v1alpha1Application" + "type": "string" } }, - "metadata": { - "$ref": "#/definitions/v1ListMeta" + "labels": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "name": { + "type": "string" + }, + "namespace": { + "type": "string" } } }, @@ -4742,9 +6248,6 @@ "helm": { "$ref": "#/definitions/v1alpha1ApplicationSourceHelm" }, - "ksonnet": { - "$ref": "#/definitions/v1alpha1ApplicationSourceKsonnet" - }, "kustomize": { "$ref": "#/definitions/v1alpha1ApplicationSourceKustomize" }, @@ -4755,6 +6258,10 @@ "plugin": { "$ref": "#/definitions/v1alpha1ApplicationSourcePlugin" }, + "ref": { + "description": "Ref is reference to another source within sources field. 
This field will not be used if used with a `source` tag.", + "type": "string" + }, "repoURL": { "type": "string", "title": "RepoURL is the URL to the repository (Git or Helm) that contains the application manifests" @@ -4797,6 +6304,10 @@ "$ref": "#/definitions/v1alpha1HelmFileParameter" } }, + "ignoreMissingValueFiles": { + "type": "boolean", + "title": "IgnoreMissingValueFiles prevents helm template from failing when valueFiles do not exist locally by not appending them to helm template --values" + }, "parameters": { "type": "array", "title": "Parameters is a list of Helm parameters which are passed to the helm template command upon manifest generation", @@ -4804,10 +6315,18 @@ "$ref": "#/definitions/v1alpha1HelmParameter" } }, + "passCredentials": { + "type": "boolean", + "title": "PassCredentials pass credentials to all domains (Helm's --pass-credentials)" + }, "releaseName": { "type": "string", "title": "ReleaseName is the Helm release name to use. If omitted it will use the application name" }, + "skipCrds": { + "type": "boolean", + "title": "SkipCrds skips custom resource definition installation step (Helm's --skip-crds)" + }, "valueFiles": { "type": "array", "title": "ValuesFiles is a list of Helm value files to use when generating a template", @@ -4817,11 +6336,14 @@ }, "values": { "type": "string", - "title": "Values specifies Helm values to be passed to helm template, typically defined as a block" + "title": "Values specifies Helm values to be passed to helm template, typically defined as a block. 
ValuesObject takes precedence over Values, so use one or the other.\n+patchStrategy=replace" + }, + "valuesObject": { + "$ref": "#/definitions/runtimeRawExtension" }, "version": { "type": "string", - "title": "Version is the Helm version to use for templating (either \"2\" or \"3\")" + "title": "Version is the Helm version to use for templating (\"3\")" } } }, @@ -4852,23 +6374,6 @@ } } }, - "v1alpha1ApplicationSourceKsonnet": { - "type": "object", - "title": "ApplicationSourceKsonnet holds ksonnet specific options", - "properties": { - "environment": { - "type": "string", - "title": "Environment is a ksonnet application environment name" - }, - "parameters": { - "type": "array", - "title": "Parameters are a list of ksonnet component parameter override values", - "items": { - "$ref": "#/definitions/v1alpha1KsonnetParameter" - } - } - } - }, "v1alpha1ApplicationSourceKustomize": { "type": "object", "title": "ApplicationSourceKustomize holds options specific to an Application source specific to Kustomize", @@ -4880,6 +6385,10 @@ "type": "string" } }, + "commonAnnotationsEnvsubst": { + "type": "boolean", + "title": "CommonAnnotationsEnvsubst specifies whether to apply env variables substitution for annotation values" + }, "commonLabels": { "type": "object", "title": "CommonLabels is a list of additional labels to add to rendered manifests", @@ -4887,6 +6396,14 @@ "type": "string" } }, + "forceCommonAnnotations": { + "type": "boolean", + "title": "ForceCommonAnnotations specifies whether to force applying common annotations to resources for Kustomize apps" + }, + "forceCommonLabels": { + "type": "boolean", + "title": "ForceCommonLabels specifies whether to force applying common labels to resources for Kustomize apps" + }, "images": { "type": "array", "title": "Images is a list of Kustomize image override specifications", @@ -4902,6 +6419,24 @@ "type": "string", "title": "NameSuffix is a suffix appended to resources for Kustomize apps" }, + "namespace": { + "type": 
"string", + "title": "Namespace sets the namespace that Kustomize adds to all resources" + }, + "patches": { + "type": "array", + "title": "Patches is a list of Kustomize patches", + "items": { + "$ref": "#/definitions/v1alpha1KustomizePatch" + } + }, + "replicas": { + "type": "array", + "title": "Replicas is a list of Kustomize Replicas override specifications", + "items": { + "$ref": "#/definitions/v1alpha1KustomizeReplica" + } + }, "version": { "type": "string", "title": "Version controls which version of Kustomize to use for rendering manifests" @@ -4920,6 +6455,39 @@ }, "name": { "type": "string" + }, + "parameters": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationSourcePluginParameter" + } + } + } + }, + "v1alpha1ApplicationSourcePluginParameter": { + "type": "object", + "properties": { + "array": { + "description": "Array is the value of an array type parameter.", + "type": "array", + "items": { + "type": "string" + } + }, + "map": { + "description": "Map is the value of a map type parameter.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "name": { + "description": "Name is the name identifying a parameter.", + "type": "string" + }, + "string": { + "description": "String_ is the value of a string type parameter.", + "type": "string" } } }, @@ -4950,12 +6518,19 @@ }, "revisionHistoryLimit": { "description": "RevisionHistoryLimit limits the number of items kept in the application's revision history, which is used for informational purposes as well as for rollbacks to previous versions.\nThis should only be changed in exceptional circumstances.\nSetting to zero will store no history. 
This will reduce storage used.\nIncreasing will increase the space used to store the history, so we do not recommend increasing it.\nDefault is 10.", - "type": "string", + "type": "integer", "format": "int64" }, "source": { "$ref": "#/definitions/v1alpha1ApplicationSource" }, + "sources": { + "type": "array", + "title": "Sources is a reference to the location of the application's manifests or chart", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationSource" + } + }, "syncPolicy": { "$ref": "#/definitions/v1alpha1SyncPolicy" } @@ -4972,6 +6547,10 @@ "$ref": "#/definitions/v1alpha1ApplicationCondition" } }, + "controllerNamespace": { + "type": "string", + "title": "ControllerNamespace indicates the namespace in which the application controller is located" + }, "health": { "$ref": "#/definitions/v1alpha1HealthStatus" }, @@ -4991,6 +6570,10 @@ "reconciledAt": { "$ref": "#/definitions/v1Time" }, + "resourceHealthSource": { + "type": "string", + "title": "ResourceHealthSource indicates where the resource health status is stored: inline if not set or appTree" + }, "resources": { "type": "array", "title": "Resources is a list of Kubernetes resources managed by this application", @@ -5002,6 +6585,13 @@ "type": "string", "title": "SourceType specifies the type of this application" }, + "sourceTypes": { + "type": "array", + "title": "SourceTypes specifies the type of the sources included in the application", + "items": { + "type": "string" + } + }, "summary": { "$ref": "#/definitions/v1alpha1ApplicationSummary" }, @@ -5057,34 +6647,76 @@ } } }, - "v1alpha1ApplicationWatchEvent": { - "description": "ApplicationWatchEvent contains information about application change.", + "v1alpha1ApplicationWatchEvent": { + "description": "ApplicationWatchEvent contains information about application change.", + "type": "object", + "properties": { + "application": { + "$ref": "#/definitions/v1alpha1Application" + }, + "type": { + "type": "string" + } + } + }, + "v1alpha1Backoff": { + 
"type": "object", + "title": "Backoff is the backoff strategy to use on subsequent retries for failing syncs", + "properties": { + "duration": { + "type": "string", + "title": "Duration is the amount to back off. Default unit is seconds, but could also be a duration (e.g. \"2m\", \"1h\")" + }, + "factor": { + "type": "integer", + "format": "int64", + "title": "Factor is a factor to multiply the base duration after each failed retry" + }, + "maxDuration": { + "type": "string", + "title": "MaxDuration is the maximum amount of time allowed for the backoff strategy" + } + } + }, + "v1alpha1BasicAuthBitbucketServer": { + "description": "BasicAuthBitbucketServer defines the username/(password or personal access token) for Basic auth.", + "type": "object", + "properties": { + "passwordRef": { + "$ref": "#/definitions/v1alpha1SecretRef" + }, + "username": { + "type": "string", + "title": "Username for Basic auth" + } + } + }, + "v1alpha1BearerTokenBitbucketCloud": { + "description": "BearerTokenBitbucketCloud defines the Bearer token for BitBucket AppToken auth.", "type": "object", "properties": { - "application": { - "$ref": "#/definitions/v1alpha1Application" - }, - "type": { - "type": "string" + "tokenRef": { + "$ref": "#/definitions/v1alpha1SecretRef" } } }, - "v1alpha1Backoff": { + "v1alpha1ChartDetails": { "type": "object", - "title": "Backoff is the backoff strategy to use on subsequent retries for failing syncs", + "title": "ChartDetails contains helm chart metadata for a specific version", "properties": { - "duration": { - "type": "string", - "title": "Duration is the amount to back off. Default unit is seconds, but could also be a duration (e.g. \"2m\", \"1h\")" + "description": { + "type": "string" }, - "factor": { + "home": { "type": "string", - "format": "int64", - "title": "Factor is a factor to multiply the base duration after each failed retry" + "title": "The URL of this projects home page, e.g. 
\"http://example.com\"" }, - "maxDuration": { - "type": "string", - "title": "MaxDuration is the maximum amount of time allowed for the backoff strategy" + "maintainers": { + "type": "array", + "title": "List of maintainer details, name and email, e.g. [\"John Doe \"]", + "items": { + "type": "string" + } } } }, @@ -5092,6 +6724,17 @@ "type": "object", "title": "Cluster is the definition of a cluster resource", "properties": { + "annotations": { + "type": "object", + "title": "Annotations for cluster secret metadata", + "additionalProperties": { + "type": "string" + } + }, + "clusterResources": { + "description": "Indicates if cluster level resources should be managed. This setting is used only if cluster is connected in a namespaced mode.", + "type": "boolean" + }, "config": { "$ref": "#/definitions/v1alpha1ClusterConfig" }, @@ -5101,6 +6744,13 @@ "info": { "$ref": "#/definitions/v1alpha1ClusterInfo" }, + "labels": { + "type": "object", + "title": "Labels for cluster secret metadata", + "additionalProperties": { + "type": "string" + } + }, "name": { "type": "string", "title": "Name of the cluster. If omitted, will use the server address" @@ -5112,6 +6762,10 @@ "type": "string" } }, + "project": { + "type": "string", + "title": "Reference between project and cluster that allow you automatically to be added as item inside Destinations project entity" + }, "refreshRequestedAt": { "$ref": "#/definitions/v1Time" }, @@ -5125,7 +6779,7 @@ }, "shard": { "description": "Shard contains optional shard number. 
Calculated on the fly by the application controller if not specified.", - "type": "string", + "type": "integer", "format": "int64" } } @@ -5135,7 +6789,7 @@ "title": "ClusterCacheInfo contains information about the cluster cache", "properties": { "apisCount": { - "type": "string", + "type": "integer", "format": "int64", "title": "APIsCount holds number of observed Kubernetes API count" }, @@ -5143,7 +6797,7 @@ "$ref": "#/definitions/v1Time" }, "resourcesCount": { - "type": "string", + "type": "integer", "format": "int64", "title": "ResourcesCount holds number of observed Kubernetes resources" } @@ -5175,12 +6829,38 @@ } } }, + "v1alpha1ClusterGenerator": { + "description": "ClusterGenerator defines a generator to match against clusters registered with ArgoCD.", + "type": "object", + "properties": { + "selector": { + "$ref": "#/definitions/v1LabelSelector" + }, + "template": { + "$ref": "#/definitions/v1alpha1ApplicationSetTemplate" + }, + "values": { + "type": "object", + "title": "Values contains key/value pairs which are passed directly as parameters to the template", + "additionalProperties": { + "type": "string" + } + } + } + }, "v1alpha1ClusterInfo": { "type": "object", "title": "ClusterInfo contains information about the cluster", "properties": { + "apiVersions": { + "type": "array", + "title": "APIVersions contains list of API versions supported by the cluster", + "items": { + "type": "string" + } + }, "applicationsCount": { - "type": "string", + "type": "integer", "format": "int64", "title": "ApplicationsCount is the number of applications managed by Argo CD on the cluster" }, @@ -5236,8 +6916,22 @@ "destination": { "$ref": "#/definitions/v1alpha1ApplicationDestination" }, + "ignoreDifferences": { + "type": "array", + "title": "IgnoreDifferences is a reference to the application's ignored differences used for comparison", + "items": { + "$ref": "#/definitions/v1alpha1ResourceIgnoreDifferences" + } + }, "source": { "$ref": 
"#/definitions/v1alpha1ApplicationSource" + }, + "sources": { + "type": "array", + "title": "Sources is a reference to the application's multiple sources used for comparison", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationSource" + } } } }, @@ -5251,6 +6945,9 @@ "init": { "$ref": "#/definitions/v1alpha1Command" }, + "lockRepo": { + "type": "boolean" + }, "name": { "type": "string" } @@ -5273,6 +6970,36 @@ } } }, + "v1alpha1DuckTypeGenerator": { + "description": "DuckType defines a generator to match against clusters registered with ArgoCD.", + "type": "object", + "properties": { + "configMapRef": { + "type": "string", + "title": "ConfigMapRef is a ConfigMap with the duck type definitions needed to retrieve the data\n this includes apiVersion(group/version), kind, matchKey and validation settings\nName is the resource name of the kind, group and version, defined in the ConfigMapRef\nRequeueAfterSeconds is how long before the duckType will be rechecked for a change" + }, + "labelSelector": { + "$ref": "#/definitions/v1LabelSelector" + }, + "name": { + "type": "string" + }, + "requeueAfterSeconds": { + "type": "integer", + "format": "int64" + }, + "template": { + "$ref": "#/definitions/v1alpha1ApplicationSetTemplate" + }, + "values": { + "type": "object", + "title": "Values contains key/value pairs which are passed directly as parameters to the template", + "additionalProperties": { + "type": "string" + } + } + } + }, "v1alpha1ExecProviderConfig": { "type": "object", "title": "ExecProviderConfig is config used to call an external command to perform cluster authentication\nSee: https://godoc.org/k8s.io/client-go/tools/clientcmd/api#ExecConfig", @@ -5305,6 +7032,65 @@ } } }, + "v1alpha1GitDirectoryGeneratorItem": { + "type": "object", + "properties": { + "exclude": { + "type": "boolean" + }, + "path": { + "type": "string" + } + } + }, + "v1alpha1GitFileGeneratorItem": { + "type": "object", + "properties": { + "path": { + "type": "string" + } + } + }, + 
"v1alpha1GitGenerator": { + "type": "object", + "properties": { + "directories": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1GitDirectoryGeneratorItem" + } + }, + "files": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1GitFileGeneratorItem" + } + }, + "pathParamPrefix": { + "type": "string" + }, + "repoURL": { + "type": "string" + }, + "requeueAfterSeconds": { + "type": "integer", + "format": "int64" + }, + "revision": { + "type": "string" + }, + "template": { + "$ref": "#/definitions/v1alpha1ApplicationSetTemplate" + }, + "values": { + "type": "object", + "title": "Values contains key/value pairs which are passed directly as parameters to the template", + "additionalProperties": { + "type": "string" + } + } + } + }, "v1alpha1GnuPGPublicKey": { "type": "object", "title": "GnuPGPublicKey is a representation of a GnuPG public key", @@ -5419,15 +7205,15 @@ "title": "TODO: describe this type", "properties": { "capacity": { - "type": "string", + "type": "integer", "format": "int64" }, "requestedByApp": { - "type": "string", + "type": "integer", "format": "int64" }, "requestedByNeighbors": { - "type": "string", + "type": "integer", "format": "int64" }, "resourceName": { @@ -5465,83 +7251,213 @@ "title": "JWTToken holds the issuedAt and expiresAt values of a token", "properties": { "exp": { - "type": "string", + "type": "integer", "format": "int64" }, "iat": { - "type": "string", + "type": "integer", "format": "int64" }, - "id": { + "id": { + "type": "string" + } + } + }, + "v1alpha1JWTTokens": { + "type": "object", + "title": "JWTTokens represents a list of JWT tokens", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1JWTToken" + } + } + } + }, + "v1alpha1JsonnetVar": { + "type": "object", + "title": "JsonnetVar represents a variable to be passed to jsonnet during manifest generation", + "properties": { + "code": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + 
"value": { + "type": "string" + } + } + }, + "v1alpha1KnownTypeField": { + "type": "object", + "title": "KnownTypeField contains mapping between CRD field and known Kubernetes type.\nThis is mainly used for unit conversion in unknown resources (e.g. 0.1 == 100mi)\nTODO: Describe the members of this type", + "properties": { + "field": { + "type": "string" + }, + "type": { + "type": "string" + } + } + }, + "v1alpha1KustomizeGvk": { + "type": "object", + "properties": { + "group": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "version": { + "type": "string" + } + } + }, + "v1alpha1KustomizeOptions": { + "type": "object", + "title": "KustomizeOptions are options for kustomize to use when building manifests", + "properties": { + "binaryPath": { + "type": "string", + "title": "BinaryPath holds optional path to kustomize binary" + }, + "buildOptions": { + "type": "string", + "title": "BuildOptions is a string of build parameters to use when calling `kustomize build`" + } + } + }, + "v1alpha1KustomizePatch": { + "type": "object", + "properties": { + "options": { + "type": "object", + "additionalProperties": { + "type": "boolean" + } + }, + "patch": { + "type": "string" + }, + "path": { + "type": "string" + }, + "target": { + "$ref": "#/definitions/v1alpha1KustomizeSelector" + } + } + }, + "v1alpha1KustomizeReplica": { + "type": "object", + "properties": { + "count": { + "$ref": "#/definitions/intstrIntOrString" + }, + "name": { + "type": "string", + "title": "Name of Deployment or StatefulSet" + } + } + }, + "v1alpha1KustomizeResId": { + "type": "object", + "properties": { + "gvk": { + "$ref": "#/definitions/v1alpha1KustomizeGvk" + }, + "name": { + "type": "string" + }, + "namespace": { "type": "string" } } }, - "v1alpha1JWTTokens": { + "v1alpha1KustomizeSelector": { "type": "object", - "title": "JWTTokens represents a list of JWT tokens", "properties": { - "items": { - "type": "array", - "items": { - "$ref": "#/definitions/v1alpha1JWTToken" - } + 
"annotationSelector": { + "type": "string" + }, + "labelSelector": { + "type": "string" + }, + "resId": { + "$ref": "#/definitions/v1alpha1KustomizeResId" } } }, - "v1alpha1JsonnetVar": { + "v1alpha1ListGenerator": { "type": "object", - "title": "JsonnetVar represents a variable to be passed to jsonnet during manifest generation", + "title": "ListGenerator include items info", "properties": { - "code": { - "type": "boolean" + "elements": { + "type": "array", + "items": { + "$ref": "#/definitions/v1JSON" + } }, - "name": { + "elementsYaml": { "type": "string" }, - "value": { - "type": "string" + "template": { + "$ref": "#/definitions/v1alpha1ApplicationSetTemplate" } } }, - "v1alpha1KnownTypeField": { + "v1alpha1ManagedNamespaceMetadata": { "type": "object", - "title": "KnownTypeField contains mapping between CRD field and known Kubernetes type.\nThis is mainly used for unit conversion in unknown resources (e.g. 0.1 == 100mi)\nTODO: Describe the members of this type", "properties": { - "field": { - "type": "string" + "annotations": { + "type": "object", + "additionalProperties": { + "type": "string" + } }, - "type": { - "type": "string" + "labels": { + "type": "object", + "additionalProperties": { + "type": "string" + } } } }, - "v1alpha1KsonnetParameter": { + "v1alpha1MatrixGenerator": { + "description": "MatrixGenerator generates the cartesian product of two sets of parameters. 
The parameters are defined by two nested\ngenerators.", "type": "object", - "title": "KsonnetParameter is a ksonnet component parameter", "properties": { - "component": { - "type": "string" - }, - "name": { - "type": "string" + "generators": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationSetNestedGenerator" + } }, - "value": { - "type": "string" + "template": { + "$ref": "#/definitions/v1alpha1ApplicationSetTemplate" } } }, - "v1alpha1KustomizeOptions": { + "v1alpha1MergeGenerator": { + "description": "MergeGenerator merges the output of two or more generators. Where the values for all specified merge keys are equal\nbetween two sets of generated parameters, the parameter sets will be merged with the parameters from the latter\ngenerator taking precedence. Parameter sets with merge keys not present in the base generator's params will be\nignored.\nFor example, if the first generator produced [{a: '1', b: '2'}, {c: '1', d: '1'}] and the second generator produced\n[{'a': 'override'}], the united parameters for merge keys = ['a'] would be\n[{a: 'override', b: '1'}, {c: '1', d: '1'}].\n\nMergeGenerator supports template overriding. 
If a MergeGenerator is one of multiple top-level generators, its\ntemplate will be merged with the top-level generator before the parameters are applied.", "type": "object", - "title": "KustomizeOptions are options for kustomize to use when building manifests", "properties": { - "binaryPath": { - "type": "string", - "title": "BinaryPath holds optional path to kustomize binary" + "generators": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationSetNestedGenerator" + } }, - "buildOptions": { - "type": "string", - "title": "BuildOptions is a string of build parameters to use when calling `kustomize build`" + "mergeKeys": { + "type": "array", + "items": { + "type": "string" + } + }, + "template": { + "$ref": "#/definitions/v1alpha1ApplicationSetTemplate" } } }, @@ -5600,7 +7516,7 @@ "title": "Phase is the current phase of the operation" }, "retryCount": { - "type": "string", + "type": "integer", "format": "int64", "title": "RetryCount contains time of operation retries" }, @@ -5646,16 +7562,79 @@ }, "v1alpha1OverrideIgnoreDiff": { "type": "object", - "title": "TODO: describe this type", + "title": "OverrideIgnoreDiff contains configurations about how fields should be ignored during diffs between\nthe desired state and live state", "properties": { "jSONPointers": { "type": "array", + "title": "JSONPointers is a JSON path list following the format defined in RFC4627 (https://datatracker.ietf.org/doc/html/rfc6902#section-3)", + "items": { + "type": "string" + } + }, + "jqPathExpressions": { + "type": "array", + "title": "JQPathExpressions is a JQ path list that will be evaludated during the diff process", + "items": { + "type": "string" + } + }, + "managedFieldsManagers": { + "type": "array", + "title": "ManagedFieldsManagers is a list of trusted managers. 
Fields mutated by those managers will take precedence over the\ndesired state defined in the SCM and won't be displayed in diffs", "items": { "type": "string" } } } }, + "v1alpha1PluginConfigMapRef": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name of the ConfigMap" + } + } + }, + "v1alpha1PluginGenerator": { + "description": "PluginGenerator defines connection info specific to Plugin.", + "type": "object", + "properties": { + "configMapRef": { + "$ref": "#/definitions/v1alpha1PluginConfigMapRef" + }, + "input": { + "$ref": "#/definitions/v1alpha1PluginInput" + }, + "requeueAfterSeconds": { + "description": "RequeueAfterSeconds determines how long the ApplicationSet controller will wait before reconciling the ApplicationSet again.", + "type": "integer", + "format": "int64" + }, + "template": { + "$ref": "#/definitions/v1alpha1ApplicationSetTemplate" + }, + "values": { + "description": "Values contains key/value pairs which are passed directly as parameters to the template. These values will not be\nsent as parameters to the plugin.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "v1alpha1PluginInput": { + "type": "object", + "properties": { + "parameters": { + "description": "Parameters contains the information to pass to the plugin. It is a map. 
The keys must be strings, and the\nvalues can be any type.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/v1JSON" + } + } + } + }, "v1alpha1ProjectRole": { "type": "object", "title": "ProjectRole represents a role that has access to a project", @@ -5684,10 +7663,227 @@ }, "policies": { "type": "array", - "title": "Policies Stores a list of casbin formated strings that define access policies for the role in the project", + "title": "Policies Stores a list of casbin formatted strings that define access policies for the role in the project", + "items": { + "type": "string" + } + } + } + }, + "v1alpha1PullRequestGenerator": { + "description": "PullRequestGenerator defines a generator that scrapes a PullRequest API to find candidate pull requests.", + "type": "object", + "properties": { + "azuredevops": { + "$ref": "#/definitions/v1alpha1PullRequestGeneratorAzureDevOps" + }, + "bitbucket": { + "$ref": "#/definitions/v1alpha1PullRequestGeneratorBitbucket" + }, + "bitbucketServer": { + "$ref": "#/definitions/v1alpha1PullRequestGeneratorBitbucketServer" + }, + "filters": { + "description": "Filters for which pull requests should be considered.", + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1PullRequestGeneratorFilter" + } + }, + "gitea": { + "$ref": "#/definitions/v1alpha1PullRequestGeneratorGitea" + }, + "github": { + "$ref": "#/definitions/v1alpha1PullRequestGeneratorGithub" + }, + "gitlab": { + "$ref": "#/definitions/v1alpha1PullRequestGeneratorGitLab" + }, + "requeueAfterSeconds": { + "description": "Standard parameters.", + "type": "integer", + "format": "int64" + }, + "template": { + "$ref": "#/definitions/v1alpha1ApplicationSetTemplate" + } + } + }, + "v1alpha1PullRequestGeneratorAzureDevOps": { + "description": "PullRequestGeneratorAzureDevOps defines connection info specific to AzureDevOps.", + "type": "object", + "properties": { + "api": { + "description": "The Azure DevOps API URL to talk to. 
If blank, use https://dev.azure.com/.", + "type": "string" + }, + "labels": { + "type": "array", + "title": "Labels is used to filter the PRs that you want to target", + "items": { + "type": "string" + } + }, + "organization": { + "description": "Azure DevOps org to scan. Required.", + "type": "string" + }, + "project": { + "description": "Azure DevOps project name to scan. Required.", + "type": "string" + }, + "repo": { + "description": "Azure DevOps repo name to scan. Required.", + "type": "string" + }, + "tokenRef": { + "$ref": "#/definitions/v1alpha1SecretRef" + } + } + }, + "v1alpha1PullRequestGeneratorBitbucket": { + "description": "PullRequestGeneratorBitbucket defines connection info specific to Bitbucket.", + "type": "object", + "properties": { + "api": { + "description": "The Bitbucket REST API URL to talk to. If blank, uses https://api.bitbucket.org/2.0.", + "type": "string" + }, + "basicAuth": { + "$ref": "#/definitions/v1alpha1BasicAuthBitbucketServer" + }, + "bearerToken": { + "$ref": "#/definitions/v1alpha1BearerTokenBitbucketCloud" + }, + "owner": { + "description": "Workspace to scan. Required.", + "type": "string" + }, + "repo": { + "description": "Repo name to scan. Required.", + "type": "string" + } + } + }, + "v1alpha1PullRequestGeneratorBitbucketServer": { + "description": "PullRequestGeneratorBitbucketServer defines connection info specific to BitbucketServer.", + "type": "object", + "properties": { + "api": { + "description": "The Bitbucket REST API URL to talk to e.g. https://bitbucket.org/rest Required.", + "type": "string" + }, + "basicAuth": { + "$ref": "#/definitions/v1alpha1BasicAuthBitbucketServer" + }, + "project": { + "description": "Project to scan. Required.", + "type": "string" + }, + "repo": { + "description": "Repo name to scan. 
Required.", + "type": "string" + } + } + }, + "v1alpha1PullRequestGeneratorFilter": { + "description": "PullRequestGeneratorFilter is a single pull request filter.\nIf multiple filter types are set on a single struct, they will be AND'd together. All filters must\npass for a pull request to be included.", + "type": "object", + "properties": { + "branchMatch": { + "type": "string" + }, + "targetBranchMatch": { + "type": "string" + } + } + }, + "v1alpha1PullRequestGeneratorGitLab": { + "description": "PullRequestGeneratorGitLab defines connection info specific to GitLab.", + "type": "object", + "properties": { + "api": { + "description": "The GitLab API URL to talk to. If blank, uses https://gitlab.com/.", + "type": "string" + }, + "insecure": { + "type": "boolean", + "title": "Skips validating the SCM provider's TLS certificate - useful for self-signed certificates.; default: false" + }, + "labels": { + "type": "array", + "title": "Labels is used to filter the MRs that you want to target", + "items": { + "type": "string" + } + }, + "project": { + "description": "GitLab project to scan. Required.", + "type": "string" + }, + "pullRequestState": { + "type": "string", + "title": "PullRequestState is an additional MRs filter to get only those with a certain state. Default: \"\" (all states)" + }, + "tokenRef": { + "$ref": "#/definitions/v1alpha1SecretRef" + } + } + }, + "v1alpha1PullRequestGeneratorGitea": { + "description": "PullRequestGeneratorGitea defines connection info specific to Gitea.", + "type": "object", + "properties": { + "api": { + "type": "string", + "title": "The Gitea API URL to talk to. Required" + }, + "insecure": { + "description": "Allow insecure tls, for self-signed certificates; default: false.", + "type": "boolean" + }, + "owner": { + "description": "Gitea org or user to scan. Required.", + "type": "string" + }, + "repo": { + "description": "Gitea repo name to scan. 
Required.", + "type": "string" + }, + "tokenRef": { + "$ref": "#/definitions/v1alpha1SecretRef" + } + } + }, + "v1alpha1PullRequestGeneratorGithub": { + "description": "PullRequestGenerator defines connection info specific to GitHub.", + "type": "object", + "properties": { + "api": { + "description": "The GitHub API URL to talk to. If blank, use https://api.github.com/.", + "type": "string" + }, + "appSecretName": { + "description": "AppSecretName is a reference to a GitHub App repo-creds secret with permission to access pull requests.", + "type": "string" + }, + "labels": { + "type": "array", + "title": "Labels is used to filter the PRs that you want to target", "items": { "type": "string" } + }, + "owner": { + "description": "GitHub org or user to scan. Required.", + "type": "string" + }, + "repo": { + "description": "GitHub repo name to scan. Required.", + "type": "string" + }, + "tokenRef": { + "$ref": "#/definitions/v1alpha1SecretRef" } } }, @@ -5695,17 +7891,29 @@ "type": "object", "title": "RepoCreds holds the definition for repository credentials", "properties": { + "enableOCI": { + "type": "boolean", + "title": "EnableOCI specifies whether helm-oci support should be enabled for this repo" + }, + "forceHttpBasicAuth": { + "type": "boolean", + "title": "ForceHttpBasicAuth specifies whether Argo CD should attempt to force basic auth for HTTP connections" + }, + "gcpServiceAccountKey": { + "type": "string", + "title": "GCPServiceAccountKey specifies the service account key in JSON format to be used for getting credentials to Google Cloud Source repos" + }, "githubAppEnterpriseBaseUrl": { "type": "string", "title": "GithubAppEnterpriseBaseURL specifies the GitHub API URL for GitHub app authentication. 
If empty will default to https://api.github.com" }, "githubAppID": { - "type": "string", + "type": "integer", "format": "int64", "title": "GithubAppId specifies the Github App ID of the app used to access the repo for GitHub app authentication" }, "githubAppInstallationID": { - "type": "string", + "type": "integer", "format": "int64", "title": "GithubAppInstallationId specifies the ID of the installed GitHub App for GitHub app authentication" }, @@ -5717,6 +7925,10 @@ "type": "string", "title": "Password for authenticating at the repo server" }, + "proxy": { + "type": "string", + "title": "Proxy specifies the HTTP/HTTPS proxy used to access repos at the repo server" + }, "sshPrivateKey": { "type": "string", "title": "SSHPrivateKey contains the private key data for authenticating at the repo server using SSH (only Git repos)" @@ -5729,6 +7941,10 @@ "type": "string", "title": "TLSClientCertKey specifies the TLS client cert key for authenticating at the repo server" }, + "type": { + "description": "Type specifies the type of the repoCreds. Can be either \"git\" or \"helm. \"git\" is assumed if empty or absent.", + "type": "string" + }, "url": { "type": "string", "title": "URL is the URL that this credentials matches to" @@ -5769,17 +7985,25 @@ "type": "boolean", "title": "EnableOCI specifies whether helm-oci support should be enabled for this repo" }, + "forceHttpBasicAuth": { + "type": "boolean", + "title": "ForceHttpBasicAuth specifies whether Argo CD should attempt to force basic auth for HTTP connections" + }, + "gcpServiceAccountKey": { + "type": "string", + "title": "GCPServiceAccountKey specifies the service account key in JSON format to be used for getting credentials to Google Cloud Source repos" + }, "githubAppEnterpriseBaseUrl": { "type": "string", "title": "GithubAppEnterpriseBaseURL specifies the base URL of GitHub Enterprise installation. 
If empty will default to https://api.github.com" }, "githubAppID": { - "type": "string", + "type": "integer", "format": "int64", "title": "GithubAppId specifies the ID of the GitHub app used to access the repo" }, "githubAppInstallationID": { - "type": "string", + "type": "integer", "format": "int64", "title": "GithubAppInstallationId specifies the installation ID of the GitHub App used to access the repo" }, @@ -5807,6 +8031,14 @@ "type": "string", "title": "Password contains the password or PAT used for authenticating at the remote repository" }, + "project": { + "type": "string", + "title": "Reference between project and repository that allow you automatically to be added as item inside SourceRepos project entity" + }, + "proxy": { + "type": "string", + "title": "Proxy specifies the HTTP/HTTPS proxy used to access the repo" + }, "repo": { "type": "string", "title": "Repo contains the URL to the remote repository" @@ -5898,6 +8130,12 @@ "disabled": { "type": "boolean" }, + "displayName": { + "type": "string" + }, + "iconClass": { + "type": "string" + }, "name": { "type": "string" }, @@ -5981,6 +8219,12 @@ "group": { "type": "string" }, + "jqPathExpressions": { + "type": "array", + "items": { + "type": "string" + } + }, "jsonPointers": { "type": "array", "items": { @@ -5990,6 +8234,13 @@ "kind": { "type": "string" }, + "managedFieldsManagers": { + "type": "array", + "title": "ManagedFieldsManagers is a list of trusted managers. 
Fields mutated by those managers will take precedence over the\ndesired state defined in the SCM and won't be displayed in diffs", + "items": { + "type": "string" + } + }, "name": { "type": "string" }, @@ -6066,13 +8317,15 @@ "$ref": "#/definitions/v1alpha1ResourceRef" } }, - "resourceRef": { - "$ref": "#/definitions/v1alpha1ResourceRef" - }, "resourceVersion": { "type": "string" } - } + }, + "allOf": [ + { + "$ref": "#/definitions/v1alpha1ResourceRef" + } + ] }, "v1alpha1ResourceOverride": { "type": "object", @@ -6087,11 +8340,17 @@ "ignoreDifferences": { "$ref": "#/definitions/v1alpha1OverrideIgnoreDiff" }, + "ignoreResourceUpdates": { + "$ref": "#/definitions/v1alpha1OverrideIgnoreDiff" + }, "knownTypeFields": { "type": "array", "items": { "$ref": "#/definitions/v1alpha1KnownTypeField" } + }, + "useOpenLibs": { + "type": "boolean" } } }, @@ -6193,6 +8452,10 @@ "status": { "type": "string" }, + "syncWave": { + "type": "integer", + "format": "int64" + }, "version": { "type": "string" } @@ -6207,7 +8470,7 @@ }, "limit": { "description": "Limit is the maximum number of attempts for retrying a failed sync. 
If set to 0, no retries will be performed.", - "type": "string", + "type": "integer", "format": "int64" } } @@ -6223,7 +8486,7 @@ "$ref": "#/definitions/v1Time" }, "id": { - "type": "string", + "type": "integer", "format": "int64", "title": "ID is an auto incrementing identifier of the RevisionHistory" }, @@ -6231,8 +8494,22 @@ "type": "string", "title": "Revision holds the revision the sync was performed against" }, + "revisions": { + "type": "array", + "title": "Revisions holds the revision of each source in sources field the sync was performed against", + "items": { + "type": "string" + } + }, "source": { "$ref": "#/definitions/v1alpha1ApplicationSource" + }, + "sources": { + "type": "array", + "title": "Sources is a reference to the application sources used for the sync operation", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationSource" + } } } }, @@ -6248,8 +8525,8 @@ "$ref": "#/definitions/v1Time" }, "message": { - "type": "string", - "title": "Message contains the message associated with the revision, most likely the commit message.\nThe message is truncated to the first newline or 64 characters (which ever comes first)" + "description": "Message contains the message associated with the revision, most likely the commit message.", + "type": "string" }, "signatureInfo": { "description": "SignatureInfo contains a hint on the signer if the revision was signed with GPG, and signature verification is enabled.", @@ -6264,6 +8541,282 @@ } } }, + "v1alpha1SCMProviderGenerator": { + "description": "SCMProviderGenerator defines a generator that scrapes a SCMaaS API to find candidate repos.", + "type": "object", + "properties": { + "awsCodeCommit": { + "$ref": "#/definitions/v1alpha1SCMProviderGeneratorAWSCodeCommit" + }, + "azureDevOps": { + "$ref": "#/definitions/v1alpha1SCMProviderGeneratorAzureDevOps" + }, + "bitbucket": { + "$ref": "#/definitions/v1alpha1SCMProviderGeneratorBitbucket" + }, + "bitbucketServer": { + "$ref": 
"#/definitions/v1alpha1SCMProviderGeneratorBitbucketServer" + }, + "cloneProtocol": { + "description": "Which protocol to use for the SCM URL. Default is provider-specific but ssh if possible. Not all providers\nnecessarily support all protocols.", + "type": "string" + }, + "filters": { + "description": "Filters for which repos should be considered.", + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1SCMProviderGeneratorFilter" + } + }, + "gitea": { + "$ref": "#/definitions/v1alpha1SCMProviderGeneratorGitea" + }, + "github": { + "$ref": "#/definitions/v1alpha1SCMProviderGeneratorGithub" + }, + "gitlab": { + "$ref": "#/definitions/v1alpha1SCMProviderGeneratorGitlab" + }, + "requeueAfterSeconds": { + "description": "Standard parameters.", + "type": "integer", + "format": "int64" + }, + "template": { + "$ref": "#/definitions/v1alpha1ApplicationSetTemplate" + }, + "values": { + "type": "object", + "title": "Values contains key/value pairs which are passed directly as parameters to the template", + "additionalProperties": { + "type": "string" + } + } + } + }, + "v1alpha1SCMProviderGeneratorAWSCodeCommit": { + "description": "SCMProviderGeneratorAWSCodeCommit defines connection info specific to AWS CodeCommit.", + "type": "object", + "properties": { + "allBranches": { + "description": "Scan all branches instead of just the default branch.", + "type": "boolean" + }, + "region": { + "description": "Region provides the AWS region to discover repos.\nif not provided, AppSet controller will infer the current region from environment.", + "type": "string" + }, + "role": { + "description": "Role provides the AWS IAM role to assume, for cross-account repo discovery\nif not provided, AppSet controller will use its pod/node identity to discover.", + "type": "string" + }, + "tagFilters": { + "type": "array", + "title": "TagFilters provides the tag filter(s) for repo discovery", + "items": { + "$ref": "#/definitions/v1alpha1TagFilter" + } + } + } + }, + 
"v1alpha1SCMProviderGeneratorAzureDevOps": { + "description": "SCMProviderGeneratorAzureDevOps defines connection info specific to Azure DevOps.", + "type": "object", + "properties": { + "accessTokenRef": { + "$ref": "#/definitions/v1alpha1SecretRef" + }, + "allBranches": { + "description": "Scan all branches instead of just the default branch.", + "type": "boolean" + }, + "api": { + "description": "The URL to Azure DevOps. If blank, use https://dev.azure.com.", + "type": "string" + }, + "organization": { + "description": "Azure Devops organization. Required. E.g. \"my-organization\".", + "type": "string" + }, + "teamProject": { + "description": "Azure Devops team project. Required. E.g. \"my-team\".", + "type": "string" + } + } + }, + "v1alpha1SCMProviderGeneratorBitbucket": { + "description": "SCMProviderGeneratorBitbucket defines connection info specific to Bitbucket Cloud (API version 2).", + "type": "object", + "properties": { + "allBranches": { + "description": "Scan all branches instead of just the main branch.", + "type": "boolean" + }, + "appPasswordRef": { + "$ref": "#/definitions/v1alpha1SecretRef" + }, + "owner": { + "description": "Bitbucket workspace to scan. Required.", + "type": "string" + }, + "user": { + "type": "string", + "title": "Bitbucket user to use when authenticating. Should have a \"member\" role to be able to read all repositories and branches. Required" + } + } + }, + "v1alpha1SCMProviderGeneratorBitbucketServer": { + "description": "SCMProviderGeneratorBitbucketServer defines connection info specific to Bitbucket Server.", + "type": "object", + "properties": { + "allBranches": { + "description": "Scan all branches instead of just the default branch.", + "type": "boolean" + }, + "api": { + "description": "The Bitbucket Server REST API URL to talk to. Required.", + "type": "string" + }, + "basicAuth": { + "$ref": "#/definitions/v1alpha1BasicAuthBitbucketServer" + }, + "project": { + "description": "Project to scan. 
Required.", + "type": "string" + } + } + }, + "v1alpha1SCMProviderGeneratorFilter": { + "description": "SCMProviderGeneratorFilter is a single repository filter.\nIf multiple filter types are set on a single struct, they will be AND'd together. All filters must\npass for a repo to be included.", + "type": "object", + "properties": { + "branchMatch": { + "description": "A regex which must match the branch name.", + "type": "string" + }, + "labelMatch": { + "description": "A regex which must match at least one label.", + "type": "string" + }, + "pathsDoNotExist": { + "description": "An array of paths, all of which must not exist.", + "type": "array", + "items": { + "type": "string" + } + }, + "pathsExist": { + "description": "An array of paths, all of which must exist.", + "type": "array", + "items": { + "type": "string" + } + }, + "repositoryMatch": { + "description": "A regex for repo names.", + "type": "string" + } + } + }, + "v1alpha1SCMProviderGeneratorGitea": { + "description": "SCMProviderGeneratorGitea defines a connection info specific to Gitea.", + "type": "object", + "properties": { + "allBranches": { + "description": "Scan all branches instead of just the default branch.", + "type": "boolean" + }, + "api": { + "description": "The Gitea URL to talk to. For example https://gitea.mydomain.com/.", + "type": "string" + }, + "insecure": { + "type": "boolean", + "title": "Allow self-signed TLS / Certificates; default: false" + }, + "owner": { + "description": "Gitea organization or user to scan. Required.", + "type": "string" + }, + "tokenRef": { + "$ref": "#/definitions/v1alpha1SecretRef" + } + } + }, + "v1alpha1SCMProviderGeneratorGithub": { + "description": "SCMProviderGeneratorGithub defines connection info specific to GitHub.", + "type": "object", + "properties": { + "allBranches": { + "description": "Scan all branches instead of just the default branch.", + "type": "boolean" + }, + "api": { + "description": "The GitHub API URL to talk to. 
If blank, use https://api.github.com/.", + "type": "string" + }, + "appSecretName": { + "description": "AppSecretName is a reference to a GitHub App repo-creds secret.", + "type": "string" + }, + "organization": { + "description": "GitHub org to scan. Required.", + "type": "string" + }, + "tokenRef": { + "$ref": "#/definitions/v1alpha1SecretRef" + } + } + }, + "v1alpha1SCMProviderGeneratorGitlab": { + "description": "SCMProviderGeneratorGitlab defines connection info specific to Gitlab.", + "type": "object", + "properties": { + "allBranches": { + "description": "Scan all branches instead of just the default branch.", + "type": "boolean" + }, + "api": { + "description": "The Gitlab API URL to talk to.", + "type": "string" + }, + "group": { + "description": "Gitlab group to scan. Required. You can use either the project id (recommended) or the full namespaced path.", + "type": "string" + }, + "includeSharedProjects": { + "type": "boolean", + "title": "When recursing through subgroups, also include shared Projects (true) or scan only the subgroups under same path (false). Defaults to \"true\"" + }, + "includeSubgroups": { + "type": "boolean", + "title": "Recurse through subgroups (true) or scan only the base group (false). 
Defaults to \"false\"" + }, + "insecure": { + "type": "boolean", + "title": "Skips validating the SCM provider's TLS certificate - useful for self-signed certificates.; default: false" + }, + "tokenRef": { + "$ref": "#/definitions/v1alpha1SecretRef" + }, + "topic": { + "description": "Filter repos list based on Gitlab Topic.", + "type": "string" + } + } + }, + "v1alpha1SecretRef": { + "description": "Utility struct for a reference to a secret key.", + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "secretName": { + "type": "string" + } + } + }, "v1alpha1SignatureKey": { "type": "object", "title": "SignatureKey is the specification of a key required to verify commit signatures with", @@ -6304,9 +8857,23 @@ "description": "Revision is the revision (Git) or chart version (Helm) which to sync the application to\nIf omitted, will use the revision specified in app spec.", "type": "string" }, + "revisions": { + "description": "Revisions is the list of revision (Git) or chart version (Helm) which to sync each source in sources field for the application to\nIf omitted, will use the revision specified in app spec.", + "type": "array", + "items": { + "type": "string" + } + }, "source": { "$ref": "#/definitions/v1alpha1ApplicationSource" }, + "sources": { + "type": "array", + "title": "Sources overrides the source definition set in the application.\nThis is typically set in a Rollback operation and is nil during a Sync operation", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationSource" + } + }, "syncOptions": { "type": "array", "title": "SyncOptions provide per-sync sync-options, e.g. 
Validate=false", @@ -6341,6 +8908,9 @@ "type": "object", "title": "SyncOperationResult represent result of sync operation", "properties": { + "managedNamespaceMetadata": { + "$ref": "#/definitions/v1alpha1ManagedNamespaceMetadata" + }, "resources": { "type": "array", "title": "Resources contains a list of sync result items for each individual resource in a sync operation", @@ -6352,8 +8922,22 @@ "type": "string", "title": "Revision holds the revision this sync operation was performed to" }, + "revisions": { + "type": "array", + "title": "Revisions holds the revision this sync operation was performed for respective indexed source in sources field", + "items": { + "type": "string" + } + }, "source": { "$ref": "#/definitions/v1alpha1ApplicationSource" + }, + "sources": { + "type": "array", + "title": "Source records the application source information of the sync, used for comparing auto-sync", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationSource" + } } } }, @@ -6364,6 +8948,9 @@ "automated": { "$ref": "#/definitions/v1alpha1SyncPolicyAutomated" }, + "managedNamespaceMetadata": { + "$ref": "#/definitions/v1alpha1ManagedNamespaceMetadata" + }, "retry": { "$ref": "#/definitions/v1alpha1RetryStrategy" }, @@ -6390,7 +8977,7 @@ }, "selfHeal": { "type": "boolean", - "title": "SelfHeal specifes whether to revert resources back to their desired state upon modification in the cluster (default: false)" + "title": "SelfHeal specifies whether to revert resources back to their desired state upon modification in the cluster (default: false)" } } }, @@ -6405,6 +8992,13 @@ "type": "string", "title": "Revision contains information about the revision the comparison has been performed to" }, + "revisions": { + "type": "array", + "title": "Revisions contains information about the revisions of multiple sources the comparison has been performed to", + "items": { + "type": "string" + } + }, "status": { "type": "string", "title": "Status is the sync state of the comparison" @@ 
-6482,6 +9076,10 @@ "schedule": { "type": "string", "title": "Schedule is the time the window will begin, specified in cron format" + }, + "timeZone": { + "type": "string", + "title": "TimeZone of the sync that will be applied to the schedule" } } }, @@ -6514,6 +9112,17 @@ } } }, + "v1alpha1TagFilter": { + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + } + }, "versionVersionMessage": { "type": "object", "title": "VersionMessage represents version of the Argo CD API server", @@ -6524,6 +9133,9 @@ "Compiler": { "type": "string" }, + "ExtraBuildInfo": { + "type": "string" + }, "GitCommit": { "type": "string" }, @@ -6542,9 +9154,6 @@ "JsonnetVersion": { "type": "string" }, - "KsonnetVersion": { - "type": "string" - }, "KubectlVersion": { "type": "string" }, diff --git a/cmd/argocd-application-controller/commands/argocd_application_controller.go b/cmd/argocd-application-controller/commands/argocd_application_controller.go index 2e6c5d24bfbd7..a43174633b02a 100644 --- a/cmd/argocd-application-controller/commands/argocd_application_controller.go +++ b/cmd/argocd-application-controller/commands/argocd_application_controller.go @@ -7,7 +7,7 @@ import ( "time" "github.com/argoproj/pkg/stats" - "github.com/go-redis/redis/v8" + "github.com/redis/go-redis/v9" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "k8s.io/client-go/kubernetes" @@ -23,37 +23,51 @@ import ( cacheutil "github.com/argoproj/argo-cd/v2/util/cache" appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate" "github.com/argoproj/argo-cd/v2/util/cli" + "github.com/argoproj/argo-cd/v2/util/db" "github.com/argoproj/argo-cd/v2/util/env" "github.com/argoproj/argo-cd/v2/util/errors" kubeutil "github.com/argoproj/argo-cd/v2/util/kube" "github.com/argoproj/argo-cd/v2/util/settings" "github.com/argoproj/argo-cd/v2/util/tls" + "github.com/argoproj/argo-cd/v2/util/trace" + kubeerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( // CLIName is the name of the CLI - cliName = "argocd-application-controller" + cliName = common.ApplicationController // Default time in seconds for application resync period defaultAppResyncPeriod = 180 + // Default time in seconds for application hard resync period + defaultAppHardResyncPeriod = 0 ) func NewCommand() *cobra.Command { var ( - clientConfig clientcmd.ClientConfig - appResyncPeriod int64 - repoServerAddress string - repoServerTimeoutSeconds int - selfHealTimeoutSeconds int - statusProcessors int - operationProcessors int - glogLevel int - metricsPort int - metricsCacheExpiration time.Duration - kubectlParallelismLimit int64 - cacheSrc func() (*appstatecache.Cache, error) - redisClient *redis.Client - repoServerPlaintext bool - repoServerStrictTLS bool + clientConfig clientcmd.ClientConfig + appResyncPeriod int64 + appHardResyncPeriod int64 + repoServerAddress string + repoServerTimeoutSeconds int + selfHealTimeoutSeconds int + statusProcessors int + operationProcessors int + glogLevel int + metricsPort int + metricsCacheExpiration time.Duration + metricsAplicationLabels []string + kubectlParallelismLimit int64 + cacheSource func() (*appstatecache.Cache, error) + redisClient *redis.Client + repoServerPlaintext bool + repoServerStrictTLS bool + otlpAddress string + otlpAttrs []string + applicationNamespaces []string + persistResourceHealth bool + shardingAlgorithm string + enableDynamicClusterDistribution bool ) var command = cobra.Command{ Use: cliName, @@ -61,6 +75,19 @@ func NewCommand() *cobra.Command { Long: "ArgoCD application controller is a Kubernetes controller that continuously monitors running applications and compares the current, live state against the desired target state (as specified in the repo). This command runs Application Controller in the foreground. 
It can be configured by following options.", DisableAutoGenTag: true, RunE: func(c *cobra.Command, args []string) error { + ctx, cancel := context.WithCancel(c.Context()) + defer cancel() + + vers := common.GetVersion() + namespace, _, err := clientConfig.Namespace() + errors.CheckError(err) + vers.LogStartupInfo( + "ArgoCD Application Controller", + map[string]any{ + "namespace": namespace, + }, + ) + cli.SetLogFormat(cmdutil.LogFormat) cli.SetLogLevel(cmdutil.LogLevel) cli.SetGLogLevel(glogLevel) @@ -68,14 +95,21 @@ func NewCommand() *cobra.Command { config, err := clientConfig.ClientConfig() errors.CheckError(err) errors.CheckError(v1alpha1.SetK8SConfigDefaults(config)) + config.UserAgent = fmt.Sprintf("%s/%s (%s)", common.DefaultApplicationControllerName, vers.Version, vers.Platform) kubeClient := kubernetes.NewForConfigOrDie(config) appClient := appclientset.NewForConfigOrDie(config) - namespace, _, err := clientConfig.Namespace() - errors.CheckError(err) + hardResyncDuration := time.Duration(appHardResyncPeriod) * time.Second + + var resyncDuration time.Duration + if appResyncPeriod == 0 { + // Re-sync should be disabled if period is 0. 
Set duration to a very long duration + resyncDuration = time.Hour * 24 * 365 * 100 + } else { + resyncDuration = time.Duration(appResyncPeriod) * time.Second + } - resyncDuration := time.Duration(appResyncPeriod) * time.Second tlsConfig := apiclient.TLSConfiguration{ DisableTLS: repoServerPlaintext, StrictValidation: repoServerStrictTLS, @@ -96,17 +130,19 @@ func NewCommand() *cobra.Command { repoClientset := apiclient.NewRepoServerClientset(repoServerAddress, repoServerTimeoutSeconds, tlsConfig) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - cache, err := cacheSrc() + cache, err := cacheSource() errors.CheckError(err) cache.Cache.SetClient(cacheutil.NewTwoLevelClient(cache.Cache.GetClient(), 10*time.Minute)) - settingsMgr := settings.NewSettingsManager(ctx, kubeClient, namespace) + var appController *controller.ApplicationController + + settingsMgr := settings.NewSettingsManager(ctx, kubeClient, namespace, settings.WithRepoOrClusterChangedHandler(func() { + appController.InvalidateProjectsCache() + })) kubectl := kubeutil.NewKubectl() - clusterFilter := getClusterFilter() - appController, err := controller.NewApplicationController( + clusterFilter := getClusterFilter(kubeClient, settingsMgr, shardingAlgorithm, enableDynamicClusterDistribution) + errors.CheckError(err) + appController, err = controller.NewApplicationController( namespace, settingsMgr, kubeClient, @@ -115,20 +151,31 @@ func NewCommand() *cobra.Command { cache, kubectl, resyncDuration, + hardResyncDuration, time.Duration(selfHealTimeoutSeconds)*time.Second, metricsPort, metricsCacheExpiration, + metricsAplicationLabels, kubectlParallelismLimit, - clusterFilter) + persistResourceHealth, + clusterFilter, + applicationNamespaces, + ) errors.CheckError(err) cacheutil.CollectMetrics(redisClient, appController.GetMetricsServer()) - vers := common.GetVersion() - log.Infof("Application Controller (version: %s, built: %s) starting (namespace: %s)", vers.Version, vers.BuildDate, 
namespace) stats.RegisterStackDumper() stats.StartStatsTicker(10 * time.Minute) stats.RegisterHeapDumper("memprofile") + if otlpAddress != "" { + closeTracer, err := trace.InitTracer(ctx, "argocd-controller", otlpAddress, otlpAttrs) + if err != nil { + log.Fatalf("failed to initialize tracing: %v", err) + } + defer closeTracer() + } + go appController.Run(ctx, statusProcessors, operationProcessors) // Wait forever @@ -137,38 +184,83 @@ func NewCommand() *cobra.Command { } clientConfig = cli.AddKubectlFlagsToCmd(&command) - command.Flags().Int64Var(&appResyncPeriod, "app-resync", defaultAppResyncPeriod, "Time period in seconds for application resync.") - command.Flags().StringVar(&repoServerAddress, "repo-server", common.DefaultRepoServerAddr, "Repo server address.") - command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", 60, "Repo server RPC call timeout seconds.") - command.Flags().IntVar(&statusProcessors, "status-processors", 1, "Number of application status processors") - command.Flags().IntVar(&operationProcessors, "operation-processors", 1, "Number of application operation processors") - command.Flags().StringVar(&cmdutil.LogFormat, "logformat", "text", "Set the logging format. One of: text|json") - command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", "info", "Set the logging level. 
One of: debug|info|warn|error") + command.Flags().Int64Var(&appResyncPeriod, "app-resync", int64(env.ParseDurationFromEnv("ARGOCD_RECONCILIATION_TIMEOUT", defaultAppResyncPeriod*time.Second, 0, math.MaxInt64).Seconds()), "Time period in seconds for application resync.") + command.Flags().Int64Var(&appHardResyncPeriod, "app-hard-resync", int64(env.ParseDurationFromEnv("ARGOCD_HARD_RECONCILIATION_TIMEOUT", defaultAppHardResyncPeriod*time.Second, 0, math.MaxInt64).Seconds()), "Time period in seconds for application hard resync.") + command.Flags().StringVar(&repoServerAddress, "repo-server", env.StringFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER", common.DefaultRepoServerAddr), "Repo server address.") + command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_TIMEOUT_SECONDS", 60, 0, math.MaxInt64), "Repo server RPC call timeout seconds.") + command.Flags().IntVar(&statusProcessors, "status-processors", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_STATUS_PROCESSORS", 20, 0, math.MaxInt32), "Number of application status processors") + command.Flags().IntVar(&operationProcessors, "operation-processors", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_OPERATION_PROCESSORS", 10, 0, math.MaxInt32), "Number of application operation processors") + command.Flags().StringVar(&cmdutil.LogFormat, "logformat", env.StringFromEnv("ARGOCD_APPLICATION_CONTROLLER_LOGFORMAT", "text"), "Set the logging format. One of: text|json") + command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", env.StringFromEnv("ARGOCD_APPLICATION_CONTROLLER_LOGLEVEL", "info"), "Set the logging level. 
One of: debug|info|warn|error") command.Flags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level") command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortArgoCDMetrics, "Start metrics server on given port") - command.Flags().DurationVar(&metricsCacheExpiration, "metrics-cache-expiration", 0*time.Second, "Prometheus metrics cache expiration (disabled by default. e.g. 24h0m0s)") - command.Flags().IntVar(&selfHealTimeoutSeconds, "self-heal-timeout-seconds", 5, "Specifies timeout between application self heal attempts") - command.Flags().Int64Var(&kubectlParallelismLimit, "kubectl-parallelism-limit", 20, "Number of allowed concurrent kubectl fork/execs. Any value less the 1 means no limit.") - command.Flags().BoolVar(&repoServerPlaintext, "repo-server-plaintext", false, "Disable TLS on connections to repo server") - command.Flags().BoolVar(&repoServerStrictTLS, "repo-server-strict-tls", false, "Whether to use strict validation of the TLS cert presented by the repo server") - cacheSrc = appstatecache.AddCacheFlagsToCmd(&command, func(client *redis.Client) { + command.Flags().DurationVar(&metricsCacheExpiration, "metrics-cache-expiration", env.ParseDurationFromEnv("ARGOCD_APPLICATION_CONTROLLER_METRICS_CACHE_EXPIRATION", 0*time.Second, 0, math.MaxInt64), "Prometheus metrics cache expiration (disabled by default. e.g. 24h0m0s)") + command.Flags().IntVar(&selfHealTimeoutSeconds, "self-heal-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_TIMEOUT_SECONDS", 5, 0, math.MaxInt32), "Specifies timeout between application self heal attempts") + command.Flags().Int64Var(&kubectlParallelismLimit, "kubectl-parallelism-limit", env.ParseInt64FromEnv("ARGOCD_APPLICATION_CONTROLLER_KUBECTL_PARALLELISM_LIMIT", 20, 0, math.MaxInt64), "Number of allowed concurrent kubectl fork/execs. 
Any value less than 1 means no limit.") + command.Flags().BoolVar(&repoServerPlaintext, "repo-server-plaintext", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_PLAINTEXT", false), "Disable TLS on connections to repo server") + command.Flags().BoolVar(&repoServerStrictTLS, "repo-server-strict-tls", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_STRICT_TLS", false), "Whether to use strict validation of the TLS cert presented by the repo server") + command.Flags().StringSliceVar(&metricsAplicationLabels, "metrics-application-labels", []string{}, "List of Application labels that will be added to the argocd_application_labels metric") + command.Flags().StringVar(&otlpAddress, "otlp-address", env.StringFromEnv("ARGOCD_APPLICATION_CONTROLLER_OTLP_ADDRESS", ""), "OpenTelemetry collector address to send traces to") + command.Flags().StringSliceVar(&otlpAttrs, "otlp-attrs", env.StringsFromEnv("ARGOCD_APPLICATION_CONTROLLER_OTLP_ATTRS", []string{}, ","), "List of OpenTelemetry collector extra attrs when send traces, each attribute is separated by a colon(e.g. key:value)") + command.Flags().StringSliceVar(&applicationNamespaces, "application-namespaces", env.StringsFromEnv("ARGOCD_APPLICATION_NAMESPACES", []string{}, ","), "List of additional namespaces that applications are allowed to be reconciled from") + command.Flags().BoolVar(&persistResourceHealth, "persist-resource-health", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_PERSIST_RESOURCE_HEALTH", true), "Enables storing the managed resources health in the Application CRD") + command.Flags().StringVar(&shardingAlgorithm, "sharding-method", env.StringFromEnv(common.EnvControllerShardingAlgorithm, common.DefaultShardingAlgorithm), "Enables choice of sharding method. 
Supported sharding methods are : [legacy, round-robin] ") + command.Flags().BoolVar(&enableDynamicClusterDistribution, "dynamic-cluster-distribution-enabled", env.ParseBoolFromEnv(common.EnvEnableDynamicClusterDistribution, false), "Enables dynamic cluster distribution.") + cacheSource = appstatecache.AddCacheFlagsToCmd(&command, func(client *redis.Client) { redisClient = client }) return &command } -func getClusterFilter() func(cluster *v1alpha1.Cluster) bool { - replicas := env.ParseNumFromEnv(common.EnvControllerReplicas, 0, 0, math.MaxInt32) +func getClusterFilter(kubeClient *kubernetes.Clientset, settingsMgr *settings.SettingsManager, shardingAlgorithm string, enableDynamicClusterDistribution bool) sharding.ClusterFilterFunction { + + var replicas int shard := env.ParseNumFromEnv(common.EnvControllerShard, -1, -math.MaxInt32, math.MaxInt32) + + applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName) + appControllerDeployment, err := kubeClient.AppsV1().Deployments(settingsMgr.GetNamespace()).Get(context.Background(), applicationControllerName, metav1.GetOptions{}) + + // if the application controller deployment was not found, the Get() call returns an empty Deployment object. 
So, set the variable to nil explicitly + if err != nil && kubeerrors.IsNotFound(err) { + appControllerDeployment = nil + } + + if enableDynamicClusterDistribution && appControllerDeployment != nil && appControllerDeployment.Spec.Replicas != nil { + replicas = int(*appControllerDeployment.Spec.Replicas) + } else { + replicas = env.ParseNumFromEnv(common.EnvControllerReplicas, 0, 0, math.MaxInt32) + } + var clusterFilter func(cluster *v1alpha1.Cluster) bool if replicas > 1 { - if shard < 0 { + // check for shard mapping using configmap if application-controller is a deployment + // else use existing logic to infer shard from pod name if application-controller is a statefulset + if enableDynamicClusterDistribution && appControllerDeployment != nil { + var err error - shard, err = sharding.InferShard() + // retry 3 times if we find a conflict while updating shard mapping configMap. + // If we still see conflicts after the retries, wait for next iteration of heartbeat process. + for i := 0; i <= common.AppControllerHeartbeatUpdateRetryCount; i++ { + shard, err = sharding.GetOrUpdateShardFromConfigMap(kubeClient, settingsMgr, replicas, shard) + if !kubeerrors.IsConflict(err) { + err = fmt.Errorf("unable to get shard due to error updating the sharding config map: %s", err) + break + } + log.Warnf("conflict when getting shard from shard mapping configMap. 
Retrying (%d/3)", i) + } errors.CheckError(err) + } else { + if shard < 0 { + var err error + shard, err = sharding.InferShard() + errors.CheckError(err) + } } log.Infof("Processing clusters from shard %d", shard) - clusterFilter = sharding.GetClusterFilter(replicas, shard) + db := db.NewDB(settingsMgr.GetNamespace(), settingsMgr, kubeClient) + log.Infof("Using filter function: %s", shardingAlgorithm) + distributionFunction := sharding.GetDistributionFunction(db, shardingAlgorithm) + clusterFilter = sharding.GetClusterFilter(db, distributionFunction, shard) } else { log.Info("Processing all cluster shards") } diff --git a/cmd/argocd-applicationset-controller/commands/applicationset_controller.go b/cmd/argocd-applicationset-controller/commands/applicationset_controller.go new file mode 100644 index 0000000000000..9adbc3e64a685 --- /dev/null +++ b/cmd/argocd-applicationset-controller/commands/applicationset_controller.go @@ -0,0 +1,277 @@ +package command + +import ( + "fmt" + "math" + "net/http" + "os" + "time" + + "github.com/argoproj/pkg/stats" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/argoproj/argo-cd/v2/reposerver/apiclient" + "github.com/argoproj/argo-cd/v2/util/tls" + + "github.com/argoproj/argo-cd/v2/applicationset/controllers" + "github.com/argoproj/argo-cd/v2/applicationset/generators" + "github.com/argoproj/argo-cd/v2/applicationset/utils" + "github.com/argoproj/argo-cd/v2/applicationset/webhook" + cmdutil "github.com/argoproj/argo-cd/v2/cmd/util" + "github.com/argoproj/argo-cd/v2/common" + "github.com/argoproj/argo-cd/v2/util/env" + "github.com/argoproj/argo-cd/v2/util/github_app" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + "k8s.io/client-go/tools/clientcmd" + + "github.com/argoproj/argo-cd/v2/applicationset/services" + 
appv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned" + "github.com/argoproj/argo-cd/v2/util/cli" + "github.com/argoproj/argo-cd/v2/util/db" + "github.com/argoproj/argo-cd/v2/util/errors" + argosettings "github.com/argoproj/argo-cd/v2/util/settings" +) + +var gitSubmoduleEnabled = env.ParseBoolFromEnv(common.EnvGitSubmoduleEnabled, true) + +func NewCommand() *cobra.Command { + var ( + clientConfig clientcmd.ClientConfig + metricsAddr string + probeBindAddr string + webhookAddr string + enableLeaderElection bool + applicationSetNamespaces []string + argocdRepoServer string + policy string + enablePolicyOverride bool + debugLog bool + dryRun bool + enableProgressiveSyncs bool + enableNewGitFileGlobbing bool + repoServerPlaintext bool + repoServerStrictTLS bool + repoServerTimeoutSeconds int + maxConcurrentReconciliations int + scmRootCAPath string + allowedScmProviders []string + globalPreservedAnnotations []string + globalPreservedLabels []string + enableScmProviders bool + ) + scheme := runtime.NewScheme() + _ = clientgoscheme.AddToScheme(scheme) + _ = appv1alpha1.AddToScheme(scheme) + var command = cobra.Command{ + Use: "controller", + Short: "Starts Argo CD ApplicationSet controller", + RunE: func(c *cobra.Command, args []string) error { + ctx := c.Context() + + vers := common.GetVersion() + namespace, _, err := clientConfig.Namespace() + applicationSetNamespaces = append(applicationSetNamespaces, namespace) + + errors.CheckError(err) + vers.LogStartupInfo( + "ArgoCD ApplicationSet Controller", + map[string]any{ + "namespace": namespace, + }, + ) + + cli.SetLogFormat(cmdutil.LogFormat) + cli.SetLogLevel(cmdutil.LogLevel) + + restConfig, err := clientConfig.ClientConfig() + errors.CheckError(err) + + restConfig.UserAgent = fmt.Sprintf("argocd-applicationset-controller/%s (%s)", vers.Version, vers.Platform) + + policyObj, exists := utils.Policies[policy] + if !exists 
{ + log.Error("Policy value can be: sync, create-only, create-update, create-delete, default value: sync") + os.Exit(1) + } + + // By default watch all namespace + var watchedNamespace string = "" + + // If the applicationset-namespaces contains only one namespace it corresponds to the current namespace + if len(applicationSetNamespaces) == 1 { + watchedNamespace = (applicationSetNamespaces)[0] + } else if enableScmProviders && len(allowedScmProviders) == 0 { + log.Error("When enabling applicationset in any namespace using applicationset-namespaces, you must either set --enable-scm-providers=false or specify --allowed-scm-providers") + os.Exit(1) + } + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + MetricsBindAddress: metricsAddr, + Namespace: watchedNamespace, + HealthProbeBindAddress: probeBindAddr, + Port: 9443, + LeaderElection: enableLeaderElection, + LeaderElectionID: "58ac56fa.applicationsets.argoproj.io", + DryRunClient: dryRun, + }) + + if err != nil { + log.Error(err, "unable to start manager") + os.Exit(1) + } + dynamicClient, err := dynamic.NewForConfig(mgr.GetConfig()) + errors.CheckError(err) + k8sClient, err := kubernetes.NewForConfig(mgr.GetConfig()) + errors.CheckError(err) + + argoSettingsMgr := argosettings.NewSettingsManager(ctx, k8sClient, namespace) + appSetConfig := appclientset.NewForConfigOrDie(mgr.GetConfig()) + argoCDDB := db.NewDB(namespace, argoSettingsMgr, k8sClient) + + scmAuth := generators.SCMAuthProviders{ + GitHubApps: github_app.NewAuthCredentials(argoCDDB.(db.RepoCredsDB)), + } + + tlsConfig := apiclient.TLSConfiguration{ + DisableTLS: repoServerPlaintext, + StrictValidation: repoServerPlaintext, + } + + if !repoServerPlaintext && repoServerStrictTLS { + pool, err := tls.LoadX509CertPool( + fmt.Sprintf("%s/reposerver/tls/tls.crt", env.StringFromEnv(common.EnvAppConfigPath, common.DefaultAppConfigPath)), + fmt.Sprintf("%s/reposerver/tls/ca.crt", env.StringFromEnv(common.EnvAppConfigPath, 
common.DefaultAppConfigPath)), + ) + errors.CheckError(err) + tlsConfig.Certificates = pool + } + + repoClientset := apiclient.NewRepoServerClientset(argocdRepoServer, repoServerTimeoutSeconds, tlsConfig) + argoCDService, err := services.NewArgoCDService(argoCDDB, gitSubmoduleEnabled, repoClientset, enableNewGitFileGlobbing) + errors.CheckError(err) + + terminalGenerators := map[string]generators.Generator{ + "List": generators.NewListGenerator(), + "Clusters": generators.NewClusterGenerator(mgr.GetClient(), ctx, k8sClient, namespace), + "Git": generators.NewGitGenerator(argoCDService), + "SCMProvider": generators.NewSCMProviderGenerator(mgr.GetClient(), scmAuth, scmRootCAPath, allowedScmProviders, enableScmProviders), + "ClusterDecisionResource": generators.NewDuckTypeGenerator(ctx, dynamicClient, k8sClient, namespace), + "PullRequest": generators.NewPullRequestGenerator(mgr.GetClient(), scmAuth, scmRootCAPath, allowedScmProviders, enableScmProviders), + "Plugin": generators.NewPluginGenerator(mgr.GetClient(), ctx, k8sClient, namespace), + } + + nestedGenerators := map[string]generators.Generator{ + "List": terminalGenerators["List"], + "Clusters": terminalGenerators["Clusters"], + "Git": terminalGenerators["Git"], + "SCMProvider": terminalGenerators["SCMProvider"], + "ClusterDecisionResource": terminalGenerators["ClusterDecisionResource"], + "PullRequest": terminalGenerators["PullRequest"], + "Plugin": terminalGenerators["Plugin"], + "Matrix": generators.NewMatrixGenerator(terminalGenerators), + "Merge": generators.NewMergeGenerator(terminalGenerators), + } + + topLevelGenerators := map[string]generators.Generator{ + "List": terminalGenerators["List"], + "Clusters": terminalGenerators["Clusters"], + "Git": terminalGenerators["Git"], + "SCMProvider": terminalGenerators["SCMProvider"], + "ClusterDecisionResource": terminalGenerators["ClusterDecisionResource"], + "PullRequest": terminalGenerators["PullRequest"], + "Plugin": terminalGenerators["Plugin"], + "Matrix": 
generators.NewMatrixGenerator(nestedGenerators), + "Merge": generators.NewMergeGenerator(nestedGenerators), + } + + // start a webhook server that listens to incoming webhook payloads + webhookHandler, err := webhook.NewWebhookHandler(namespace, argoSettingsMgr, mgr.GetClient(), topLevelGenerators) + if err != nil { + log.Error(err, "failed to create webhook handler") + } + if webhookHandler != nil { + startWebhookServer(webhookHandler, webhookAddr) + } + + if err = (&controllers.ApplicationSetReconciler{ + Generators: topLevelGenerators, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("applicationset-controller"), + Renderer: &utils.Render{}, + Policy: policyObj, + EnablePolicyOverride: enablePolicyOverride, + ArgoAppClientset: appSetConfig, + KubeClientset: k8sClient, + ArgoDB: argoCDDB, + ArgoCDNamespace: namespace, + ApplicationSetNamespaces: applicationSetNamespaces, + EnableProgressiveSyncs: enableProgressiveSyncs, + SCMRootCAPath: scmRootCAPath, + GlobalPreservedAnnotations: globalPreservedAnnotations, + GlobalPreservedLabels: globalPreservedLabels, + Cache: mgr.GetCache(), + }).SetupWithManager(mgr, enableProgressiveSyncs, maxConcurrentReconciliations); err != nil { + log.Error(err, "unable to create controller", "controller", "ApplicationSet") + os.Exit(1) + } + + stats.StartStatsTicker(10 * time.Minute) + log.Info("Starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + log.Error(err, "problem running manager") + os.Exit(1) + } + return nil + }, + } + clientConfig = cli.AddKubectlFlagsToCmd(&command) + command.Flags().StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.") + command.Flags().StringVar(&probeBindAddr, "probe-addr", ":8081", "The address the probe endpoint binds to.") + command.Flags().StringVar(&webhookAddr, "webhook-addr", ":7000", "The address the webhook endpoint binds to.") + command.Flags().BoolVar(&enableLeaderElection, 
"enable-leader-election", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_LEADER_ELECTION", false), + "Enable leader election for controller manager. "+ + "Enabling this will ensure there is only one active controller manager.") + command.Flags().StringSliceVar(&applicationSetNamespaces, "applicationset-namespaces", env.StringsFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_NAMESPACES", []string{}, ","), "Argo CD applicationset namespaces") + command.Flags().StringVar(&argocdRepoServer, "argocd-repo-server", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_REPO_SERVER", common.DefaultRepoServerAddr), "Argo CD repo server address") + command.Flags().StringVar(&policy, "policy", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_POLICY", ""), "Modify how application is synced between the generator and the cluster. Default is 'sync' (create & update & delete), options: 'create-only', 'create-update' (no deletion), 'create-delete' (no update)") + command.Flags().BoolVar(&enablePolicyOverride, "enable-policy-override", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_POLICY_OVERRIDE", policy == ""), "For security reason if 'policy' is set, it is not possible to override it at applicationSet level. 'allow-policy-override' allows user to define their own policy") + command.Flags().BoolVar(&debugLog, "debug", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_DEBUG", false), "Print debug logs. Takes precedence over loglevel") + command.Flags().StringVar(&cmdutil.LogFormat, "logformat", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_LOGFORMAT", "text"), "Set the logging format. One of: text|json") + command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_LOGLEVEL", "info"), "Set the logging level. 
One of: debug|info|warn|error") + command.Flags().StringSliceVar(&allowedScmProviders, "allowed-scm-providers", env.StringsFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ALLOWED_SCM_PROVIDERS", []string{}, ","), "The list of allowed custom SCM provider API URLs. This restriction does not apply to SCM or PR generators which do not accept a custom API URL. (Default: Empty = all)") + command.Flags().BoolVar(&enableScmProviders, "enable-scm-providers", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_SCM_PROVIDERS", true), "Enable retrieving information from SCM providers, used by the SCM and PR generators (Default: true)") + command.Flags().BoolVar(&dryRun, "dry-run", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_DRY_RUN", false), "Enable dry run mode") + command.Flags().BoolVar(&enableProgressiveSyncs, "enable-progressive-syncs", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS", false), "Enable use of the experimental progressive syncs feature.") + command.Flags().BoolVar(&enableNewGitFileGlobbing, "enable-new-git-file-globbing", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_NEW_GIT_FILE_GLOBBING", false), "Enable new globbing in Git files generator.") + command.Flags().BoolVar(&repoServerPlaintext, "repo-server-plaintext", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_REPO_SERVER_PLAINTEXT", false), "Disable TLS on connections to repo server") + command.Flags().BoolVar(&repoServerStrictTLS, "repo-server-strict-tls", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_REPO_SERVER_STRICT_TLS", false), "Whether to use strict validation of the TLS cert presented by the repo server") + command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_REPO_SERVER_TIMEOUT_SECONDS", 60, 0, math.MaxInt64), "Repo server RPC call timeout seconds.") + command.Flags().IntVar(&maxConcurrentReconciliations, "concurrent-reconciliations", 
env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_CONCURRENT_RECONCILIATIONS", 10, 1, 100), "Max concurrent reconciliations limit for the controller") + command.Flags().StringVar(&scmRootCAPath, "scm-root-ca-path", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_SCM_ROOT_CA_PATH", ""), "Provide Root CA Path for self-signed TLS Certificates") + command.Flags().StringSliceVar(&globalPreservedAnnotations, "preserved-annotations", env.StringsFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_ANNOTATIONS", []string{}, ","), "Sets global preserved field values for annotations") + command.Flags().StringSliceVar(&globalPreservedLabels, "preserved-labels", env.StringsFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_LABELS", []string{}, ","), "Sets global preserved field values for labels") + return &command +} + +func startWebhookServer(webhookHandler *webhook.WebhookHandler, webhookAddr string) { + mux := http.NewServeMux() + mux.HandleFunc("/api/webhook", webhookHandler.Handler) + go func() { + log.Info("Starting webhook server") + err := http.ListenAndServe(webhookAddr, mux) + if err != nil { + log.Error(err, "failed to start webhook server") + os.Exit(1) + } + }() +} diff --git a/cmd/argocd-cmp-server/commands/argocd_cmp_server.go b/cmd/argocd-cmp-server/commands/argocd_cmp_server.go new file mode 100644 index 0000000000000..62f45b24aedb5 --- /dev/null +++ b/cmd/argocd-cmp-server/commands/argocd_cmp_server.go @@ -0,0 +1,88 @@ +package commands + +import ( + "time" + + "github.com/argoproj/pkg/stats" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + cmdutil "github.com/argoproj/argo-cd/v2/cmd/util" + "github.com/argoproj/argo-cd/v2/cmpserver" + "github.com/argoproj/argo-cd/v2/cmpserver/plugin" + "github.com/argoproj/argo-cd/v2/common" + "github.com/argoproj/argo-cd/v2/util/cli" + "github.com/argoproj/argo-cd/v2/util/env" + "github.com/argoproj/argo-cd/v2/util/errors" + traceutil "github.com/argoproj/argo-cd/v2/util/trace" +) + 
+const ( + // CLIName is the name of the CLI + cliName = "argocd-cmp-server" +) + +func NewCommand() *cobra.Command { + var ( + configFilePath string + otlpAddress string + otlpAttrs []string + ) + var command = cobra.Command{ + Use: cliName, + Short: "Run ArgoCD ConfigManagementPlugin Server", + Long: "ArgoCD ConfigManagementPlugin Server is an internal service which runs as sidecar container in reposerver deployment. The following configuration options are available:", + DisableAutoGenTag: true, + RunE: func(c *cobra.Command, args []string) error { + ctx := c.Context() + + vers := common.GetVersion() + vers.LogStartupInfo("ArgoCD ConfigManagementPlugin Server", nil) + + cli.SetLogFormat(cmdutil.LogFormat) + cli.SetLogLevel(cmdutil.LogLevel) + + config, err := plugin.ReadPluginConfig(configFilePath) + errors.CheckError(err) + + if !config.Spec.Discover.IsDefined() { + name := config.Metadata.Name + if config.Spec.Version != "" { + name = name + "-" + config.Spec.Version + } + log.Infof("No discovery configuration is defined for plugin %s. To use this plugin, specify %q in the Application's spec.source.plugin.name field.", config.Metadata.Name, name) + } + + if otlpAddress != "" { + var closer func() + var err error + closer, err = traceutil.InitTracer(ctx, "argocd-cmp-server", otlpAddress, otlpAttrs) + if err != nil { + log.Fatalf("failed to initialize tracing: %v", err) + } + defer closer() + } + + server, err := cmpserver.NewServer(plugin.CMPServerInitConstants{ + PluginConfig: *config, + }) + errors.CheckError(err) + + // register dumper + stats.RegisterStackDumper() + stats.StartStatsTicker(10 * time.Minute) + stats.RegisterHeapDumper("memprofile") + + // run argocd-cmp-server server + server.Run() + return nil + }, + } + + command.Flags().StringVar(&cmdutil.LogFormat, "logformat", "text", "Set the logging format. One of: text|json") + command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", "info", "Set the logging level. 
One of: debug|info|warn|error") + command.Flags().StringVar(&configFilePath, "config-dir-path", common.DefaultPluginConfigFilePath, "Config management plugin configuration file location, Default is '/home/argocd/cmp-server/config/'") + command.Flags().StringVar(&otlpAddress, "otlp-address", env.StringFromEnv("ARGOCD_CMP_SERVER_OTLP_ADDRESS", ""), "OpenTelemetry collector address to send traces to") + command.Flags().StringSliceVar(&otlpAttrs, "otlp-attrs", env.StringsFromEnv("ARGOCD_CMP_SERVER_OTLP_ATTRS", []string{}, ","), "List of OpenTelemetry collector extra attrs when send traces, each attribute is separated by a colon(e.g. key:value)") + return &command +} diff --git a/cmd/argocd-dex/commands/argocd_dex.go b/cmd/argocd-dex/commands/argocd_dex.go index f52b59e8b54e3..2b070ec895e41 100644 --- a/cmd/argocd-dex/commands/argocd_dex.go +++ b/cmd/argocd-dex/commands/argocd_dex.go @@ -1,24 +1,26 @@ package commands import ( - "context" "fmt" - "io/ioutil" "os" "os/exec" "syscall" - "github.com/ghodss/yaml" + "github.com/argoproj/argo-cd/v2/common" + log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" + "sigs.k8s.io/yaml" cmdutil "github.com/argoproj/argo-cd/v2/cmd/util" "github.com/argoproj/argo-cd/v2/util/cli" "github.com/argoproj/argo-cd/v2/util/dex" + "github.com/argoproj/argo-cd/v2/util/env" "github.com/argoproj/argo-cd/v2/util/errors" "github.com/argoproj/argo-cd/v2/util/settings" + "github.com/argoproj/argo-cd/v2/util/tls" ) const ( @@ -38,28 +40,56 @@ func NewCommand() *cobra.Command { command.AddCommand(NewRunDexCommand()) command.AddCommand(NewGenDexConfigCommand()) - - command.Flags().StringVar(&cmdutil.LogFormat, "logformat", "text", "Set the logging format. One of: text|json") - command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", "info", "Set the logging level. 
One of: debug|info|warn|error") return command } func NewRunDexCommand() *cobra.Command { var ( clientConfig clientcmd.ClientConfig + disableTLS bool ) var command = cobra.Command{ Use: "rundex", Short: "Runs dex generating a config using settings from the Argo CD configmap and secret", RunE: func(c *cobra.Command, args []string) error { - _, err := exec.LookPath("dex") + ctx := c.Context() + + vers := common.GetVersion() + namespace, _, err := clientConfig.Namespace() errors.CheckError(err) - config, err := clientConfig.ClientConfig() + vers.LogStartupInfo( + "ArgoCD Dex Server", + map[string]any{ + "namespace": namespace, + }, + ) + + cli.SetLogFormat(cmdutil.LogFormat) + cli.SetLogLevel(cmdutil.LogLevel) + _, err = exec.LookPath("dex") errors.CheckError(err) - namespace, _, err := clientConfig.Namespace() + config, err := clientConfig.ClientConfig() errors.CheckError(err) + config.UserAgent = fmt.Sprintf("argocd-dex/%s (%s)", vers.Version, vers.Platform) kubeClientset := kubernetes.NewForConfigOrDie(config) - settingsMgr := settings.NewSettingsManager(context.Background(), kubeClientset, namespace) + + if !disableTLS { + config, err := tls.CreateServerTLSConfig("/tls/tls.crt", "/tls/tls.key", []string{"localhost", "dexserver"}) + if err != nil { + log.Fatalf("could not create TLS config: %v", err) + } + certPem, keyPem := tls.EncodeX509KeyPair(config.Certificates[0]) + err = os.WriteFile("/tmp/tls.crt", certPem, 0600) + if err != nil { + log.Fatalf("could not write TLS certificate: %v", err) + } + err = os.WriteFile("/tmp/tls.key", keyPem, 0600) + if err != nil { + log.Fatalf("could not write TLS key: %v", err) + } + } + + settingsMgr := settings.NewSettingsManager(ctx, kubeClientset, namespace) prevSettings, err := settingsMgr.GetSettings() errors.CheckError(err) updateCh := make(chan *settings.ArgoCDSettings, 1) @@ -67,12 +97,12 @@ func NewRunDexCommand() *cobra.Command { for { var cmd *exec.Cmd - dexCfgBytes, err := dex.GenerateDexConfigYAML(prevSettings) + 
dexCfgBytes, err := dex.GenerateDexConfigYAML(prevSettings, disableTLS) errors.CheckError(err) if len(dexCfgBytes) == 0 { log.Infof("dex is not configured") } else { - err = ioutil.WriteFile("/tmp/dex.yaml", dexCfgBytes, 0644) + err = os.WriteFile("/tmp/dex.yaml", dexCfgBytes, 0644) errors.CheckError(err) log.Debug(redactor(string(dexCfgBytes))) cmd = exec.Command("dex", "serve", "/tmp/dex.yaml") @@ -85,7 +115,7 @@ func NewRunDexCommand() *cobra.Command { // loop until the dex config changes for { newSettings := <-updateCh - newDexCfgBytes, err := dex.GenerateDexConfigYAML(newSettings) + newDexCfgBytes, err := dex.GenerateDexConfigYAML(newSettings, disableTLS) errors.CheckError(err) if string(newDexCfgBytes) != string(dexCfgBytes) { prevSettings = newSettings @@ -106,6 +136,9 @@ func NewRunDexCommand() *cobra.Command { } clientConfig = cli.AddKubectlFlagsToCmd(&command) + command.Flags().StringVar(&cmdutil.LogFormat, "logformat", "text", "Set the logging format. One of: text|json") + command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", "info", "Set the logging level. 
One of: debug|info|warn|error") + command.Flags().BoolVar(&disableTLS, "disable-tls", env.ParseBoolFromEnv("ARGOCD_DEX_SERVER_DISABLE_TLS", false), "Disable TLS on the HTTP endpoint") return &command } @@ -113,20 +146,26 @@ func NewGenDexConfigCommand() *cobra.Command { var ( clientConfig clientcmd.ClientConfig out string + disableTLS bool ) var command = cobra.Command{ Use: "gendexcfg", Short: "Generates a dex config from Argo CD settings", RunE: func(c *cobra.Command, args []string) error { + ctx := c.Context() + + cli.SetLogFormat(cmdutil.LogFormat) + cli.SetLogLevel(cmdutil.LogLevel) + config, err := clientConfig.ClientConfig() errors.CheckError(err) namespace, _, err := clientConfig.Namespace() errors.CheckError(err) kubeClientset := kubernetes.NewForConfigOrDie(config) - settingsMgr := settings.NewSettingsManager(context.Background(), kubeClientset, namespace) + settingsMgr := settings.NewSettingsManager(ctx, kubeClientset, namespace) settings, err := settingsMgr.GetSettings() errors.CheckError(err) - dexCfgBytes, err := dex.GenerateDexConfigYAML(settings) + dexCfgBytes, err := dex.GenerateDexConfigYAML(settings, disableTLS) errors.CheckError(err) if len(dexCfgBytes) == 0 { log.Infof("dex is not configured") @@ -157,7 +196,7 @@ func NewGenDexConfigCommand() *cobra.Command { errors.CheckError(err) fmt.Print(string(maskedDexCfgBytes)) } else { - err = ioutil.WriteFile(out, dexCfgBytes, 0644) + err = os.WriteFile(out, dexCfgBytes, 0644) errors.CheckError(err) } return nil @@ -165,7 +204,10 @@ func NewGenDexConfigCommand() *cobra.Command { } clientConfig = cli.AddKubectlFlagsToCmd(&command) + command.Flags().StringVar(&cmdutil.LogFormat, "logformat", "text", "Set the logging format. One of: text|json") + command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", "info", "Set the logging level. 
One of: debug|info|warn|error") command.Flags().StringVarP(&out, "out", "o", "", "Output to the specified file instead of stdout") + command.Flags().BoolVar(&disableTLS, "disable-tls", env.ParseBoolFromEnv("ARGOCD_DEX_SERVER_DISABLE_TLS", false), "Disable TLS on the HTTP endpoint") return &command } diff --git a/cmd/argocd-git-ask-pass/commands/argocd_git_ask_pass.go b/cmd/argocd-git-ask-pass/commands/argocd_git_ask_pass.go new file mode 100644 index 0000000000000..8f457527b78b2 --- /dev/null +++ b/cmd/argocd-git-ask-pass/commands/argocd_git_ask_pass.go @@ -0,0 +1,59 @@ +package commands + +import ( + "fmt" + "os" + "strings" + + "github.com/argoproj/argo-cd/v2/util/git" + + "github.com/spf13/cobra" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + "github.com/argoproj/argo-cd/v2/reposerver/askpass" + "github.com/argoproj/argo-cd/v2/util/errors" + grpc_util "github.com/argoproj/argo-cd/v2/util/grpc" + "github.com/argoproj/argo-cd/v2/util/io" +) + +const ( + // cliName is the name of the CLI + cliName = "argocd-git-ask-pass" +) + +func NewCommand() *cobra.Command { + var command = cobra.Command{ + Use: cliName, + Short: "Argo CD git credential helper", + DisableAutoGenTag: true, + Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + + if len(os.Args) != 2 { + errors.CheckError(fmt.Errorf("expected 1 argument, got %d", len(os.Args)-1)) + } + nonce := os.Getenv(git.ASKPASS_NONCE_ENV) + if nonce == "" { + errors.CheckError(fmt.Errorf("%s is not set", git.ASKPASS_NONCE_ENV)) + } + conn, err := grpc_util.BlockingDial(ctx, "unix", askpass.SocketPath, nil, grpc.WithTransportCredentials(insecure.NewCredentials())) + errors.CheckError(err) + defer io.Close(conn) + client := askpass.NewAskPassServiceClient(conn) + + creds, err := client.GetCredentials(ctx, &askpass.CredentialsRequest{Nonce: nonce}) + errors.CheckError(err) + switch { + case strings.HasPrefix(os.Args[1], "Username"): + fmt.Println(creds.Username) + case 
strings.HasPrefix(os.Args[1], "Password"): + fmt.Println(creds.Password) + default: + errors.CheckError(fmt.Errorf("unknown credential type '%s'", os.Args[1])) + } + }, + } + + return &command +} diff --git a/cmd/argocd-k8s-auth/commands/argocd_k8s_auth.go b/cmd/argocd-k8s-auth/commands/argocd_k8s_auth.go new file mode 100644 index 0000000000000..ce0f3ee3a2f49 --- /dev/null +++ b/cmd/argocd-k8s-auth/commands/argocd_k8s_auth.go @@ -0,0 +1,26 @@ +package commands + +import ( + "github.com/spf13/cobra" +) + +const ( + cliName = "argocd-k8s-auth" +) + +func NewCommand() *cobra.Command { + var command = &cobra.Command{ + Use: cliName, + Short: "argocd-k8s-auth a set of commands to generate k8s auth token", + DisableAutoGenTag: true, + Run: func(c *cobra.Command, args []string) { + c.HelpFunc()(c, args) + }, + } + + command.AddCommand(newAWSCommand()) + command.AddCommand(newGCPCommand()) + command.AddCommand(newAzureCommand()) + + return command +} diff --git a/cmd/argocd-k8s-auth/commands/aws.go b/cmd/argocd-k8s-auth/commands/aws.go new file mode 100644 index 0000000000000..79a118d2653a3 --- /dev/null +++ b/cmd/argocd-k8s-auth/commands/aws.go @@ -0,0 +1,110 @@ +package commands + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/sts" + "github.com/spf13/cobra" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientauthv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" + + "github.com/argoproj/argo-cd/v2/util/errors" +) + +const ( + clusterIDHeader = "x-k8s-aws-id" + // The sts GetCallerIdentity request is valid for 15 minutes regardless of this parameters value after it has been + // signed, but we set this unused parameter to 60 for legacy reasons (we check for a value between 0 and 60 on the + // server side in 0.3.0 or earlier). IT IS IGNORED. 
If we can get STS to support x-amz-expires, then we should + // set this parameter to the actual expiration, and make it configurable. + requestPresignParam = 60 + // The actual token expiration (presigned STS urls are valid for 15 minutes after timestamp in x-amz-date). + presignedURLExpiration = 15 * time.Minute + v1Prefix = "k8s-aws-v1." +) + +// newAWSCommand returns a new instance of an aws command that generates k8s auth token +// implementation is "inspired" by https://github.com/kubernetes-sigs/aws-iam-authenticator/blob/e61f537662b64092ed83cb76e600e023f627f628/pkg/token/token.go#L316 +func newAWSCommand() *cobra.Command { + var ( + clusterName string + roleARN string + ) + var command = &cobra.Command{ + Use: "aws", + Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + + presignedURLString, err := getSignedRequestWithRetry(ctx, time.Minute, 5*time.Second, clusterName, roleARN, getSignedRequest) + errors.CheckError(err) + token := v1Prefix + base64.RawURLEncoding.EncodeToString([]byte(presignedURLString)) + // Set token expiration to 1 minute before the presigned URL expires for some cushion + tokenExpiration := time.Now().Local().Add(presignedURLExpiration - 1*time.Minute) + _, _ = fmt.Fprint(os.Stdout, formatJSON(token, tokenExpiration)) + }, + } + command.Flags().StringVar(&clusterName, "cluster-name", "", "AWS Cluster name") + command.Flags().StringVar(&roleARN, "role-arn", "", "AWS Role ARN") + return command +} + +type getSignedRequestFunc func(clusterName, roleARN string) (string, error) + +func getSignedRequestWithRetry(ctx context.Context, timeout, interval time.Duration, clusterName, roleARN string, fn getSignedRequestFunc) (string, error) { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + for { + signed, err := fn(clusterName, roleARN) + if err == nil { + return signed, nil + } + select { + case <-ctx.Done(): + return "", fmt.Errorf("timeout while trying to get signed aws request: last error: %s", err) + case 
<-time.After(interval): + } + } +} + +func getSignedRequest(clusterName, roleARN string) (string, error) { + sess, err := session.NewSession() + if err != nil { + return "", fmt.Errorf("error creating new AWS session: %s", err) + } + stsAPI := sts.New(sess) + if roleARN != "" { + creds := stscreds.NewCredentials(sess, roleARN) + stsAPI = sts.New(sess, &aws.Config{Credentials: creds}) + } + request, _ := stsAPI.GetCallerIdentityRequest(&sts.GetCallerIdentityInput{}) + request.HTTPRequest.Header.Add(clusterIDHeader, clusterName) + signed, err := request.Presign(requestPresignParam) + if err != nil { + return "", fmt.Errorf("error presigning AWS request: %s", err) + } + return signed, nil +} + +func formatJSON(token string, expiration time.Time) string { + expirationTimestamp := metav1.NewTime(expiration) + execInput := &clientauthv1beta1.ExecCredential{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "client.authentication.k8s.io/v1beta1", + Kind: "ExecCredential", + }, + Status: &clientauthv1beta1.ExecCredentialStatus{ + ExpirationTimestamp: &expirationTimestamp, + Token: token, + }, + } + enc, _ := json.Marshal(execInput) + return string(enc) +} diff --git a/cmd/argocd-k8s-auth/commands/aws_test.go b/cmd/argocd-k8s-auth/commands/aws_test.go new file mode 100644 index 0000000000000..c22449eba42be --- /dev/null +++ b/cmd/argocd-k8s-auth/commands/aws_test.go @@ -0,0 +1,76 @@ +package commands + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestGetSignedRequestWithRetry(t *testing.T) { + ctx := context.Background() + + t.Run("will return signed request on first attempt", func(t *testing.T) { + // given + t.Parallel() + mock := &signedRequestMock{ + returnFunc: func(m *signedRequestMock) (string, error) { + return "token", nil + }, + } + + // when + signed, err := getSignedRequestWithRetry(ctx, time.Second, time.Millisecond, "cluster-name", "", mock.getSignedRequestMock) + + // then + assert.NoError(t, err) + 
assert.Equal(t, "token", signed) + }) + t.Run("will return signed request on third attempt", func(t *testing.T) { + // given + t.Parallel() + mock := &signedRequestMock{ + returnFunc: func(m *signedRequestMock) (string, error) { + if m.getSignedRequestCalls < 3 { + return "", fmt.Errorf("some error") + } + return "token", nil + }, + } + + // when + signed, err := getSignedRequestWithRetry(ctx, time.Second, time.Millisecond, "cluster-name", "", mock.getSignedRequestMock) + + // then + assert.NoError(t, err) + assert.Equal(t, "token", signed) + }) + t.Run("will return error on timeout", func(t *testing.T) { + // given + t.Parallel() + mock := &signedRequestMock{ + returnFunc: func(m *signedRequestMock) (string, error) { + return "", fmt.Errorf("some error") + }, + } + + // when + signed, err := getSignedRequestWithRetry(ctx, time.Second, time.Millisecond, "cluster-name", "", mock.getSignedRequestMock) + + // then + assert.Error(t, err) + assert.Equal(t, "", signed) + }) +} + +type signedRequestMock struct { + getSignedRequestCalls int + returnFunc func(m *signedRequestMock) (string, error) +} + +func (m *signedRequestMock) getSignedRequestMock(clusterName, roleARN string) (string, error) { + m.getSignedRequestCalls++ + return m.returnFunc(m) +} diff --git a/cmd/argocd-k8s-auth/commands/azure.go b/cmd/argocd-k8s-auth/commands/azure.go new file mode 100644 index 0000000000000..bc45bbacef48b --- /dev/null +++ b/cmd/argocd-k8s-auth/commands/azure.go @@ -0,0 +1,43 @@ +package commands + +import ( + "os" + + "github.com/Azure/kubelogin/pkg/token" + "github.com/spf13/cobra" + + "github.com/argoproj/argo-cd/v2/util/errors" +) + +var ( + envServerApplicationID = "AAD_SERVER_APPLICATION_ID" + envEnvironmentName = "AAD_ENVIRONMENT_NAME" +) + +const ( + DEFAULT_AAD_SERVER_APPLICATION_ID = "6dae42f8-4368-4678-94ff-3960e28e3630" +) + +func newAzureCommand() *cobra.Command { + o := token.NewOptions() + //we'll use default of WorkloadIdentityLogin for the login flow + o.LoginMethod 
= token.WorkloadIdentityLogin + o.ServerID = DEFAULT_AAD_SERVER_APPLICATION_ID + var command = &cobra.Command{ + Use: "azure", + Run: func(c *cobra.Command, args []string) { + o.UpdateFromEnv() + if v, ok := os.LookupEnv(envServerApplicationID); ok { + o.ServerID = v + } + if v, ok := os.LookupEnv(envEnvironmentName); ok { + o.Environment = v + } + plugin, err := token.New(&o) + errors.CheckError(err) + err = plugin.Do() + errors.CheckError(err) + }, + } + return command +} diff --git a/cmd/argocd-k8s-auth/commands/gcp.go b/cmd/argocd-k8s-auth/commands/gcp.go new file mode 100644 index 0000000000000..65d9c9ffe3325 --- /dev/null +++ b/cmd/argocd-k8s-auth/commands/gcp.go @@ -0,0 +1,41 @@ +package commands + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + "golang.org/x/oauth2/google" + + "github.com/argoproj/argo-cd/v2/util/errors" +) + +var ( + // defaultGCPScopes: + // - cloud-platform is the base scope to authenticate to GCP. + // - userinfo.email is used to authenticate to GKE APIs with gserviceaccount + // email instead of numeric uniqueID. + // https://github.com/kubernetes/client-go/blob/be758edd136e61a1bffadf1c0235fceb8aee8e9e/plugin/pkg/client/auth/gcp/gcp.go#L59 + defaultGCPScopes = []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/userinfo.email", + } +) + +func newGCPCommand() *cobra.Command { + var command = &cobra.Command{ + Use: "gcp", + Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + + // Preferred way to retrieve GCP credentials + // https://github.com/golang/oauth2/blob/9780585627b5122c8cc9c6a378ac9861507e7551/google/doc.go#L54-L68 + cred, err := google.FindDefaultCredentials(ctx, defaultGCPScopes...) 
+ errors.CheckError(err) + token, err := cred.TokenSource.Token() + errors.CheckError(err) + _, _ = fmt.Fprint(os.Stdout, formatJSON(token.AccessToken, token.Expiry)) + }, + } + return command +} diff --git a/cmd/argocd-notification/commands/controller.go b/cmd/argocd-notification/commands/controller.go new file mode 100644 index 0000000000000..abd9a2e8475f0 --- /dev/null +++ b/cmd/argocd-notification/commands/controller.go @@ -0,0 +1,167 @@ +package commands + +import ( + "fmt" + "net/http" + "os" + "strings" + + "github.com/argoproj/argo-cd/v2/common" + "github.com/argoproj/argo-cd/v2/reposerver/apiclient" + + "github.com/argoproj/argo-cd/v2/util/env" + "github.com/argoproj/argo-cd/v2/util/errors" + service "github.com/argoproj/argo-cd/v2/util/notification/argocd" + "github.com/argoproj/argo-cd/v2/util/tls" + + notificationscontroller "github.com/argoproj/argo-cd/v2/notification_controller/controller" + + "github.com/argoproj/notifications-engine/pkg/controller" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + "k8s.io/client-go/tools/clientcmd" +) + +const ( + defaultMetricsPort = 9001 +) + +func addK8SFlagsToCmd(cmd *cobra.Command) clientcmd.ClientConfig { + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig + overrides := clientcmd.ConfigOverrides{} + kflags := clientcmd.RecommendedConfigOverrideFlags("") + cmd.PersistentFlags().StringVar(&loadingRules.ExplicitPath, "kubeconfig", "", "Path to a kube config. 
Only required if out-of-cluster") + clientcmd.BindOverrideFlags(&overrides, cmd.PersistentFlags(), kflags) + return clientcmd.NewInteractiveDeferredLoadingClientConfig(loadingRules, &overrides, os.Stdin) +} + +func NewCommand() *cobra.Command { + var ( + clientConfig clientcmd.ClientConfig + processorsCount int + namespace string + appLabelSelector string + logLevel string + logFormat string + metricsPort int + argocdRepoServer string + argocdRepoServerPlaintext bool + argocdRepoServerStrictTLS bool + configMapName string + secretName string + applicationNamespaces []string + ) + var command = cobra.Command{ + Use: "controller", + Short: "Starts Argo CD Notifications controller", + RunE: func(c *cobra.Command, args []string) error { + ctx := c.Context() + + vers := common.GetVersion() + namespace, _, err := clientConfig.Namespace() + errors.CheckError(err) + vers.LogStartupInfo( + "ArgoCD Notifications Controller", + map[string]any{ + "namespace": namespace, + }, + ) + + restConfig, err := clientConfig.ClientConfig() + if err != nil { + return fmt.Errorf("failed to create REST client config: %w", err) + } + restConfig.UserAgent = fmt.Sprintf("argocd-notifications-controller/%s (%s)", vers.Version, vers.Platform) + dynamicClient, err := dynamic.NewForConfig(restConfig) + if err != nil { + return fmt.Errorf("failed to create dynamic client: %w", err) + } + k8sClient, err := kubernetes.NewForConfig(restConfig) + if err != nil { + return fmt.Errorf("failed to create Kubernetes client: %w", err) + } + if namespace == "" { + namespace, _, err = clientConfig.Namespace() + if err != nil { + return fmt.Errorf("failed to determine controller's host namespace: %w", err) + } + } + level, err := log.ParseLevel(logLevel) + if err != nil { + return fmt.Errorf("failed to parse log level: %w", err) + } + log.SetLevel(level) + + switch strings.ToLower(logFormat) { + case "json": + log.SetFormatter(&log.JSONFormatter{}) + case "text": + if os.Getenv("FORCE_LOG_COLORS") == "1" { + 
log.SetFormatter(&log.TextFormatter{ForceColors: true}) + } + default: + return fmt.Errorf("unknown log format '%s'", logFormat) + } + + tlsConfig := apiclient.TLSConfiguration{ + DisableTLS: argocdRepoServerPlaintext, + StrictValidation: argocdRepoServerStrictTLS, + } + if !tlsConfig.DisableTLS && tlsConfig.StrictValidation { + pool, err := tls.LoadX509CertPool( + fmt.Sprintf("%s/reposerver/tls/tls.crt", env.StringFromEnv(common.EnvAppConfigPath, common.DefaultAppConfigPath)), + fmt.Sprintf("%s/reposerver/tls/ca.crt", env.StringFromEnv(common.EnvAppConfigPath, common.DefaultAppConfigPath)), + ) + if err != nil { + return fmt.Errorf("failed to load repo-server certificate pool: %w", err) + } + tlsConfig.Certificates = pool + } + repoClientset := apiclient.NewRepoServerClientset(argocdRepoServer, 5, tlsConfig) + argocdService, err := service.NewArgoCDService(k8sClient, namespace, repoClientset) + if err != nil { + return fmt.Errorf("failed to initialize Argo CD service: %w", err) + } + defer argocdService.Close() + + registry := controller.NewMetricsRegistry("argocd") + http.Handle("/metrics", promhttp.HandlerFor(prometheus.Gatherers{registry, prometheus.DefaultGatherer}, promhttp.HandlerOpts{})) + + go func() { + log.Fatal(http.ListenAndServe(fmt.Sprintf("0.0.0.0:%d", metricsPort), http.DefaultServeMux)) + }() + log.Infof("serving metrics on port %d", metricsPort) + log.Infof("loading configuration %d", metricsPort) + + ctrl := notificationscontroller.NewController(k8sClient, dynamicClient, argocdService, namespace, applicationNamespaces, appLabelSelector, registry, secretName, configMapName) + err = ctrl.Init(ctx) + if err != nil { + return fmt.Errorf("failed to initialize controller: %w", err) + } + + go ctrl.Run(ctx, processorsCount) + <-ctx.Done() + return nil + }, + } + clientConfig = addK8SFlagsToCmd(&command) + command.Flags().IntVar(&processorsCount, "processors-count", 1, "Processors count.") + command.Flags().StringVar(&appLabelSelector, 
"app-label-selector", "", "App label selector.") + command.Flags().StringVar(&namespace, "namespace", "", "Namespace which controller handles. Current namespace if empty.") + command.Flags().StringVar(&logLevel, "loglevel", env.StringFromEnv("ARGOCD_NOTIFICATIONS_CONTROLLER_LOGLEVEL", "info"), "Set the logging level. One of: debug|info|warn|error") + command.Flags().StringVar(&logFormat, "logformat", env.StringFromEnv("ARGOCD_NOTIFICATIONS_CONTROLLER_LOGFORMAT", "text"), "Set the logging format. One of: text|json") + command.Flags().IntVar(&metricsPort, "metrics-port", defaultMetricsPort, "Metrics port") + command.Flags().StringVar(&argocdRepoServer, "argocd-repo-server", common.DefaultRepoServerAddr, "Argo CD repo server address") + command.Flags().BoolVar(&argocdRepoServerPlaintext, "argocd-repo-server-plaintext", false, "Use a plaintext client (non-TLS) to connect to repository server") + command.Flags().BoolVar(&argocdRepoServerStrictTLS, "argocd-repo-server-strict-tls", false, "Perform strict validation of TLS certificates when connecting to repo server") + command.Flags().StringVar(&configMapName, "config-map-name", "argocd-notifications-cm", "Set notifications ConfigMap name") + command.Flags().StringVar(&secretName, "secret-name", "argocd-notifications-secret", "Set notifications Secret name") + command.Flags().StringSliceVar(&applicationNamespaces, "application-namespaces", env.StringsFromEnv("ARGOCD_APPLICATION_NAMESPACES", []string{}, ","), "List of additional namespaces that this controller should send notifications for") + return &command +} diff --git a/cmd/argocd-repo-server/commands/argocd_repo_server.go b/cmd/argocd-repo-server/commands/argocd_repo_server.go index eba1f6c72072a..69358d2a91efd 100644 --- a/cmd/argocd-repo-server/commands/argocd_repo_server.go +++ b/cmd/argocd-repo-server/commands/argocd_repo_server.go @@ -5,19 +5,20 @@ import ( "math" "net" "net/http" - "os" "time" "github.com/argoproj/pkg/stats" - "github.com/go-redis/redis/v8" + 
"github.com/redis/go-redis/v9" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "google.golang.org/grpc/health/grpc_health_v1" + "k8s.io/apimachinery/pkg/api/resource" cmdutil "github.com/argoproj/argo-cd/v2/cmd/util" "github.com/argoproj/argo-cd/v2/common" "github.com/argoproj/argo-cd/v2/reposerver" "github.com/argoproj/argo-cd/v2/reposerver/apiclient" + "github.com/argoproj/argo-cd/v2/reposerver/askpass" reposervercache "github.com/argoproj/argo-cd/v2/reposerver/cache" "github.com/argoproj/argo-cd/v2/reposerver/metrics" "github.com/argoproj/argo-cd/v2/reposerver/repository" @@ -29,48 +30,43 @@ import ( "github.com/argoproj/argo-cd/v2/util/healthz" ioutil "github.com/argoproj/argo-cd/v2/util/io" "github.com/argoproj/argo-cd/v2/util/tls" + traceutil "github.com/argoproj/argo-cd/v2/util/trace" ) const ( // CLIName is the name of the CLI - cliName = "argocd-repo-server" - gnuPGSourcePath = "/app/config/gpg/source" - - defaultPauseGenerationAfterFailedGenerationAttempts = 3 - defaultPauseGenerationOnFailureForMinutes = 60 - defaultPauseGenerationOnFailureForRequests = 0 + cliName = "argocd-repo-server" ) -func getGnuPGSourcePath() string { - if path := os.Getenv("ARGOCD_GPG_DATA_PATH"); path != "" { - return path - } else { - return gnuPGSourcePath - } -} - -func getPauseGenerationAfterFailedGenerationAttempts() int { - return env.ParseNumFromEnv(common.EnvPauseGenerationAfterFailedAttempts, defaultPauseGenerationAfterFailedGenerationAttempts, 0, math.MaxInt32) -} - -func getPauseGenerationOnFailureForMinutes() int { - return env.ParseNumFromEnv(common.EnvPauseGenerationMinutes, defaultPauseGenerationOnFailureForMinutes, 0, math.MaxInt32) -} - -func getPauseGenerationOnFailureForRequests() int { - return env.ParseNumFromEnv(common.EnvPauseGenerationRequests, defaultPauseGenerationOnFailureForRequests, 0, math.MaxInt32) -} +var ( + gnuPGSourcePath = env.StringFromEnv(common.EnvGPGDataPath, "/app/config/gpg/source") + 
pauseGenerationAfterFailedGenerationAttempts = env.ParseNumFromEnv(common.EnvPauseGenerationAfterFailedAttempts, 3, 0, math.MaxInt32) + pauseGenerationOnFailureForMinutes = env.ParseNumFromEnv(common.EnvPauseGenerationMinutes, 60, 0, math.MaxInt32) + pauseGenerationOnFailureForRequests = env.ParseNumFromEnv(common.EnvPauseGenerationRequests, 0, 0, math.MaxInt32) + gitSubmoduleEnabled = env.ParseBoolFromEnv(common.EnvGitSubmoduleEnabled, true) +) func NewCommand() *cobra.Command { var ( - parallelismLimit int64 - listenPort int - metricsPort int - cacheSrc func() (*reposervercache.Cache, error) - tlsConfigCustomizer tls.ConfigCustomizer - tlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error) - redisClient *redis.Client - disableTLS bool + parallelismLimit int64 + listenPort int + listenHost string + metricsPort int + metricsHost string + otlpAddress string + otlpAttrs []string + cacheSrc func() (*reposervercache.Cache, error) + tlsConfigCustomizer tls.ConfigCustomizer + tlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error) + redisClient *redis.Client + disableTLS bool + maxCombinedDirectoryManifestsSize string + cmpTarExcludedGlobs []string + allowOutOfBoundsSymlinks bool + streamedManifestMaxTarSize string + streamedManifestMaxExtractedSize string + helmManifestMaxExtractedSize string + disableManifestMaxExtractedSize bool ) var command = cobra.Command{ Use: cliName, @@ -78,6 +74,16 @@ func NewCommand() *cobra.Command { Long: "ArgoCD Repository Server is an internal service which maintains a local cache of the Git repository holding the application manifests, and is responsible for generating and returning the Kubernetes manifests. This command runs Repository Server in the foreground. 
It can be configured by following options.", DisableAutoGenTag: true, RunE: func(c *cobra.Command, args []string) error { + ctx := c.Context() + + vers := common.GetVersion() + vers.LogStartupInfo( + "ArgoCD Repository Server", + map[string]any{ + "port": listenPort, + }, + ) + cli.SetLogFormat(cmdutil.LogFormat) cli.SetLogLevel(cmdutil.LogLevel) @@ -90,18 +96,48 @@ func NewCommand() *cobra.Command { cache, err := cacheSrc() errors.CheckError(err) + maxCombinedDirectoryManifestsQuantity, err := resource.ParseQuantity(maxCombinedDirectoryManifestsSize) + errors.CheckError(err) + + streamedManifestMaxTarSizeQuantity, err := resource.ParseQuantity(streamedManifestMaxTarSize) + errors.CheckError(err) + + streamedManifestMaxExtractedSizeQuantity, err := resource.ParseQuantity(streamedManifestMaxExtractedSize) + errors.CheckError(err) + + helmManifestMaxExtractedSizeQuantity, err := resource.ParseQuantity(helmManifestMaxExtractedSize) + errors.CheckError(err) + + askPassServer := askpass.NewServer() metricsServer := metrics.NewMetricsServer() cacheutil.CollectMetrics(redisClient, metricsServer) server, err := reposerver.NewServer(metricsServer, cache, tlsConfigCustomizer, repository.RepoServerInitConstants{ ParallelismLimit: parallelismLimit, - PauseGenerationAfterFailedGenerationAttempts: getPauseGenerationAfterFailedGenerationAttempts(), - PauseGenerationOnFailureForMinutes: getPauseGenerationOnFailureForMinutes(), - PauseGenerationOnFailureForRequests: getPauseGenerationOnFailureForRequests(), - }) + PauseGenerationAfterFailedGenerationAttempts: pauseGenerationAfterFailedGenerationAttempts, + PauseGenerationOnFailureForMinutes: pauseGenerationOnFailureForMinutes, + PauseGenerationOnFailureForRequests: pauseGenerationOnFailureForRequests, + SubmoduleEnabled: gitSubmoduleEnabled, + MaxCombinedDirectoryManifestsSize: maxCombinedDirectoryManifestsQuantity, + CMPTarExcludedGlobs: cmpTarExcludedGlobs, + AllowOutOfBoundsSymlinks: allowOutOfBoundsSymlinks, + 
StreamedManifestMaxExtractedSize: streamedManifestMaxExtractedSizeQuantity.ToDec().Value(), + StreamedManifestMaxTarSize: streamedManifestMaxTarSizeQuantity.ToDec().Value(), + HelmManifestMaxExtractedSize: helmManifestMaxExtractedSizeQuantity.ToDec().Value(), + }, askPassServer) errors.CheckError(err) + if otlpAddress != "" { + var closer func() + var err error + closer, err = traceutil.InitTracer(ctx, "argocd-repo-server", otlpAddress, otlpAttrs) + if err != nil { + log.Fatalf("failed to initialize tracing: %v", err) + } + defer closer() + } + grpc := server.CreateGRPC() - listener, err := net.Listen("tcp", fmt.Sprintf(":%d", listenPort)) + listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", listenHost, listenPort)) errors.CheckError(err) healthz.ServeHealthCheck(http.DefaultServeMux, func(r *http.Request) error { @@ -127,22 +163,23 @@ func NewCommand() *cobra.Command { return nil }) http.Handle("/metrics", metricsServer.GetHandler()) - go func() { errors.CheckError(http.ListenAndServe(fmt.Sprintf(":%d", metricsPort), nil)) }() + go func() { errors.CheckError(http.ListenAndServe(fmt.Sprintf("%s:%d", metricsHost, metricsPort), nil)) }() + go func() { errors.CheckError(askPassServer.Run(askpass.SocketPath)) }() if gpg.IsGPGEnabled() { log.Infof("Initializing GnuPG keyring at %s", common.GetGnuPGHomePath()) err = gpg.InitializeGnuPG() errors.CheckError(err) - log.Infof("Populating GnuPG keyring with keys from %s", getGnuPGSourcePath()) - added, removed, err := gpg.SyncKeyRingFromDirectory(getGnuPGSourcePath()) + log.Infof("Populating GnuPG keyring with keys from %s", gnuPGSourcePath) + added, removed, err := gpg.SyncKeyRingFromDirectory(gnuPGSourcePath) errors.CheckError(err) log.Infof("Loaded %d (and removed %d) keys from keyring", len(added), len(removed)) - go func() { errors.CheckError(reposerver.StartGPGWatcher(getGnuPGSourcePath())) }() + go func() { errors.CheckError(reposerver.StartGPGWatcher(gnuPGSourcePath)) }() } - log.Infof("argocd-repo-server %s 
serving on %s", common.GetVersion(), listener.Addr()) + log.Infof("argocd-repo-server is listening on %s", listener.Addr()) stats.RegisterStackDumper() stats.StartStatsTicker(10 * time.Minute) stats.RegisterHeapDumper("memprofile") @@ -151,14 +188,23 @@ func NewCommand() *cobra.Command { return nil }, } - - command.Flags().StringVar(&cmdutil.LogFormat, "logformat", "text", "Set the logging format. One of: text|json") - command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error") - command.Flags().Int64Var(¶llelismLimit, "parallelismlimit", 0, "Limit on number of concurrent manifests generate requests. Any value less the 1 means no limit.") + command.Flags().StringVar(&cmdutil.LogFormat, "logformat", env.StringFromEnv("ARGOCD_REPO_SERVER_LOGFORMAT", "text"), "Set the logging format. One of: text|json") + command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", env.StringFromEnv("ARGOCD_REPO_SERVER_LOGLEVEL", "info"), "Set the logging level. One of: debug|info|warn|error") + command.Flags().Int64Var(¶llelismLimit, "parallelismlimit", int64(env.ParseNumFromEnv("ARGOCD_REPO_SERVER_PARALLELISM_LIMIT", 0, 0, math.MaxInt32)), "Limit on number of concurrent manifests generate requests. 
Any value less the 1 means no limit.") + command.Flags().StringVar(&listenHost, "address", env.StringFromEnv("ARGOCD_REPO_SERVER_LISTEN_ADDRESS", common.DefaultAddressRepoServer), "Listen on given address for incoming connections") command.Flags().IntVar(&listenPort, "port", common.DefaultPortRepoServer, "Listen on given port for incoming connections") + command.Flags().StringVar(&metricsHost, "metrics-address", env.StringFromEnv("ARGOCD_REPO_SERVER_METRICS_LISTEN_ADDRESS", common.DefaultAddressRepoServerMetrics), "Listen on given address for metrics") command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortRepoServerMetrics, "Start metrics server on given port") - command.Flags().BoolVar(&disableTLS, "disable-tls", false, "Disable TLS on the gRPC endpoint") - + command.Flags().StringVar(&otlpAddress, "otlp-address", env.StringFromEnv("ARGOCD_REPO_SERVER_OTLP_ADDRESS", ""), "OpenTelemetry collector address to send traces to") + command.Flags().StringSliceVar(&otlpAttrs, "otlp-attrs", env.StringsFromEnv("ARGOCD_REPO_SERVER_OTLP_ATTRS", []string{}, ","), "List of OpenTelemetry collector extra attrs when send traces, each attribute is separated by a colon(e.g. 
key:value)") + command.Flags().BoolVar(&disableTLS, "disable-tls", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_DISABLE_TLS", false), "Disable TLS on the gRPC endpoint") + command.Flags().StringVar(&maxCombinedDirectoryManifestsSize, "max-combined-directory-manifests-size", env.StringFromEnv("ARGOCD_REPO_SERVER_MAX_COMBINED_DIRECTORY_MANIFESTS_SIZE", "10M"), "Max combined size of manifest files in a directory-type Application") + command.Flags().StringArrayVar(&cmpTarExcludedGlobs, "plugin-tar-exclude", env.StringsFromEnv("ARGOCD_REPO_SERVER_PLUGIN_TAR_EXCLUSIONS", []string{}, ";"), "Globs to filter when sending tarballs to plugins.") + command.Flags().BoolVar(&allowOutOfBoundsSymlinks, "allow-oob-symlinks", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_ALLOW_OUT_OF_BOUNDS_SYMLINKS", false), "Allow out-of-bounds symlinks in repositories (not recommended)") + command.Flags().StringVar(&streamedManifestMaxTarSize, "streamed-manifest-max-tar-size", env.StringFromEnv("ARGOCD_REPO_SERVER_STREAMED_MANIFEST_MAX_TAR_SIZE", "100M"), "Maximum size of streamed manifest archives") + command.Flags().StringVar(&streamedManifestMaxExtractedSize, "streamed-manifest-max-extracted-size", env.StringFromEnv("ARGOCD_REPO_SERVER_STREAMED_MANIFEST_MAX_EXTRACTED_SIZE", "1G"), "Maximum size of streamed manifest archives when extracted") + command.Flags().StringVar(&helmManifestMaxExtractedSize, "helm-manifest-max-extracted-size", env.StringFromEnv("ARGOCD_REPO_SERVER_HELM_MANIFEST_MAX_EXTRACTED_SIZE", "1G"), "Maximum size of helm manifest archives when extracted") + command.Flags().BoolVar(&disableManifestMaxExtractedSize, "disable-helm-manifest-max-extracted-size", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_DISABLE_HELM_MANIFEST_MAX_EXTRACTED_SIZE", false), "Disable maximum size of helm manifest archives when extracted") tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(&command) cacheSrc = reposervercache.AddCacheFlagsToCmd(&command, func(client *redis.Client) { redisClient = client diff --git 
a/cmd/argocd-server/commands/argocd_server.go b/cmd/argocd-server/commands/argocd_server.go index 128d5665e6b23..eea346eaed03d 100644 --- a/cmd/argocd-server/commands/argocd_server.go +++ b/cmd/argocd-server/commands/argocd_server.go @@ -3,10 +3,11 @@ package commands import ( "context" "fmt" + "math" "time" "github.com/argoproj/pkg/stats" - "github.com/go-redis/redis/v8" + "github.com/redis/go-redis/v9" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "k8s.io/client-go/kubernetes" @@ -20,10 +21,12 @@ import ( "github.com/argoproj/argo-cd/v2/server" servercache "github.com/argoproj/argo-cd/v2/server/cache" "github.com/argoproj/argo-cd/v2/util/cli" + "github.com/argoproj/argo-cd/v2/util/dex" "github.com/argoproj/argo-cd/v2/util/env" "github.com/argoproj/argo-cd/v2/util/errors" "github.com/argoproj/argo-cd/v2/util/kube" "github.com/argoproj/argo-cd/v2/util/tls" + traceutil "github.com/argoproj/argo-cd/v2/util/trace" ) const ( @@ -32,26 +35,24 @@ const ( ) var ( - failureRetryCount = 0 - failureRetryPeriodMilliSeconds = 100 + failureRetryCount = env.ParseNumFromEnv(failureRetryCountEnv, 0, 0, 10) + failureRetryPeriodMilliSeconds = env.ParseNumFromEnv(failureRetryPeriodMilliSecondsEnv, 100, 0, 1000) ) -func init() { - failureRetryCount = env.ParseNumFromEnv(failureRetryCountEnv, failureRetryCount, 0, 10) - failureRetryPeriodMilliSeconds = env.ParseNumFromEnv(failureRetryPeriodMilliSecondsEnv, failureRetryPeriodMilliSeconds, 0, 1000) -} - // NewCommand returns a new instance of an argocd command func NewCommand() *cobra.Command { var ( redisClient *redis.Client insecure bool + listenHost string listenPort int + metricsHost string metricsPort int + otlpAddress string + otlpAttrs []string glogLevel int clientConfig clientcmd.ClientConfig repoServerTimeoutSeconds int - staticAssetsDir string baseHRef string rootPath string repoServerAddress string @@ -61,8 +62,14 @@ func NewCommand() *cobra.Command { tlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error) 
cacheSrc func() (*servercache.Cache, error) frameOptions string + contentSecurityPolicy string repoServerPlaintext bool repoServerStrictTLS bool + dexServerPlaintext bool + dexServerStrictTLS bool + staticAssetsDir string + applicationNamespaces []string + enableProxyExtension bool ) var command = &cobra.Command{ Use: cliName, @@ -70,6 +77,19 @@ func NewCommand() *cobra.Command { Long: "The API server is a gRPC/REST server which exposes the API consumed by the Web UI, CLI, and CI/CD systems. This command runs API server in the foreground. It can be configured by following options.", DisableAutoGenTag: true, Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + + vers := common.GetVersion() + namespace, _, err := clientConfig.Namespace() + errors.CheckError(err) + vers.LogStartupInfo( + "ArgoCD API Server", + map[string]any{ + "namespace": namespace, + "port": listenPort, + }, + ) + cli.SetLogFormat(cmdutil.LogFormat) cli.SetLogLevel(cmdutil.LogLevel) cli.SetGLogLevel(glogLevel) @@ -78,9 +98,6 @@ func NewCommand() *cobra.Command { errors.CheckError(err) errors.CheckError(v1alpha1.SetK8SConfigDefaults(config)) - namespace, _, err := clientConfig.Namespace() - errors.CheckError(err) - tlsConfigCustomizer, err := tlsConfigCustomizerSrc() errors.CheckError(err) cache, err := cacheSrc() @@ -91,11 +108,12 @@ func NewCommand() *cobra.Command { appclientsetConfig, err := clientConfig.ClientConfig() errors.CheckError(err) errors.CheckError(v1alpha1.SetK8SConfigDefaults(appclientsetConfig)) + config.UserAgent = fmt.Sprintf("argocd-server/%s (%s)", vers.Version, vers.Platform) if failureRetryCount > 0 { appclientsetConfig = kube.AddFailureRetryWrapper(appclientsetConfig, failureRetryCount, failureRetryPeriodMilliSeconds) } - appclientset := appclientset.NewForConfigOrDie(appclientsetConfig) + appClientSet := appclientset.NewForConfigOrDie(appclientsetConfig) tlsConfig := apiclient.TLSConfiguration{ DisableTLS: repoServerPlaintext, StrictValidation: 
repoServerStrictTLS, @@ -114,6 +132,28 @@ func NewCommand() *cobra.Command { tlsConfig.Certificates = pool } + dexTlsConfig := &dex.DexTLSConfig{ + DisableTLS: dexServerPlaintext, + StrictValidation: dexServerStrictTLS, + } + + if !dexServerPlaintext && dexServerStrictTLS { + pool, err := tls.LoadX509CertPool( + fmt.Sprintf("%s/dex/tls/ca.crt", env.StringFromEnv(common.EnvAppConfigPath, common.DefaultAppConfigPath)), + ) + if err != nil { + log.Fatalf("%v", err) + } + dexTlsConfig.RootCAs = pool + cert, err := tls.LoadX509Cert( + fmt.Sprintf("%s/dex/tls/tls.crt", env.StringFromEnv(common.EnvAppConfigPath, common.DefaultAppConfigPath)), + ) + if err != nil { + log.Fatalf("%v", err) + } + dexTlsConfig.Certificate = cert.Raw + } + repoclientset := apiclient.NewRepoServerClientset(repoServerAddress, repoServerTimeoutSeconds, tlsConfig) if rootPath != "" { if baseHRef != "" && baseHRef != rootPath { @@ -123,58 +163,84 @@ func NewCommand() *cobra.Command { } argoCDOpts := server.ArgoCDServerOpts{ - Insecure: insecure, - ListenPort: listenPort, - MetricsPort: metricsPort, - Namespace: namespace, - StaticAssetsDir: staticAssetsDir, - BaseHRef: baseHRef, - RootPath: rootPath, - KubeClientset: kubeclientset, - AppClientset: appclientset, - RepoClientset: repoclientset, - DexServerAddr: dexServerAddress, - DisableAuth: disableAuth, - EnableGZip: enableGZip, - TLSConfigCustomizer: tlsConfigCustomizer, - Cache: cache, - XFrameOptions: frameOptions, - RedisClient: redisClient, + Insecure: insecure, + ListenPort: listenPort, + ListenHost: listenHost, + MetricsPort: metricsPort, + MetricsHost: metricsHost, + Namespace: namespace, + BaseHRef: baseHRef, + RootPath: rootPath, + KubeClientset: kubeclientset, + AppClientset: appClientSet, + RepoClientset: repoclientset, + DexServerAddr: dexServerAddress, + DexTLSConfig: dexTlsConfig, + DisableAuth: disableAuth, + EnableGZip: enableGZip, + TLSConfigCustomizer: tlsConfigCustomizer, + Cache: cache, + XFrameOptions: frameOptions, + 
ContentSecurityPolicy: contentSecurityPolicy, + RedisClient: redisClient, + StaticAssetsDir: staticAssetsDir, + ApplicationNamespaces: applicationNamespaces, + EnableProxyExtension: enableProxyExtension, } stats.RegisterStackDumper() stats.StartStatsTicker(10 * time.Minute) stats.RegisterHeapDumper("memprofile") - + argocd := server.NewServer(ctx, argoCDOpts) + argocd.Init(ctx) + lns, err := argocd.Listen() + errors.CheckError(err) for { - ctx := context.Background() + var closer func() ctx, cancel := context.WithCancel(ctx) - argocd := server.NewServer(ctx, argoCDOpts) - argocd.Run(ctx, listenPort, metricsPort) + if otlpAddress != "" { + closer, err = traceutil.InitTracer(ctx, "argocd-server", otlpAddress, otlpAttrs) + if err != nil { + log.Fatalf("failed to initialize tracing: %v", err) + } + } + argocd.Run(ctx, lns) cancel() + if closer != nil { + closer() + } } }, } clientConfig = cli.AddKubectlFlagsToCmd(command) - command.Flags().BoolVar(&insecure, "insecure", false, "Run server without TLS") - command.Flags().StringVar(&staticAssetsDir, "staticassets", "", "Static assets directory path") - command.Flags().StringVar(&baseHRef, "basehref", "/", "Value for base href in index.html. Used if Argo CD is running behind reverse proxy under subpath different from /") - command.Flags().StringVar(&rootPath, "rootpath", "", "Used if Argo CD is running behind reverse proxy under subpath different from /") - command.Flags().StringVar(&cmdutil.LogFormat, "logformat", "text", "Set the logging format. One of: text|json") - command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", "info", "Set the logging level. 
One of: debug|info|warn|error") + command.Flags().BoolVar(&insecure, "insecure", env.ParseBoolFromEnv("ARGOCD_SERVER_INSECURE", false), "Run server without TLS") + command.Flags().StringVar(&staticAssetsDir, "staticassets", env.StringFromEnv("ARGOCD_SERVER_STATIC_ASSETS", "/shared/app"), "Directory path that contains additional static assets") + command.Flags().StringVar(&baseHRef, "basehref", env.StringFromEnv("ARGOCD_SERVER_BASEHREF", "/"), "Value for base href in index.html. Used if Argo CD is running behind reverse proxy under subpath different from /") + command.Flags().StringVar(&rootPath, "rootpath", env.StringFromEnv("ARGOCD_SERVER_ROOTPATH", ""), "Used if Argo CD is running behind reverse proxy under subpath different from /") + command.Flags().StringVar(&cmdutil.LogFormat, "logformat", env.StringFromEnv("ARGOCD_SERVER_LOGFORMAT", "text"), "Set the logging format. One of: text|json") + command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", env.StringFromEnv("ARGOCD_SERVER_LOG_LEVEL", "info"), "Set the logging level. 
One of: debug|info|warn|error") command.Flags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level") - command.Flags().StringVar(&repoServerAddress, "repo-server", common.DefaultRepoServerAddr, "Repo server address") - command.Flags().StringVar(&dexServerAddress, "dex-server", common.DefaultDexServerAddr, "Dex server address") - command.Flags().BoolVar(&disableAuth, "disable-auth", false, "Disable client authentication") - command.Flags().BoolVar(&enableGZip, "enable-gzip", false, "Enable GZIP compression") + command.Flags().StringVar(&repoServerAddress, "repo-server", env.StringFromEnv("ARGOCD_SERVER_REPO_SERVER", common.DefaultRepoServerAddr), "Repo server address") + command.Flags().StringVar(&dexServerAddress, "dex-server", env.StringFromEnv("ARGOCD_SERVER_DEX_SERVER", common.DefaultDexServerAddr), "Dex server address") + command.Flags().BoolVar(&disableAuth, "disable-auth", env.ParseBoolFromEnv("ARGOCD_SERVER_DISABLE_AUTH", false), "Disable client authentication") + command.Flags().BoolVar(&enableGZip, "enable-gzip", env.ParseBoolFromEnv("ARGOCD_SERVER_ENABLE_GZIP", true), "Enable GZIP compression") command.AddCommand(cli.NewVersionCmd(cliName)) + command.Flags().StringVar(&listenHost, "address", env.StringFromEnv("ARGOCD_SERVER_LISTEN_ADDRESS", common.DefaultAddressAPIServer), "Listen on given address") command.Flags().IntVar(&listenPort, "port", common.DefaultPortAPIServer, "Listen on given port") + command.Flags().StringVar(&metricsHost, env.StringFromEnv("ARGOCD_SERVER_METRICS_LISTEN_ADDRESS", "metrics-address"), common.DefaultAddressAPIServerMetrics, "Listen for metrics on given address") command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortArgoCDAPIServerMetrics, "Start metrics on given port") - command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", 60, "Repo server RPC call timeout seconds.") - command.Flags().StringVar(&frameOptions, "x-frame-options", "sameorigin", "Set X-Frame-Options header in 
HTTP responses to `value`. To disable, set to \"\".") - command.Flags().BoolVar(&repoServerPlaintext, "repo-server-plaintext", false, "Use a plaintext client (non-TLS) to connect to repository server") - command.Flags().BoolVar(&repoServerStrictTLS, "repo-server-strict-tls", false, "Perform strict validation of TLS certificates when connecting to repo server") + command.Flags().StringVar(&otlpAddress, "otlp-address", env.StringFromEnv("ARGOCD_SERVER_OTLP_ADDRESS", ""), "OpenTelemetry collector address to send traces to") + command.Flags().StringSliceVar(&otlpAttrs, "otlp-attrs", env.StringsFromEnv("ARGOCD_SERVER_OTLP_ATTRS", []string{}, ","), "List of OpenTelemetry collector extra attrs when send traces, each attribute is separated by a colon(e.g. key:value)") + command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", env.ParseNumFromEnv("ARGOCD_SERVER_REPO_SERVER_TIMEOUT_SECONDS", 60, 0, math.MaxInt64), "Repo server RPC call timeout seconds.") + command.Flags().StringVar(&frameOptions, "x-frame-options", env.StringFromEnv("ARGOCD_SERVER_X_FRAME_OPTIONS", "sameorigin"), "Set X-Frame-Options header in HTTP responses to `value`. To disable, set to \"\".") + command.Flags().StringVar(&contentSecurityPolicy, "content-security-policy", env.StringFromEnv("ARGOCD_SERVER_CONTENT_SECURITY_POLICY", "frame-ancestors 'self';"), "Set Content-Security-Policy header in HTTP responses to `value`. 
To disable, set to \"\".") + command.Flags().BoolVar(&repoServerPlaintext, "repo-server-plaintext", env.ParseBoolFromEnv("ARGOCD_SERVER_REPO_SERVER_PLAINTEXT", false), "Use a plaintext client (non-TLS) to connect to repository server") + command.Flags().BoolVar(&repoServerStrictTLS, "repo-server-strict-tls", env.ParseBoolFromEnv("ARGOCD_SERVER_REPO_SERVER_STRICT_TLS", false), "Perform strict validation of TLS certificates when connecting to repo server") + command.Flags().BoolVar(&dexServerPlaintext, "dex-server-plaintext", env.ParseBoolFromEnv("ARGOCD_SERVER_DEX_SERVER_PLAINTEXT", false), "Use a plaintext client (non-TLS) to connect to dex server") + command.Flags().BoolVar(&dexServerStrictTLS, "dex-server-strict-tls", env.ParseBoolFromEnv("ARGOCD_SERVER_DEX_SERVER_STRICT_TLS", false), "Perform strict validation of TLS certificates when connecting to dex server") + command.Flags().StringSliceVar(&applicationNamespaces, "application-namespaces", env.StringsFromEnv("ARGOCD_APPLICATION_NAMESPACES", []string{}, ","), "List of additional namespaces where application resources can be managed in") + command.Flags().BoolVar(&enableProxyExtension, "enable-proxy-extension", env.ParseBoolFromEnv("ARGOCD_SERVER_ENABLE_PROXY_EXTENSION", false), "Enable Proxy Extension feature") tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(command) cacheSrc = servercache.AddCacheFlagsToCmd(command, func(client *redis.Client) { redisClient = client diff --git a/cmd/argocd-util/commands/app.go b/cmd/argocd-util/commands/app.go deleted file mode 100644 index 3a4425e191782..0000000000000 --- a/cmd/argocd-util/commands/app.go +++ /dev/null @@ -1,406 +0,0 @@ -package commands - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "os" - "sort" - "time" - - "github.com/ghodss/yaml" - "github.com/spf13/cobra" - apiv1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/runtime" - 
"k8s.io/client-go/kubernetes" - kubecache "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/clientcmd" - - cmdutil "github.com/argoproj/argo-cd/v2/cmd/util" - "github.com/argoproj/argo-cd/v2/common" - "github.com/argoproj/argo-cd/v2/controller" - "github.com/argoproj/argo-cd/v2/controller/cache" - "github.com/argoproj/argo-cd/v2/controller/metrics" - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" - appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned" - appinformers "github.com/argoproj/argo-cd/v2/pkg/client/informers/externalversions" - "github.com/argoproj/argo-cd/v2/reposerver/apiclient" - cacheutil "github.com/argoproj/argo-cd/v2/util/cache" - appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate" - "github.com/argoproj/argo-cd/v2/util/cli" - "github.com/argoproj/argo-cd/v2/util/config" - "github.com/argoproj/argo-cd/v2/util/db" - "github.com/argoproj/argo-cd/v2/util/errors" - kubeutil "github.com/argoproj/argo-cd/v2/util/kube" - "github.com/argoproj/argo-cd/v2/util/settings" -) - -func NewAppCommand() *cobra.Command { - var command = &cobra.Command{ - Use: "app", - Short: "Manage applications configuration", - Run: func(c *cobra.Command, args []string) { - c.HelpFunc()(c, args) - }, - } - - command.AddCommand(NewGenAppSpecCommand()) - command.AddCommand(NewReconcileCommand()) - command.AddCommand(NewDiffReconcileResults()) - return command -} - -// NewGenAppSpecCommand generates declarative configuration file for given application -func NewGenAppSpecCommand() *cobra.Command { - var ( - appOpts cmdutil.AppOptions - fileURL string - appName string - labels []string - outputFormat string - ) - var command = &cobra.Command{ - Use: "generate-spec APPNAME", - Short: "Generate declarative config for an application", - Example: ` - # Generate declarative config for a directory app - argocd-util app generate-spec guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path guestbook 
--dest-namespace default --dest-server https://kubernetes.default.svc --directory-recurse - - # Generate declarative config for a Jsonnet app - argocd-util app generate-spec jsonnet-guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path jsonnet-guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --jsonnet-ext-str replicas=2 - - # Generate declarative config for a Helm app - argocd-util app generate-spec helm-guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path helm-guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --helm-set replicaCount=2 - - # Generate declarative config for a Helm app from a Helm repo - argocd-util app generate-spec nginx-ingress --repo https://charts.helm.sh/stable --helm-chart nginx-ingress --revision 1.24.3 --dest-namespace default --dest-server https://kubernetes.default.svc - - # Generate declarative config for a Kustomize app - argocd-util app generate-spec kustomize-guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path kustomize-guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --kustomize-image gcr.io/heptio-images/ks-guestbook-demo:0.1 - - # Generate declarative config for a app using a custom tool: - argocd-util app generate-spec ksane --repo https://github.com/argoproj/argocd-example-apps.git --path plugins/kasane --dest-namespace default --dest-server https://kubernetes.default.svc --config-management-plugin kasane -`, - Run: func(c *cobra.Command, args []string) { - app, err := cmdutil.ConstructApp(fileURL, appName, labels, args, appOpts, c.Flags()) - errors.CheckError(err) - - if app.Name == "" { - c.HelpFunc()(c, args) - os.Exit(1) - } - - var printResources []interface{} - printResources = append(printResources, app) - errors.CheckError(cmdutil.PrintResources(printResources, outputFormat)) - }, - } - command.Flags().StringVar(&appName, "name", "", "A name for the app, ignored if 
a file is set (DEPRECATED)") - command.Flags().StringVarP(&fileURL, "file", "f", "", "Filename or URL to Kubernetes manifests for the app") - command.Flags().StringArrayVarP(&labels, "label", "l", []string{}, "Labels to apply to the app") - command.Flags().StringVarP(&outputFormat, "output", "o", "yaml", "Output format. One of: json|yaml") - - // Only complete files with appropriate extension. - err := command.Flags().SetAnnotation("file", cobra.BashCompFilenameExt, []string{"json", "yaml", "yml"}) - errors.CheckError(err) - - cmdutil.AddAppFlags(command, &appOpts) - return command -} - -type appReconcileResult struct { - Name string `json:"name"` - Health *v1alpha1.HealthStatus `json:"health"` - Sync *v1alpha1.SyncStatus `json:"sync"` - Conditions []v1alpha1.ApplicationCondition `json:"conditions"` -} - -type reconcileResults struct { - Applications []appReconcileResult `json:"applications"` -} - -func (r *reconcileResults) getAppsMap() map[string]appReconcileResult { - res := map[string]appReconcileResult{} - for i := range r.Applications { - res[r.Applications[i].Name] = r.Applications[i] - } - return res -} - -func printLine(format string, a ...interface{}) { - _, _ = fmt.Printf(format+"\n", a...) 
-} - -func NewDiffReconcileResults() *cobra.Command { - var command = &cobra.Command{ - Use: "diff-reconcile-results PATH1 PATH2", - Short: "Compare results of two reconciliations and print diff.", - Run: func(c *cobra.Command, args []string) { - if len(args) != 2 { - c.HelpFunc()(c, args) - os.Exit(1) - } - - path1 := args[0] - path2 := args[1] - var res1 reconcileResults - var res2 reconcileResults - errors.CheckError(config.UnmarshalLocalFile(path1, &res1)) - errors.CheckError(config.UnmarshalLocalFile(path2, &res2)) - errors.CheckError(diffReconcileResults(res1, res2)) - }, - } - - return command -} - -func toUnstructured(val interface{}) (*unstructured.Unstructured, error) { - data, err := json.Marshal(val) - if err != nil { - return nil, err - } - res := make(map[string]interface{}) - err = json.Unmarshal(data, &res) - if err != nil { - return nil, err - } - return &unstructured.Unstructured{Object: res}, nil -} - -type diffPair struct { - name string - first *unstructured.Unstructured - second *unstructured.Unstructured -} - -func diffReconcileResults(res1 reconcileResults, res2 reconcileResults) error { - var pairs []diffPair - resMap1 := res1.getAppsMap() - resMap2 := res2.getAppsMap() - for k, v := range resMap1 { - firstUn, err := toUnstructured(v) - if err != nil { - return err - } - var secondUn *unstructured.Unstructured - second, ok := resMap2[k] - if ok { - secondUn, err = toUnstructured(second) - if err != nil { - return err - } - delete(resMap2, k) - } - pairs = append(pairs, diffPair{name: k, first: firstUn, second: secondUn}) - } - for k, v := range resMap2 { - secondUn, err := toUnstructured(v) - if err != nil { - return err - } - pairs = append(pairs, diffPair{name: k, first: nil, second: secondUn}) - } - sort.Slice(pairs, func(i, j int) bool { - return pairs[i].name < pairs[j].name - }) - for _, item := range pairs { - printLine(item.name) - _ = cli.PrintDiff(item.name, item.first, item.second) - } - - return nil -} - -func 
NewReconcileCommand() *cobra.Command { - var ( - clientConfig clientcmd.ClientConfig - selector string - repoServerAddress string - outputFormat string - refresh bool - ) - - var command = &cobra.Command{ - Use: "get-reconcile-results PATH", - Short: "Reconcile all applications and stores reconciliation summary in the specified file.", - Run: func(c *cobra.Command, args []string) { - // get rid of logging error handler - runtime.ErrorHandlers = runtime.ErrorHandlers[1:] - - if len(args) != 1 { - c.HelpFunc()(c, args) - os.Exit(1) - } - outputPath := args[0] - - errors.CheckError(os.Setenv(common.EnvVarFakeInClusterConfig, "true")) - cfg, err := clientConfig.ClientConfig() - errors.CheckError(err) - namespace, _, err := clientConfig.Namespace() - errors.CheckError(err) - - var result []appReconcileResult - if refresh { - if repoServerAddress == "" { - printLine("Repo server is not provided, trying to port-forward to argocd-repo-server pod.") - overrides := clientcmd.ConfigOverrides{} - repoServerPort, err := kubeutil.PortForward("app.kubernetes.io/name=argocd-repo-server", 8081, namespace, &overrides) - errors.CheckError(err) - repoServerAddress = fmt.Sprintf("localhost:%d", repoServerPort) - } - repoServerClient := apiclient.NewRepoServerClientset(repoServerAddress, 60, apiclient.TLSConfiguration{DisableTLS: false, StrictValidation: false}) - - appClientset := appclientset.NewForConfigOrDie(cfg) - kubeClientset := kubernetes.NewForConfigOrDie(cfg) - result, err = reconcileApplications(kubeClientset, appClientset, namespace, repoServerClient, selector, newLiveStateCache) - errors.CheckError(err) - } else { - appClientset := appclientset.NewForConfigOrDie(cfg) - result, err = getReconcileResults(appClientset, namespace, selector) - } - - errors.CheckError(saveToFile(err, outputFormat, reconcileResults{Applications: result}, outputPath)) - }, - } - clientConfig = cli.AddKubectlFlagsToCmd(command) - command.Flags().StringVar(&repoServerAddress, "repo-server", "", "Repo 
server address.") - command.Flags().StringVar(&selector, "l", "", "Label selector") - command.Flags().StringVar(&outputFormat, "o", "yaml", "Output format (yaml|json)") - command.Flags().BoolVar(&refresh, "refresh", false, "If set to true then recalculates apps reconciliation") - - return command -} - -func saveToFile(err error, outputFormat string, result reconcileResults, outputPath string) error { - errors.CheckError(err) - var data []byte - switch outputFormat { - case "yaml": - if data, err = yaml.Marshal(result); err != nil { - return err - } - case "json": - if data, err = json.Marshal(result); err != nil { - return err - } - default: - return fmt.Errorf("format %s is not supported", outputFormat) - } - - return ioutil.WriteFile(outputPath, data, 0644) -} - -func getReconcileResults(appClientset appclientset.Interface, namespace string, selector string) ([]appReconcileResult, error) { - appsList, err := appClientset.ArgoprojV1alpha1().Applications(namespace).List(context.Background(), v1.ListOptions{LabelSelector: selector}) - if err != nil { - return nil, err - } - - var items []appReconcileResult - for _, app := range appsList.Items { - items = append(items, appReconcileResult{ - Name: app.Name, - Conditions: app.Status.Conditions, - Health: &app.Status.Health, - Sync: &app.Status.Sync, - }) - } - return items, nil -} - -func reconcileApplications( - kubeClientset kubernetes.Interface, - appClientset appclientset.Interface, - namespace string, - repoServerClient apiclient.Clientset, - selector string, - createLiveStateCache func(argoDB db.ArgoDB, appInformer kubecache.SharedIndexInformer, settingsMgr *settings.SettingsManager, server *metrics.MetricsServer) cache.LiveStateCache, -) ([]appReconcileResult, error) { - - settingsMgr := settings.NewSettingsManager(context.Background(), kubeClientset, namespace) - argoDB := db.NewDB(namespace, settingsMgr, kubeClientset) - appInformerFactory := appinformers.NewFilteredSharedInformerFactory( - appClientset, - 
1*time.Hour, - namespace, - func(options *v1.ListOptions) {}, - ) - - appInformer := appInformerFactory.Argoproj().V1alpha1().Applications().Informer() - projInformer := appInformerFactory.Argoproj().V1alpha1().AppProjects().Informer() - go appInformer.Run(context.Background().Done()) - go projInformer.Run(context.Background().Done()) - if !kubecache.WaitForCacheSync(context.Background().Done(), appInformer.HasSynced, projInformer.HasSynced) { - return nil, fmt.Errorf("failed to sync cache") - } - - appLister := appInformerFactory.Argoproj().V1alpha1().Applications().Lister() - projLister := appInformerFactory.Argoproj().V1alpha1().AppProjects().Lister() - server, err := metrics.NewMetricsServer("", appLister, func(obj interface{}) bool { - return true - }, func(r *http.Request) error { - return nil - }) - - if err != nil { - return nil, err - } - stateCache := createLiveStateCache(argoDB, appInformer, settingsMgr, server) - if err := stateCache.Init(); err != nil { - return nil, err - } - - cache := appstatecache.NewCache( - cacheutil.NewCache(cacheutil.NewInMemoryCache(1*time.Minute)), - 1*time.Minute, - ) - - appStateManager := controller.NewAppStateManager( - argoDB, appClientset, repoServerClient, namespace, kubeutil.NewKubectl(), settingsMgr, stateCache, projInformer, server, cache, time.Second) - - appsList, err := appClientset.ArgoprojV1alpha1().Applications(namespace).List(context.Background(), v1.ListOptions{LabelSelector: selector}) - if err != nil { - return nil, err - } - - sort.Slice(appsList.Items, func(i, j int) bool { - return appsList.Items[i].Spec.Destination.Server < appsList.Items[j].Spec.Destination.Server - }) - - var items []appReconcileResult - prevServer := "" - for _, app := range appsList.Items { - if prevServer != app.Spec.Destination.Server { - if prevServer != "" { - if clusterCache, err := stateCache.GetClusterCache(prevServer); err == nil { - clusterCache.Invalidate() - } - } - printLine("Reconciling apps of %s", 
app.Spec.Destination.Server) - prevServer = app.Spec.Destination.Server - } - printLine(app.Name) - - proj, err := projLister.AppProjects(namespace).Get(app.Spec.Project) - if err != nil { - return nil, err - } - - res := appStateManager.CompareAppState(&app, proj, app.Spec.Source.TargetRevision, app.Spec.Source, false, nil) - items = append(items, appReconcileResult{ - Name: app.Name, - Conditions: app.Status.Conditions, - Health: res.GetHealthStatus(), - Sync: res.GetSyncStatus(), - }) - } - return items, nil -} - -func newLiveStateCache(argoDB db.ArgoDB, appInformer kubecache.SharedIndexInformer, settingsMgr *settings.SettingsManager, server *metrics.MetricsServer) cache.LiveStateCache { - return cache.NewLiveStateCache(argoDB, appInformer, settingsMgr, kubeutil.NewKubectl(), server, func(managedByApp map[string]bool, ref apiv1.ObjectReference) {}, nil) -} diff --git a/cmd/argocd-util/commands/app_test.go b/cmd/argocd-util/commands/app_test.go deleted file mode 100644 index 50ed5bee961e6..0000000000000 --- a/cmd/argocd-util/commands/app_test.go +++ /dev/null @@ -1,182 +0,0 @@ -package commands - -import ( - "testing" - - "github.com/argoproj/argo-cd/v2/test" - - clustermocks "github.com/argoproj/gitops-engine/pkg/cache/mocks" - "github.com/argoproj/gitops-engine/pkg/health" - "github.com/argoproj/gitops-engine/pkg/utils/kube" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - kubefake "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/tools/cache" - - "github.com/argoproj/argo-cd/v2/common" - statecache "github.com/argoproj/argo-cd/v2/controller/cache" - cachemocks "github.com/argoproj/argo-cd/v2/controller/cache/mocks" - "github.com/argoproj/argo-cd/v2/controller/metrics" - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" - appfake 
"github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/fake" - "github.com/argoproj/argo-cd/v2/reposerver/apiclient" - "github.com/argoproj/argo-cd/v2/reposerver/apiclient/mocks" - "github.com/argoproj/argo-cd/v2/util/db" - "github.com/argoproj/argo-cd/v2/util/settings" -) - -func TestGetReconcileResults(t *testing.T) { - appClientset := appfake.NewSimpleClientset(&v1alpha1.Application{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "default", - }, - Status: v1alpha1.ApplicationStatus{ - Health: v1alpha1.HealthStatus{Status: health.HealthStatusHealthy}, - Sync: v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeOutOfSync}, - }, - }) - - result, err := getReconcileResults(appClientset, "default", "") - if !assert.NoError(t, err) { - return - } - - expectedResults := []appReconcileResult{{ - Name: "test", - Health: &v1alpha1.HealthStatus{Status: health.HealthStatusHealthy}, - Sync: &v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeOutOfSync}, - }} - assert.ElementsMatch(t, expectedResults, result) -} - -func TestGetReconcileResults_Refresh(t *testing.T) { - cm := corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "argocd-cm", - Namespace: "default", - Labels: map[string]string{ - "app.kubernetes.io/part-of": "argocd", - }, - }, - } - proj := &v1alpha1.AppProject{ - ObjectMeta: metav1.ObjectMeta{ - Name: "default", - Namespace: "default", - }, - Spec: v1alpha1.AppProjectSpec{Destinations: []v1alpha1.ApplicationDestination{{Namespace: "*", Server: "*"}}}, - } - - app := &v1alpha1.Application{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "default", - }, - Spec: v1alpha1.ApplicationSpec{ - Project: "default", - Destination: v1alpha1.ApplicationDestination{ - Server: common.KubernetesInternalAPIServerAddr, - Namespace: "default", - }, - }, - } - - appClientset := appfake.NewSimpleClientset(app, proj) - deployment := test.NewDeployment() - kubeClientset := kubefake.NewSimpleClientset(deployment, &cm) - clusterCache := 
clustermocks.ClusterCache{} - clusterCache.On("IsNamespaced", mock.Anything).Return(true, nil) - repoServerClient := mocks.RepoServerServiceClient{} - repoServerClient.On("GenerateManifest", mock.Anything, mock.Anything).Return(&apiclient.ManifestResponse{ - Manifests: []string{test.DeploymentManifest}, - }, nil) - repoServerClientset := mocks.Clientset{RepoServerServiceClient: &repoServerClient} - liveStateCache := cachemocks.LiveStateCache{} - liveStateCache.On("GetManagedLiveObjs", mock.Anything, mock.Anything).Return(map[kube.ResourceKey]*unstructured.Unstructured{ - kube.GetResourceKey(deployment): deployment, - }, nil) - liveStateCache.On("GetVersionsInfo", mock.Anything).Return("v1.2.3", nil, nil) - liveStateCache.On("Init").Return(nil, nil) - liveStateCache.On("GetClusterCache", mock.Anything).Return(&clusterCache, nil) - liveStateCache.On("IsNamespaced", mock.Anything, mock.Anything).Return(true, nil) - - result, err := reconcileApplications(kubeClientset, appClientset, "default", &repoServerClientset, "", - func(argoDB db.ArgoDB, appInformer cache.SharedIndexInformer, settingsMgr *settings.SettingsManager, server *metrics.MetricsServer) statecache.LiveStateCache { - return &liveStateCache - }, - ) - - if !assert.NoError(t, err) { - return - } - - assert.Equal(t, result[0].Health.Status, health.HealthStatusMissing) - assert.Equal(t, result[0].Sync.Status, v1alpha1.SyncStatusCodeOutOfSync) -} - -func TestDiffReconcileResults_NoDifferences(t *testing.T) { - logs, err := captureStdout(func() { - assert.NoError(t, diffReconcileResults( - reconcileResults{Applications: []appReconcileResult{{ - Name: "app1", - Sync: &v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeOutOfSync}, - }}}, - reconcileResults{Applications: []appReconcileResult{{ - Name: "app1", - Sync: &v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeOutOfSync}, - }}}, - )) - }) - assert.NoError(t, err) - assert.Equal(t, "app1\n", logs) -} - -func TestDiffReconcileResults_DifferentApps(t 
*testing.T) { - logs, err := captureStdout(func() { - assert.NoError(t, diffReconcileResults( - reconcileResults{Applications: []appReconcileResult{{ - Name: "app1", - Sync: &v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeOutOfSync}, - }, { - Name: "app2", - Sync: &v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeOutOfSync}, - }}}, - reconcileResults{Applications: []appReconcileResult{{ - Name: "app1", - Sync: &v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeOutOfSync}, - }, { - Name: "app3", - Sync: &v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeOutOfSync}, - }}}, - )) - }) - assert.NoError(t, err) - assert.Equal(t, `app1 -app2 -1,9d0 -< conditions: null -< health: null -< name: app2 -< sync: -< comparedTo: -< destination: {} -< source: -< repoURL: "" -< status: OutOfSync -app3 -0a1,9 -> conditions: null -> health: null -> name: app3 -> sync: -> comparedTo: -> destination: {} -> source: -> repoURL: "" -> status: OutOfSync -`, logs) -} diff --git a/cmd/argocd-util/commands/argocd_util.go b/cmd/argocd-util/commands/argocd_util.go deleted file mode 100644 index c03e2e87cb575..0000000000000 --- a/cmd/argocd-util/commands/argocd_util.go +++ /dev/null @@ -1,244 +0,0 @@ -package commands - -import ( - "reflect" - - "github.com/ghodss/yaml" - "github.com/spf13/cobra" - apiv1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - - cmdutil "github.com/argoproj/argo-cd/v2/cmd/util" - "github.com/argoproj/argo-cd/v2/common" - "github.com/argoproj/argo-cd/v2/util/cli" - "github.com/argoproj/argo-cd/v2/util/errors" - "github.com/argoproj/argo-cd/v2/util/settings" -) - -const ( - // CLIName is the name of the CLI - cliName = "argocd-util" - // YamlSeparator separates sections of a YAML file - yamlSeparator = "---\n" -) - -var ( - configMapResource = 
schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"} - secretResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"} - applicationsResource = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "applications"} - appprojectsResource = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "appprojects"} -) - -// NewCommand returns a new instance of an argocd command -func NewCommand() *cobra.Command { - var ( - pathOpts = clientcmd.NewDefaultPathOptions() - ) - - var command = &cobra.Command{ - Use: cliName, - Short: "argocd-util tools used by Argo CD", - Long: "argocd-util has internal utility tools used by Argo CD", - DisableAutoGenTag: true, - Run: func(c *cobra.Command, args []string) { - c.HelpFunc()(c, args) - }, - } - - command.AddCommand(cli.NewVersionCmd(cliName)) - command.AddCommand(NewClusterCommand(pathOpts)) - command.AddCommand(NewProjectsCommand()) - command.AddCommand(NewSettingsCommand()) - command.AddCommand(NewAppCommand()) - command.AddCommand(NewRepoCommand()) - command.AddCommand(NewImportCommand()) - command.AddCommand(NewExportCommand()) - - command.Flags().StringVar(&cmdutil.LogFormat, "logformat", "text", "Set the logging format. One of: text|json") - command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", "info", "Set the logging level. 
One of: debug|info|warn|error") - return command -} - -type argoCDClientsets struct { - configMaps dynamic.ResourceInterface - secrets dynamic.ResourceInterface - applications dynamic.ResourceInterface - projects dynamic.ResourceInterface -} - -func newArgoCDClientsets(config *rest.Config, namespace string) *argoCDClientsets { - dynamicIf, err := dynamic.NewForConfig(config) - errors.CheckError(err) - return &argoCDClientsets{ - configMaps: dynamicIf.Resource(configMapResource).Namespace(namespace), - secrets: dynamicIf.Resource(secretResource).Namespace(namespace), - applications: dynamicIf.Resource(applicationsResource).Namespace(namespace), - projects: dynamicIf.Resource(appprojectsResource).Namespace(namespace), - } -} - -// getReferencedSecrets examines the argocd-cm config for any referenced repo secrets and returns a -// map of all referenced secrets. -func getReferencedSecrets(un unstructured.Unstructured) map[string]bool { - var cm apiv1.ConfigMap - err := runtime.DefaultUnstructuredConverter.FromUnstructured(un.Object, &cm) - errors.CheckError(err) - referencedSecrets := make(map[string]bool) - - // Referenced repository secrets - if reposRAW, ok := cm.Data["repositories"]; ok { - repos := make([]settings.Repository, 0) - err := yaml.Unmarshal([]byte(reposRAW), &repos) - errors.CheckError(err) - for _, cred := range repos { - if cred.PasswordSecret != nil { - referencedSecrets[cred.PasswordSecret.Name] = true - } - if cred.SSHPrivateKeySecret != nil { - referencedSecrets[cred.SSHPrivateKeySecret.Name] = true - } - if cred.UsernameSecret != nil { - referencedSecrets[cred.UsernameSecret.Name] = true - } - if cred.TLSClientCertDataSecret != nil { - referencedSecrets[cred.TLSClientCertDataSecret.Name] = true - } - if cred.TLSClientCertKeySecret != nil { - referencedSecrets[cred.TLSClientCertKeySecret.Name] = true - } - } - } - - // Referenced repository credentials secrets - if reposRAW, ok := cm.Data["repository.credentials"]; ok { - creds := 
make([]settings.RepositoryCredentials, 0) - err := yaml.Unmarshal([]byte(reposRAW), &creds) - errors.CheckError(err) - for _, cred := range creds { - if cred.PasswordSecret != nil { - referencedSecrets[cred.PasswordSecret.Name] = true - } - if cred.SSHPrivateKeySecret != nil { - referencedSecrets[cred.SSHPrivateKeySecret.Name] = true - } - if cred.UsernameSecret != nil { - referencedSecrets[cred.UsernameSecret.Name] = true - } - if cred.TLSClientCertDataSecret != nil { - referencedSecrets[cred.TLSClientCertDataSecret.Name] = true - } - if cred.TLSClientCertKeySecret != nil { - referencedSecrets[cred.TLSClientCertKeySecret.Name] = true - } - } - } - return referencedSecrets -} - -// isArgoCDSecret returns whether or not the given secret is a part of Argo CD configuration -// (e.g. argocd-secret, repo credentials, or cluster credentials) -func isArgoCDSecret(repoSecretRefs map[string]bool, un unstructured.Unstructured) bool { - secretName := un.GetName() - if secretName == common.ArgoCDSecretName { - return true - } - if repoSecretRefs != nil { - if _, ok := repoSecretRefs[secretName]; ok { - return true - } - } - if labels := un.GetLabels(); labels != nil { - if _, ok := labels[common.LabelKeySecretType]; ok { - return true - } - } - if annotations := un.GetAnnotations(); annotations != nil { - if annotations[common.AnnotationKeyManagedBy] == common.AnnotationValueManagedByArgoCD { - return true - } - } - return false -} - -// isArgoCDConfigMap returns true if the configmap name is one of argo cd's well known configmaps -func isArgoCDConfigMap(name string) bool { - switch name { - case common.ArgoCDConfigMapName, common.ArgoCDRBACConfigMapName, common.ArgoCDKnownHostsConfigMapName, common.ArgoCDTLSCertsConfigMapName: - return true - } - return false - -} - -// specsEqual returns if the spec, data, labels, annotations, and finalizers of the two -// supplied objects are equal, indicating that no update is necessary during importing -func specsEqual(left, right 
unstructured.Unstructured) bool { - if !reflect.DeepEqual(left.GetAnnotations(), right.GetAnnotations()) { - return false - } - if !reflect.DeepEqual(left.GetLabels(), right.GetLabels()) { - return false - } - if !reflect.DeepEqual(left.GetFinalizers(), right.GetFinalizers()) { - return false - } - switch left.GetKind() { - case "Secret", "ConfigMap": - leftData, _, _ := unstructured.NestedMap(left.Object, "data") - rightData, _, _ := unstructured.NestedMap(right.Object, "data") - return reflect.DeepEqual(leftData, rightData) - case "AppProject": - leftSpec, _, _ := unstructured.NestedMap(left.Object, "spec") - rightSpec, _, _ := unstructured.NestedMap(right.Object, "spec") - return reflect.DeepEqual(leftSpec, rightSpec) - case "Application": - leftSpec, _, _ := unstructured.NestedMap(left.Object, "spec") - rightSpec, _, _ := unstructured.NestedMap(right.Object, "spec") - leftStatus, _, _ := unstructured.NestedMap(left.Object, "status") - rightStatus, _, _ := unstructured.NestedMap(right.Object, "status") - // reconciledAt and observedAt are constantly changing and we ignore any diff there - delete(leftStatus, "reconciledAt") - delete(rightStatus, "reconciledAt") - delete(leftStatus, "observedAt") - delete(rightStatus, "observedAt") - return reflect.DeepEqual(leftSpec, rightSpec) && reflect.DeepEqual(leftStatus, rightStatus) - } - return false -} - -func iterateStringFields(obj interface{}, callback func(name string, val string) string) { - if mapField, ok := obj.(map[string]interface{}); ok { - for field, val := range mapField { - if strVal, ok := val.(string); ok { - mapField[field] = callback(field, strVal) - } else { - iterateStringFields(val, callback) - } - } - } else if arrayField, ok := obj.([]interface{}); ok { - for i := range arrayField { - iterateStringFields(arrayField[i], callback) - } - } -} - -func redactor(dirtyString string) string { - config := make(map[string]interface{}) - err := yaml.Unmarshal([]byte(dirtyString), &config) - 
errors.CheckError(err) - iterateStringFields(config, func(name string, val string) string { - if name == "clientSecret" || name == "secret" || name == "bindPW" { - return "********" - } else { - return val - } - }) - data, err := yaml.Marshal(config) - errors.CheckError(err) - return string(data) -} diff --git a/cmd/argocd-util/commands/backup.go b/cmd/argocd-util/commands/backup.go deleted file mode 100644 index fa33359f917f7..0000000000000 --- a/cmd/argocd-util/commands/backup.go +++ /dev/null @@ -1,298 +0,0 @@ -package commands - -import ( - "bufio" - "context" - "fmt" - "io" - "io/ioutil" - "os" - - "github.com/argoproj/gitops-engine/pkg/utils/kube" - "github.com/ghodss/yaml" - "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - apierr "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/tools/clientcmd" - - "github.com/argoproj/argo-cd/v2/common" - "github.com/argoproj/argo-cd/v2/util/cli" - "github.com/argoproj/argo-cd/v2/util/errors" -) - -// NewExportCommand defines a new command for exporting Kubernetes and Argo CD resources. 
-func NewExportCommand() *cobra.Command { - var ( - clientConfig clientcmd.ClientConfig - out string - ) - var command = cobra.Command{ - Use: "export", - Short: "Export all Argo CD data to stdout (default) or a file", - Run: func(c *cobra.Command, args []string) { - config, err := clientConfig.ClientConfig() - errors.CheckError(err) - namespace, _, err := clientConfig.Namespace() - errors.CheckError(err) - - var writer io.Writer - if out == "-" { - writer = os.Stdout - } else { - f, err := os.Create(out) - errors.CheckError(err) - bw := bufio.NewWriter(f) - writer = bw - defer func() { - err = bw.Flush() - errors.CheckError(err) - err = f.Close() - errors.CheckError(err) - }() - } - - acdClients := newArgoCDClientsets(config, namespace) - acdConfigMap, err := acdClients.configMaps.Get(context.Background(), common.ArgoCDConfigMapName, v1.GetOptions{}) - errors.CheckError(err) - export(writer, *acdConfigMap) - acdRBACConfigMap, err := acdClients.configMaps.Get(context.Background(), common.ArgoCDRBACConfigMapName, v1.GetOptions{}) - errors.CheckError(err) - export(writer, *acdRBACConfigMap) - acdKnownHostsConfigMap, err := acdClients.configMaps.Get(context.Background(), common.ArgoCDKnownHostsConfigMapName, v1.GetOptions{}) - errors.CheckError(err) - export(writer, *acdKnownHostsConfigMap) - acdTLSCertsConfigMap, err := acdClients.configMaps.Get(context.Background(), common.ArgoCDTLSCertsConfigMapName, v1.GetOptions{}) - errors.CheckError(err) - export(writer, *acdTLSCertsConfigMap) - - referencedSecrets := getReferencedSecrets(*acdConfigMap) - secrets, err := acdClients.secrets.List(context.Background(), v1.ListOptions{}) - errors.CheckError(err) - for _, secret := range secrets.Items { - if isArgoCDSecret(referencedSecrets, secret) { - export(writer, secret) - } - } - projects, err := acdClients.projects.List(context.Background(), v1.ListOptions{}) - errors.CheckError(err) - for _, proj := range projects.Items { - export(writer, proj) - } - applications, err := 
acdClients.applications.List(context.Background(), v1.ListOptions{}) - errors.CheckError(err) - for _, app := range applications.Items { - export(writer, app) - } - }, - } - - clientConfig = cli.AddKubectlFlagsToCmd(&command) - command.Flags().StringVarP(&out, "out", "o", "-", "Output to the specified file instead of stdout") - - return &command -} - -// NewImportCommand defines a new command for exporting Kubernetes and Argo CD resources. -func NewImportCommand() *cobra.Command { - var ( - clientConfig clientcmd.ClientConfig - prune bool - dryRun bool - verbose bool - ) - var command = cobra.Command{ - Use: "import SOURCE", - Short: "Import Argo CD data from stdin (specify `-') or a file", - Run: func(c *cobra.Command, args []string) { - if len(args) != 1 { - c.HelpFunc()(c, args) - os.Exit(1) - } - config, err := clientConfig.ClientConfig() - errors.CheckError(err) - config.QPS = 100 - config.Burst = 50 - errors.CheckError(err) - namespace, _, err := clientConfig.Namespace() - errors.CheckError(err) - acdClients := newArgoCDClientsets(config, namespace) - - var input []byte - if in := args[0]; in == "-" { - input, err = ioutil.ReadAll(os.Stdin) - } else { - input, err = ioutil.ReadFile(in) - } - errors.CheckError(err) - var dryRunMsg string - if dryRun { - dryRunMsg = " (dry run)" - } - - // pruneObjects tracks live objects and it's current resource version. any remaining - // items in this map indicates the resource should be pruned since it no longer appears - // in the backup - pruneObjects := make(map[kube.ResourceKey]unstructured.Unstructured) - configMaps, err := acdClients.configMaps.List(context.Background(), v1.ListOptions{}) - errors.CheckError(err) - // referencedSecrets holds any secrets referenced in the argocd-cm configmap. 
These - // secrets need to be imported too - var referencedSecrets map[string]bool - for _, cm := range configMaps.Items { - if isArgoCDConfigMap(cm.GetName()) { - pruneObjects[kube.ResourceKey{Group: "", Kind: "ConfigMap", Name: cm.GetName()}] = cm - } - if cm.GetName() == common.ArgoCDConfigMapName { - referencedSecrets = getReferencedSecrets(cm) - } - } - - secrets, err := acdClients.secrets.List(context.Background(), v1.ListOptions{}) - errors.CheckError(err) - for _, secret := range secrets.Items { - if isArgoCDSecret(referencedSecrets, secret) { - pruneObjects[kube.ResourceKey{Group: "", Kind: "Secret", Name: secret.GetName()}] = secret - } - } - applications, err := acdClients.applications.List(context.Background(), v1.ListOptions{}) - errors.CheckError(err) - for _, app := range applications.Items { - pruneObjects[kube.ResourceKey{Group: "argoproj.io", Kind: "Application", Name: app.GetName()}] = app - } - projects, err := acdClients.projects.List(context.Background(), v1.ListOptions{}) - errors.CheckError(err) - for _, proj := range projects.Items { - pruneObjects[kube.ResourceKey{Group: "argoproj.io", Kind: "AppProject", Name: proj.GetName()}] = proj - } - - // Create or replace existing object - backupObjects, err := kube.SplitYAML(input) - errors.CheckError(err) - for _, bakObj := range backupObjects { - gvk := bakObj.GroupVersionKind() - key := kube.ResourceKey{Group: gvk.Group, Kind: gvk.Kind, Name: bakObj.GetName()} - liveObj, exists := pruneObjects[key] - delete(pruneObjects, key) - var dynClient dynamic.ResourceInterface - switch bakObj.GetKind() { - case "Secret": - dynClient = acdClients.secrets - case "ConfigMap": - dynClient = acdClients.configMaps - case "AppProject": - dynClient = acdClients.projects - case "Application": - dynClient = acdClients.applications - } - if !exists { - if !dryRun { - _, err = dynClient.Create(context.Background(), bakObj, v1.CreateOptions{}) - errors.CheckError(err) - } - fmt.Printf("%s/%s %s created%s\n", 
gvk.Group, gvk.Kind, bakObj.GetName(), dryRunMsg) - } else if specsEqual(*bakObj, liveObj) { - if verbose { - fmt.Printf("%s/%s %s unchanged%s\n", gvk.Group, gvk.Kind, bakObj.GetName(), dryRunMsg) - } - } else { - if !dryRun { - newLive := updateLive(bakObj, &liveObj) - _, err = dynClient.Update(context.Background(), newLive, v1.UpdateOptions{}) - errors.CheckError(err) - } - fmt.Printf("%s/%s %s updated%s\n", gvk.Group, gvk.Kind, bakObj.GetName(), dryRunMsg) - } - } - - // Delete objects not in backup - for key, liveObj := range pruneObjects { - if prune { - var dynClient dynamic.ResourceInterface - switch key.Kind { - case "Secret": - dynClient = acdClients.secrets - case "AppProject": - dynClient = acdClients.projects - case "Application": - dynClient = acdClients.applications - if !dryRun { - if finalizers := liveObj.GetFinalizers(); len(finalizers) > 0 { - newLive := liveObj.DeepCopy() - newLive.SetFinalizers(nil) - _, err = dynClient.Update(context.Background(), newLive, v1.UpdateOptions{}) - if err != nil && !apierr.IsNotFound(err) { - errors.CheckError(err) - } - } - } - default: - logrus.Fatalf("Unexpected kind '%s' in prune list", key.Kind) - } - if !dryRun { - err = dynClient.Delete(context.Background(), key.Name, v1.DeleteOptions{}) - if err != nil && !apierr.IsNotFound(err) { - errors.CheckError(err) - } - } - fmt.Printf("%s/%s %s pruned%s\n", key.Group, key.Kind, key.Name, dryRunMsg) - } else { - fmt.Printf("%s/%s %s needs pruning\n", key.Group, key.Kind, key.Name) - } - } - }, - } - - clientConfig = cli.AddKubectlFlagsToCmd(&command) - command.Flags().BoolVar(&dryRun, "dry-run", false, "Print what will be performed") - command.Flags().BoolVar(&prune, "prune", false, "Prune secrets, applications and projects which do not appear in the backup") - command.Flags().BoolVar(&verbose, "verbose", false, "Verbose output (versus only changed output)") - - return &command -} - -// export writes the unstructured object and removes extraneous cruft from output 
before writing -func export(w io.Writer, un unstructured.Unstructured) { - name := un.GetName() - finalizers := un.GetFinalizers() - apiVersion := un.GetAPIVersion() - kind := un.GetKind() - labels := un.GetLabels() - annotations := un.GetAnnotations() - unstructured.RemoveNestedField(un.Object, "metadata") - un.SetName(name) - un.SetFinalizers(finalizers) - un.SetAPIVersion(apiVersion) - un.SetKind(kind) - un.SetLabels(labels) - un.SetAnnotations(annotations) - data, err := yaml.Marshal(un.Object) - errors.CheckError(err) - _, err = w.Write(data) - errors.CheckError(err) - _, err = w.Write([]byte(yamlSeparator)) - errors.CheckError(err) -} - -// updateLive replaces the live object's finalizers, spec, annotations, labels, and data from the -// backup object but leaves all other fields intact (status, other metadata, etc...) -func updateLive(bak, live *unstructured.Unstructured) *unstructured.Unstructured { - newLive := live.DeepCopy() - newLive.SetAnnotations(bak.GetAnnotations()) - newLive.SetLabels(bak.GetLabels()) - newLive.SetFinalizers(bak.GetFinalizers()) - switch live.GetKind() { - case "Secret", "ConfigMap": - newLive.Object["data"] = bak.Object["data"] - case "AppProject": - newLive.Object["spec"] = bak.Object["spec"] - case "Application": - newLive.Object["spec"] = bak.Object["spec"] - if _, ok := bak.Object["status"]; ok { - newLive.Object["status"] = bak.Object["status"] - } - } - return newLive -} diff --git a/cmd/argocd-util/commands/cluster.go b/cmd/argocd-util/commands/cluster.go deleted file mode 100644 index 5885a6b5012e2..0000000000000 --- a/cmd/argocd-util/commands/cluster.go +++ /dev/null @@ -1,238 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "os" - "text/tabwriter" - "time" - - "github.com/argoproj/gitops-engine/pkg/utils/kube" - "github.com/go-redis/redis/v8" - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - 
"k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/tools/clientcmd" - - cmdutil "github.com/argoproj/argo-cd/v2/cmd/util" - "github.com/argoproj/argo-cd/v2/common" - "github.com/argoproj/argo-cd/v2/controller/sharding" - argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" - cacheutil "github.com/argoproj/argo-cd/v2/util/cache" - appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate" - "github.com/argoproj/argo-cd/v2/util/cli" - "github.com/argoproj/argo-cd/v2/util/db" - "github.com/argoproj/argo-cd/v2/util/errors" - kubeutil "github.com/argoproj/argo-cd/v2/util/kube" - "github.com/argoproj/argo-cd/v2/util/settings" -) - -func NewClusterCommand(pathOpts *clientcmd.PathOptions) *cobra.Command { - var command = &cobra.Command{ - Use: "cluster", - Short: "Manage clusters configuration", - Run: func(c *cobra.Command, args []string) { - c.HelpFunc()(c, args) - }, - } - - command.AddCommand(NewClusterConfig()) - command.AddCommand(NewGenClusterConfigCommand(pathOpts)) - command.AddCommand(NewClusterStatsCommand()) - - return command -} - -func NewClusterStatsCommand() *cobra.Command { - var ( - shard int - replicas int - clientConfig clientcmd.ClientConfig - cacheSrc func() (*appstatecache.Cache, error) - portForwardRedis bool - ) - var command = cobra.Command{ - Use: "stats", - Short: "Prints information cluster statistics and inferred shard number", - Run: func(cmd *cobra.Command, args []string) { - log.SetLevel(log.WarnLevel) - - clientCfg, err := clientConfig.ClientConfig() - errors.CheckError(err) - namespace, _, err := clientConfig.Namespace() - errors.CheckError(err) - - kubeClient := kubernetes.NewForConfigOrDie(clientCfg) - if replicas == 0 { - controllerPods, err := kubeClient.CoreV1().Pods(namespace).List(context.Background(), v1.ListOptions{ - LabelSelector: "app.kubernetes.io/name=argocd-application-controller"}) - errors.CheckError(err) - replicas = len(controllerPods.Items) - } - - settingsMgr := 
settings.NewSettingsManager(context.Background(), kubeClient, namespace) - - argoDB := db.NewDB(namespace, settingsMgr, kubeClient) - clusters, err := argoDB.ListClusters(context.Background()) - errors.CheckError(err) - var cache *appstatecache.Cache - if portForwardRedis { - overrides := clientcmd.ConfigOverrides{} - port, err := kubeutil.PortForward("app.kubernetes.io/name=argocd-redis-ha-haproxy", 6379, namespace, &overrides) - errors.CheckError(err) - client := redis.NewClient(&redis.Options{Addr: fmt.Sprintf("localhost:%d", port)}) - cache = appstatecache.NewCache(cacheutil.NewCache(cacheutil.NewRedisCache(client, time.Hour)), time.Hour) - } else { - cache, err = cacheSrc() - errors.CheckError(err) - } - - w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) - _, _ = fmt.Fprintf(w, "SERVER\tSHARD\tCONNECTION\tAPPS COUNT\tRESOURCES COUNT\n") - - for _, cluster := range clusters.Items { - clusterShard := 0 - if replicas > 0 { - clusterShard = sharding.GetShardByID(cluster.ID, replicas) - } - - if shard != -1 && clusterShard != shard { - continue - } - - var info argoappv1.ClusterInfo - _ = cache.GetClusterInfo(cluster.Server, &info) - _, _ = fmt.Fprintf(w, "%s\t%d\t%s\t%d\t%d\n", cluster.Server, clusterShard, info.ConnectionState.Status, info.ApplicationsCount, info.CacheInfo.ResourcesCount) - } - _ = w.Flush() - }, - } - clientConfig = cli.AddKubectlFlagsToCmd(&command) - command.Flags().IntVar(&shard, "shard", -1, "Cluster shard filter") - command.Flags().IntVar(&replicas, "replicas", 0, "Application controller replicas count. 
Inferred from number of running controller pods if not specified") - command.Flags().BoolVar(&portForwardRedis, "port-forward-redis", true, "Automatically port-forward ha proxy redis from current namespace?") - cacheSrc = appstatecache.AddCacheFlagsToCmd(&command) - return &command -} - -// NewClusterConfig returns a new instance of `argocd-util kubeconfig` command -func NewClusterConfig() *cobra.Command { - var ( - clientConfig clientcmd.ClientConfig - ) - var command = &cobra.Command{ - Use: "kubeconfig CLUSTER_URL OUTPUT_PATH", - Short: "Generates kubeconfig for the specified cluster", - DisableAutoGenTag: true, - Run: func(c *cobra.Command, args []string) { - if len(args) != 2 { - c.HelpFunc()(c, args) - os.Exit(1) - } - serverUrl := args[0] - output := args[1] - conf, err := clientConfig.ClientConfig() - errors.CheckError(err) - namespace, _, err := clientConfig.Namespace() - errors.CheckError(err) - kubeclientset, err := kubernetes.NewForConfig(conf) - errors.CheckError(err) - - cluster, err := db.NewDB(namespace, settings.NewSettingsManager(context.Background(), kubeclientset, namespace), kubeclientset).GetCluster(context.Background(), serverUrl) - errors.CheckError(err) - err = kube.WriteKubeConfig(cluster.RawRestConfig(), namespace, output) - errors.CheckError(err) - }, - } - clientConfig = cli.AddKubectlFlagsToCmd(command) - return command -} - -func NewGenClusterConfigCommand(pathOpts *clientcmd.PathOptions) *cobra.Command { - var ( - clusterOpts cmdutil.ClusterOptions - bearerToken string - outputFormat string - ) - var command = &cobra.Command{ - Use: "generate-spec CONTEXT", - Short: "Generate declarative config for a cluster", - Run: func(c *cobra.Command, args []string) { - log.SetLevel(log.WarnLevel) - var configAccess clientcmd.ConfigAccess = pathOpts - if len(args) == 0 { - log.Error("Choose a context name from:") - cmdutil.PrintKubeContexts(configAccess) - os.Exit(1) - } - cfgAccess, err := configAccess.GetStartingConfig() - 
errors.CheckError(err) - contextName := args[0] - clstContext := cfgAccess.Contexts[contextName] - if clstContext == nil { - log.Fatalf("Context %s does not exist in kubeconfig", contextName) - } - - overrides := clientcmd.ConfigOverrides{ - Context: *clstContext, - } - clientConfig := clientcmd.NewDefaultClientConfig(*cfgAccess, &overrides) - conf, err := clientConfig.ClientConfig() - errors.CheckError(err) - kubeClientset := fake.NewSimpleClientset() - - var awsAuthConf *argoappv1.AWSAuthConfig - var execProviderConf *argoappv1.ExecProviderConfig - if clusterOpts.AwsClusterName != "" { - awsAuthConf = &argoappv1.AWSAuthConfig{ - ClusterName: clusterOpts.AwsClusterName, - RoleARN: clusterOpts.AwsRoleArn, - } - } else if clusterOpts.ExecProviderCommand != "" { - execProviderConf = &argoappv1.ExecProviderConfig{ - Command: clusterOpts.ExecProviderCommand, - Args: clusterOpts.ExecProviderArgs, - Env: clusterOpts.ExecProviderEnv, - APIVersion: clusterOpts.ExecProviderAPIVersion, - InstallHint: clusterOpts.ExecProviderInstallHint, - } - } else if bearerToken == "" { - bearerToken = "bearer-token" - } - if clusterOpts.Name != "" { - contextName = clusterOpts.Name - } - clst := cmdutil.NewCluster(contextName, clusterOpts.Namespaces, conf, bearerToken, awsAuthConf, execProviderConf) - if clusterOpts.InCluster { - clst.Server = common.KubernetesInternalAPIServerAddr - } - if clusterOpts.Shard >= 0 { - clst.Shard = &clusterOpts.Shard - } - - settingsMgr := settings.NewSettingsManager(context.Background(), kubeClientset, ArgoCDNamespace) - argoDB := db.NewDB(ArgoCDNamespace, settingsMgr, kubeClientset) - - _, err = argoDB.CreateCluster(context.Background(), clst) - errors.CheckError(err) - - secName, err := db.ServerToSecretName(clst.Server) - errors.CheckError(err) - - secret, err := kubeClientset.CoreV1().Secrets(ArgoCDNamespace).Get(context.Background(), secName, v1.GetOptions{}) - errors.CheckError(err) - - cmdutil.ConvertSecretData(secret) - var printResources 
[]interface{} - printResources = append(printResources, secret) - errors.CheckError(cmdutil.PrintResources(printResources, outputFormat)) - }, - } - command.PersistentFlags().StringVar(&pathOpts.LoadingRules.ExplicitPath, pathOpts.ExplicitFileFlag, pathOpts.LoadingRules.ExplicitPath, "use a particular kubeconfig file") - command.Flags().StringVar(&bearerToken, "bearer-token", "", "Authentication token that should be used to access K8S API server") - command.Flags().StringVarP(&outputFormat, "output", "o", "yaml", "Output format. One of: json|yaml") - cmdutil.AddClusterFlags(command, &clusterOpts) - return command -} diff --git a/cmd/argocd-util/commands/project.go b/cmd/argocd-util/commands/project.go deleted file mode 100644 index e4f484c610528..0000000000000 --- a/cmd/argocd-util/commands/project.go +++ /dev/null @@ -1,226 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "os" - "path/filepath" - "strings" - - cmdutil "github.com/argoproj/argo-cd/v2/cmd/util" - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" - appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned" - appclient "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1" - "github.com/argoproj/argo-cd/v2/util/cli" - "github.com/argoproj/argo-cd/v2/util/errors" - - "github.com/argoproj/gitops-engine/pkg/utils/kube" - "github.com/spf13/cobra" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/clientcmd" -) - -func NewProjectsCommand() *cobra.Command { - var command = &cobra.Command{ - Use: "proj", - Short: "Manage projects configuration", - Run: func(c *cobra.Command, args []string) { - c.HelpFunc()(c, args) - }, - } - - command.AddCommand(NewGenProjectSpecCommand()) - command.AddCommand(NewUpdatePolicyRuleCommand()) - command.AddCommand(NewProjectAllowListGenCommand()) - return command -} - -// NewGenProjectConfigCommand generates declarative configuration file for given project -func 
NewGenProjectSpecCommand() *cobra.Command { - var ( - opts cmdutil.ProjectOpts - fileURL string - outputFormat string - ) - var command = &cobra.Command{ - Use: "generate-spec PROJECT", - Short: "Generate declarative config for a project", - Run: func(c *cobra.Command, args []string) { - proj, err := cmdutil.ConstructAppProj(fileURL, args, opts, c) - errors.CheckError(err) - - var printResources []interface{} - printResources = append(printResources, proj) - errors.CheckError(cmdutil.PrintResources(printResources, outputFormat)) - }, - } - command.Flags().StringVarP(&outputFormat, "output", "o", "yaml", "Output format. One of: json|yaml") - command.Flags().StringVarP(&fileURL, "file", "f", "", "Filename or URL to Kubernetes manifests for the project") - - // Only complete files with appropriate extension. - err := command.Flags().SetAnnotation("file", cobra.BashCompFilenameExt, []string{"json", "yaml", "yml"}) - errors.CheckError(err) - - cmdutil.AddProjFlags(command, &opts) - return command -} - -func globMatch(pattern string, val string) bool { - if pattern == "*" { - return true - } - if ok, err := filepath.Match(pattern, val); ok && err == nil { - return true - } - return false -} - -func getModification(modification string, resource string, scope string, permission string) (func(string, string) string, error) { - switch modification { - case "set": - if scope == "" { - return nil, fmt.Errorf("Flag --group cannot be empty if permission should be set in role") - } - if permission == "" { - return nil, fmt.Errorf("Flag --permission cannot be empty if permission should be set in role") - } - return func(proj string, action string) string { - return fmt.Sprintf("%s, %s, %s/%s, %s", resource, action, proj, scope, permission) - }, nil - case "remove": - return func(proj string, action string) string { - return "" - }, nil - } - return nil, fmt.Errorf("modification %s is not supported", modification) -} - -func saveProject(updated v1alpha1.AppProject, orig 
v1alpha1.AppProject, projectsIf appclient.AppProjectInterface, dryRun bool) error { - fmt.Printf("===== %s ======\n", updated.Name) - target, err := kube.ToUnstructured(&updated) - errors.CheckError(err) - live, err := kube.ToUnstructured(&orig) - if err != nil { - return err - } - _ = cli.PrintDiff(updated.Name, target, live) - if !dryRun { - _, err = projectsIf.Update(context.Background(), &updated, v1.UpdateOptions{}) - if err != nil { - return err - } - } - return nil -} - -func formatPolicy(proj string, role string, permission string) string { - return fmt.Sprintf("p, proj:%s:%s, %s", proj, role, permission) -} - -func split(input string, delimiter string) []string { - parts := strings.Split(input, delimiter) - for i := range parts { - parts[i] = strings.TrimSpace(parts[i]) - } - return parts -} - -func NewUpdatePolicyRuleCommand() *cobra.Command { - var ( - clientConfig clientcmd.ClientConfig - resource string - scope string - rolePattern string - permission string - dryRun bool - ) - var command = &cobra.Command{ - Use: "update-role-policy PROJECT_GLOB MODIFICATION ACTION", - Short: "Implement bulk project role update. 
Useful to back-fill existing project policies or remove obsolete actions.", - Example: ` # Add policy that allows executing any action (action/*) to roles which name matches to *deployer* in all projects - argocd-util projects update-role-policy '*' set 'action/*' --role '*deployer*' --resource applications --scope '*' --permission allow - - # Remove policy that which manages running (action/*) from all roles which name matches *deployer* in all projects - argocd-util projects update-role-policy '*' remove override --role '*deployer*' -`, - Run: func(c *cobra.Command, args []string) { - if len(args) != 3 { - c.HelpFunc()(c, args) - os.Exit(1) - } - projectGlob := args[0] - modificationType := args[1] - action := args[2] - - config, err := clientConfig.ClientConfig() - errors.CheckError(err) - config.QPS = 100 - config.Burst = 50 - - namespace, _, err := clientConfig.Namespace() - errors.CheckError(err) - appclients := appclientset.NewForConfigOrDie(config) - - modification, err := getModification(modificationType, resource, scope, permission) - errors.CheckError(err) - projIf := appclients.ArgoprojV1alpha1().AppProjects(namespace) - - err = updateProjects(projIf, projectGlob, rolePattern, action, modification, dryRun) - errors.CheckError(err) - }, - } - command.Flags().StringVar(&resource, "resource", "", "Resource e.g. 'applications'") - command.Flags().StringVar(&scope, "scope", "", "Resource scope e.g. '*'") - command.Flags().StringVar(&rolePattern, "role", "*", "Role name pattern e.g. 
'*deployer*'") - command.Flags().StringVar(&permission, "permission", "", "Action permission") - command.Flags().BoolVar(&dryRun, "dry-run", true, "Dry run") - clientConfig = cli.AddKubectlFlagsToCmd(command) - return command -} - -func updateProjects(projIf appclient.AppProjectInterface, projectGlob string, rolePattern string, action string, modification func(string, string) string, dryRun bool) error { - projects, err := projIf.List(context.Background(), v1.ListOptions{}) - if err != nil { - return err - } - for _, proj := range projects.Items { - if !globMatch(projectGlob, proj.Name) { - continue - } - origProj := proj.DeepCopy() - updated := false - for i, role := range proj.Spec.Roles { - if !globMatch(rolePattern, role.Name) { - continue - } - actionPolicyIndex := -1 - for i := range role.Policies { - parts := split(role.Policies[i], ",") - if len(parts) != 6 || parts[3] != action { - continue - } - actionPolicyIndex = i - break - } - policyPermission := modification(proj.Name, action) - if actionPolicyIndex == -1 && policyPermission != "" { - updated = true - role.Policies = append(role.Policies, formatPolicy(proj.Name, role.Name, policyPermission)) - } else if actionPolicyIndex > -1 && policyPermission == "" { - updated = true - role.Policies = append(role.Policies[:actionPolicyIndex], role.Policies[actionPolicyIndex+1:]...) 
- } else if actionPolicyIndex > -1 && policyPermission != "" { - updated = true - role.Policies[actionPolicyIndex] = formatPolicy(proj.Name, role.Name, policyPermission) - } - proj.Spec.Roles[i] = role - } - if updated { - err = saveProject(proj, *origProj, projIf, dryRun) - if err != nil { - return err - } - } - } - return nil -} diff --git a/cmd/argocd-util/commands/project_allowlist.go b/cmd/argocd-util/commands/project_allowlist.go deleted file mode 100644 index 96d52e15573fb..0000000000000 --- a/cmd/argocd-util/commands/project_allowlist.go +++ /dev/null @@ -1,144 +0,0 @@ -package commands - -import ( - "bufio" - "io" - "io/ioutil" - "os" - "strings" - - "github.com/ghodss/yaml" - "github.com/spf13/cobra" - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/discovery" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/tools/clientcmd" - - "github.com/argoproj/argo-cd/v2/util/errors" - - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" - "github.com/argoproj/argo-cd/v2/util/cli" - - // load the gcp plugin (required to authenticate against GKE clusters). - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - // load the oidc plugin (required to authenticate with OpenID Connect). - _ "k8s.io/client-go/plugin/pkg/client/auth/oidc" - // load the azure plugin (required to authenticate with AKS clusters). 
- _ "k8s.io/client-go/plugin/pkg/client/auth/azure" -) - -// NewProjectAllowListGenCommand generates a project from clusterRole -func NewProjectAllowListGenCommand() *cobra.Command { - var ( - clientConfig clientcmd.ClientConfig - out string - ) - var command = &cobra.Command{ - Use: "generate-allow-list CLUSTERROLE_PATH PROJ_NAME", - Short: "Generates project allow list from the specified clusterRole file", - Run: func(c *cobra.Command, args []string) { - if len(args) != 2 { - c.HelpFunc()(c, args) - os.Exit(1) - } - clusterRoleFileName := args[0] - projName := args[1] - - var writer io.Writer - if out == "-" { - writer = os.Stdout - } else { - f, err := os.Create(out) - errors.CheckError(err) - bw := bufio.NewWriter(f) - writer = bw - defer func() { - err = bw.Flush() - errors.CheckError(err) - err = f.Close() - errors.CheckError(err) - }() - } - - globalProj := generateProjectAllowList(clientConfig, clusterRoleFileName, projName) - - yamlBytes, err := yaml.Marshal(globalProj) - errors.CheckError(err) - - _, err = writer.Write(yamlBytes) - errors.CheckError(err) - }, - } - clientConfig = cli.AddKubectlFlagsToCmd(command) - command.Flags().StringVarP(&out, "out", "o", "-", "Output to the specified file instead of stdout") - - return command -} - -func generateProjectAllowList(clientConfig clientcmd.ClientConfig, clusterRoleFileName string, projName string) v1alpha1.AppProject { - yamlBytes, err := ioutil.ReadFile(clusterRoleFileName) - errors.CheckError(err) - var obj unstructured.Unstructured - err = yaml.Unmarshal(yamlBytes, &obj) - errors.CheckError(err) - - clusterRole := &rbacv1.ClusterRole{} - err = scheme.Scheme.Convert(&obj, clusterRole, nil) - errors.CheckError(err) - - config, err := clientConfig.ClientConfig() - errors.CheckError(err) - disco, err := discovery.NewDiscoveryClientForConfig(config) - errors.CheckError(err) - serverResources, err := disco.ServerPreferredResources() - errors.CheckError(err) - - resourceList := make([]metav1.GroupKind, 0) - 
for _, rule := range clusterRole.Rules { - if len(rule.APIGroups) <= 0 { - continue - } - - canCreate := false - for _, verb := range rule.Verbs { - if strings.EqualFold(verb, "Create") { - canCreate = true - break - } - } - - if !canCreate { - continue - } - - ruleApiGroup := rule.APIGroups[0] - for _, ruleResource := range rule.Resources { - for _, apiResourcesList := range serverResources { - gv, err := schema.ParseGroupVersion(apiResourcesList.GroupVersion) - if err != nil { - gv = schema.GroupVersion{} - } - if ruleApiGroup == gv.Group { - for _, apiResource := range apiResourcesList.APIResources { - if apiResource.Name == ruleResource { - resourceList = append(resourceList, metav1.GroupKind{Group: ruleApiGroup, Kind: apiResource.Kind}) - } - } - } - } - } - } - globalProj := v1alpha1.AppProject{ - TypeMeta: metav1.TypeMeta{ - Kind: "AppProject", - APIVersion: "argoproj.io/v1alpha1", - }, - ObjectMeta: metav1.ObjectMeta{Name: projName}, - Spec: v1alpha1.AppProjectSpec{}, - } - globalProj.Spec.NamespaceResourceWhitelist = resourceList - return globalProj -} diff --git a/cmd/argocd-util/commands/project_allowlist_test.go b/cmd/argocd-util/commands/project_allowlist_test.go deleted file mode 100644 index ab81f8e61d7e9..0000000000000 --- a/cmd/argocd-util/commands/project_allowlist_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package commands - -import ( - "reflect" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/undefinedlabs/go-mpatch" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/discovery" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" -) - -func TestProjectAllowListGen(t *testing.T) { - useMock := true - rules := clientcmd.NewDefaultClientConfigLoadingRules() - overrides := &clientcmd.ConfigOverrides{} - clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides) - - if useMock { - var patchClientConfig *mpatch.Patch - patchClientConfig, err := 
mpatch.PatchInstanceMethodByName(reflect.TypeOf(clientConfig), "ClientConfig", func(*clientcmd.DeferredLoadingClientConfig) (*restclient.Config, error) { - return nil, nil - }) - assert.NoError(t, err) - - patch, err := mpatch.PatchMethod(discovery.NewDiscoveryClientForConfig, func(c *restclient.Config) (*discovery.DiscoveryClient, error) { - return &discovery.DiscoveryClient{LegacyPrefix: "/api"}, nil - }) - assert.NoError(t, err) - - var patchSeverPreferedResources *mpatch.Patch - discoClient := &discovery.DiscoveryClient{} - patchSeverPreferedResources, err = mpatch.PatchInstanceMethodByName(reflect.TypeOf(discoClient), "ServerPreferredResources", func(*discovery.DiscoveryClient) ([]*metav1.APIResourceList, error) { - res := metav1.APIResource{ - Name: "services", - Kind: "Service", - } - resourceList := []*metav1.APIResourceList{{APIResources: []metav1.APIResource{res}}} - return resourceList, nil - }) - assert.NoError(t, err) - - defer func() { - err = patchClientConfig.Unpatch() - assert.NoError(t, err) - err = patch.Unpatch() - assert.NoError(t, err) - err = patchSeverPreferedResources.Unpatch() - err = patch.Unpatch() - }() - } - - globalProj := generateProjectAllowList(clientConfig, "testdata/test_clusterrole.yaml", "testproj") - assert.True(t, len(globalProj.Spec.NamespaceResourceWhitelist) > 0) -} diff --git a/cmd/argocd-util/commands/repo.go b/cmd/argocd-util/commands/repo.go deleted file mode 100644 index 6ff43801fba75..0000000000000 --- a/cmd/argocd-util/commands/repo.go +++ /dev/null @@ -1,182 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "io/ioutil" - "os" - - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - apiv1 "k8s.io/api/core/v1" - apierr "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/fake" - - cmdutil "github.com/argoproj/argo-cd/v2/cmd/util" - "github.com/argoproj/argo-cd/v2/common" - "github.com/argoproj/argo-cd/v2/util/cli" - 
"github.com/argoproj/argo-cd/v2/util/db" - "github.com/argoproj/argo-cd/v2/util/errors" - "github.com/argoproj/argo-cd/v2/util/git" - "github.com/argoproj/argo-cd/v2/util/settings" -) - -const ( - ArgoCDNamespace = "argocd" - repoSecretPrefix = "repo" -) - -func NewRepoCommand() *cobra.Command { - var command = &cobra.Command{ - Use: "repo", - Short: "Manage repositories configuration", - Run: func(c *cobra.Command, args []string) { - c.HelpFunc()(c, args) - }, - } - command.AddCommand(NewGenRepoSpecCommand()) - - return command -} - -func NewGenRepoSpecCommand() *cobra.Command { - var ( - repoOpts cmdutil.RepoOptions - outputFormat string - ) - - // For better readability and easier formatting - var repoAddExamples = ` - # Add a Git repository via SSH using a private key for authentication, ignoring the server's host key: - argocd-util repo generate-spec git@git.example.com:repos/repo --insecure-ignore-host-key --ssh-private-key-path ~/id_rsa - - # Add a Git repository via SSH on a non-default port - need to use ssh:// style URLs here - argocd-util repo generate-spec ssh://git@git.example.com:2222/repos/repo --ssh-private-key-path ~/id_rsa - - # Add a private Git repository via HTTPS using username/password and TLS client certificates: - argocd-util repo generate-spec https://git.example.com/repos/repo --username git --password secret --tls-client-cert-path ~/mycert.crt --tls-client-cert-key-path ~/mycert.key - - # Add a private Git repository via HTTPS using username/password without verifying the server's TLS certificate - argocd-util repo generate-spec https://git.example.com/repos/repo --username git --password secret --insecure-skip-server-verification - - # Add a public Helm repository named 'stable' via HTTPS - argocd-util repo generate-spec https://charts.helm.sh/stable --type helm --name stable - - # Add a private Helm repository named 'stable' via HTTPS - argocd-util repo generate-spec https://charts.helm.sh/stable --type helm --name stable --username 
test --password test - - # Add a private Helm OCI-based repository named 'stable' via HTTPS - argocd-util repo generate-spec helm-oci-registry.cn-zhangjiakou.cr.aliyuncs.com --type helm --name stable --enable-oci --username test --password test -` - - var command = &cobra.Command{ - Use: "generate-spec REPOURL", - Short: "Generate declarative config for a repo", - Example: repoAddExamples, - Run: func(c *cobra.Command, args []string) { - log.SetLevel(log.WarnLevel) - if len(args) != 1 { - c.HelpFunc()(c, args) - os.Exit(1) - } - - // Repository URL - repoOpts.Repo.Repo = args[0] - - // Specifying ssh-private-key-path is only valid for SSH repositories - if repoOpts.SshPrivateKeyPath != "" { - if ok, _ := git.IsSSHURL(repoOpts.Repo.Repo); ok { - keyData, err := ioutil.ReadFile(repoOpts.SshPrivateKeyPath) - if err != nil { - log.Fatal(err) - } - repoOpts.Repo.SSHPrivateKey = string(keyData) - } else { - err := fmt.Errorf("--ssh-private-key-path is only supported for SSH repositories.") - errors.CheckError(err) - } - } - - // tls-client-cert-path and tls-client-cert-key-key-path must always be - // specified together - if (repoOpts.TlsClientCertPath != "" && repoOpts.TlsClientCertKeyPath == "") || (repoOpts.TlsClientCertPath == "" && repoOpts.TlsClientCertKeyPath != "") { - err := fmt.Errorf("--tls-client-cert-path and --tls-client-cert-key-path must be specified together") - errors.CheckError(err) - } - - // Specifying tls-client-cert-path is only valid for HTTPS repositories - if repoOpts.TlsClientCertPath != "" { - if git.IsHTTPSURL(repoOpts.Repo.Repo) { - tlsCertData, err := ioutil.ReadFile(repoOpts.TlsClientCertPath) - errors.CheckError(err) - tlsCertKey, err := ioutil.ReadFile(repoOpts.TlsClientCertKeyPath) - errors.CheckError(err) - repoOpts.Repo.TLSClientCertData = string(tlsCertData) - repoOpts.Repo.TLSClientCertKey = string(tlsCertKey) - } else { - err := fmt.Errorf("--tls-client-cert-path is only supported for HTTPS repositories") - errors.CheckError(err) - 
} - } - - // Set repository connection properties only when creating repository, not - // when creating repository credentials. - // InsecureIgnoreHostKey is deprecated and only here for backwards compat - repoOpts.Repo.InsecureIgnoreHostKey = repoOpts.InsecureIgnoreHostKey - repoOpts.Repo.Insecure = repoOpts.InsecureSkipServerVerification - repoOpts.Repo.EnableLFS = repoOpts.EnableLfs - repoOpts.Repo.EnableOCI = repoOpts.EnableOci - - if repoOpts.Repo.Type == "helm" && repoOpts.Repo.Name == "" { - errors.CheckError(fmt.Errorf("must specify --name for repos of type 'helm'")) - } - - // If the user set a username, but didn't supply password via --password, - // then we prompt for it - if repoOpts.Repo.Username != "" && repoOpts.Repo.Password == "" { - repoOpts.Repo.Password = cli.PromptPassword(repoOpts.Repo.Password) - } - - argoCDCM := &apiv1.ConfigMap{ - TypeMeta: v1.TypeMeta{ - Kind: "ConfigMap", - APIVersion: "v1", - }, - ObjectMeta: v1.ObjectMeta{ - Name: common.ArgoCDConfigMapName, - Namespace: ArgoCDNamespace, - Labels: map[string]string{ - "app.kubernetes.io/part-of": "argocd", - }, - }, - } - kubeClientset := fake.NewSimpleClientset(argoCDCM) - settingsMgr := settings.NewSettingsManager(context.Background(), kubeClientset, ArgoCDNamespace) - argoDB := db.NewDB(ArgoCDNamespace, settingsMgr, kubeClientset) - - var printResources []interface{} - _, err := argoDB.CreateRepository(context.Background(), &repoOpts.Repo) - errors.CheckError(err) - - secret, err := kubeClientset.CoreV1().Secrets(ArgoCDNamespace).Get(context.Background(), db.RepoURLToSecretName(repoSecretPrefix, repoOpts.Repo.Repo), v1.GetOptions{}) - if err != nil { - if !apierr.IsNotFound(err) { - errors.CheckError(err) - } - } else { - cmdutil.ConvertSecretData(secret) - printResources = append(printResources, secret) - } - - cm, err := kubeClientset.CoreV1().ConfigMaps(ArgoCDNamespace).Get(context.Background(), common.ArgoCDConfigMapName, v1.GetOptions{}) - errors.CheckError(err) - - 
printResources = append(printResources, cm) - errors.CheckError(cmdutil.PrintResources(printResources, outputFormat)) - }, - } - command.Flags().StringVarP(&outputFormat, "output", "o", "yaml", "Output format. One of: json|yaml") - cmdutil.AddRepoFlags(command, &repoOpts) - return command -} diff --git a/cmd/argocd-util/commands/settings.go b/cmd/argocd-util/commands/settings.go deleted file mode 100644 index e6b8e276f9815..0000000000000 --- a/cmd/argocd-util/commands/settings.go +++ /dev/null @@ -1,546 +0,0 @@ -package commands - -import ( - "bytes" - "context" - "fmt" - "io/ioutil" - "os" - "reflect" - "sort" - "strconv" - "strings" - "text/tabwriter" - - healthutil "github.com/argoproj/gitops-engine/pkg/health" - "github.com/ghodss/yaml" - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/tools/clientcmd" - - "github.com/argoproj/argo-cd/v2/common" - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" - "github.com/argoproj/argo-cd/v2/util/argo/normalizers" - "github.com/argoproj/argo-cd/v2/util/cli" - "github.com/argoproj/argo-cd/v2/util/errors" - "github.com/argoproj/argo-cd/v2/util/lua" - "github.com/argoproj/argo-cd/v2/util/settings" -) - -type settingsOpts struct { - argocdCMPath string - argocdSecretPath string - loadClusterSettings bool - clientConfig clientcmd.ClientConfig -} - -type commandContext interface { - createSettingsManager() (*settings.SettingsManager, error) -} - -func collectLogs(callback func()) string { - log.SetLevel(log.DebugLevel) - out := bytes.Buffer{} - log.SetOutput(&out) - defer log.SetLevel(log.FatalLevel) - callback() - return out.String() -} - -func setSettingsMeta(obj v1.Object) { - obj.SetNamespace("default") - labels := obj.GetLabels() - if labels == nil { - labels = make(map[string]string) - 
} - labels["app.kubernetes.io/part-of"] = "argocd" - obj.SetLabels(labels) -} - -func (opts *settingsOpts) createSettingsManager() (*settings.SettingsManager, error) { - var argocdCM *corev1.ConfigMap - if opts.argocdCMPath == "" && !opts.loadClusterSettings { - return nil, fmt.Errorf("either --argocd-cm-path must be provided or --load-cluster-settings must be set to true") - } else if opts.argocdCMPath == "" { - realClientset, ns, err := opts.getK8sClient() - if err != nil { - return nil, err - } - - argocdCM, err = realClientset.CoreV1().ConfigMaps(ns).Get(context.Background(), common.ArgoCDConfigMapName, v1.GetOptions{}) - if err != nil { - return nil, err - } - } else { - data, err := ioutil.ReadFile(opts.argocdCMPath) - if err != nil { - return nil, err - } - err = yaml.Unmarshal(data, &argocdCM) - if err != nil { - return nil, err - } - } - setSettingsMeta(argocdCM) - - var argocdSecret *corev1.Secret - if opts.argocdSecretPath != "" { - data, err := ioutil.ReadFile(opts.argocdSecretPath) - if err != nil { - return nil, err - } - err = yaml.Unmarshal(data, &argocdSecret) - if err != nil { - return nil, err - } - setSettingsMeta(argocdSecret) - } else if opts.loadClusterSettings { - realClientset, ns, err := opts.getK8sClient() - if err != nil { - return nil, err - } - argocdSecret, err = realClientset.CoreV1().Secrets(ns).Get(context.Background(), common.ArgoCDSecretName, v1.GetOptions{}) - if err != nil { - return nil, err - } - } else { - argocdSecret = &corev1.Secret{ - ObjectMeta: v1.ObjectMeta{ - Name: common.ArgoCDSecretName, - }, - Data: map[string][]byte{ - "admin.password": []byte("test"), - "server.secretkey": []byte("test"), - }, - } - } - setSettingsMeta(argocdSecret) - clientset := fake.NewSimpleClientset(argocdSecret, argocdCM) - - manager := settings.NewSettingsManager(context.Background(), clientset, "default") - errors.CheckError(manager.ResyncInformers()) - - return manager, nil -} - -func (opts *settingsOpts) getK8sClient() 
(*kubernetes.Clientset, string, error) { - namespace, _, err := opts.clientConfig.Namespace() - if err != nil { - return nil, "", err - } - - restConfig, err := opts.clientConfig.ClientConfig() - if err != nil { - return nil, "", err - } - - realClientset, err := kubernetes.NewForConfig(restConfig) - if err != nil { - return nil, "", err - } - return realClientset, namespace, nil -} - -func NewSettingsCommand() *cobra.Command { - var ( - opts settingsOpts - ) - - var command = &cobra.Command{ - Use: "settings", - Short: "Provides set of commands for settings validation and troubleshooting", - Run: func(c *cobra.Command, args []string) { - c.HelpFunc()(c, args) - }, - } - log.SetLevel(log.FatalLevel) - - command.AddCommand(NewValidateSettingsCommand(&opts)) - command.AddCommand(NewResourceOverridesCommand(&opts)) - command.AddCommand(NewRBACCommand()) - - opts.clientConfig = cli.AddKubectlFlagsToCmd(command) - command.PersistentFlags().StringVar(&opts.argocdCMPath, "argocd-cm-path", "", "Path to local argocd-cm.yaml file") - command.PersistentFlags().StringVar(&opts.argocdSecretPath, "argocd-secret-path", "", "Path to local argocd-secret.yaml file") - command.PersistentFlags().BoolVar(&opts.loadClusterSettings, "load-cluster-settings", false, - "Indicates that config map and secret should be loaded from cluster unless local file path is provided") - return command -} - -type settingValidator func(manager *settings.SettingsManager) (string, error) - -func joinValidators(validators ...settingValidator) settingValidator { - return func(manager *settings.SettingsManager) (string, error) { - var errorStrs []string - var summaries []string - for i := range validators { - summary, err := validators[i](manager) - if err != nil { - errorStrs = append(errorStrs, err.Error()) - } - if summary != "" { - summaries = append(summaries, summary) - } - } - if len(errorStrs) > 0 { - return "", fmt.Errorf("%s", strings.Join(errorStrs, "\n")) - } - return strings.Join(summaries, "\n"), 
nil - } -} - -var validatorsByGroup = map[string]settingValidator{ - "general": joinValidators(func(manager *settings.SettingsManager) (string, error) { - general, err := manager.GetSettings() - if err != nil { - return "", err - } - ssoProvider := "" - if general.DexConfig != "" { - if _, err := settings.UnmarshalDexConfig(general.DexConfig); err != nil { - return "", fmt.Errorf("invalid dex.config: %v", err) - } - ssoProvider = "Dex" - } else if general.OIDCConfigRAW != "" { - if _, err := settings.UnmarshalOIDCConfig(general.OIDCConfigRAW); err != nil { - return "", fmt.Errorf("invalid oidc.config: %v", err) - } - ssoProvider = "OIDC" - } - var summary string - if ssoProvider != "" { - summary = fmt.Sprintf("%s is configured", ssoProvider) - if general.URL == "" { - summary = summary + " ('url' field is missing)" - } - } else if ssoProvider != "" && general.URL != "" { - - } else { - summary = "SSO is not configured" - } - return summary, nil - }, func(manager *settings.SettingsManager) (string, error) { - _, err := manager.GetAppInstanceLabelKey() - return "", err - }, func(manager *settings.SettingsManager) (string, error) { - _, err := manager.GetHelp() - return "", err - }, func(manager *settings.SettingsManager) (string, error) { - _, err := manager.GetGoogleAnalytics() - return "", err - }), - "plugins": func(manager *settings.SettingsManager) (string, error) { - plugins, err := manager.GetConfigManagementPlugins() - if err != nil { - return "", err - } - return fmt.Sprintf("%d plugins", len(plugins)), nil - }, - "kustomize": func(manager *settings.SettingsManager) (string, error) { - opts, err := manager.GetKustomizeSettings() - if err != nil { - return "", err - } - summary := "default options" - if opts.BuildOptions != "" { - summary = opts.BuildOptions - } - if len(opts.Versions) > 0 { - summary = fmt.Sprintf("%s (%d versions)", summary, len(opts.Versions)) - } - return summary, err - }, - "repositories": joinValidators(func(manager 
*settings.SettingsManager) (string, error) { - repos, err := manager.GetRepositories() - if err != nil { - return "", err - } - return fmt.Sprintf("%d repositories", len(repos)), nil - }, func(manager *settings.SettingsManager) (string, error) { - creds, err := manager.GetRepositoryCredentials() - if err != nil { - return "", err - } - return fmt.Sprintf("%d repository credentials", len(creds)), nil - }), - "accounts": func(manager *settings.SettingsManager) (string, error) { - accounts, err := manager.GetAccounts() - if err != nil { - return "", err - } - return fmt.Sprintf("%d accounts", len(accounts)), nil - }, - "resource-overrides": func(manager *settings.SettingsManager) (string, error) { - overrides, err := manager.GetResourceOverrides() - if err != nil { - return "", err - } - return fmt.Sprintf("%d resource overrides", len(overrides)), nil - }, -} - -func NewValidateSettingsCommand(cmdCtx commandContext) *cobra.Command { - var ( - groups []string - ) - - var allGroups []string - for k := range validatorsByGroup { - allGroups = append(allGroups, k) - } - sort.Slice(allGroups, func(i, j int) bool { - return allGroups[i] < allGroups[j] - }) - - var command = &cobra.Command{ - Use: "validate", - Short: "Validate settings", - Long: "Validates settings specified in 'argocd-cm' ConfigMap and 'argocd-secret' Secret", - Example: ` -#Validates all settings in the specified YAML file -argocd-util settings validate --argocd-cm-path ./argocd-cm.yaml - -#Validates accounts and plugins settings in Kubernetes cluster of current kubeconfig context -argocd-util settings validate --group accounts --group plugins --load-cluster-settings`, - Run: func(c *cobra.Command, args []string) { - settingsManager, err := cmdCtx.createSettingsManager() - errors.CheckError(err) - - if len(groups) == 0 { - groups = allGroups - } - for i, group := range groups { - validator := validatorsByGroup[group] - - logs := collectLogs(func() { - summary, err := validator(settingsManager) - - if err 
!= nil { - _, _ = fmt.Fprintf(os.Stdout, "❌ %s\n", group) - _, _ = fmt.Fprintf(os.Stdout, "%s\n", err.Error()) - } else { - _, _ = fmt.Fprintf(os.Stdout, "✅ %s\n", group) - if summary != "" { - _, _ = fmt.Fprintf(os.Stdout, "%s\n", summary) - } - } - }) - if logs != "" { - _, _ = fmt.Fprintf(os.Stdout, "%s\n", logs) - } - if i != len(groups)-1 { - _, _ = fmt.Fprintf(os.Stdout, "\n") - } - } - }, - } - - command.Flags().StringArrayVar(&groups, "group", nil, fmt.Sprintf( - "Optional list of setting groups that have to be validated ( one of: %s)", strings.Join(allGroups, ", "))) - - return command -} - -func NewResourceOverridesCommand(cmdCtx commandContext) *cobra.Command { - var command = &cobra.Command{ - Use: "resource-overrides", - Short: "Troubleshoot resource overrides", - Run: func(c *cobra.Command, args []string) { - c.HelpFunc()(c, args) - }, - } - command.AddCommand(NewResourceIgnoreDifferencesCommand(cmdCtx)) - command.AddCommand(NewResourceActionListCommand(cmdCtx)) - command.AddCommand(NewResourceActionRunCommand(cmdCtx)) - command.AddCommand(NewResourceHealthCommand(cmdCtx)) - return command -} - -func executeResourceOverrideCommand(cmdCtx commandContext, args []string, callback func(res unstructured.Unstructured, override v1alpha1.ResourceOverride, overrides map[string]v1alpha1.ResourceOverride)) { - data, err := ioutil.ReadFile(args[0]) - errors.CheckError(err) - - res := unstructured.Unstructured{} - errors.CheckError(yaml.Unmarshal(data, &res)) - - settingsManager, err := cmdCtx.createSettingsManager() - errors.CheckError(err) - - overrides, err := settingsManager.GetResourceOverrides() - errors.CheckError(err) - gvk := res.GroupVersionKind() - key := gvk.Kind - if gvk.Group != "" { - key = fmt.Sprintf("%s/%s", gvk.Group, gvk.Kind) - } - override, hasOverride := overrides[key] - if !hasOverride { - _, _ = fmt.Printf("No overrides configured for '%s/%s'\n", gvk.Group, gvk.Kind) - return - } - callback(res, override, overrides) -} - -func 
NewResourceIgnoreDifferencesCommand(cmdCtx commandContext) *cobra.Command { - var command = &cobra.Command{ - Use: "ignore-differences RESOURCE_YAML_PATH", - Short: "Renders fields excluded from diffing", - Long: "Renders ignored fields using the 'ignoreDifferences' setting specified in the 'resource.customizations' field of 'argocd-cm' ConfigMap", - Example: ` -argocd-util settings resource-overrides ignore-differences ./deploy.yaml --argocd-cm-path ./argocd-cm.yaml`, - Run: func(c *cobra.Command, args []string) { - if len(args) < 1 { - c.HelpFunc()(c, args) - os.Exit(1) - } - - executeResourceOverrideCommand(cmdCtx, args, func(res unstructured.Unstructured, override v1alpha1.ResourceOverride, overrides map[string]v1alpha1.ResourceOverride) { - gvk := res.GroupVersionKind() - if len(override.IgnoreDifferences.JSONPointers) == 0 { - _, _ = fmt.Printf("Ignore differences are not configured for '%s/%s'\n", gvk.Group, gvk.Kind) - return - } - - normalizer, err := normalizers.NewIgnoreNormalizer(nil, overrides) - errors.CheckError(err) - - normalizedRes := res.DeepCopy() - logs := collectLogs(func() { - errors.CheckError(normalizer.Normalize(normalizedRes)) - }) - if logs != "" { - _, _ = fmt.Println(logs) - } - - if reflect.DeepEqual(&res, normalizedRes) { - _, _ = fmt.Printf("No fields are ignored by ignoreDifferences settings: \n%s\n", override.IgnoreDifferences) - return - } - - _, _ = fmt.Printf("Following fields are ignored:\n\n") - _ = cli.PrintDiff(res.GetName(), &res, normalizedRes) - }) - }, - } - return command -} - -func NewResourceHealthCommand(cmdCtx commandContext) *cobra.Command { - var command = &cobra.Command{ - Use: "health RESOURCE_YAML_PATH", - Short: "Assess resource health", - Long: "Assess resource health using the lua script configured in the 'resource.customizations' field of 'argocd-cm' ConfigMap", - Example: ` -argocd-util settings resource-overrides health ./deploy.yaml --argocd-cm-path ./argocd-cm.yaml`, - Run: func(c *cobra.Command, args 
[]string) { - if len(args) < 1 { - c.HelpFunc()(c, args) - os.Exit(1) - } - - executeResourceOverrideCommand(cmdCtx, args, func(res unstructured.Unstructured, override v1alpha1.ResourceOverride, overrides map[string]v1alpha1.ResourceOverride) { - gvk := res.GroupVersionKind() - if override.HealthLua == "" { - _, _ = fmt.Printf("Health script is not configured for '%s/%s'\n", gvk.Group, gvk.Kind) - return - } - - resHealth, err := healthutil.GetResourceHealth(&res, lua.ResourceHealthOverrides(overrides)) - errors.CheckError(err) - - _, _ = fmt.Printf("STATUS: %s\n", resHealth.Status) - _, _ = fmt.Printf("MESSAGE: %s\n", resHealth.Message) - }) - }, - } - return command -} - -func NewResourceActionListCommand(cmdCtx commandContext) *cobra.Command { - var command = &cobra.Command{ - Use: "list-actions RESOURCE_YAML_PATH", - Short: "List available resource actions", - Long: "List actions available for given resource action using the lua scripts configured in the 'resource.customizations' field of 'argocd-cm' ConfigMap and outputs updated fields", - Example: ` -argocd-util settings resource-overrides action list /tmp/deploy.yaml --argocd-cm-path ./argocd-cm.yaml`, - Run: func(c *cobra.Command, args []string) { - if len(args) < 1 { - c.HelpFunc()(c, args) - os.Exit(1) - } - - executeResourceOverrideCommand(cmdCtx, args, func(res unstructured.Unstructured, override v1alpha1.ResourceOverride, overrides map[string]v1alpha1.ResourceOverride) { - gvk := res.GroupVersionKind() - if override.Actions == "" { - _, _ = fmt.Printf("Actions are not configured for '%s/%s'\n", gvk.Group, gvk.Kind) - return - } - - luaVM := lua.VM{ResourceOverrides: overrides} - discoveryScript, err := luaVM.GetResourceActionDiscovery(&res) - errors.CheckError(err) - - availableActions, err := luaVM.ExecuteResourceActionDiscovery(&res, discoveryScript) - errors.CheckError(err) - sort.Slice(availableActions, func(i, j int) bool { - return availableActions[i].Name < availableActions[j].Name - }) - - w := 
tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) - _, _ = fmt.Fprintf(w, "NAME\tENABLED\n") - for _, action := range availableActions { - _, _ = fmt.Fprintf(w, "%s\t%s\n", action.Name, strconv.FormatBool(action.Disabled)) - } - _ = w.Flush() - }) - }, - } - return command -} - -func NewResourceActionRunCommand(cmdCtx commandContext) *cobra.Command { - var command = &cobra.Command{ - Use: "run-action RESOURCE_YAML_PATH ACTION", - Aliases: []string{"action"}, - Short: "Executes resource action", - Long: "Executes resource action using the lua script configured in the 'resource.customizations' field of 'argocd-cm' ConfigMap and outputs updated fields", - Example: ` -argocd-util settings resource-overrides action run /tmp/deploy.yaml restart --argocd-cm-path ./argocd-cm.yaml`, - Run: func(c *cobra.Command, args []string) { - if len(args) < 2 { - c.HelpFunc()(c, args) - os.Exit(1) - } - action := args[1] - - executeResourceOverrideCommand(cmdCtx, args, func(res unstructured.Unstructured, override v1alpha1.ResourceOverride, overrides map[string]v1alpha1.ResourceOverride) { - gvk := res.GroupVersionKind() - if override.Actions == "" { - _, _ = fmt.Printf("Actions are not configured for '%s/%s'\n", gvk.Group, gvk.Kind) - return - } - - luaVM := lua.VM{ResourceOverrides: overrides} - action, err := luaVM.GetResourceAction(&res, action) - errors.CheckError(err) - - modifiedRes, err := luaVM.ExecuteResourceAction(&res, action.ActionLua) - errors.CheckError(err) - - if reflect.DeepEqual(&res, modifiedRes) { - _, _ = fmt.Printf("No fields had been changed by action: \n%s\n", action.Name) - return - } - - _, _ = fmt.Printf("Following fields have been changed:\n\n") - _ = cli.PrintDiff(res.GetName(), &res, modifiedRes) - }) - }, - } - return command -} diff --git a/cmd/argocd-util/commands/settings_rbac.go b/cmd/argocd-util/commands/settings_rbac.go deleted file mode 100644 index 50113d99845f8..0000000000000 --- a/cmd/argocd-util/commands/settings_rbac.go +++ /dev/null @@ -1,374 
+0,0 @@ -package commands - -import ( - "context" - "fmt" - "io/ioutil" - "os" - - "github.com/ghodss/yaml" - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" - - "github.com/argoproj/argo-cd/v2/common" - "github.com/argoproj/argo-cd/v2/server/rbacpolicy" - "github.com/argoproj/argo-cd/v2/util/assets" - "github.com/argoproj/argo-cd/v2/util/cli" - "github.com/argoproj/argo-cd/v2/util/rbac" -) - -// Provide a mapping of short-hand resource names to their RBAC counterparts -var resourceMap map[string]string = map[string]string{ - "account": rbacpolicy.ResourceAccounts, - "app": rbacpolicy.ResourceApplications, - "apps": rbacpolicy.ResourceApplications, - "application": rbacpolicy.ResourceApplications, - "cert": rbacpolicy.ResourceCertificates, - "certs": rbacpolicy.ResourceCertificates, - "certificate": rbacpolicy.ResourceCertificates, - "cluster": rbacpolicy.ResourceClusters, - "gpgkey": rbacpolicy.ResourceGPGKeys, - "key": rbacpolicy.ResourceGPGKeys, - "proj": rbacpolicy.ResourceProjects, - "projs": rbacpolicy.ResourceProjects, - "project": rbacpolicy.ResourceProjects, - "repo": rbacpolicy.ResourceRepositories, - "repos": rbacpolicy.ResourceRepositories, - "repository": rbacpolicy.ResourceRepositories, -} - -// List of allowed RBAC resources -var validRBACResources map[string]bool = map[string]bool{ - rbacpolicy.ResourceAccounts: true, - rbacpolicy.ResourceApplications: true, - rbacpolicy.ResourceCertificates: true, - rbacpolicy.ResourceClusters: true, - rbacpolicy.ResourceGPGKeys: true, - rbacpolicy.ResourceProjects: true, - rbacpolicy.ResourceRepositories: true, -} - -// List of allowed RBAC actions -var validRBACActions map[string]bool = map[string]bool{ - rbacpolicy.ActionAction: true, - rbacpolicy.ActionCreate: true, - rbacpolicy.ActionDelete: true, - rbacpolicy.ActionGet: true, - rbacpolicy.ActionOverride: 
true, - rbacpolicy.ActionSync: true, - rbacpolicy.ActionUpdate: true, -} - -// NewRBACCommand is the command for 'rbac' -func NewRBACCommand() *cobra.Command { - var command = &cobra.Command{ - Use: "rbac", - Short: "Validate and test RBAC configuration", - Run: func(c *cobra.Command, args []string) { - c.HelpFunc()(c, args) - }, - } - command.AddCommand(NewRBACCanCommand()) - command.AddCommand(NewRBACValidateCommand()) - return command -} - -// NewRBACCanRoleCommand is the command for 'rbac can-role' -func NewRBACCanCommand() *cobra.Command { - var ( - policyFile string - defaultRole string - useBuiltin bool - strict bool - quiet bool - subject string - action string - resource string - subResource string - clientConfig clientcmd.ClientConfig - ) - var command = &cobra.Command{ - Use: "can ROLE/SUBJECT ACTION RESOURCE [SUB-RESOURCE]", - Short: "Check RBAC permissions for a role or subject", - Long: ` -Check whether a given role or subject has appropriate RBAC permissions to do -something. -`, - Example: ` -# Check whether role some:role has permissions to create an application in the -# 'default' project, using a local policy.csv file -argocd-util settings rbac can some:role create application 'default/app' --policy-file policy.csv - -# Policy file can also be K8s config map with data keys like argocd-rbac-cm, -# i.e. 'policy.csv' and (optionally) 'policy.default' -argocd-util settings rbac can some:role create application 'default/app' --policy-file argocd-rbac-cm.yaml - -# If --policy-file is not given, the ConfigMap 'argocd-rbac-cm' from K8s is -# used. 
You need to specify the argocd namespace, and make sure that your -# current Kubernetes context is pointing to the cluster Argo CD is running in -argocd-util settings rbac can some:role create application 'default/app' --namespace argocd - -# You can override a possibly configured default role -argocd-util settings rbac can someuser create application 'default/app' --default-role role:readonly - -`, - Run: func(c *cobra.Command, args []string) { - if len(args) < 3 || len(args) > 4 { - c.HelpFunc()(c, args) - os.Exit(1) - } - subject = args[0] - action = args[1] - resource = args[2] - if len(args) > 3 { - subResource = args[3] - } - - userPolicy := "" - builtinPolicy := "" - - var newDefaultRole string - - namespace, nsOverride, err := clientConfig.Namespace() - if err != nil { - log.Fatalf("could not create k8s client: %v", err) - } - - // Exactly one of --namespace or --policy-file must be given. - if (!nsOverride && policyFile == "") || (nsOverride && policyFile != "") { - c.HelpFunc()(c, args) - log.Fatalf("please provide exactly one of --policy-file or --namespace") - } - - restConfig, err := clientConfig.ClientConfig() - if err != nil { - log.Fatalf("could not create k8s client: %v", err) - } - realClientset, err := kubernetes.NewForConfig(restConfig) - if err != nil { - log.Fatalf("could not create k8s client: %v", err) - } - - userPolicy, newDefaultRole = getPolicy(policyFile, realClientset, namespace) - - // Use built-in policy as augmentation if requested - if useBuiltin { - builtinPolicy = assets.BuiltinPolicyCSV - } - - // If no explicit default role was given, but we have one defined from - // a policy, use this to check for enforce. 
- if newDefaultRole != "" && defaultRole == "" { - defaultRole = newDefaultRole - } - - res := checkPolicy(subject, action, resource, subResource, builtinPolicy, userPolicy, defaultRole, strict) - if res { - if !quiet { - fmt.Println("Yes") - } - os.Exit(0) - } else { - if !quiet { - fmt.Println("No") - } - os.Exit(1) - } - }, - } - - clientConfig = cli.AddKubectlFlagsToCmd(command) - command.Flags().StringVar(&policyFile, "policy-file", "", "path to the policy file to use") - command.Flags().StringVar(&defaultRole, "default-role", "", "name of the default role to use") - command.Flags().BoolVar(&useBuiltin, "use-builtin-policy", true, "whether to also use builtin-policy") - command.Flags().BoolVar(&strict, "strict", true, "whether to perform strict check on action and resource names") - command.Flags().BoolVarP(&quiet, "quiet", "q", false, "quiet mode - do not print results to stdout") - return command -} - -// NewRBACValidateCommand returns a new rbac validate command -func NewRBACValidateCommand() *cobra.Command { - var ( - policyFile string - ) - - var command = &cobra.Command{ - Use: "validate --policy-file=POLICYFILE", - Short: "Validate RBAC policy", - Long: ` -Validates an RBAC policy for being syntactically correct. The policy must be -a local file, and in either CSV or K8s ConfigMap format. 
-`, - Run: func(c *cobra.Command, args []string) { - if policyFile == "" { - c.HelpFunc()(c, args) - log.Fatalf("Please specify policy to validate using --policy-file") - } - userPolicy, _ := getPolicy(policyFile, nil, "") - if userPolicy != "" { - if err := rbac.ValidatePolicy(userPolicy); err == nil { - fmt.Printf("Policy is valid.\n") - os.Exit(0) - } else { - fmt.Printf("Policy is invalid: %v\n", err) - os.Exit(1) - } - } - }, - } - - command.Flags().StringVar(&policyFile, "policy-file", "", "path to the policy file to use") - return command -} - -// Load user policy file if requested or use Kubernetes client to get the -// appropriate ConfigMap from the current context -func getPolicy(policyFile string, kubeClient kubernetes.Interface, namespace string) (userPolicy string, defaultRole string) { - var err error - if policyFile != "" { - // load from file - userPolicy, defaultRole, err = getPolicyFromFile(policyFile) - if err != nil { - log.Fatalf("could not read policy file: %v", err) - } - } else { - cm, err := getPolicyConfigMap(kubeClient, namespace) - if err != nil { - log.Fatalf("could not get configmap: %v", err) - } - userPolicy, defaultRole = getPolicyFromConfigMap(cm) - } - - return userPolicy, defaultRole -} - -// getPolicyFromFile loads a RBAC policy from given path -func getPolicyFromFile(policyFile string) (string, string, error) { - var ( - userPolicy string - defaultRole string - ) - - upol, err := ioutil.ReadFile(policyFile) - if err != nil { - log.Fatalf("error opening policy file: %v", err) - return "", "", err - } - - // Try to unmarshal the input file as ConfigMap first. If it succeeds, we - // assume config map input. 
Otherwise, we treat it as - var upolCM *corev1.ConfigMap - err = yaml.Unmarshal(upol, &upolCM) - if err != nil { - userPolicy = string(upol) - } else { - userPolicy, defaultRole = getPolicyFromConfigMap(upolCM) - } - - return userPolicy, defaultRole, nil -} - -// Retrieve policy information from a ConfigMap -func getPolicyFromConfigMap(cm *corev1.ConfigMap) (string, string) { - var ( - userPolicy string - defaultRole string - ok bool - ) - userPolicy, ok = cm.Data[rbac.ConfigMapPolicyCSVKey] - if !ok { - userPolicy = "" - } - if defaultRole == "" { - defaultRole, ok = cm.Data[rbac.ConfigMapPolicyDefaultKey] - if !ok { - defaultRole = "" - } - } - - return userPolicy, defaultRole -} - -// getPolicyConfigMap fetches the RBAC config map from K8s cluster -func getPolicyConfigMap(client kubernetes.Interface, namespace string) (*corev1.ConfigMap, error) { - cm, err := client.CoreV1().ConfigMaps(namespace).Get(context.Background(), common.ArgoCDRBACConfigMapName, v1.GetOptions{}) - if err != nil { - return nil, err - } - return cm, nil -} - -// checkPolicy checks whether given subject is allowed to execute specified -// action against specified resource -func checkPolicy(subject, action, resource, subResource, builtinPolicy, userPolicy, defaultRole string, strict bool) bool { - enf := rbac.NewEnforcer(nil, "argocd", "argocd-rbac-cm", nil) - enf.SetDefaultRole(defaultRole) - if builtinPolicy != "" { - if err := enf.SetBuiltinPolicy(builtinPolicy); err != nil { - log.Fatalf("could not set built-in policy: %v", err) - return false - } - } - if userPolicy != "" { - if err := rbac.ValidatePolicy(userPolicy); err != nil { - log.Fatalf("invalid user policy: %v", err) - return false - } - if err := enf.SetUserPolicy(userPolicy); err != nil { - log.Fatalf("could not set user policy: %v", err) - return false - } - } - - // User could have used a mutation of the resource name (i.e. 'cert' for - // 'certificate') - let's resolve it to the valid resource. 
- realResource := resolveRBACResourceName(resource) - - // If in strict mode, validate that given RBAC resource and action are - // actually valid tokens. - if strict { - if !isValidRBACResource(realResource) { - log.Fatalf("error in RBAC request: '%s' is not a valid resource name", realResource) - } - if !isValidRBACAction(action) { - log.Fatalf("error in RBAC request: '%s' is not a valid action name", action) - } - } - - // Application resources have a special notation - for simplicity's sake, - // if user gives no sub-resource (or specifies simple '*'), we construct - // the required notation by setting subresource to '*/*'. - if realResource == rbacpolicy.ResourceApplications { - if subResource == "*" || subResource == "" { - subResource = "*/*" - } - } - - return enf.Enforce(subject, realResource, action, subResource) -} - -// resolveRBACResourceName resolves a user supplied value to a valid RBAC -// resource name. If no mapping is found, returns the value verbatim. -func resolveRBACResourceName(name string) string { - if res, ok := resourceMap[name]; ok { - return res - } else { - return name - } -} - -// isValidRBACAction checks whether a given action is a valid RBAC action -func isValidRBACAction(action string) bool { - _, ok := validRBACActions[action] - return ok -} - -// isValidRBACResource checks whether a given resource is a valid RBAC resource -func isValidRBACResource(resource string) bool { - _, ok := validRBACResources[resource] - return ok -} diff --git a/cmd/argocd-util/commands/settings_rbac_test.go b/cmd/argocd-util/commands/settings_rbac_test.go deleted file mode 100644 index 6cae85b89a9e7..0000000000000 --- a/cmd/argocd-util/commands/settings_rbac_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package commands - -import ( - "io/ioutil" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/fake" - - 
"github.com/argoproj/argo-cd/v2/util/assets" -) - -func Test_isValidRBACAction(t *testing.T) { - for k := range validRBACActions { - t.Run(k, func(t *testing.T) { - ok := isValidRBACAction(k) - assert.True(t, ok) - }) - } - t.Run("invalid", func(t *testing.T) { - ok := isValidRBACAction("invalid") - assert.False(t, ok) - }) -} - -func Test_isValidRBACResource(t *testing.T) { - for k := range validRBACResources { - t.Run(k, func(t *testing.T) { - ok := isValidRBACResource(k) - assert.True(t, ok) - }) - } - t.Run("invalid", func(t *testing.T) { - ok := isValidRBACResource("invalid") - assert.False(t, ok) - }) -} - -func Test_PolicyFromCSV(t *testing.T) { - uPol, dRole := getPolicy("testdata/rbac/policy.csv", nil, "") - require.NotEmpty(t, uPol) - require.Empty(t, dRole) -} - -func Test_PolicyFromYAML(t *testing.T) { - uPol, dRole := getPolicy("testdata/rbac/argocd-rbac-cm.yaml", nil, "") - require.NotEmpty(t, uPol) - require.Equal(t, "role:unknown", dRole) -} - -func Test_PolicyFromK8s(t *testing.T) { - data, err := ioutil.ReadFile("testdata/rbac/policy.csv") - require.NoError(t, err) - kubeclientset := fake.NewSimpleClientset(&v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "argocd-rbac-cm", - Namespace: "argocd", - }, - Data: map[string]string{ - "policy.csv": string(data), - "policy.default": "role:unknown", - }, - }) - uPol, dRole := getPolicy("", kubeclientset, "argocd") - require.NotEmpty(t, uPol) - require.Equal(t, "role:unknown", dRole) - - t.Run("get applications", func(t *testing.T) { - ok := checkPolicy("role:user", "get", "applications", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, true) - require.True(t, ok) - }) - t.Run("get clusters", func(t *testing.T) { - ok := checkPolicy("role:user", "get", "clusters", "*", assets.BuiltinPolicyCSV, uPol, dRole, true) - require.True(t, ok) - }) - t.Run("get certificates", func(t *testing.T) { - ok := checkPolicy("role:user", "get", "certificates", "*", assets.BuiltinPolicyCSV, uPol, dRole, true) - 
require.False(t, ok) - }) - t.Run("get certificates by default role", func(t *testing.T) { - ok := checkPolicy("role:user", "get", "certificates", "*", assets.BuiltinPolicyCSV, uPol, "role:readonly", true) - require.True(t, ok) - }) - t.Run("get certificates by default role without builtin policy", func(t *testing.T) { - ok := checkPolicy("role:user", "get", "certificates", "*", "", uPol, "role:readonly", true) - require.False(t, ok) - }) -} diff --git a/cmd/argocd-util/commands/testdata/rbac/policy.csv b/cmd/argocd-util/commands/testdata/rbac/policy.csv deleted file mode 100644 index de6d17104d2a2..0000000000000 --- a/cmd/argocd-util/commands/testdata/rbac/policy.csv +++ /dev/null @@ -1,9 +0,0 @@ -p, role:user, clusters, get, *, allow -p, role:user, clusters, get, https://kubernetes*, deny -p, role:user, projects, get, *, allow -p, role:user, applications, get, *, allow -p, role:user, applications, create, */*, allow -p, role:user, applications, delete, *, allow -p, role:user, applications, delete, */guestbook, deny -p, role:test, certificates, get, *, allow -g, test, role:user diff --git a/cmd/argocd/commands/account.go b/cmd/argocd/commands/account.go index 52bbe38b3a050..5472859551f75 100644 --- a/cmd/argocd/commands/account.go +++ b/cmd/argocd/commands/account.go @@ -11,11 +11,12 @@ import ( "time" timeutil "github.com/argoproj/pkg/time" - "github.com/ghodss/yaml" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "golang.org/x/crypto/ssh/terminal" + "golang.org/x/term" + "sigs.k8s.io/yaml" + "github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless" argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient" accountpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/account" "github.com/argoproj/argo-cd/v2/pkg/apiclient/session" @@ -25,12 +26,26 @@ import ( "github.com/argoproj/argo-cd/v2/util/io" "github.com/argoproj/argo-cd/v2/util/localconfig" sessionutil "github.com/argoproj/argo-cd/v2/util/session" + 
"github.com/argoproj/argo-cd/v2/util/templates" ) func NewAccountCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { var command = &cobra.Command{ Use: "account", Short: "Manage account settings", + Example: templates.Examples(` + # List accounts + argocd account list + + # Update the current user's password + argocd account update-password + + # Can I sync any app? + argocd account can-i sync applications '*' + + # Get User information + argocd account get-user-info + `), Run: func(c *cobra.Command, args []string) { c.HelpFunc()(c, args) os.Exit(1) @@ -43,6 +58,7 @@ func NewAccountCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { command.AddCommand(NewAccountGenerateTokenCommand(clientOpts)) command.AddCommand(NewAccountGetCommand(clientOpts)) command.AddCommand(NewAccountDeleteTokenCommand(clientOpts)) + command.AddCommand(NewBcryptCmd()) return command } @@ -54,29 +70,47 @@ func NewAccountUpdatePasswordCommand(clientOpts *argocdclient.ClientOptions) *co ) var command = &cobra.Command{ Use: "update-password", - Short: "Update password", + Short: "Update an account's password", + Long: ` +This command can be used to update the password of the currently logged on +user, or an arbitrary local user account when the currently logged on user +has appropriate RBAC permissions to change other accounts. 
+`, + Example: ` + # Update the current user's password + argocd account update-password + + # Update the password for user foobar + argocd account update-password --account foobar +`, Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 0 { c.HelpFunc()(c, args) os.Exit(1) } - acdClient := argocdclient.NewClientOrDie(clientOpts) + acdClient := headless.NewClientOrDie(clientOpts, c) conn, usrIf := acdClient.NewAccountClientOrDie() defer io.Close(conn) - userInfo := getCurrentAccount(acdClient) + userInfo := getCurrentAccount(ctx, acdClient) if userInfo.Iss == sessionutil.SessionManagerClaimsIssuer && currentPassword == "" { - fmt.Print("*** Enter current password: ") - password, err := terminal.ReadPassword(int(os.Stdin.Fd())) + fmt.Printf("*** Enter password of currently logged in user (%s): ", userInfo.Username) + password, err := term.ReadPassword(int(os.Stdin.Fd())) errors.CheckError(err) currentPassword = string(password) fmt.Print("\n") } + if account == "" { + account = userInfo.Username + } + if newPassword == "" { var err error - newPassword, err = cli.ReadAndConfirmPassword() + newPassword, err = cli.ReadAndConfirmPassword(account) errors.CheckError(err) } @@ -86,7 +120,6 @@ func NewAccountUpdatePasswordCommand(clientOpts *argocdclient.ClientOptions) *co Name: account, } - ctx := context.Background() _, err := usrIf.UpdatePassword(ctx, &updatePasswordRequest) errors.CheckError(err) fmt.Printf("Password updated\n") @@ -99,7 +132,7 @@ func NewAccountUpdatePasswordCommand(clientOpts *argocdclient.ClientOptions) *co errors.CheckError(err) claims, err := configCtx.User.Claims() errors.CheckError(err) - tokenString := passwordLogin(acdClient, localconfig.GetUsername(claims.Subject), newPassword) + tokenString := passwordLogin(ctx, acdClient, localconfig.GetUsername(claims.Subject), newPassword) localCfg.UpsertUser(localconfig.User{ Name: localCfg.CurrentContext, AuthToken: tokenString, @@ -111,9 +144,9 @@ func 
NewAccountUpdatePasswordCommand(clientOpts *argocdclient.ClientOptions) *co }, } - command.Flags().StringVar(¤tPassword, "current-password", "", "current password you wish to change") - command.Flags().StringVar(&newPassword, "new-password", "", "new password you want to update to") - command.Flags().StringVar(&account, "account", "", "an account name that should be updated. Defaults to current user account") + command.Flags().StringVar(¤tPassword, "current-password", "", "Password of the currently logged on user") + command.Flags().StringVar(&newPassword, "new-password", "", "New password you want to update to") + command.Flags().StringVar(&account, "account", "", "An account name that should be updated. Defaults to current user account") return command } @@ -124,16 +157,24 @@ func NewAccountGetUserInfoCommand(clientOpts *argocdclient.ClientOptions) *cobra var command = &cobra.Command{ Use: "get-user-info", Short: "Get user info", + Example: templates.Examples(` + # Get User information for the currently logged-in user (see 'argocd login') + argocd account get-user-info + + # Get User information in yaml format + argocd account get-user-info -o yaml + `), Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 0 { c.HelpFunc()(c, args) os.Exit(1) } - conn, client := argocdclient.NewClientOrDie(clientOpts).NewSessionClientOrDie() + conn, client := headless.NewClientOrDie(clientOpts, c).NewSessionClientOrDie() defer io.Close(conn) - ctx := context.Background() response, err := client.GetUserInfo(ctx, &session.GetUserInfoRequest{}) errors.CheckError(err) @@ -180,15 +221,16 @@ Actions: %v Resources: %v `, rbacpolicy.Actions, rbacpolicy.Resources), Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 3 { c.HelpFunc()(c, args) os.Exit(1) } - conn, client := argocdclient.NewClientOrDie(clientOpts).NewAccountClientOrDie() + conn, client := headless.NewClientOrDie(clientOpts, c).NewAccountClientOrDie() defer 
io.Close(conn) - ctx := context.Background() response, err := client.CanI(ctx, &accountpkg.CanIRequest{ Action: args[0], Resource: args[1], @@ -224,11 +266,11 @@ func NewAccountListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman Short: "List accounts", Example: "argocd account list", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() - conn, client := argocdclient.NewClientOrDie(clientOpts).NewAccountClientOrDie() + conn, client := headless.NewClientOrDie(clientOpts, c).NewAccountClientOrDie() defer io.Close(conn) - ctx := context.Background() response, err := client.ListAccounts(ctx, &accountpkg.ListAccountRequest{}) errors.CheckError(err) @@ -249,10 +291,10 @@ func NewAccountListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman return cmd } -func getCurrentAccount(clientset argocdclient.Client) session.GetUserInfoResponse { +func getCurrentAccount(ctx context.Context, clientset argocdclient.Client) session.GetUserInfoResponse { conn, client := clientset.NewSessionClientOrDie() defer io.Close(conn) - userInfo, err := client.GetUserInfo(context.Background(), &session.GetUserInfoRequest{}) + userInfo, err := client.GetUserInfo(ctx, &session.GetUserInfoRequest{}) errors.CheckError(err) return *userInfo } @@ -271,16 +313,18 @@ argocd account get # Get details for an account by name argocd account get --account `, Run: func(c *cobra.Command, args []string) { - clientset := argocdclient.NewClientOrDie(clientOpts) + ctx := c.Context() + + clientset := headless.NewClientOrDie(clientOpts, c) if account == "" { - account = getCurrentAccount(clientset).Username + account = getCurrentAccount(ctx, clientset).Username } conn, client := clientset.NewAccountClientOrDie() defer io.Close(conn) - acc, err := client.GetAccount(context.Background(), &accountpkg.GetAccountRequest{Name: account}) + acc, err := client.GetAccount(ctx, &accountpkg.GetAccountRequest{Name: account}) errors.CheckError(err) switch output { @@ -342,16 +386,17 @@ argocd 
account generate-token # Generate token for the account with the specified name argocd account generate-token --account `, Run: func(c *cobra.Command, args []string) { + ctx := c.Context() - clientset := argocdclient.NewClientOrDie(clientOpts) + clientset := headless.NewClientOrDie(clientOpts, c) conn, client := clientset.NewAccountClientOrDie() defer io.Close(conn) if account == "" { - account = getCurrentAccount(clientset).Username + account = getCurrentAccount(ctx, clientset).Username } expiresIn, err := timeutil.ParseDuration(expiresIn) errors.CheckError(err) - response, err := client.CreateToken(context.Background(), &accountpkg.CreateTokenRequest{ + response, err := client.CreateToken(ctx, &accountpkg.CreateTokenRequest{ Name: account, ExpiresIn: int64(expiresIn.Seconds()), Id: id, @@ -362,7 +407,7 @@ argocd account generate-token --account `, } cmd.Flags().StringVarP(&account, "account", "a", "", "Account name. Defaults to the current account.") cmd.Flags().StringVarP(&expiresIn, "expires-in", "e", "0s", "Duration before the token will expire. (Default: No expiration)") - cmd.Flags().StringVar(&id, "id", "", "Optional token id. Fallback to uuid if not value specified.") + cmd.Flags().StringVar(&id, "id", "", "Optional token id. 
Fall back to uuid if not value specified.") return cmd } @@ -377,21 +422,23 @@ func NewAccountDeleteTokenCommand(clientOpts *argocdclient.ClientOptions) *cobra argocd account delete-token ID # Delete token of the account with the specified name -argocd account generate-token --account `, +argocd account delete-token --account ID`, Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 1 { c.HelpFunc()(c, args) os.Exit(1) } id := args[0] - clientset := argocdclient.NewClientOrDie(clientOpts) + clientset := headless.NewClientOrDie(clientOpts, c) conn, client := clientset.NewAccountClientOrDie() defer io.Close(conn) if account == "" { - account = getCurrentAccount(clientset).Username + account = getCurrentAccount(ctx, clientset).Username } - _, err := client.DeleteToken(context.Background(), &accountpkg.DeleteTokenRequest{Name: account, Id: id}) + _, err := client.DeleteToken(ctx, &accountpkg.DeleteTokenRequest{Name: account, Id: id}) errors.CheckError(err) }, } diff --git a/cmd/argocd/commands/admin/admin.go b/cmd/argocd/commands/admin/admin.go new file mode 100644 index 0000000000000..92cad10479d68 --- /dev/null +++ b/cmd/argocd/commands/admin/admin.go @@ -0,0 +1,248 @@ +package admin + +import ( + "reflect" + + "github.com/spf13/cobra" + apiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "sigs.k8s.io/yaml" + + cmdutil "github.com/argoproj/argo-cd/v2/cmd/util" + "github.com/argoproj/argo-cd/v2/common" + argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient" + "github.com/argoproj/argo-cd/v2/util/errors" + "github.com/argoproj/argo-cd/v2/util/settings" + + "github.com/argoproj/argo-cd/v2/pkg/apis/application" +) + +const ( + // YamlSeparator separates sections of a YAML file + yamlSeparator = "---\n" +) + +var ( + configMapResource = 
schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"} + secretResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"} + applicationsResource = schema.GroupVersionResource{Group: application.Group, Version: "v1alpha1", Resource: application.ApplicationPlural} + appprojectsResource = schema.GroupVersionResource{Group: application.Group, Version: "v1alpha1", Resource: application.AppProjectPlural} + appplicationSetResource = schema.GroupVersionResource{Group: application.Group, Version: "v1alpha1", Resource: application.ApplicationSetPlural} +) + +// NewAdminCommand returns a new instance of an argocd command +func NewAdminCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { + var ( + pathOpts = clientcmd.NewDefaultPathOptions() + ) + + var command = &cobra.Command{ + Use: "admin", + Short: "Contains a set of commands useful for Argo CD administrators and requires direct Kubernetes access", + DisableAutoGenTag: true, + Run: func(c *cobra.Command, args []string) { + c.HelpFunc()(c, args) + }, + } + + command.AddCommand(NewClusterCommand(clientOpts, pathOpts)) + command.AddCommand(NewProjectsCommand()) + command.AddCommand(NewSettingsCommand()) + command.AddCommand(NewAppCommand(clientOpts)) + command.AddCommand(NewRepoCommand()) + command.AddCommand(NewImportCommand()) + command.AddCommand(NewExportCommand()) + command.AddCommand(NewDashboardCommand()) + command.AddCommand(NewNotificationsCommand()) + command.AddCommand(NewInitialPasswordCommand()) + + command.Flags().StringVar(&cmdutil.LogFormat, "logformat", "text", "Set the logging format. One of: text|json") + command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", "info", "Set the logging level. 
One of: debug|info|warn|error") + return command +} + +type argoCDClientsets struct { + configMaps dynamic.ResourceInterface + secrets dynamic.ResourceInterface + applications dynamic.ResourceInterface + projects dynamic.ResourceInterface + applicationSets dynamic.ResourceInterface +} + +func newArgoCDClientsets(config *rest.Config, namespace string) *argoCDClientsets { + dynamicIf, err := dynamic.NewForConfig(config) + errors.CheckError(err) + return &argoCDClientsets{ + configMaps: dynamicIf.Resource(configMapResource).Namespace(namespace), + secrets: dynamicIf.Resource(secretResource).Namespace(namespace), + applications: dynamicIf.Resource(applicationsResource).Namespace(namespace), + projects: dynamicIf.Resource(appprojectsResource).Namespace(namespace), + applicationSets: dynamicIf.Resource(appplicationSetResource).Namespace(namespace), + } +} + +// getReferencedSecrets examines the argocd-cm config for any referenced repo secrets and returns a +// map of all referenced secrets. +func getReferencedSecrets(un unstructured.Unstructured) map[string]bool { + var cm apiv1.ConfigMap + err := runtime.DefaultUnstructuredConverter.FromUnstructured(un.Object, &cm) + errors.CheckError(err) + referencedSecrets := make(map[string]bool) + + // Referenced repository secrets + if reposRAW, ok := cm.Data["repositories"]; ok { + repos := make([]settings.Repository, 0) + err := yaml.Unmarshal([]byte(reposRAW), &repos) + errors.CheckError(err) + for _, cred := range repos { + if cred.PasswordSecret != nil { + referencedSecrets[cred.PasswordSecret.Name] = true + } + if cred.SSHPrivateKeySecret != nil { + referencedSecrets[cred.SSHPrivateKeySecret.Name] = true + } + if cred.UsernameSecret != nil { + referencedSecrets[cred.UsernameSecret.Name] = true + } + if cred.TLSClientCertDataSecret != nil { + referencedSecrets[cred.TLSClientCertDataSecret.Name] = true + } + if cred.TLSClientCertKeySecret != nil { + referencedSecrets[cred.TLSClientCertKeySecret.Name] = true + } + } + } + + // 
Referenced repository credentials secrets + if reposRAW, ok := cm.Data["repository.credentials"]; ok { + creds := make([]settings.RepositoryCredentials, 0) + err := yaml.Unmarshal([]byte(reposRAW), &creds) + errors.CheckError(err) + for _, cred := range creds { + if cred.PasswordSecret != nil { + referencedSecrets[cred.PasswordSecret.Name] = true + } + if cred.SSHPrivateKeySecret != nil { + referencedSecrets[cred.SSHPrivateKeySecret.Name] = true + } + if cred.UsernameSecret != nil { + referencedSecrets[cred.UsernameSecret.Name] = true + } + if cred.TLSClientCertDataSecret != nil { + referencedSecrets[cred.TLSClientCertDataSecret.Name] = true + } + if cred.TLSClientCertKeySecret != nil { + referencedSecrets[cred.TLSClientCertKeySecret.Name] = true + } + } + } + return referencedSecrets +} + +// isArgoCDSecret returns whether or not the given secret is a part of Argo CD configuration +// (e.g. argocd-secret, repo credentials, or cluster credentials) +func isArgoCDSecret(repoSecretRefs map[string]bool, un unstructured.Unstructured) bool { + secretName := un.GetName() + if secretName == common.ArgoCDSecretName { + return true + } + if repoSecretRefs != nil { + if _, ok := repoSecretRefs[secretName]; ok { + return true + } + } + if labels := un.GetLabels(); labels != nil { + if _, ok := labels[common.LabelKeySecretType]; ok { + return true + } + } + if annotations := un.GetAnnotations(); annotations != nil { + if annotations[common.AnnotationKeyManagedBy] == common.AnnotationValueManagedByArgoCD { + return true + } + } + return false +} + +// isArgoCDConfigMap returns true if the configmap name is one of argo cd's well known configmaps +func isArgoCDConfigMap(name string) bool { + switch name { + case common.ArgoCDConfigMapName, common.ArgoCDRBACConfigMapName, common.ArgoCDKnownHostsConfigMapName, common.ArgoCDTLSCertsConfigMapName: + return true + } + return false + +} + +// specsEqual returns if the spec, data, labels, annotations, and finalizers of the two +// 
supplied objects are equal, indicating that no update is necessary during importing +func specsEqual(left, right unstructured.Unstructured) bool { + if !reflect.DeepEqual(left.GetAnnotations(), right.GetAnnotations()) { + return false + } + if !reflect.DeepEqual(left.GetLabels(), right.GetLabels()) { + return false + } + if !reflect.DeepEqual(left.GetFinalizers(), right.GetFinalizers()) { + return false + } + switch left.GetKind() { + case "Secret", "ConfigMap": + leftData, _, _ := unstructured.NestedMap(left.Object, "data") + rightData, _, _ := unstructured.NestedMap(right.Object, "data") + return reflect.DeepEqual(leftData, rightData) + case application.AppProjectKind: + leftSpec, _, _ := unstructured.NestedMap(left.Object, "spec") + rightSpec, _, _ := unstructured.NestedMap(right.Object, "spec") + return reflect.DeepEqual(leftSpec, rightSpec) + case application.ApplicationKind: + leftSpec, _, _ := unstructured.NestedMap(left.Object, "spec") + rightSpec, _, _ := unstructured.NestedMap(right.Object, "spec") + leftStatus, _, _ := unstructured.NestedMap(left.Object, "status") + rightStatus, _, _ := unstructured.NestedMap(right.Object, "status") + // reconciledAt and observedAt are constantly changing and we ignore any diff there + delete(leftStatus, "reconciledAt") + delete(rightStatus, "reconciledAt") + delete(leftStatus, "observedAt") + delete(rightStatus, "observedAt") + return reflect.DeepEqual(leftSpec, rightSpec) && reflect.DeepEqual(leftStatus, rightStatus) + } + return false +} + +func iterateStringFields(obj interface{}, callback func(name string, val string) string) { + if mapField, ok := obj.(map[string]interface{}); ok { + for field, val := range mapField { + if strVal, ok := val.(string); ok { + mapField[field] = callback(field, strVal) + } else { + iterateStringFields(val, callback) + } + } + } else if arrayField, ok := obj.([]interface{}); ok { + for i := range arrayField { + iterateStringFields(arrayField[i], callback) + } + } +} + +func 
redactor(dirtyString string) string { + config := make(map[string]interface{}) + err := yaml.Unmarshal([]byte(dirtyString), &config) + errors.CheckError(err) + iterateStringFields(config, func(name string, val string) string { + if name == "clientSecret" || name == "secret" || name == "bindPW" { + return "********" + } else { + return val + } + }) + data, err := yaml.Marshal(config) + errors.CheckError(err) + return string(data) +} diff --git a/cmd/argocd/commands/admin/app.go b/cmd/argocd/commands/admin/app.go new file mode 100644 index 0000000000000..fbceb436f8609 --- /dev/null +++ b/cmd/argocd/commands/admin/app.go @@ -0,0 +1,425 @@ +package admin + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "os" + "sort" + "time" + + "github.com/spf13/cobra" + apiv1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/kubernetes" + kubecache "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/clientcmd" + "sigs.k8s.io/yaml" + + cmdutil "github.com/argoproj/argo-cd/v2/cmd/util" + "github.com/argoproj/argo-cd/v2/common" + "github.com/argoproj/argo-cd/v2/controller" + "github.com/argoproj/argo-cd/v2/controller/cache" + "github.com/argoproj/argo-cd/v2/controller/metrics" + argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient" + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned" + appinformers "github.com/argoproj/argo-cd/v2/pkg/client/informers/externalversions" + reposerverclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient" + "github.com/argoproj/argo-cd/v2/util/argo" + cacheutil "github.com/argoproj/argo-cd/v2/util/cache" + appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate" + "github.com/argoproj/argo-cd/v2/util/cli" + "github.com/argoproj/argo-cd/v2/util/config" + "github.com/argoproj/argo-cd/v2/util/db" + 
"github.com/argoproj/argo-cd/v2/util/errors" + "github.com/argoproj/argo-cd/v2/util/io" + kubeutil "github.com/argoproj/argo-cd/v2/util/kube" + "github.com/argoproj/argo-cd/v2/util/settings" +) + +func NewAppCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { + var command = &cobra.Command{ + Use: "app", + Short: "Manage applications configuration", + Run: func(c *cobra.Command, args []string) { + c.HelpFunc()(c, args) + }, + } + + command.AddCommand(NewGenAppSpecCommand()) + command.AddCommand(NewReconcileCommand(clientOpts)) + command.AddCommand(NewDiffReconcileResults()) + return command +} + +// NewGenAppSpecCommand generates declarative configuration file for given application +func NewGenAppSpecCommand() *cobra.Command { + var ( + appOpts cmdutil.AppOptions + fileURL string + appName string + labels []string + outputFormat string + annotations []string + inline bool + ) + var command = &cobra.Command{ + Use: "generate-spec APPNAME", + Short: "Generate declarative config for an application", + Example: ` + # Generate declarative config for a directory app + argocd admin app generate-spec guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --directory-recurse + + # Generate declarative config for a Jsonnet app + argocd admin app generate-spec jsonnet-guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path jsonnet-guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --jsonnet-ext-str replicas=2 + + # Generate declarative config for a Helm app + argocd admin app generate-spec helm-guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path helm-guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --helm-set replicaCount=2 + + # Generate declarative config for a Helm app from a Helm repo + argocd admin app generate-spec nginx-ingress --repo 
https://charts.helm.sh/stable --helm-chart nginx-ingress --revision 1.24.3 --dest-namespace default --dest-server https://kubernetes.default.svc + + # Generate declarative config for a Kustomize app + argocd admin app generate-spec kustomize-guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path kustomize-guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --kustomize-image gcr.io/heptio-images/ks-guestbook-demo:0.1 + + # Generate declarative config for a app using a custom tool: + argocd admin app generate-spec kasane --repo https://github.com/argoproj/argocd-example-apps.git --path plugins/kasane --dest-namespace default --dest-server https://kubernetes.default.svc --config-management-plugin kasane +`, + Run: func(c *cobra.Command, args []string) { + apps, err := cmdutil.ConstructApps(fileURL, appName, labels, annotations, args, appOpts, c.Flags()) + errors.CheckError(err) + if len(apps) > 1 { + errors.CheckError(fmt.Errorf("failed to generate spec, more than one application is not supported")) + } + app := apps[0] + if app.Name == "" { + c.HelpFunc()(c, args) + os.Exit(1) + } + + out, closer, err := getOutWriter(inline, fileURL) + errors.CheckError(err) + defer io.Close(closer) + + errors.CheckError(PrintResources(outputFormat, out, app)) + }, + } + command.Flags().StringVar(&appName, "name", "", "A name for the app, ignored if a file is set (DEPRECATED)") + command.Flags().StringVarP(&fileURL, "file", "f", "", "Filename or URL to Kubernetes manifests for the app") + command.Flags().StringArrayVarP(&labels, "label", "l", []string{}, "Labels to apply to the app") + command.Flags().StringArrayVarP(&annotations, "annotations", "", []string{}, "Set metadata annotations (e.g. example=value)") + command.Flags().StringVarP(&outputFormat, "output", "o", "yaml", "Output format. 
One of: json|yaml") + command.Flags().BoolVarP(&inline, "inline", "i", false, "If set then generated resource is written back to the file specified in --file flag") + + // Only complete files with appropriate extension. + err := command.Flags().SetAnnotation("file", cobra.BashCompFilenameExt, []string{"json", "yaml", "yml"}) + errors.CheckError(err) + + cmdutil.AddAppFlags(command, &appOpts) + return command +} + +type appReconcileResult struct { + Name string `json:"name"` + Health *v1alpha1.HealthStatus `json:"health"` + Sync *v1alpha1.SyncStatus `json:"sync"` + Conditions []v1alpha1.ApplicationCondition `json:"conditions"` +} + +type reconcileResults struct { + Applications []appReconcileResult `json:"applications"` +} + +func (r *reconcileResults) getAppsMap() map[string]appReconcileResult { + res := map[string]appReconcileResult{} + for i := range r.Applications { + res[r.Applications[i].Name] = r.Applications[i] + } + return res +} + +func printLine(format string, a ...interface{}) { + _, _ = fmt.Printf(format+"\n", a...) 
+} + +func NewDiffReconcileResults() *cobra.Command { + var command = &cobra.Command{ + Use: "diff-reconcile-results PATH1 PATH2", + Short: "Compare results of two reconciliations and print diff.", + Run: func(c *cobra.Command, args []string) { + if len(args) != 2 { + c.HelpFunc()(c, args) + os.Exit(1) + } + + path1 := args[0] + path2 := args[1] + var res1 reconcileResults + var res2 reconcileResults + errors.CheckError(config.UnmarshalLocalFile(path1, &res1)) + errors.CheckError(config.UnmarshalLocalFile(path2, &res2)) + errors.CheckError(diffReconcileResults(res1, res2)) + }, + } + + return command +} + +func toUnstructured(val interface{}) (*unstructured.Unstructured, error) { + data, err := json.Marshal(val) + if err != nil { + return nil, err + } + res := make(map[string]interface{}) + err = json.Unmarshal(data, &res) + if err != nil { + return nil, err + } + return &unstructured.Unstructured{Object: res}, nil +} + +type diffPair struct { + name string + first *unstructured.Unstructured + second *unstructured.Unstructured +} + +func diffReconcileResults(res1 reconcileResults, res2 reconcileResults) error { + var pairs []diffPair + resMap1 := res1.getAppsMap() + resMap2 := res2.getAppsMap() + for k, v := range resMap1 { + firstUn, err := toUnstructured(v) + if err != nil { + return fmt.Errorf("error converting first resource to unstructured: %w", err) + } + var secondUn *unstructured.Unstructured + second, ok := resMap2[k] + if ok { + secondUn, err = toUnstructured(second) + if err != nil { + return fmt.Errorf("error converting second resource to unstructured: %w", err) + } + delete(resMap2, k) + } + pairs = append(pairs, diffPair{name: k, first: firstUn, second: secondUn}) + } + for k, v := range resMap2 { + secondUn, err := toUnstructured(v) + if err != nil { + return err + } + pairs = append(pairs, diffPair{name: k, first: nil, second: secondUn}) + } + sort.Slice(pairs, func(i, j int) bool { + return pairs[i].name < pairs[j].name + }) + for _, item := range 
pairs { + printLine(item.name) + _ = cli.PrintDiff(item.name, item.first, item.second) + } + + return nil +} + +func NewReconcileCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { + var ( + clientConfig clientcmd.ClientConfig + selector string + repoServerAddress string + outputFormat string + refresh bool + ) + + var command = &cobra.Command{ + Use: "get-reconcile-results PATH", + Short: "Reconcile all applications and stores reconciliation summary in the specified file.", + Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + + // get rid of logging error handler + runtime.ErrorHandlers = runtime.ErrorHandlers[1:] + + if len(args) != 1 { + c.HelpFunc()(c, args) + os.Exit(1) + } + outputPath := args[0] + + errors.CheckError(os.Setenv(v1alpha1.EnvVarFakeInClusterConfig, "true")) + cfg, err := clientConfig.ClientConfig() + errors.CheckError(err) + namespace, _, err := clientConfig.Namespace() + errors.CheckError(err) + + var result []appReconcileResult + if refresh { + if repoServerAddress == "" { + printLine("Repo server is not provided, trying to port-forward to argocd-repo-server pod.") + overrides := clientcmd.ConfigOverrides{} + repoServerPodLabelSelector := common.LabelKeyAppName + "=" + clientOpts.RepoServerName + repoServerPort, err := kubeutil.PortForward(8081, namespace, &overrides, repoServerPodLabelSelector) + errors.CheckError(err) + repoServerAddress = fmt.Sprintf("localhost:%d", repoServerPort) + } + repoServerClient := reposerverclient.NewRepoServerClientset(repoServerAddress, 60, reposerverclient.TLSConfiguration{DisableTLS: false, StrictValidation: false}) + + appClientset := appclientset.NewForConfigOrDie(cfg) + kubeClientset := kubernetes.NewForConfigOrDie(cfg) + result, err = reconcileApplications(ctx, kubeClientset, appClientset, namespace, repoServerClient, selector, newLiveStateCache) + errors.CheckError(err) + } else { + appClientset := appclientset.NewForConfigOrDie(cfg) + result, err = getReconcileResults(ctx, 
appClientset, namespace, selector) + } + + errors.CheckError(saveToFile(err, outputFormat, reconcileResults{Applications: result}, outputPath)) + }, + } + clientConfig = cli.AddKubectlFlagsToCmd(command) + command.Flags().StringVar(&repoServerAddress, "repo-server", "", "Repo server address.") + command.Flags().StringVar(&selector, "l", "", "Label selector") + command.Flags().StringVar(&outputFormat, "o", "yaml", "Output format (yaml|json)") + command.Flags().BoolVar(&refresh, "refresh", false, "If set to true then recalculates apps reconciliation") + + return command +} + +func saveToFile(err error, outputFormat string, result reconcileResults, outputPath string) error { + errors.CheckError(err) + var data []byte + switch outputFormat { + case "yaml": + if data, err = yaml.Marshal(result); err != nil { + return fmt.Errorf("error marshalling yaml: %w", err) + } + case "json": + if data, err = json.Marshal(result); err != nil { + return fmt.Errorf("error marshalling json: %w", err) + } + default: + return fmt.Errorf("format %s is not supported", outputFormat) + } + + return os.WriteFile(outputPath, data, 0644) +} + +func getReconcileResults(ctx context.Context, appClientset appclientset.Interface, namespace string, selector string) ([]appReconcileResult, error) { + appsList, err := appClientset.ArgoprojV1alpha1().Applications(namespace).List(ctx, v1.ListOptions{LabelSelector: selector}) + if err != nil { + return nil, err + } + + var items []appReconcileResult + for _, app := range appsList.Items { + items = append(items, appReconcileResult{ + Name: app.Name, + Conditions: app.Status.Conditions, + Health: &app.Status.Health, + Sync: &app.Status.Sync, + }) + } + return items, nil +} + +func reconcileApplications( + ctx context.Context, + kubeClientset kubernetes.Interface, + appClientset appclientset.Interface, + namespace string, + repoServerClient reposerverclient.Clientset, + selector string, + createLiveStateCache func(argoDB db.ArgoDB, appInformer 
kubecache.SharedIndexInformer, settingsMgr *settings.SettingsManager, server *metrics.MetricsServer) cache.LiveStateCache, +) ([]appReconcileResult, error) { + settingsMgr := settings.NewSettingsManager(ctx, kubeClientset, namespace) + argoDB := db.NewDB(namespace, settingsMgr, kubeClientset) + appInformerFactory := appinformers.NewSharedInformerFactoryWithOptions( + appClientset, + 1*time.Hour, + appinformers.WithNamespace(namespace), + appinformers.WithTweakListOptions(func(options *v1.ListOptions) {}), + ) + + appInformer := appInformerFactory.Argoproj().V1alpha1().Applications().Informer() + projInformer := appInformerFactory.Argoproj().V1alpha1().AppProjects().Informer() + go appInformer.Run(ctx.Done()) + go projInformer.Run(ctx.Done()) + if !kubecache.WaitForCacheSync(ctx.Done(), appInformer.HasSynced, projInformer.HasSynced) { + return nil, fmt.Errorf("failed to sync cache") + } + + appLister := appInformerFactory.Argoproj().V1alpha1().Applications().Lister() + projLister := appInformerFactory.Argoproj().V1alpha1().AppProjects().Lister() + server, err := metrics.NewMetricsServer("", appLister, func(obj interface{}) bool { + return true + }, func(r *http.Request) error { + return nil + }, []string{}) + + if err != nil { + return nil, err + } + stateCache := createLiveStateCache(argoDB, appInformer, settingsMgr, server) + if err := stateCache.Init(); err != nil { + return nil, err + } + + cache := appstatecache.NewCache( + cacheutil.NewCache(cacheutil.NewInMemoryCache(1*time.Minute)), + 1*time.Minute, + ) + + appStateManager := controller.NewAppStateManager( + argoDB, appClientset, repoServerClient, namespace, kubeutil.NewKubectl(), settingsMgr, stateCache, projInformer, server, cache, time.Second, argo.NewResourceTracking(), false) + + appsList, err := appClientset.ArgoprojV1alpha1().Applications(namespace).List(ctx, v1.ListOptions{LabelSelector: selector}) + if err != nil { + return nil, err + } + + sort.Slice(appsList.Items, func(i, j int) bool { + return 
appsList.Items[i].Spec.Destination.Server < appsList.Items[j].Spec.Destination.Server + }) + + var items []appReconcileResult + prevServer := "" + for _, app := range appsList.Items { + if prevServer != app.Spec.Destination.Server { + if prevServer != "" { + if clusterCache, err := stateCache.GetClusterCache(prevServer); err == nil { + clusterCache.Invalidate() + } + } + printLine("Reconciling apps of %s", app.Spec.Destination.Server) + prevServer = app.Spec.Destination.Server + } + printLine(app.Name) + + proj, err := projLister.AppProjects(namespace).Get(app.Spec.Project) + if err != nil { + return nil, err + } + + sources := make([]v1alpha1.ApplicationSource, 0) + revisions := make([]string, 0) + sources = append(sources, app.Spec.GetSource()) + revisions = append(revisions, app.Spec.GetSource().TargetRevision) + + res := appStateManager.CompareAppState(&app, proj, revisions, sources, false, false, nil, false) + items = append(items, appReconcileResult{ + Name: app.Name, + Conditions: app.Status.Conditions, + Health: res.GetHealthStatus(), + Sync: res.GetSyncStatus(), + }) + } + return items, nil +} + +func newLiveStateCache(argoDB db.ArgoDB, appInformer kubecache.SharedIndexInformer, settingsMgr *settings.SettingsManager, server *metrics.MetricsServer) cache.LiveStateCache { + return cache.NewLiveStateCache(argoDB, appInformer, settingsMgr, kubeutil.NewKubectl(), server, func(managedByApp map[string]bool, ref apiv1.ObjectReference) {}, nil, argo.NewResourceTracking()) +} diff --git a/cmd/argocd/commands/admin/app_test.go b/cmd/argocd/commands/admin/app_test.go new file mode 100644 index 0000000000000..0cad2485e6696 --- /dev/null +++ b/cmd/argocd/commands/admin/app_test.go @@ -0,0 +1,187 @@ +package admin + +import ( + "context" + "testing" + + clustermocks "github.com/argoproj/gitops-engine/pkg/cache/mocks" + "github.com/argoproj/gitops-engine/pkg/health" + "github.com/argoproj/gitops-engine/pkg/utils/kube" + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/mock" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + kubefake "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/cache" + + statecache "github.com/argoproj/argo-cd/v2/controller/cache" + cachemocks "github.com/argoproj/argo-cd/v2/controller/cache/mocks" + "github.com/argoproj/argo-cd/v2/controller/metrics" + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + appfake "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/fake" + argocdclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient" + "github.com/argoproj/argo-cd/v2/reposerver/apiclient/mocks" + "github.com/argoproj/argo-cd/v2/test" + "github.com/argoproj/argo-cd/v2/util/db" + "github.com/argoproj/argo-cd/v2/util/settings" +) + +func TestGetReconcileResults(t *testing.T) { + ctx := context.Background() + + appClientset := appfake.NewSimpleClientset(&v1alpha1.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + Status: v1alpha1.ApplicationStatus{ + Health: v1alpha1.HealthStatus{Status: health.HealthStatusHealthy}, + Sync: v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeOutOfSync}, + }, + }) + + result, err := getReconcileResults(ctx, appClientset, "default", "") + if !assert.NoError(t, err) { + return + } + + expectedResults := []appReconcileResult{{ + Name: "test", + Health: &v1alpha1.HealthStatus{Status: health.HealthStatusHealthy}, + Sync: &v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeOutOfSync}, + }} + assert.ElementsMatch(t, expectedResults, result) +} + +func TestGetReconcileResults_Refresh(t *testing.T) { + ctx := context.Background() + + cm := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "argocd-cm", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/part-of": "argocd", + }, + }, + } + proj := &v1alpha1.AppProject{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + 
Namespace: "default", + }, + Spec: v1alpha1.AppProjectSpec{Destinations: []v1alpha1.ApplicationDestination{{Namespace: "*", Server: "*"}}}, + } + + app := &v1alpha1.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + Spec: v1alpha1.ApplicationSpec{ + Source: &v1alpha1.ApplicationSource{}, + Project: "default", + Destination: v1alpha1.ApplicationDestination{ + Server: v1alpha1.KubernetesInternalAPIServerAddr, + Namespace: "default", + }, + }, + } + + appClientset := appfake.NewSimpleClientset(app, proj) + deployment := test.NewDeployment() + kubeClientset := kubefake.NewSimpleClientset(deployment, &cm) + clusterCache := clustermocks.ClusterCache{} + clusterCache.On("IsNamespaced", mock.Anything).Return(true, nil) + clusterCache.On("GetGVKParser", mock.Anything).Return(nil) + repoServerClient := mocks.RepoServerServiceClient{} + repoServerClient.On("GenerateManifest", mock.Anything, mock.Anything).Return(&argocdclient.ManifestResponse{ + Manifests: []string{test.DeploymentManifest}, + }, nil) + repoServerClientset := mocks.Clientset{RepoServerServiceClient: &repoServerClient} + liveStateCache := cachemocks.LiveStateCache{} + liveStateCache.On("GetManagedLiveObjs", mock.Anything, mock.Anything).Return(map[kube.ResourceKey]*unstructured.Unstructured{ + kube.GetResourceKey(deployment): deployment, + }, nil) + liveStateCache.On("GetVersionsInfo", mock.Anything).Return("v1.2.3", nil, nil) + liveStateCache.On("Init").Return(nil, nil) + liveStateCache.On("GetClusterCache", mock.Anything).Return(&clusterCache, nil) + liveStateCache.On("IsNamespaced", mock.Anything, mock.Anything).Return(true, nil) + + result, err := reconcileApplications(ctx, kubeClientset, appClientset, "default", &repoServerClientset, "", + func(argoDB db.ArgoDB, appInformer cache.SharedIndexInformer, settingsMgr *settings.SettingsManager, server *metrics.MetricsServer) statecache.LiveStateCache { + return &liveStateCache + }, + ) + + if !assert.NoError(t, err) { + 
return + } + + assert.Equal(t, result[0].Health.Status, health.HealthStatusMissing) + assert.Equal(t, result[0].Sync.Status, v1alpha1.SyncStatusCodeOutOfSync) +} + +func TestDiffReconcileResults_NoDifferences(t *testing.T) { + logs, err := captureStdout(func() { + assert.NoError(t, diffReconcileResults( + reconcileResults{Applications: []appReconcileResult{{ + Name: "app1", + Sync: &v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeOutOfSync}, + }}}, + reconcileResults{Applications: []appReconcileResult{{ + Name: "app1", + Sync: &v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeOutOfSync}, + }}}, + )) + }) + assert.NoError(t, err) + assert.Equal(t, "app1\n", logs) +} + +func TestDiffReconcileResults_DifferentApps(t *testing.T) { + logs, err := captureStdout(func() { + assert.NoError(t, diffReconcileResults( + reconcileResults{Applications: []appReconcileResult{{ + Name: "app1", + Sync: &v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeOutOfSync}, + }, { + Name: "app2", + Sync: &v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeOutOfSync}, + }}}, + reconcileResults{Applications: []appReconcileResult{{ + Name: "app1", + Sync: &v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeOutOfSync}, + }, { + Name: "app3", + Sync: &v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeOutOfSync}, + }}}, + )) + }) + assert.NoError(t, err) + assert.Equal(t, `app1 +app2 +1,9d0 +< conditions: null +< health: null +< name: app2 +< sync: +< comparedTo: +< destination: {} +< source: +< repoURL: "" +< status: OutOfSync +app3 +0a1,9 +> conditions: null +> health: null +> name: app3 +> sync: +> comparedTo: +> destination: {} +> source: +> repoURL: "" +> status: OutOfSync +`, logs) +} diff --git a/cmd/argocd/commands/admin/backup.go b/cmd/argocd/commands/admin/backup.go new file mode 100644 index 0000000000000..49e0615c64ba4 --- /dev/null +++ b/cmd/argocd/commands/admin/backup.go @@ -0,0 +1,371 @@ +package admin + +import ( + "bufio" + "fmt" + "io" + "os" + + 
"github.com/argoproj/gitops-engine/pkg/utils/kube" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + apierr "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/tools/clientcmd" + "sigs.k8s.io/yaml" + + "github.com/argoproj/argo-cd/v2/common" + "github.com/argoproj/argo-cd/v2/pkg/apis/application" + "github.com/argoproj/argo-cd/v2/util/cli" + "github.com/argoproj/argo-cd/v2/util/errors" +) + +// NewExportCommand defines a new command for exporting Kubernetes and Argo CD resources. +func NewExportCommand() *cobra.Command { + var ( + clientConfig clientcmd.ClientConfig + out string + ) + var command = cobra.Command{ + Use: "export", + Short: "Export all Argo CD data to stdout (default) or a file", + Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + + config, err := clientConfig.ClientConfig() + errors.CheckError(err) + namespace, _, err := clientConfig.Namespace() + errors.CheckError(err) + + var writer io.Writer + if out == "-" { + writer = os.Stdout + } else { + f, err := os.Create(out) + errors.CheckError(err) + bw := bufio.NewWriter(f) + writer = bw + defer func() { + err = bw.Flush() + errors.CheckError(err) + err = f.Close() + errors.CheckError(err) + }() + } + + acdClients := newArgoCDClientsets(config, namespace) + acdConfigMap, err := acdClients.configMaps.Get(ctx, common.ArgoCDConfigMapName, v1.GetOptions{}) + errors.CheckError(err) + export(writer, *acdConfigMap) + acdRBACConfigMap, err := acdClients.configMaps.Get(ctx, common.ArgoCDRBACConfigMapName, v1.GetOptions{}) + errors.CheckError(err) + export(writer, *acdRBACConfigMap) + acdKnownHostsConfigMap, err := acdClients.configMaps.Get(ctx, common.ArgoCDKnownHostsConfigMapName, v1.GetOptions{}) + errors.CheckError(err) + export(writer, *acdKnownHostsConfigMap) + acdTLSCertsConfigMap, err := acdClients.configMaps.Get(ctx, 
common.ArgoCDTLSCertsConfigMapName, v1.GetOptions{}) + errors.CheckError(err) + export(writer, *acdTLSCertsConfigMap) + + referencedSecrets := getReferencedSecrets(*acdConfigMap) + secrets, err := acdClients.secrets.List(ctx, v1.ListOptions{}) + errors.CheckError(err) + for _, secret := range secrets.Items { + if isArgoCDSecret(referencedSecrets, secret) { + export(writer, secret) + } + } + projects, err := acdClients.projects.List(ctx, v1.ListOptions{}) + errors.CheckError(err) + for _, proj := range projects.Items { + export(writer, proj) + } + applications, err := acdClients.applications.List(ctx, v1.ListOptions{}) + errors.CheckError(err) + for _, app := range applications.Items { + export(writer, app) + } + applicationSets, err := acdClients.applicationSets.List(ctx, v1.ListOptions{}) + if err != nil && !apierr.IsNotFound(err) { + if apierr.IsForbidden(err) { + log.Warn(err) + } else { + errors.CheckError(err) + } + } + if applicationSets != nil { + for _, appSet := range applicationSets.Items { + export(writer, appSet) + } + } + }, + } + + clientConfig = cli.AddKubectlFlagsToCmd(&command) + command.Flags().StringVarP(&out, "out", "o", "-", "Output to the specified file instead of stdout") + + return &command +} + +// NewImportCommand defines a new command for exporting Kubernetes and Argo CD resources. 
+func NewImportCommand() *cobra.Command { + var ( + clientConfig clientcmd.ClientConfig + prune bool + dryRun bool + verbose bool + stopOperation bool + ) + var command = cobra.Command{ + Use: "import SOURCE", + Short: "Import Argo CD data from stdin (specify `-') or a file", + Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + + if len(args) != 1 { + c.HelpFunc()(c, args) + os.Exit(1) + } + config, err := clientConfig.ClientConfig() + errors.CheckError(err) + config.QPS = 100 + config.Burst = 50 + namespace, _, err := clientConfig.Namespace() + errors.CheckError(err) + acdClients := newArgoCDClientsets(config, namespace) + + var input []byte + if in := args[0]; in == "-" { + input, err = io.ReadAll(os.Stdin) + } else { + input, err = os.ReadFile(in) + } + errors.CheckError(err) + var dryRunMsg string + if dryRun { + dryRunMsg = " (dry run)" + } + + // pruneObjects tracks live objects and it's current resource version. any remaining + // items in this map indicates the resource should be pruned since it no longer appears + // in the backup + pruneObjects := make(map[kube.ResourceKey]unstructured.Unstructured) + configMaps, err := acdClients.configMaps.List(ctx, v1.ListOptions{}) + errors.CheckError(err) + // referencedSecrets holds any secrets referenced in the argocd-cm configmap. 
These + // secrets need to be imported too + var referencedSecrets map[string]bool + for _, cm := range configMaps.Items { + if isArgoCDConfigMap(cm.GetName()) { + pruneObjects[kube.ResourceKey{Group: "", Kind: "ConfigMap", Name: cm.GetName()}] = cm + } + if cm.GetName() == common.ArgoCDConfigMapName { + referencedSecrets = getReferencedSecrets(cm) + } + } + + secrets, err := acdClients.secrets.List(ctx, v1.ListOptions{}) + errors.CheckError(err) + for _, secret := range secrets.Items { + if isArgoCDSecret(referencedSecrets, secret) { + pruneObjects[kube.ResourceKey{Group: "", Kind: "Secret", Name: secret.GetName()}] = secret + } + } + applications, err := acdClients.applications.List(ctx, v1.ListOptions{}) + errors.CheckError(err) + for _, app := range applications.Items { + pruneObjects[kube.ResourceKey{Group: application.Group, Kind: application.ApplicationKind, Name: app.GetName()}] = app + } + projects, err := acdClients.projects.List(ctx, v1.ListOptions{}) + errors.CheckError(err) + for _, proj := range projects.Items { + pruneObjects[kube.ResourceKey{Group: application.Group, Kind: application.AppProjectKind, Name: proj.GetName()}] = proj + } + applicationSets, err := acdClients.applicationSets.List(ctx, v1.ListOptions{}) + if apierr.IsForbidden(err) || apierr.IsNotFound(err) { + log.Warnf("argoproj.io/ApplicationSet: %v\n", err) + } else { + errors.CheckError(err) + } + if applicationSets != nil { + for _, appSet := range applicationSets.Items { + pruneObjects[kube.ResourceKey{Group: application.Group, Kind: application.ApplicationSetKind, Name: appSet.GetName()}] = appSet + } + } + + // Create or replace existing object + backupObjects, err := kube.SplitYAML(input) + errors.CheckError(err) + for _, bakObj := range backupObjects { + gvk := bakObj.GroupVersionKind() + key := kube.ResourceKey{Group: gvk.Group, Kind: gvk.Kind, Name: bakObj.GetName()} + liveObj, exists := pruneObjects[key] + delete(pruneObjects, key) + var dynClient dynamic.ResourceInterface + 
switch bakObj.GetKind() { + case "Secret": + dynClient = acdClients.secrets + case "ConfigMap": + dynClient = acdClients.configMaps + case application.AppProjectKind: + dynClient = acdClients.projects + case application.ApplicationKind: + dynClient = acdClients.applications + case application.ApplicationSetKind: + dynClient = acdClients.applicationSets + } + if !exists { + isForbidden := false + if !dryRun { + _, err = dynClient.Create(ctx, bakObj, v1.CreateOptions{}) + if apierr.IsForbidden(err) || apierr.IsNotFound(err) { + isForbidden = true + log.Warnf("%s/%s %s: %v", gvk.Group, gvk.Kind, bakObj.GetName(), err) + } else { + errors.CheckError(err) + } + } + if !isForbidden { + fmt.Printf("%s/%s %s created%s\n", gvk.Group, gvk.Kind, bakObj.GetName(), dryRunMsg) + } + + } else if specsEqual(*bakObj, liveObj) && checkAppHasNoNeedToStopOperation(liveObj, stopOperation) { + if verbose { + fmt.Printf("%s/%s %s unchanged%s\n", gvk.Group, gvk.Kind, bakObj.GetName(), dryRunMsg) + } + } else { + isForbidden := false + if !dryRun { + newLive := updateLive(bakObj, &liveObj, stopOperation) + _, err = dynClient.Update(ctx, newLive, v1.UpdateOptions{}) + if apierr.IsForbidden(err) || apierr.IsNotFound(err) { + isForbidden = true + log.Warnf("%s/%s %s: %v", gvk.Group, gvk.Kind, bakObj.GetName(), err) + } else { + errors.CheckError(err) + } + } + if !isForbidden { + fmt.Printf("%s/%s %s updated%s\n", gvk.Group, gvk.Kind, bakObj.GetName(), dryRunMsg) + } + } + } + + // Delete objects not in backup + for key, liveObj := range pruneObjects { + if prune { + var dynClient dynamic.ResourceInterface + switch key.Kind { + case "Secret": + dynClient = acdClients.secrets + case application.AppProjectKind: + dynClient = acdClients.projects + case application.ApplicationKind: + dynClient = acdClients.applications + if !dryRun { + if finalizers := liveObj.GetFinalizers(); len(finalizers) > 0 { + newLive := liveObj.DeepCopy() + newLive.SetFinalizers(nil) + _, err = dynClient.Update(ctx, 
newLive, v1.UpdateOptions{}) + if err != nil && !apierr.IsNotFound(err) { + errors.CheckError(err) + } + } + } + case application.ApplicationSetKind: + dynClient = acdClients.applicationSets + default: + log.Fatalf("Unexpected kind '%s' in prune list", key.Kind) + } + isForbidden := false + if !dryRun { + err = dynClient.Delete(ctx, key.Name, v1.DeleteOptions{}) + if apierr.IsForbidden(err) || apierr.IsNotFound(err) { + isForbidden = true + log.Warnf("%s/%s %s: %v\n", key.Group, key.Kind, key.Name, err) + } else { + errors.CheckError(err) + } + } + if !isForbidden { + fmt.Printf("%s/%s %s pruned%s\n", key.Group, key.Kind, key.Name, dryRunMsg) + } + } else { + fmt.Printf("%s/%s %s needs pruning\n", key.Group, key.Kind, key.Name) + } + } + }, + } + + clientConfig = cli.AddKubectlFlagsToCmd(&command) + command.Flags().BoolVar(&dryRun, "dry-run", false, "Print what will be performed") + command.Flags().BoolVar(&prune, "prune", false, "Prune secrets, applications and projects which do not appear in the backup") + command.Flags().BoolVar(&verbose, "verbose", false, "Verbose output (versus only changed output)") + command.Flags().BoolVar(&stopOperation, "stop-operation", false, "Stop any existing operations") + + return &command +} + +// check app has no need to stop operation. 
+func checkAppHasNoNeedToStopOperation(liveObj unstructured.Unstructured, stopOperation bool) bool { + if !stopOperation { + return true + } + switch liveObj.GetKind() { + case application.ApplicationKind: + return liveObj.Object["operation"] == nil + } + return true +} + +// export writes the unstructured object and removes extraneous cruft from output before writing +func export(w io.Writer, un unstructured.Unstructured) { + name := un.GetName() + finalizers := un.GetFinalizers() + apiVersion := un.GetAPIVersion() + kind := un.GetKind() + labels := un.GetLabels() + annotations := un.GetAnnotations() + unstructured.RemoveNestedField(un.Object, "metadata") + un.SetName(name) + un.SetFinalizers(finalizers) + un.SetAPIVersion(apiVersion) + un.SetKind(kind) + un.SetLabels(labels) + un.SetAnnotations(annotations) + data, err := yaml.Marshal(un.Object) + errors.CheckError(err) + _, err = w.Write(data) + errors.CheckError(err) + _, err = w.Write([]byte(yamlSeparator)) + errors.CheckError(err) +} + +// updateLive replaces the live object's finalizers, spec, annotations, labels, and data from the +// backup object but leaves all other fields intact (status, other metadata, etc...) 
+func updateLive(bak, live *unstructured.Unstructured, stopOperation bool) *unstructured.Unstructured { + newLive := live.DeepCopy() + newLive.SetAnnotations(bak.GetAnnotations()) + newLive.SetLabels(bak.GetLabels()) + newLive.SetFinalizers(bak.GetFinalizers()) + switch live.GetKind() { + case "Secret", "ConfigMap": + newLive.Object["data"] = bak.Object["data"] + case application.AppProjectKind: + newLive.Object["spec"] = bak.Object["spec"] + case application.ApplicationKind: + newLive.Object["spec"] = bak.Object["spec"] + if _, ok := bak.Object["status"]; ok { + newLive.Object["status"] = bak.Object["status"] + } + if stopOperation { + newLive.Object["operation"] = nil + } + + case "ApplicationSet": + newLive.Object["spec"] = bak.Object["spec"] + } + return newLive +} diff --git a/cmd/argocd/commands/admin/cluster.go b/cmd/argocd/commands/admin/cluster.go new file mode 100644 index 0000000000000..1bc1417fead4d --- /dev/null +++ b/cmd/argocd/commands/admin/cluster.go @@ -0,0 +1,643 @@ +package admin + +import ( + "context" + "fmt" + "math" + "os" + "sort" + "strings" + "text/tabwriter" + "time" + + "github.com/argoproj/gitops-engine/pkg/utils/kube" + "github.com/redis/go-redis/v9" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/utils/pointer" + + cmdutil "github.com/argoproj/argo-cd/v2/cmd/util" + "github.com/argoproj/argo-cd/v2/common" + "github.com/argoproj/argo-cd/v2/controller/sharding" + argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient" + argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned" + "github.com/argoproj/argo-cd/v2/util/argo" + cacheutil "github.com/argoproj/argo-cd/v2/util/cache" + appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate" + 
"github.com/argoproj/argo-cd/v2/util/cli" + "github.com/argoproj/argo-cd/v2/util/clusterauth" + "github.com/argoproj/argo-cd/v2/util/db" + "github.com/argoproj/argo-cd/v2/util/errors" + "github.com/argoproj/argo-cd/v2/util/glob" + kubeutil "github.com/argoproj/argo-cd/v2/util/kube" + "github.com/argoproj/argo-cd/v2/util/settings" + "github.com/argoproj/argo-cd/v2/util/text/label" +) + +func NewClusterCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clientcmd.PathOptions) *cobra.Command { + var command = &cobra.Command{ + Use: "cluster", + Short: "Manage clusters configuration", + Run: func(c *cobra.Command, args []string) { + c.HelpFunc()(c, args) + }, + } + + command.AddCommand(NewClusterConfig()) + command.AddCommand(NewGenClusterConfigCommand(pathOpts)) + command.AddCommand(NewClusterStatsCommand(clientOpts)) + command.AddCommand(NewClusterShardsCommand(clientOpts)) + namespacesCommand := NewClusterNamespacesCommand() + namespacesCommand.AddCommand(NewClusterEnableNamespacedMode()) + namespacesCommand.AddCommand(NewClusterDisableNamespacedMode()) + command.AddCommand(namespacesCommand) + + return command +} + +type ClusterWithInfo struct { + argoappv1.Cluster + // Shard holds controller shard number that handles the cluster + Shard int + // Namespaces holds list of namespaces managed by Argo CD in the cluster + Namespaces []string +} + +func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClient *versioned.Clientset, replicas int, namespace string, portForwardRedis bool, cacheSrc func() (*appstatecache.Cache, error), shard int, redisName string, redisHaProxyName string) ([]ClusterWithInfo, error) { + settingsMgr := settings.NewSettingsManager(ctx, kubeClient, namespace) + + argoDB := db.NewDB(namespace, settingsMgr, kubeClient) + clustersList, err := argoDB.ListClusters(ctx) + if err != nil { + return nil, err + } + var cache *appstatecache.Cache + if portForwardRedis { + overrides := clientcmd.ConfigOverrides{} + 
redisHaProxyPodLabelSelector := common.LabelKeyAppName + "=" + redisHaProxyName + redisPodLabelSelector := common.LabelKeyAppName + "=" + redisName + port, err := kubeutil.PortForward(6379, namespace, &overrides, + redisHaProxyPodLabelSelector, redisPodLabelSelector) + if err != nil { + return nil, err + } + client := redis.NewClient(&redis.Options{Addr: fmt.Sprintf("localhost:%d", port)}) + cache = appstatecache.NewCache(cacheutil.NewCache(cacheutil.NewRedisCache(client, time.Hour, cacheutil.RedisCompressionNone)), time.Hour) + } else { + cache, err = cacheSrc() + if err != nil { + return nil, err + } + } + + appItems, err := appClient.ArgoprojV1alpha1().Applications(namespace).List(ctx, v1.ListOptions{}) + if err != nil { + return nil, err + } + apps := appItems.Items + for i, app := range apps { + err := argo.ValidateDestination(ctx, &app.Spec.Destination, argoDB) + if err != nil { + return nil, err + } + apps[i] = app + } + clusters := make([]ClusterWithInfo, len(clustersList.Items)) + batchSize := 10 + batchesCount := int(math.Ceil(float64(len(clusters)) / float64(batchSize))) + for batchNum := 0; batchNum < batchesCount; batchNum++ { + batchStart := batchSize * batchNum + batchEnd := batchSize * (batchNum + 1) + if batchEnd > len(clustersList.Items) { + batchEnd = len(clustersList.Items) + } + batch := clustersList.Items[batchStart:batchEnd] + _ = kube.RunAllAsync(len(batch), func(i int) error { + clusterShard := 0 + cluster := batch[i] + if replicas > 0 { + distributionFunction := sharding.GetDistributionFunction(argoDB, common.DefaultShardingAlgorithm) + distributionFunction(&cluster) + cluster.Shard = pointer.Int64Ptr(int64(clusterShard)) + log.Infof("Cluster with uid: %s will be processed by shard %d", cluster.ID, clusterShard) + } + + if shard != -1 && clusterShard != shard { + return nil + } + nsSet := map[string]bool{} + for _, app := range apps { + if app.Spec.Destination.Server == cluster.Server { + nsSet[app.Spec.Destination.Namespace] = true + } + 
} + var namespaces []string + for ns := range nsSet { + namespaces = append(namespaces, ns) + } + _ = cache.GetClusterInfo(cluster.Server, &cluster.Info) + clusters[batchStart+i] = ClusterWithInfo{cluster, clusterShard, namespaces} + return nil + }) + } + return clusters, nil +} + +func getControllerReplicas(ctx context.Context, kubeClient *kubernetes.Clientset, namespace string, appControllerName string) (int, error) { + appControllerPodLabelSelector := common.LabelKeyAppName + "=" + appControllerName + controllerPods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{ + LabelSelector: appControllerPodLabelSelector}) + if err != nil { + return 0, err + } + return len(controllerPods.Items), nil +} + +func NewClusterShardsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { + var ( + shard int + replicas int + clientConfig clientcmd.ClientConfig + cacheSrc func() (*appstatecache.Cache, error) + portForwardRedis bool + ) + var command = cobra.Command{ + Use: "shards", + Short: "Print information about each controller shard and portion of Kubernetes resources it is responsible for.", + Run: func(cmd *cobra.Command, args []string) { + ctx := cmd.Context() + + log.SetLevel(log.WarnLevel) + + clientCfg, err := clientConfig.ClientConfig() + errors.CheckError(err) + namespace, _, err := clientConfig.Namespace() + errors.CheckError(err) + kubeClient := kubernetes.NewForConfigOrDie(clientCfg) + appClient := versioned.NewForConfigOrDie(clientCfg) + + if replicas == 0 { + replicas, err = getControllerReplicas(ctx, kubeClient, namespace, clientOpts.AppControllerName) + errors.CheckError(err) + } + if replicas == 0 { + return + } + + clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName) + errors.CheckError(err) + if len(clusters) == 0 { + return + } + + printStatsSummary(clusters) + }, + } + clientConfig = cli.AddKubectlFlagsToCmd(&command) 
+ command.Flags().IntVar(&shard, "shard", -1, "Cluster shard filter") + command.Flags().IntVar(&replicas, "replicas", 0, "Application controller replicas count. Inferred from number of running controller pods if not specified") + command.Flags().BoolVar(&portForwardRedis, "port-forward-redis", true, "Automatically port-forward ha proxy redis from current namespace?") + cacheSrc = appstatecache.AddCacheFlagsToCmd(&command) + return &command +} + +func printStatsSummary(clusters []ClusterWithInfo) { + totalResourcesCount := int64(0) + resourcesCountByShard := map[int]int64{} + for _, c := range clusters { + totalResourcesCount += c.Info.CacheInfo.ResourcesCount + resourcesCountByShard[c.Shard] += c.Info.CacheInfo.ResourcesCount + } + + avgResourcesByShard := totalResourcesCount / int64(len(resourcesCountByShard)) + w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) + _, _ = fmt.Fprintf(w, "SHARD\tRESOURCES COUNT\n") + for shard := 0; shard < len(resourcesCountByShard); shard++ { + cnt := resourcesCountByShard[shard] + percent := (float64(cnt) / float64(avgResourcesByShard)) * 100.0 + _, _ = fmt.Fprintf(w, "%d\t%s\n", shard, fmt.Sprintf("%d (%.0f%%)", cnt, percent)) + } + _ = w.Flush() +} + +func runClusterNamespacesCommand(ctx context.Context, clientConfig clientcmd.ClientConfig, action func(appClient *versioned.Clientset, argoDB db.ArgoDB, clusters map[string][]string) error) error { + clientCfg, err := clientConfig.ClientConfig() + if err != nil { + return fmt.Errorf("error while creating client config: %w", err) + } + namespace, _, err := clientConfig.Namespace() + if err != nil { + return fmt.Errorf("error while getting namespace from client config: %w", err) + } + + kubeClient := kubernetes.NewForConfigOrDie(clientCfg) + appClient := versioned.NewForConfigOrDie(clientCfg) + + settingsMgr := settings.NewSettingsManager(ctx, kubeClient, namespace) + argoDB := db.NewDB(namespace, settingsMgr, kubeClient) + clustersList, err := argoDB.ListClusters(ctx) + if err != 
nil { + return fmt.Errorf("error listing clusters: %w", err) + } + appItems, err := appClient.ArgoprojV1alpha1().Applications(namespace).List(ctx, v1.ListOptions{}) + if err != nil { + return fmt.Errorf("error listing application: %w", err) + } + apps := appItems.Items + for i, app := range apps { + if err := argo.ValidateDestination(ctx, &app.Spec.Destination, argoDB); err != nil { + return fmt.Errorf("error validating application destination: %w", err) + } + apps[i] = app + } + + clusters := map[string][]string{} + for _, cluster := range clustersList.Items { + nsSet := map[string]bool{} + for _, app := range apps { + if app.Spec.Destination.Server != cluster.Server { + continue + } + // Use namespaces of actually deployed resources, since some application use dummy target namespace + // If resources list is empty then use target namespace + if len(app.Status.Resources) != 0 { + for _, res := range app.Status.Resources { + if res.Namespace != "" { + nsSet[res.Namespace] = true + } + } + } else { + nsSet[app.Spec.Destination.Namespace] = true + } + } + var namespaces []string + for ns := range nsSet { + namespaces = append(namespaces, ns) + } + clusters[cluster.Server] = namespaces + } + return action(appClient, argoDB, clusters) +} + +func NewClusterNamespacesCommand() *cobra.Command { + var ( + clientConfig clientcmd.ClientConfig + ) + var command = cobra.Command{ + Use: "namespaces", + Short: "Print information namespaces which Argo CD manages in each cluster.", + Run: func(cmd *cobra.Command, args []string) { + ctx := cmd.Context() + + log.SetLevel(log.WarnLevel) + + err := runClusterNamespacesCommand(ctx, clientConfig, func(appClient *versioned.Clientset, _ db.ArgoDB, clusters map[string][]string) error { + w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) + _, _ = fmt.Fprintf(w, "CLUSTER\tNAMESPACES\n") + + for cluster, namespaces := range clusters { + // print shortest namespace names first + sort.Slice(namespaces, func(i, j int) bool { + return 
len(namespaces[j]) > len(namespaces[i]) + }) + namespacesStr := "" + if len(namespaces) > 4 { + namespacesStr = fmt.Sprintf("%s (total %d)", strings.Join(namespaces[:4], ","), len(namespaces)) + } else { + namespacesStr = strings.Join(namespaces, ",") + } + + _, _ = fmt.Fprintf(w, "%s\t%s\n", cluster, namespacesStr) + } + _ = w.Flush() + return nil + }) + errors.CheckError(err) + }, + } + clientConfig = cli.AddKubectlFlagsToCmd(&command) + return &command +} + +func NewClusterEnableNamespacedMode() *cobra.Command { + var ( + clientConfig clientcmd.ClientConfig + dryRun bool + clusterResources bool + namespacesCount int + ) + var command = cobra.Command{ + Use: "enable-namespaced-mode PATTERN", + Short: "Enable namespaced mode for clusters which name matches to the specified pattern.", + Run: func(cmd *cobra.Command, args []string) { + ctx := cmd.Context() + + log.SetLevel(log.WarnLevel) + + if len(args) == 0 { + cmd.HelpFunc()(cmd, args) + os.Exit(1) + } + pattern := args[0] + + errors.CheckError(runClusterNamespacesCommand(ctx, clientConfig, func(_ *versioned.Clientset, argoDB db.ArgoDB, clusters map[string][]string) error { + for server, namespaces := range clusters { + if len(namespaces) == 0 || len(namespaces) > namespacesCount || !glob.Match(pattern, server) { + continue + } + + cluster, err := argoDB.GetCluster(ctx, server) + if err != nil { + return fmt.Errorf("error getting cluster from server: %w", err) + } + cluster.Namespaces = namespaces + cluster.ClusterResources = clusterResources + fmt.Printf("Setting cluster %s namespaces to %v...", server, namespaces) + if !dryRun { + if _, err = argoDB.UpdateCluster(ctx, cluster); err != nil { + return fmt.Errorf("error updating cluster: %w", err) + } + fmt.Println("done") + } else { + fmt.Println("done (dry run)") + } + + } + return nil + })) + }, + } + clientConfig = cli.AddKubectlFlagsToCmd(&command) + command.Flags().BoolVar(&dryRun, "dry-run", true, "Print what will be performed") + 
command.Flags().BoolVar(&clusterResources, "cluster-resources", false, "Indicates if cluster level resources should be managed.") + command.Flags().IntVar(&namespacesCount, "max-namespace-count", 0, "Max number of namespaces that cluster should managed managed namespaces is less or equal to specified count") + + return &command +} + +func NewClusterDisableNamespacedMode() *cobra.Command { + var ( + clientConfig clientcmd.ClientConfig + dryRun bool + ) + var command = cobra.Command{ + Use: "disable-namespaced-mode PATTERN", + Short: "Disable namespaced mode for clusters which name matches to the specified pattern.", + Run: func(cmd *cobra.Command, args []string) { + ctx := cmd.Context() + + log.SetLevel(log.WarnLevel) + + if len(args) == 0 { + cmd.HelpFunc()(cmd, args) + os.Exit(1) + } + + pattern := args[0] + + errors.CheckError(runClusterNamespacesCommand(ctx, clientConfig, func(_ *versioned.Clientset, argoDB db.ArgoDB, clusters map[string][]string) error { + for server := range clusters { + if !glob.Match(pattern, server) { + continue + } + + cluster, err := argoDB.GetCluster(ctx, server) + if err != nil { + return fmt.Errorf("error getting cluster from server: %w", err) + } + + if len(cluster.Namespaces) == 0 { + continue + } + + cluster.Namespaces = nil + fmt.Printf("Disabling namespaced mode for cluster %s...", server) + if !dryRun { + if _, err = argoDB.UpdateCluster(ctx, cluster); err != nil { + return fmt.Errorf("error updating cluster: %w", err) + } + fmt.Println("done") + } else { + fmt.Println("done (dry run)") + } + + } + return nil + })) + }, + } + clientConfig = cli.AddKubectlFlagsToCmd(&command) + command.Flags().BoolVar(&dryRun, "dry-run", true, "Print what will be performed") + return &command +} + +func NewClusterStatsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { + var ( + shard int + replicas int + clientConfig clientcmd.ClientConfig + cacheSrc func() (*appstatecache.Cache, error) + portForwardRedis bool + ) + var command = 
cobra.Command{ + Use: "stats", + Short: "Prints information cluster statistics and inferred shard number", + Run: func(cmd *cobra.Command, args []string) { + ctx := cmd.Context() + + log.SetLevel(log.WarnLevel) + + clientCfg, err := clientConfig.ClientConfig() + errors.CheckError(err) + namespace, _, err := clientConfig.Namespace() + errors.CheckError(err) + + kubeClient := kubernetes.NewForConfigOrDie(clientCfg) + appClient := versioned.NewForConfigOrDie(clientCfg) + if replicas == 0 { + replicas, err = getControllerReplicas(ctx, kubeClient, namespace, clientOpts.AppControllerName) + errors.CheckError(err) + } + clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName) + errors.CheckError(err) + + w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) + _, _ = fmt.Fprintf(w, "SERVER\tSHARD\tCONNECTION\tNAMESPACES COUNT\tAPPS COUNT\tRESOURCES COUNT\n") + for _, cluster := range clusters { + _, _ = fmt.Fprintf(w, "%s\t%d\t%s\t%d\t%d\t%d\n", cluster.Server, cluster.Shard, cluster.Info.ConnectionState.Status, len(cluster.Namespaces), cluster.Info.ApplicationsCount, cluster.Info.CacheInfo.ResourcesCount) + } + _ = w.Flush() + }, + } + clientConfig = cli.AddKubectlFlagsToCmd(&command) + command.Flags().IntVar(&shard, "shard", -1, "Cluster shard filter") + command.Flags().IntVar(&replicas, "replicas", 0, "Application controller replicas count. 
Inferred from number of running controller pods if not specified") + command.Flags().BoolVar(&portForwardRedis, "port-forward-redis", true, "Automatically port-forward ha proxy redis from current namespace?") + cacheSrc = appstatecache.AddCacheFlagsToCmd(&command) + return &command +} + +// NewClusterConfig returns a new instance of `argocd admin kubeconfig` command +func NewClusterConfig() *cobra.Command { + var ( + clientConfig clientcmd.ClientConfig + ) + var command = &cobra.Command{ + Use: "kubeconfig CLUSTER_URL OUTPUT_PATH", + Short: "Generates kubeconfig for the specified cluster", + DisableAutoGenTag: true, + Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + + if len(args) != 2 { + c.HelpFunc()(c, args) + os.Exit(1) + } + serverUrl := args[0] + output := args[1] + conf, err := clientConfig.ClientConfig() + errors.CheckError(err) + namespace, _, err := clientConfig.Namespace() + errors.CheckError(err) + kubeclientset, err := kubernetes.NewForConfig(conf) + errors.CheckError(err) + + cluster, err := db.NewDB(namespace, settings.NewSettingsManager(ctx, kubeclientset, namespace), kubeclientset).GetCluster(ctx, serverUrl) + errors.CheckError(err) + err = kube.WriteKubeConfig(cluster.RawRestConfig(), namespace, output) + errors.CheckError(err) + }, + } + clientConfig = cli.AddKubectlFlagsToCmd(command) + return command +} + +func NewGenClusterConfigCommand(pathOpts *clientcmd.PathOptions) *cobra.Command { + var ( + clusterOpts cmdutil.ClusterOptions + bearerToken string + generateToken bool + outputFormat string + labels []string + annotations []string + ) + var command = &cobra.Command{ + Use: "generate-spec CONTEXT", + Short: "Generate declarative config for a cluster", + Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + + log.SetLevel(log.WarnLevel) + var configAccess clientcmd.ConfigAccess = pathOpts + if len(args) == 0 { + log.Error("Choose a context name from:") + cmdutil.PrintKubeContexts(configAccess) + os.Exit(1) + } 
+ cfgAccess, err := configAccess.GetStartingConfig() + errors.CheckError(err) + contextName := args[0] + clstContext := cfgAccess.Contexts[contextName] + if clstContext == nil { + log.Fatalf("Context %s does not exist in kubeconfig", contextName) + return + } + + if clusterOpts.InCluster && clusterOpts.ClusterEndpoint != "" { + log.Fatal("Can only use one of --in-cluster or --cluster-endpoint") + return + } + + overrides := clientcmd.ConfigOverrides{ + Context: *clstContext, + } + clientConfig := clientcmd.NewDefaultClientConfig(*cfgAccess, &overrides) + conf, err := clientConfig.ClientConfig() + errors.CheckError(err) + kubeClientset := fake.NewSimpleClientset() + + var awsAuthConf *argoappv1.AWSAuthConfig + var execProviderConf *argoappv1.ExecProviderConfig + if clusterOpts.AwsClusterName != "" { + awsAuthConf = &argoappv1.AWSAuthConfig{ + ClusterName: clusterOpts.AwsClusterName, + RoleARN: clusterOpts.AwsRoleArn, + } + } else if clusterOpts.ExecProviderCommand != "" { + execProviderConf = &argoappv1.ExecProviderConfig{ + Command: clusterOpts.ExecProviderCommand, + Args: clusterOpts.ExecProviderArgs, + Env: clusterOpts.ExecProviderEnv, + APIVersion: clusterOpts.ExecProviderAPIVersion, + InstallHint: clusterOpts.ExecProviderInstallHint, + } + } else if generateToken { + bearerToken, err = GenerateToken(clusterOpts, conf) + errors.CheckError(err) + } else if bearerToken == "" { + bearerToken = "bearer-token" + } + if clusterOpts.Name != "" { + contextName = clusterOpts.Name + } + + labelsMap, err := label.Parse(labels) + errors.CheckError(err) + annotationsMap, err := label.Parse(annotations) + errors.CheckError(err) + + clst := cmdutil.NewCluster(contextName, clusterOpts.Namespaces, clusterOpts.ClusterResources, conf, bearerToken, awsAuthConf, execProviderConf, labelsMap, annotationsMap) + if clusterOpts.InClusterEndpoint() { + clst.Server = argoappv1.KubernetesInternalAPIServerAddr + } + if clusterOpts.ClusterEndpoint == string(cmdutil.KubePublicEndpoint) { + // 
Ignore `kube-public` cluster endpoints, since this command is intended to run without invoking any network connections. + log.Warn("kube-public cluster endpoints are not supported. Falling back to the endpoint listed in the kubeconfig context.") + } + if clusterOpts.Shard >= 0 { + clst.Shard = &clusterOpts.Shard + } + + settingsMgr := settings.NewSettingsManager(ctx, kubeClientset, ArgoCDNamespace) + argoDB := db.NewDB(ArgoCDNamespace, settingsMgr, kubeClientset) + + _, err = argoDB.CreateCluster(ctx, clst) + errors.CheckError(err) + + secName, err := db.URIToSecretName("cluster", clst.Server) + errors.CheckError(err) + + secret, err := kubeClientset.CoreV1().Secrets(ArgoCDNamespace).Get(ctx, secName, v1.GetOptions{}) + errors.CheckError(err) + + errors.CheckError(PrintResources(outputFormat, os.Stdout, secret)) + }, + } + command.PersistentFlags().StringVar(&pathOpts.LoadingRules.ExplicitPath, pathOpts.ExplicitFileFlag, pathOpts.LoadingRules.ExplicitPath, "use a particular kubeconfig file") + command.Flags().StringVar(&bearerToken, "bearer-token", "", "Authentication token that should be used to access K8S API server") + command.Flags().BoolVar(&generateToken, "generate-bearer-token", false, "Generate authentication token that should be used to access K8S API server") + command.Flags().StringVar(&clusterOpts.ServiceAccount, "service-account", "argocd-manager", fmt.Sprintf("System namespace service account to use for kubernetes resource management. If not set then default \"%s\" SA will be used", clusterauth.ArgoCDManagerServiceAccount)) + command.Flags().StringVar(&clusterOpts.SystemNamespace, "system-namespace", common.DefaultSystemNamespace, "Use different system namespace") + command.Flags().StringVarP(&outputFormat, "output", "o", "yaml", "Output format. One of: json|yaml") + command.Flags().StringArrayVar(&labels, "label", nil, "Set metadata labels (e.g. 
--label key=value)") + command.Flags().StringArrayVar(&annotations, "annotation", nil, "Set metadata annotations (e.g. --annotation key=value)") + cmdutil.AddClusterFlags(command, &clusterOpts) + return command +} + +func GenerateToken(clusterOpts cmdutil.ClusterOptions, conf *rest.Config) (string, error) { + clientset, err := kubernetes.NewForConfig(conf) + errors.CheckError(err) + + bearerToken, err := clusterauth.GetServiceAccountBearerToken(clientset, clusterOpts.SystemNamespace, clusterOpts.ServiceAccount, common.BearerTokenTimeout) + if err != nil { + return "", err + } + return bearerToken, nil +} diff --git a/cmd/argocd/commands/admin/dashboard.go b/cmd/argocd/commands/admin/dashboard.go new file mode 100644 index 0000000000000..c75476ea8eb2d --- /dev/null +++ b/cmd/argocd/commands/admin/dashboard.go @@ -0,0 +1,41 @@ +package admin + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless" + "github.com/argoproj/argo-cd/v2/cmd/argocd/commands/initialize" + "github.com/argoproj/argo-cd/v2/common" + argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient" + "github.com/argoproj/argo-cd/v2/util/cache" + "github.com/argoproj/argo-cd/v2/util/env" + "github.com/argoproj/argo-cd/v2/util/errors" +) + +func NewDashboardCommand() *cobra.Command { + var ( + port int + address string + compressionStr string + ) + cmd := &cobra.Command{ + Use: "dashboard", + Short: "Starts Argo CD Web UI locally", + Run: func(cmd *cobra.Command, args []string) { + ctx := cmd.Context() + + compression, err := cache.CompressionTypeFromString(compressionStr) + errors.CheckError(err) + errors.CheckError(headless.MaybeStartLocalServer(ctx, &argocdclient.ClientOptions{Core: true}, initialize.RetrieveContextIfChanged(cmd.Flag("context")), &port, &address, compression)) + println(fmt.Sprintf("Argo CD UI is available at http://%s:%d", address, port)) + <-ctx.Done() + }, + } + initialize.InitCommand(cmd) + cmd.Flags().IntVar(&port, 
"port", common.DefaultPortAPIServer, "Listen on given port") + cmd.Flags().StringVar(&address, "address", common.DefaultAddressAdminDashboard, "Listen on given address") + cmd.Flags().StringVar(&compressionStr, "redis-compress", env.StringFromEnv("REDIS_COMPRESSION", string(cache.RedisCompressionGZip)), "Enable this if the application controller is configured with redis compression enabled. (possible values: gzip, none)") + return cmd +} diff --git a/cmd/argocd/commands/admin/generatespec_utils.go b/cmd/argocd/commands/admin/generatespec_utils.go new file mode 100644 index 0000000000000..f9d902111a5d1 --- /dev/null +++ b/cmd/argocd/commands/admin/generatespec_utils.go @@ -0,0 +1,108 @@ +package admin + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "os" + + "github.com/argoproj/gitops-engine/pkg/utils/kube" + v1 "k8s.io/api/core/v1" + "sigs.k8s.io/yaml" + + ioutil "github.com/argoproj/argo-cd/v2/util/io" +) + +func getOutWriter(inline bool, filePath string) (io.Writer, io.Closer, error) { + if !inline { + return os.Stdout, ioutil.NopCloser, nil + } + + if filePath == "" { + return nil, nil, errors.New("The file path must be specified using flag '--file'") + } + + err := os.Rename(filePath, fmt.Sprintf("%s.back", filePath)) + if err != nil { + return nil, nil, err + } + + fileOut, err := os.Create(filePath) + if err != nil { + return nil, nil, err + } + return fileOut, fileOut, nil +} + +// PrintResources prints a single resource in YAML or JSON format to stdout according to the output format +func PrintResources(output string, out io.Writer, resources ...interface{}) error { + for i, resource := range resources { + if secret, ok := resource.(*v1.Secret); ok { + convertSecretData(secret) + } + filteredResource, err := omitFields(resource) + if err != nil { + return fmt.Errorf("error omitting filtered fields from the resource: %w", err) + } + resources[i] = filteredResource + } + var obj interface{} = resources + if len(resources) == 1 { + obj = resources[0] 
+ } + + switch output { + case "json": + jsonBytes, err := json.MarshalIndent(obj, "", " ") + if err != nil { + return fmt.Errorf("error marshaling json: %w", err) + } + + _, _ = fmt.Fprintln(out, string(jsonBytes)) + case "yaml": + yamlBytes, err := yaml.Marshal(obj) + if err != nil { + return fmt.Errorf("error marshaling yaml: %w", err) + } + // marshaled YAML already ends with the new line character + _, _ = fmt.Fprint(out, string(yamlBytes)) + default: + return fmt.Errorf("unknown output format: %s", output) + } + return nil +} + +// omit fields such as status, creationTimestamp and metadata.namespace in k8s objects +func omitFields(resource interface{}) (interface{}, error) { + jsonBytes, err := json.Marshal(resource) + if err != nil { + return nil, err + } + + toMap := make(map[string]interface{}) + err = json.Unmarshal(jsonBytes, &toMap) + if err != nil { + return nil, err + } + + delete(toMap, "status") + if v, ok := toMap["metadata"]; ok { + if metadata, ok := v.(map[string]interface{}); ok { + delete(metadata, "creationTimestamp") + delete(metadata, "namespace") + } + } + return toMap, nil +} + +// convertSecretData converts kubernetes secret's data to stringData +func convertSecretData(secret *v1.Secret) { + secret.Kind = kube.SecretKind + secret.APIVersion = "v1" + secret.StringData = map[string]string{} + for k, v := range secret.Data { + secret.StringData[k] = string(v) + } + secret.Data = map[string][]byte{} +} diff --git a/cmd/argocd/commands/admin/generatespec_utils_test.go b/cmd/argocd/commands/admin/generatespec_utils_test.go new file mode 100644 index 0000000000000..ea71b1ffa76ae --- /dev/null +++ b/cmd/argocd/commands/admin/generatespec_utils_test.go @@ -0,0 +1,55 @@ +package admin + +import ( + "bytes" + "fmt" + "os" + "testing" + + "github.com/argoproj/argo-cd/v2/util/io" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func 
TestGetOutWriter_InlineOff(t *testing.T) { + out, closer, err := getOutWriter(false, "") + require.NoError(t, err) + defer io.Close(closer) + + assert.Equal(t, os.Stdout, out) +} + +func TestGetOutWriter_InlineOn(t *testing.T) { + tmpFile := t.TempDir() + defer func() { + _ = os.Remove(fmt.Sprintf("%s.back", tmpFile)) + }() + + out, closer, err := getOutWriter(true, tmpFile) + require.NoError(t, err) + defer io.Close(closer) + + assert.Equal(t, tmpFile, out.(*os.File).Name()) + _, err = os.Stat(fmt.Sprintf("%s.back", tmpFile)) + assert.NoError(t, err, "Back file must be created") +} + +func TestPrintResources_Secret_YAML(t *testing.T) { + out := bytes.Buffer{} + err := PrintResources("yaml", &out, &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "my-secret"}, + Data: map[string][]byte{"my-secret-key": []byte("my-secret-data")}, + }) + assert.NoError(t, err) + + assert.Equal(t, `apiVersion: v1 +kind: Secret +metadata: + name: my-secret +stringData: + my-secret-key: my-secret-data +`, out.String()) +} diff --git a/cmd/argocd/commands/admin/initial_password.go b/cmd/argocd/commands/admin/initial_password.go new file mode 100644 index 0000000000000..a130ee875ae60 --- /dev/null +++ b/cmd/argocd/commands/admin/initial_password.go @@ -0,0 +1,46 @@ +package admin + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + + "github.com/argoproj/argo-cd/v2/util/cli" + "github.com/argoproj/argo-cd/v2/util/errors" +) + +const initialPasswordSecretName = "argocd-initial-admin-secret" + +// NewInitialPasswordCommand defines a new command to retrieve Argo CD initial password. 
+func NewInitialPasswordCommand() *cobra.Command { + var ( + clientConfig clientcmd.ClientConfig + ) + var command = cobra.Command{ + Use: "initial-password", + Short: "Prints initial password to log in to Argo CD for the first time", + Run: func(c *cobra.Command, args []string) { + + config, err := clientConfig.ClientConfig() + errors.CheckError(err) + namespace, _, err := clientConfig.Namespace() + errors.CheckError(err) + + kubeClientset := kubernetes.NewForConfigOrDie(config) + secret, err := kubeClientset.CoreV1().Secrets(namespace).Get(context.Background(), initialPasswordSecretName, v1.GetOptions{}) + errors.CheckError(err) + + if initialPass, ok := secret.Data["password"]; ok { + fmt.Println(string(initialPass)) + fmt.Println("\n This password must be only used for first time login. We strongly recommend you update the password using `argocd account update-password`.") + } + }, + } + clientConfig = cli.AddKubectlFlagsToCmd(&command) + + return &command +} diff --git a/cmd/argocd/commands/admin/notifications.go b/cmd/argocd/commands/admin/notifications.go new file mode 100644 index 0000000000000..a1234cc53b7fe --- /dev/null +++ b/cmd/argocd/commands/admin/notifications.go @@ -0,0 +1,72 @@ +package admin + +import ( + "fmt" + "log" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + + "github.com/argoproj/argo-cd/v2/common" + "github.com/argoproj/argo-cd/v2/reposerver/apiclient" + "github.com/argoproj/argo-cd/v2/util/env" + service "github.com/argoproj/argo-cd/v2/util/notification/argocd" + settings "github.com/argoproj/argo-cd/v2/util/notification/settings" + "github.com/argoproj/argo-cd/v2/util/tls" + + "github.com/argoproj/argo-cd/v2/pkg/apis/application" + "github.com/argoproj/notifications-engine/pkg/cmd" + "github.com/spf13/cobra" +) + +var ( + applications = schema.GroupVersionResource{Group: application.Group, Version: "v1alpha1", Resource: application.ApplicationPlural} +) + +func 
NewNotificationsCommand() *cobra.Command { + var ( + argocdRepoServer string + argocdRepoServerPlaintext bool + argocdRepoServerStrictTLS bool + ) + + var argocdService service.Service + toolsCommand := cmd.NewToolsCommand( + "notifications", + "argocd admin notifications", + applications, + settings.GetFactorySettings(argocdService, "argocd-notifications-secret", "argocd-notifications-cm"), func(clientConfig clientcmd.ClientConfig) { + k8sCfg, err := clientConfig.ClientConfig() + if err != nil { + log.Fatalf("Failed to parse k8s config: %v", err) + } + ns, _, err := clientConfig.Namespace() + if err != nil { + log.Fatalf("Failed to parse k8s config: %v", err) + } + tlsConfig := apiclient.TLSConfiguration{ + DisableTLS: argocdRepoServerPlaintext, + StrictValidation: argocdRepoServerStrictTLS, + } + if !tlsConfig.DisableTLS && tlsConfig.StrictValidation { + pool, err := tls.LoadX509CertPool( + fmt.Sprintf("%s/reposerver/tls/tls.crt", env.StringFromEnv(common.EnvAppConfigPath, common.DefaultAppConfigPath)), + fmt.Sprintf("%s/reposerver/tls/ca.crt", env.StringFromEnv(common.EnvAppConfigPath, common.DefaultAppConfigPath)), + ) + if err != nil { + log.Fatalf("Failed to load tls certs: %v", err) + } + tlsConfig.Certificates = pool + } + repoClientset := apiclient.NewRepoServerClientset(argocdRepoServer, 5, tlsConfig) + argocdService, err = service.NewArgoCDService(kubernetes.NewForConfigOrDie(k8sCfg), ns, repoClientset) + if err != nil { + log.Fatalf("Failed to initialize Argo CD service: %v", err) + } + }) + toolsCommand.PersistentFlags().StringVar(&argocdRepoServer, "argocd-repo-server", common.DefaultRepoServerAddr, "Argo CD repo server address") + toolsCommand.PersistentFlags().BoolVar(&argocdRepoServerPlaintext, "argocd-repo-server-plaintext", false, "Use a plaintext client (non-TLS) to connect to repository server") + toolsCommand.PersistentFlags().BoolVar(&argocdRepoServerStrictTLS, "argocd-repo-server-strict-tls", false, "Perform strict validation of TLS 
certificates when connecting to repo server") + return toolsCommand +} diff --git a/cmd/argocd/commands/admin/project.go b/cmd/argocd/commands/admin/project.go new file mode 100644 index 0000000000000..8d4d5615bc826 --- /dev/null +++ b/cmd/argocd/commands/admin/project.go @@ -0,0 +1,245 @@ +package admin + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + + cmdutil "github.com/argoproj/argo-cd/v2/cmd/util" + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned" + appclient "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1" + "github.com/argoproj/argo-cd/v2/util/cli" + "github.com/argoproj/argo-cd/v2/util/errors" + "github.com/argoproj/argo-cd/v2/util/io" + "github.com/argoproj/argo-cd/v2/util/templates" + + "github.com/argoproj/gitops-engine/pkg/utils/kube" + "github.com/spf13/cobra" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/clientcmd" +) + +func NewProjectsCommand() *cobra.Command { + var command = &cobra.Command{ + Use: "proj", + Short: "Manage projects configuration", + Run: func(c *cobra.Command, args []string) { + c.HelpFunc()(c, args) + }, + } + + command.AddCommand(NewGenProjectSpecCommand()) + command.AddCommand(NewUpdatePolicyRuleCommand()) + command.AddCommand(NewProjectAllowListGenCommand()) + return command +} + +// NewGenProjectSpecCommand generates declarative configuration file for given project +func NewGenProjectSpecCommand() *cobra.Command { + var ( + opts cmdutil.ProjectOpts + fileURL string + outputFormat string + inline bool + ) + var command = &cobra.Command{ + Use: "generate-spec PROJECT", + Short: "Generate declarative config for a project", + Example: templates.Examples(` + # Generate a YAML configuration for a project named "myproject" + argocd admin projects generate-spec myproject + + # Generate a JSON configuration for a project named "anotherproject" and specify 
an output file + argocd admin projects generate-spec anotherproject --output json --file config.json + + # Generate a YAML configuration for a project named "someproject" and write it back to the input file + argocd admin projects generate-spec someproject --inline + `), + + Run: func(c *cobra.Command, args []string) { + proj, err := cmdutil.ConstructAppProj(fileURL, args, opts, c) + errors.CheckError(err) + + out, closer, err := getOutWriter(inline, fileURL) + errors.CheckError(err) + defer io.Close(closer) + + errors.CheckError(PrintResources(outputFormat, out, proj)) + }, + } + command.Flags().StringVarP(&outputFormat, "output", "o", "yaml", "Output format. One of: json|yaml") + command.Flags().StringVarP(&fileURL, "file", "f", "", "Filename or URL to Kubernetes manifests for the project") + command.Flags().BoolVarP(&inline, "inline", "i", false, "If set then generated resource is written back to the file specified in --file flag") + + // Only complete files with appropriate extension. 
+ err := command.Flags().SetAnnotation("file", cobra.BashCompFilenameExt, []string{"json", "yaml", "yml"}) + errors.CheckError(err) + + cmdutil.AddProjFlags(command, &opts) + return command +} + +func globMatch(pattern string, val string) bool { + if pattern == "*" { + return true + } + if ok, err := filepath.Match(pattern, val); ok && err == nil { + return true + } + return false +} + +func getModification(modification string, resource string, scope string, permission string) (func(string, string) string, error) { + switch modification { + case "set": + if scope == "" { + return nil, fmt.Errorf("Flag --group cannot be empty if permission should be set in role") + } + if permission == "" { + return nil, fmt.Errorf("Flag --permission cannot be empty if permission should be set in role") + } + return func(proj string, action string) string { + return fmt.Sprintf("%s, %s, %s/%s, %s", resource, action, proj, scope, permission) + }, nil + case "remove": + return func(proj string, action string) string { + return "" + }, nil + } + return nil, fmt.Errorf("modification %s is not supported", modification) +} + +func saveProject(ctx context.Context, updated v1alpha1.AppProject, orig v1alpha1.AppProject, projectsIf appclient.AppProjectInterface, dryRun bool) error { + fmt.Printf("===== %s ======\n", updated.Name) + target, err := kube.ToUnstructured(&updated) + errors.CheckError(err) + live, err := kube.ToUnstructured(&orig) + if err != nil { + return fmt.Errorf("error converting project to unstructured: %w", err) + } + _ = cli.PrintDiff(updated.Name, target, live) + if !dryRun { + _, err = projectsIf.Update(ctx, &updated, v1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("error while updating project: %w", err) + } + } + return nil +} + +func formatPolicy(proj string, role string, permission string) string { + return fmt.Sprintf("p, proj:%s:%s, %s", proj, role, permission) +} + +func split(input string, delimiter string) []string { + parts := strings.Split(input, 
delimiter) + for i := range parts { + parts[i] = strings.TrimSpace(parts[i]) + } + return parts +} + +func NewUpdatePolicyRuleCommand() *cobra.Command { + var ( + clientConfig clientcmd.ClientConfig + resource string + scope string + rolePattern string + permission string + dryRun bool + ) + var command = &cobra.Command{ + Use: "update-role-policy PROJECT_GLOB MODIFICATION ACTION", + Short: "Implement bulk project role update. Useful to back-fill existing project policies or remove obsolete actions.", + Example: ` # Add policy that allows executing any action (action/*) to roles which name matches *deployer* in all projects + argocd admin projects update-role-policy '*' set 'action/*' --role '*deployer*' --resource applications --scope '*' --permission allow + + # Remove policy which manages running (action/*) from all roles which name matches *deployer* in all projects + argocd admin projects update-role-policy '*' remove override --role '*deployer*' +`, + Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + + if len(args) != 3 { + c.HelpFunc()(c, args) + os.Exit(1) + } + projectGlob := args[0] + modificationType := args[1] + action := args[2] + + config, err := clientConfig.ClientConfig() + errors.CheckError(err) + config.QPS = 100 + config.Burst = 50 + + namespace, _, err := clientConfig.Namespace() + errors.CheckError(err) + appclients := appclientset.NewForConfigOrDie(config) + + modification, err := getModification(modificationType, resource, scope, permission) + errors.CheckError(err) + projIf := appclients.ArgoprojV1alpha1().AppProjects(namespace) + + err = updateProjects(ctx, projIf, projectGlob, rolePattern, action, modification, dryRun) + errors.CheckError(err) + }, + } + command.Flags().StringVar(&resource, "resource", "", "Resource e.g. 'applications'") + command.Flags().StringVar(&scope, "scope", "", "Resource scope e.g. '*'") + command.Flags().StringVar(&rolePattern, "role", "*", "Role name pattern e.g. 
'*deployer*'") + command.Flags().StringVar(&permission, "permission", "", "Action permission") + command.Flags().BoolVar(&dryRun, "dry-run", true, "Dry run") + clientConfig = cli.AddKubectlFlagsToCmd(command) + return command +} + +func updateProjects(ctx context.Context, projIf appclient.AppProjectInterface, projectGlob string, rolePattern string, action string, modification func(string, string) string, dryRun bool) error { + projects, err := projIf.List(ctx, v1.ListOptions{}) + if err != nil { + return fmt.Errorf("error listing the projects: %w", err) + } + for _, proj := range projects.Items { + if !globMatch(projectGlob, proj.Name) { + continue + } + origProj := proj.DeepCopy() + updated := false + for i, role := range proj.Spec.Roles { + if !globMatch(rolePattern, role.Name) { + continue + } + actionPolicyIndex := -1 + for i := range role.Policies { + parts := split(role.Policies[i], ",") + if len(parts) != 6 || parts[3] != action { + continue + } + actionPolicyIndex = i + break + } + policyPermission := modification(proj.Name, action) + if actionPolicyIndex == -1 && policyPermission != "" { + updated = true + role.Policies = append(role.Policies, formatPolicy(proj.Name, role.Name, policyPermission)) + } else if actionPolicyIndex > -1 && policyPermission == "" { + updated = true + role.Policies = append(role.Policies[:actionPolicyIndex], role.Policies[actionPolicyIndex+1:]...) 
+ } else if actionPolicyIndex > -1 && policyPermission != "" { + updated = true + role.Policies[actionPolicyIndex] = formatPolicy(proj.Name, role.Name, policyPermission) + } + proj.Spec.Roles[i] = role + } + if updated { + err = saveProject(ctx, proj, *origProj, projIf, dryRun) + if err != nil { + return fmt.Errorf("error saving the project: %w", err) + } + } + } + return nil +} diff --git a/cmd/argocd/commands/admin/project_allowlist.go b/cmd/argocd/commands/admin/project_allowlist.go new file mode 100644 index 0000000000000..57b855251daa9 --- /dev/null +++ b/cmd/argocd/commands/admin/project_allowlist.go @@ -0,0 +1,164 @@ +package admin + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" + + "github.com/spf13/cobra" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/clientcmd" + "sigs.k8s.io/yaml" + + "github.com/argoproj/argo-cd/v2/util/errors" + + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/argoproj/argo-cd/v2/util/cli" + + // load the gcp plugin (required to authenticate against GKE clusters). + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + // load the oidc plugin (required to authenticate with OpenID Connect). + _ "k8s.io/client-go/plugin/pkg/client/auth/oidc" + // load the azure plugin (required to authenticate with AKS clusters). 
+ _ "k8s.io/client-go/plugin/pkg/client/auth/azure" + + "github.com/argoproj/argo-cd/v2/pkg/apis/application" +) + +// NewProjectAllowListGenCommand generates a project from clusterRole +func NewProjectAllowListGenCommand() *cobra.Command { + var ( + clientConfig clientcmd.ClientConfig + out string + ) + var command = &cobra.Command{ + Use: "generate-allow-list CLUSTERROLE_PATH PROJ_NAME", + Short: "Generates project allow list from the specified clusterRole file", + Run: func(c *cobra.Command, args []string) { + if len(args) != 2 { + c.HelpFunc()(c, args) + os.Exit(1) + } + clusterRoleFileName := args[0] + projName := args[1] + + var writer io.Writer + if out == "-" { + writer = os.Stdout + } else { + f, err := os.Create(out) + errors.CheckError(err) + bw := bufio.NewWriter(f) + writer = bw + defer func() { + err = bw.Flush() + errors.CheckError(err) + err = f.Close() + errors.CheckError(err) + }() + } + + resourceList, err := getResourceList(clientConfig) + errors.CheckError(err) + globalProj, err := generateProjectAllowList(resourceList, clusterRoleFileName, projName) + errors.CheckError(err) + + yamlBytes, err := yaml.Marshal(globalProj) + errors.CheckError(err) + + _, err = writer.Write(yamlBytes) + errors.CheckError(err) + }, + } + clientConfig = cli.AddKubectlFlagsToCmd(command) + command.Flags().StringVarP(&out, "out", "o", "-", "Output to the specified file instead of stdout") + + return command +} + +func getResourceList(clientConfig clientcmd.ClientConfig) ([]*metav1.APIResourceList, error) { + config, err := clientConfig.ClientConfig() + if err != nil { + return nil, fmt.Errorf("error while creating client config: %s", err) + } + disco, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return nil, fmt.Errorf("error while creating discovery client: %s", err) + } + serverResources, err := disco.ServerPreferredResources() + if err != nil { + return nil, fmt.Errorf("error while getting server resources: %s", err) + } + return 
serverResources, nil +} + +func generateProjectAllowList(serverResources []*metav1.APIResourceList, clusterRoleFileName string, projName string) (*v1alpha1.AppProject, error) { + yamlBytes, err := os.ReadFile(clusterRoleFileName) + if err != nil { + return nil, fmt.Errorf("error reading cluster role file: %s", err) + } + var obj unstructured.Unstructured + err = yaml.Unmarshal(yamlBytes, &obj) + if err != nil { + return nil, fmt.Errorf("error unmarshalling cluster role file yaml: %s", err) + } + + clusterRole := &rbacv1.ClusterRole{} + err = scheme.Scheme.Convert(&obj, clusterRole, nil) + if err != nil { + return nil, fmt.Errorf("error converting cluster role yaml into ClusterRole struct: %s", err) + } + + resourceList := make([]metav1.GroupKind, 0) + for _, rule := range clusterRole.Rules { + if len(rule.APIGroups) <= 0 { + continue + } + + canCreate := false + for _, verb := range rule.Verbs { + if strings.EqualFold(verb, "Create") { + canCreate = true + break + } + } + + if !canCreate { + continue + } + + ruleApiGroup := rule.APIGroups[0] + for _, ruleResource := range rule.Resources { + for _, apiResourcesList := range serverResources { + gv, err := schema.ParseGroupVersion(apiResourcesList.GroupVersion) + if err != nil { + gv = schema.GroupVersion{} + } + if ruleApiGroup == gv.Group { + for _, apiResource := range apiResourcesList.APIResources { + if apiResource.Name == ruleResource { + resourceList = append(resourceList, metav1.GroupKind{Group: ruleApiGroup, Kind: apiResource.Kind}) + } + } + } + } + } + } + globalProj := v1alpha1.AppProject{ + TypeMeta: metav1.TypeMeta{ + Kind: application.AppProjectKind, + APIVersion: "argoproj.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{Name: projName}, + Spec: v1alpha1.AppProjectSpec{}, + } + globalProj.Spec.NamespaceResourceWhitelist = resourceList + return &globalProj, nil +} diff --git a/cmd/argocd/commands/admin/project_allowlist_test.go b/cmd/argocd/commands/admin/project_allowlist_test.go new file mode 100644 
index 0000000000000..c4634fb9310c1 --- /dev/null +++ b/cmd/argocd/commands/admin/project_allowlist_test.go @@ -0,0 +1,20 @@ +package admin + +import ( + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestProjectAllowListGen(t *testing.T) { + res := metav1.APIResource{ + Name: "services", + Kind: "Service", + } + resourceList := []*metav1.APIResourceList{{APIResources: []metav1.APIResource{res}}} + + globalProj, err := generateProjectAllowList(resourceList, "testdata/test_clusterrole.yaml", "testproj") + assert.NoError(t, err) + assert.True(t, len(globalProj.Spec.NamespaceResourceWhitelist) > 0) +} diff --git a/cmd/argocd-util/commands/project_test.go b/cmd/argocd/commands/admin/project_test.go similarity index 84% rename from cmd/argocd-util/commands/project_test.go rename to cmd/argocd/commands/admin/project_test.go index 8447b755af31f..93d8626ce1b25 100644 --- a/cmd/argocd-util/commands/project_test.go +++ b/cmd/argocd/commands/admin/project_test.go @@ -1,4 +1,4 @@ -package commands +package admin import ( "context" @@ -29,31 +29,35 @@ func newProj(name string, roleNames ...string) *v1alpha1.AppProject { } func TestUpdateProjects_FindMatchingProject(t *testing.T) { + ctx := context.Background() + clientset := fake.NewSimpleClientset(newProj("foo", "test"), newProj("bar", "test")) modification, err := getModification("set", "*", "*", "allow") assert.NoError(t, err) - err = updateProjects(clientset.ArgoprojV1alpha1().AppProjects(namespace), "ba*", "*", "set", modification, false) + err = updateProjects(ctx, clientset.ArgoprojV1alpha1().AppProjects(namespace), "ba*", "*", "set", modification, false) assert.NoError(t, err) - fooProj, err := clientset.ArgoprojV1alpha1().AppProjects(namespace).Get(context.Background(), "foo", v1.GetOptions{}) + fooProj, err := clientset.ArgoprojV1alpha1().AppProjects(namespace).Get(ctx, "foo", v1.GetOptions{}) assert.NoError(t, err) assert.Len(t, 
fooProj.Spec.Roles[0].Policies, 0) - barProj, err := clientset.ArgoprojV1alpha1().AppProjects(namespace).Get(context.Background(), "bar", v1.GetOptions{}) + barProj, err := clientset.ArgoprojV1alpha1().AppProjects(namespace).Get(ctx, "bar", v1.GetOptions{}) assert.NoError(t, err) assert.EqualValues(t, barProj.Spec.Roles[0].Policies, []string{"p, proj:bar:test, *, set, bar/*, allow"}) } func TestUpdateProjects_FindMatchingRole(t *testing.T) { + ctx := context.Background() + clientset := fake.NewSimpleClientset(newProj("proj", "foo", "bar")) modification, err := getModification("set", "*", "*", "allow") assert.NoError(t, err) - err = updateProjects(clientset.ArgoprojV1alpha1().AppProjects(namespace), "*", "fo*", "set", modification, false) + err = updateProjects(ctx, clientset.ArgoprojV1alpha1().AppProjects(namespace), "*", "fo*", "set", modification, false) assert.NoError(t, err) - proj, err := clientset.ArgoprojV1alpha1().AppProjects(namespace).Get(context.Background(), "proj", v1.GetOptions{}) + proj, err := clientset.ArgoprojV1alpha1().AppProjects(namespace).Get(ctx, "proj", v1.GetOptions{}) assert.NoError(t, err) assert.EqualValues(t, proj.Spec.Roles[0].Policies, []string{"p, proj:proj:foo, *, set, proj/*, allow"}) assert.Len(t, proj.Spec.Roles[1].Policies, 0) diff --git a/cmd/argocd/commands/admin/repo.go b/cmd/argocd/commands/admin/repo.go new file mode 100644 index 0000000000000..208a6ef8550f8 --- /dev/null +++ b/cmd/argocd/commands/admin/repo.go @@ -0,0 +1,169 @@ +package admin + +import ( + "fmt" + "os" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + apiv1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" + + cmdutil "github.com/argoproj/argo-cd/v2/cmd/util" + "github.com/argoproj/argo-cd/v2/common" + "github.com/argoproj/argo-cd/v2/util/cli" + "github.com/argoproj/argo-cd/v2/util/db" + "github.com/argoproj/argo-cd/v2/util/errors" + "github.com/argoproj/argo-cd/v2/util/git" + 
"github.com/argoproj/argo-cd/v2/util/settings" +) + +const ( + ArgoCDNamespace = "argocd" + repoSecretPrefix = "repo" +) + +func NewRepoCommand() *cobra.Command { + var command = &cobra.Command{ + Use: "repo", + Short: "Manage repositories configuration", + Run: func(c *cobra.Command, args []string) { + c.HelpFunc()(c, args) + }, + } + command.AddCommand(NewGenRepoSpecCommand()) + + return command +} + +func NewGenRepoSpecCommand() *cobra.Command { + var ( + repoOpts cmdutil.RepoOptions + outputFormat string + ) + + // For better readability and easier formatting + var repoAddExamples = ` + # Add a Git repository via SSH using a private key for authentication, ignoring the server's host key: + argocd admin repo generate-spec git@git.example.com:repos/repo --insecure-ignore-host-key --ssh-private-key-path ~/id_rsa + + # Add a Git repository via SSH on a non-default port - need to use ssh:// style URLs here + argocd admin repo generate-spec ssh://git@git.example.com:2222/repos/repo --ssh-private-key-path ~/id_rsa + + # Add a private Git repository via HTTPS using username/password and TLS client certificates: + argocd admin repo generate-spec https://git.example.com/repos/repo --username git --password secret --tls-client-cert-path ~/mycert.crt --tls-client-cert-key-path ~/mycert.key + + # Add a private Git repository via HTTPS using username/password without verifying the server's TLS certificate + argocd admin repo generate-spec https://git.example.com/repos/repo --username git --password secret --insecure-skip-server-verification + + # Add a public Helm repository named 'stable' via HTTPS + argocd admin repo generate-spec https://charts.helm.sh/stable --type helm --name stable + + # Add a private Helm repository named 'stable' via HTTPS + argocd admin repo generate-spec https://charts.helm.sh/stable --type helm --name stable --username test --password test + + # Add a private Helm OCI-based repository named 'stable' via HTTPS + argocd admin repo generate-spec 
helm-oci-registry.cn-zhangjiakou.cr.aliyuncs.com --type helm --name stable --enable-oci --username test --password test +` + + var command = &cobra.Command{ + Use: "generate-spec REPOURL", + Short: "Generate declarative config for a repo", + Example: repoAddExamples, + Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + + log.SetLevel(log.WarnLevel) + if len(args) != 1 { + c.HelpFunc()(c, args) + os.Exit(1) + } + + // Repository URL + repoOpts.Repo.Repo = args[0] + + // Specifying ssh-private-key-path is only valid for SSH repositories + if repoOpts.SshPrivateKeyPath != "" { + if ok, _ := git.IsSSHURL(repoOpts.Repo.Repo); ok { + keyData, err := os.ReadFile(repoOpts.SshPrivateKeyPath) + if err != nil { + log.Fatal(err) + } + repoOpts.Repo.SSHPrivateKey = string(keyData) + } else { + err := fmt.Errorf("--ssh-private-key-path is only supported for SSH repositories") + errors.CheckError(err) + } + } + + // tls-client-cert-path and tls-client-cert-key-path must always be + // specified together + if (repoOpts.TlsClientCertPath != "" && repoOpts.TlsClientCertKeyPath == "") || (repoOpts.TlsClientCertPath == "" && repoOpts.TlsClientCertKeyPath != "") { + err := fmt.Errorf("--tls-client-cert-path and --tls-client-cert-key-path must be specified together") + errors.CheckError(err) + } + + // Specifying tls-client-cert-path is only valid for HTTPS repositories + if repoOpts.TlsClientCertPath != "" { + if git.IsHTTPSURL(repoOpts.Repo.Repo) { + tlsCertData, err := os.ReadFile(repoOpts.TlsClientCertPath) + errors.CheckError(err) + tlsCertKey, err := os.ReadFile(repoOpts.TlsClientCertKeyPath) + errors.CheckError(err) + repoOpts.Repo.TLSClientCertData = string(tlsCertData) + repoOpts.Repo.TLSClientCertKey = string(tlsCertKey) + } else { + err := fmt.Errorf("--tls-client-cert-path is only supported for HTTPS repositories") + errors.CheckError(err) + } + } + + // Set repository connection properties only when creating repository, not + // when creating repository
credentials. + // InsecureIgnoreHostKey is deprecated and only here for backwards compat + repoOpts.Repo.InsecureIgnoreHostKey = repoOpts.InsecureIgnoreHostKey + repoOpts.Repo.Insecure = repoOpts.InsecureSkipServerVerification + repoOpts.Repo.EnableLFS = repoOpts.EnableLfs + repoOpts.Repo.EnableOCI = repoOpts.EnableOci + + if repoOpts.Repo.Type == "helm" && repoOpts.Repo.Name == "" { + errors.CheckError(fmt.Errorf("must specify --name for repos of type 'helm'")) + } + + // If the user set a username, but didn't supply password via --password, + // then we prompt for it + if repoOpts.Repo.Username != "" && repoOpts.Repo.Password == "" { + repoOpts.Repo.Password = cli.PromptPassword(repoOpts.Repo.Password) + } + + argoCDCM := &apiv1.ConfigMap{ + TypeMeta: v1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: "v1", + }, + ObjectMeta: v1.ObjectMeta{ + Name: common.ArgoCDConfigMapName, + Namespace: ArgoCDNamespace, + Labels: map[string]string{ + "app.kubernetes.io/part-of": "argocd", + }, + }, + } + kubeClientset := fake.NewSimpleClientset(argoCDCM) + settingsMgr := settings.NewSettingsManager(ctx, kubeClientset, ArgoCDNamespace) + argoDB := db.NewDB(ArgoCDNamespace, settingsMgr, kubeClientset) + + _, err := argoDB.CreateRepository(ctx, &repoOpts.Repo) + errors.CheckError(err) + + secret, err := kubeClientset.CoreV1().Secrets(ArgoCDNamespace).Get(ctx, db.RepoURLToSecretName(repoSecretPrefix, repoOpts.Repo.Repo), v1.GetOptions{}) + errors.CheckError(err) + + errors.CheckError(PrintResources(outputFormat, os.Stdout, secret)) + }, + } + command.Flags().StringVarP(&outputFormat, "output", "o", "yaml", "Output format. 
One of: json|yaml") + cmdutil.AddRepoFlags(command, &repoOpts) + return command +} diff --git a/cmd/argocd-util/commands/secrets_redactor_test.go b/cmd/argocd/commands/admin/secrets_redactor_test.go similarity index 99% rename from cmd/argocd-util/commands/secrets_redactor_test.go rename to cmd/argocd/commands/admin/secrets_redactor_test.go index b8cc43135ac33..cb1b3e78dbfea 100644 --- a/cmd/argocd-util/commands/secrets_redactor_test.go +++ b/cmd/argocd/commands/admin/secrets_redactor_test.go @@ -1,4 +1,4 @@ -package commands +package admin import ( "testing" diff --git a/cmd/argocd/commands/admin/settings.go b/cmd/argocd/commands/admin/settings.go new file mode 100644 index 0000000000000..281d9875691c4 --- /dev/null +++ b/cmd/argocd/commands/admin/settings.go @@ -0,0 +1,637 @@ +package admin + +import ( + "bytes" + "context" + "fmt" + "os" + "reflect" + "sort" + "strconv" + "strings" + "text/tabwriter" + + healthutil "github.com/argoproj/gitops-engine/pkg/health" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/clientcmd" + "sigs.k8s.io/yaml" + + "github.com/argoproj/argo-cd/v2/common" + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/argoproj/argo-cd/v2/util/argo/normalizers" + "github.com/argoproj/argo-cd/v2/util/cli" + "github.com/argoproj/argo-cd/v2/util/errors" + "github.com/argoproj/argo-cd/v2/util/lua" + "github.com/argoproj/argo-cd/v2/util/settings" +) + +type settingsOpts struct { + argocdCMPath string + argocdSecretPath string + loadClusterSettings bool + clientConfig clientcmd.ClientConfig +} + +type commandContext interface { + createSettingsManager(context.Context) (*settings.SettingsManager, error) +} + +func collectLogs(callback func()) string { + log.SetLevel(log.DebugLevel) + out := 
bytes.Buffer{} + log.SetOutput(&out) + defer log.SetLevel(log.FatalLevel) + callback() + return out.String() +} + +func setSettingsMeta(obj v1.Object) { + obj.SetNamespace("default") + labels := obj.GetLabels() + if labels == nil { + labels = make(map[string]string) + } + labels["app.kubernetes.io/part-of"] = "argocd" + obj.SetLabels(labels) +} + +func (opts *settingsOpts) createSettingsManager(ctx context.Context) (*settings.SettingsManager, error) { + var argocdCM *corev1.ConfigMap + if opts.argocdCMPath == "" && !opts.loadClusterSettings { + return nil, fmt.Errorf("either --argocd-cm-path must be provided or --load-cluster-settings must be set to true") + } else if opts.argocdCMPath == "" { + realClientset, ns, err := opts.getK8sClient() + if err != nil { + return nil, err + } + + argocdCM, err = realClientset.CoreV1().ConfigMaps(ns).Get(ctx, common.ArgoCDConfigMapName, v1.GetOptions{}) + if err != nil { + return nil, err + } + } else { + data, err := os.ReadFile(opts.argocdCMPath) + if err != nil { + return nil, err + } + err = yaml.Unmarshal(data, &argocdCM) + if err != nil { + return nil, err + } + } + setSettingsMeta(argocdCM) + + var argocdSecret *corev1.Secret + if opts.argocdSecretPath != "" { + data, err := os.ReadFile(opts.argocdSecretPath) + if err != nil { + return nil, err + } + err = yaml.Unmarshal(data, &argocdSecret) + if err != nil { + return nil, err + } + setSettingsMeta(argocdSecret) + } else if opts.loadClusterSettings { + realClientset, ns, err := opts.getK8sClient() + if err != nil { + return nil, err + } + argocdSecret, err = realClientset.CoreV1().Secrets(ns).Get(ctx, common.ArgoCDSecretName, v1.GetOptions{}) + if err != nil { + return nil, err + } + } else { + argocdSecret = &corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: common.ArgoCDSecretName, + }, + Data: map[string][]byte{ + "admin.password": []byte("test"), + "server.secretkey": []byte("test"), + }, + } + } + setSettingsMeta(argocdSecret) + clientset := 
fake.NewSimpleClientset(argocdSecret, argocdCM) + + manager := settings.NewSettingsManager(ctx, clientset, "default") + errors.CheckError(manager.ResyncInformers()) + + return manager, nil +} + +func (opts *settingsOpts) getK8sClient() (*kubernetes.Clientset, string, error) { + namespace, _, err := opts.clientConfig.Namespace() + if err != nil { + return nil, "", err + } + + restConfig, err := opts.clientConfig.ClientConfig() + if err != nil { + return nil, "", err + } + + realClientset, err := kubernetes.NewForConfig(restConfig) + if err != nil { + return nil, "", err + } + return realClientset, namespace, nil +} + +func NewSettingsCommand() *cobra.Command { + var ( + opts settingsOpts + ) + + var command = &cobra.Command{ + Use: "settings", + Short: "Provides set of commands for settings validation and troubleshooting", + Run: func(c *cobra.Command, args []string) { + c.HelpFunc()(c, args) + }, + } + log.SetLevel(log.FatalLevel) + + command.AddCommand(NewValidateSettingsCommand(&opts)) + command.AddCommand(NewResourceOverridesCommand(&opts)) + command.AddCommand(NewRBACCommand()) + + opts.clientConfig = cli.AddKubectlFlagsToCmd(command) + command.PersistentFlags().StringVar(&opts.argocdCMPath, "argocd-cm-path", "", "Path to local argocd-cm.yaml file") + command.PersistentFlags().StringVar(&opts.argocdSecretPath, "argocd-secret-path", "", "Path to local argocd-secret.yaml file") + command.PersistentFlags().BoolVar(&opts.loadClusterSettings, "load-cluster-settings", false, + "Indicates that config map and secret should be loaded from cluster unless local file path is provided") + return command +} + +type settingValidator func(manager *settings.SettingsManager) (string, error) + +func joinValidators(validators ...settingValidator) settingValidator { + return func(manager *settings.SettingsManager) (string, error) { + var errorStrs []string + var summaries []string + for i := range validators { + summary, err := validators[i](manager) + if err != nil { + errorStrs = 
append(errorStrs, err.Error()) + } + if summary != "" { + summaries = append(summaries, summary) + } + } + if len(errorStrs) > 0 { + return "", fmt.Errorf("%s", strings.Join(errorStrs, "\n")) + } + return strings.Join(summaries, "\n"), nil + } +} + +var validatorsByGroup = map[string]settingValidator{ + "general": joinValidators(func(manager *settings.SettingsManager) (string, error) { + general, err := manager.GetSettings() + if err != nil { + return "", err + } + ssoProvider := "" + if general.DexConfig != "" { + if _, err := settings.UnmarshalDexConfig(general.DexConfig); err != nil { + return "", fmt.Errorf("invalid dex.config: %v", err) + } + ssoProvider = "Dex" + } else if general.OIDCConfigRAW != "" { + if err := settings.ValidateOIDCConfig(general.OIDCConfigRAW); err != nil { + return "", fmt.Errorf("invalid oidc.config: %v", err) + } + ssoProvider = "OIDC" + } + var summary string + if ssoProvider != "" { + summary = fmt.Sprintf("%s is configured", ssoProvider) + if general.URL == "" { + summary = summary + " ('url' field is missing)" + } + } else if ssoProvider != "" && general.URL != "" { + + } else { + summary = "SSO is not configured" + } + return summary, nil + }, func(manager *settings.SettingsManager) (string, error) { + _, err := manager.GetAppInstanceLabelKey() + return "", err + }, func(manager *settings.SettingsManager) (string, error) { + _, err := manager.GetHelp() + return "", err + }, func(manager *settings.SettingsManager) (string, error) { + _, err := manager.GetGoogleAnalytics() + return "", err + }), + "kustomize": func(manager *settings.SettingsManager) (string, error) { + opts, err := manager.GetKustomizeSettings() + if err != nil { + return "", err + } + summary := "default options" + if opts.BuildOptions != "" { + summary = opts.BuildOptions + } + if len(opts.Versions) > 0 { + summary = fmt.Sprintf("%s (%d versions)", summary, len(opts.Versions)) + } + return summary, err + }, + "repositories": joinValidators(func(manager 
*settings.SettingsManager) (string, error) { + repos, err := manager.GetRepositories() + if err != nil { + return "", err + } + return fmt.Sprintf("%d repositories", len(repos)), nil + }, func(manager *settings.SettingsManager) (string, error) { + creds, err := manager.GetRepositoryCredentials() + if err != nil { + return "", err + } + return fmt.Sprintf("%d repository credentials", len(creds)), nil + }), + "accounts": func(manager *settings.SettingsManager) (string, error) { + accounts, err := manager.GetAccounts() + if err != nil { + return "", err + } + return fmt.Sprintf("%d accounts", len(accounts)), nil + }, + "resource-overrides": func(manager *settings.SettingsManager) (string, error) { + overrides, err := manager.GetResourceOverrides() + if err != nil { + return "", err + } + return fmt.Sprintf("%d resource overrides", len(overrides)), nil + }, +} + +func NewValidateSettingsCommand(cmdCtx commandContext) *cobra.Command { + var ( + groups []string + ) + + var allGroups []string + for k := range validatorsByGroup { + allGroups = append(allGroups, k) + } + sort.Slice(allGroups, func(i, j int) bool { + return allGroups[i] < allGroups[j] + }) + + var command = &cobra.Command{ + Use: "validate", + Short: "Validate settings", + Long: "Validates settings specified in 'argocd-cm' ConfigMap and 'argocd-secret' Secret", + Example: ` +#Validates all settings in the specified YAML file +argocd admin settings validate --argocd-cm-path ./argocd-cm.yaml + +#Validates accounts and plugins settings in Kubernetes cluster of current kubeconfig context +argocd admin settings validate --group accounts --group plugins --load-cluster-settings`, + Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + + settingsManager, err := cmdCtx.createSettingsManager(ctx) + errors.CheckError(err) + + if len(groups) == 0 { + groups = allGroups + } + for i, group := range groups { + validator := validatorsByGroup[group] + + logs := collectLogs(func() { + summary, err := 
validator(settingsManager) + + if err != nil { + _, _ = fmt.Fprintf(os.Stdout, "❌ %s\n", group) + _, _ = fmt.Fprintf(os.Stdout, "%s\n", err.Error()) + } else { + _, _ = fmt.Fprintf(os.Stdout, "✅ %s\n", group) + if summary != "" { + _, _ = fmt.Fprintf(os.Stdout, "%s\n", summary) + } + } + }) + if logs != "" { + _, _ = fmt.Fprintf(os.Stdout, "%s\n", logs) + } + if i != len(groups)-1 { + _, _ = fmt.Fprintf(os.Stdout, "\n") + } + } + }, + } + + command.Flags().StringArrayVar(&groups, "group", nil, fmt.Sprintf( + "Optional list of setting groups that have to be validated ( one of: %s)", strings.Join(allGroups, ", "))) + + return command +} + +func NewResourceOverridesCommand(cmdCtx commandContext) *cobra.Command { + var command = &cobra.Command{ + Use: "resource-overrides", + Short: "Troubleshoot resource overrides", + Run: func(c *cobra.Command, args []string) { + c.HelpFunc()(c, args) + }, + } + command.AddCommand(NewResourceIgnoreDifferencesCommand(cmdCtx)) + command.AddCommand(NewResourceIgnoreResourceUpdatesCommand(cmdCtx)) + command.AddCommand(NewResourceActionListCommand(cmdCtx)) + command.AddCommand(NewResourceActionRunCommand(cmdCtx)) + command.AddCommand(NewResourceHealthCommand(cmdCtx)) + return command +} + +func executeResourceOverrideCommand(ctx context.Context, cmdCtx commandContext, args []string, callback func(res unstructured.Unstructured, override v1alpha1.ResourceOverride, overrides map[string]v1alpha1.ResourceOverride)) { + data, err := os.ReadFile(args[0]) + errors.CheckError(err) + + res := unstructured.Unstructured{} + errors.CheckError(yaml.Unmarshal(data, &res)) + + settingsManager, err := cmdCtx.createSettingsManager(ctx) + errors.CheckError(err) + + overrides, err := settingsManager.GetResourceOverrides() + errors.CheckError(err) + gvk := res.GroupVersionKind() + key := gvk.Kind + if gvk.Group != "" { + key = fmt.Sprintf("%s/%s", gvk.Group, gvk.Kind) + } + override, hasOverride := overrides[key] + if !hasOverride { + _, _ = fmt.Printf("No 
overrides configured for '%s/%s'\n", gvk.Group, gvk.Kind) + return + } + callback(res, override, overrides) +} + +func executeIgnoreResourceUpdatesOverrideCommand(ctx context.Context, cmdCtx commandContext, args []string, callback func(res unstructured.Unstructured, override v1alpha1.ResourceOverride, overrides map[string]v1alpha1.ResourceOverride)) { + data, err := os.ReadFile(args[0]) + errors.CheckError(err) + + res := unstructured.Unstructured{} + errors.CheckError(yaml.Unmarshal(data, &res)) + + settingsManager, err := cmdCtx.createSettingsManager(ctx) + errors.CheckError(err) + + overrides, err := settingsManager.GetIgnoreResourceUpdatesOverrides() + errors.CheckError(err) + gvk := res.GroupVersionKind() + key := gvk.Kind + if gvk.Group != "" { + key = fmt.Sprintf("%s/%s", gvk.Group, gvk.Kind) + } + override, hasOverride := overrides[key] + if !hasOverride { + _, _ = fmt.Printf("No overrides configured for '%s/%s'\n", gvk.Group, gvk.Kind) + return + } + callback(res, override, overrides) +} + +func NewResourceIgnoreDifferencesCommand(cmdCtx commandContext) *cobra.Command { + var command = &cobra.Command{ + Use: "ignore-differences RESOURCE_YAML_PATH", + Short: "Renders fields excluded from diffing", + Long: "Renders ignored fields using the 'ignoreDifferences' setting specified in the 'resource.customizations' field of 'argocd-cm' ConfigMap", + Example: ` +argocd admin settings resource-overrides ignore-differences ./deploy.yaml --argocd-cm-path ./argocd-cm.yaml`, + Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + + if len(args) < 1 { + c.HelpFunc()(c, args) + os.Exit(1) + } + + executeResourceOverrideCommand(ctx, cmdCtx, args, func(res unstructured.Unstructured, override v1alpha1.ResourceOverride, overrides map[string]v1alpha1.ResourceOverride) { + gvk := res.GroupVersionKind() + if len(override.IgnoreDifferences.JSONPointers) == 0 && len(override.IgnoreDifferences.JQPathExpressions) == 0 { + _, _ = fmt.Printf("Ignore differences are not 
configured for '%s/%s'\n", gvk.Group, gvk.Kind) + return + } + + // This normalizer won't verify 'managedFieldsManagers' ignore difference + // configurations. This requires access to live resources which is not the + // purpose of this command. This will just apply jsonPointers and + // jqPathExpressions configurations. + normalizer, err := normalizers.NewIgnoreNormalizer(nil, overrides) + errors.CheckError(err) + + normalizedRes := res.DeepCopy() + logs := collectLogs(func() { + errors.CheckError(normalizer.Normalize(normalizedRes)) + }) + if logs != "" { + _, _ = fmt.Println(logs) + } + + if reflect.DeepEqual(&res, normalizedRes) { + _, _ = fmt.Printf("No fields are ignored by ignoreDifferences settings: \n%s\n", override.IgnoreDifferences) + return + } + + _, _ = fmt.Printf("Following fields are ignored:\n\n") + _ = cli.PrintDiff(res.GetName(), &res, normalizedRes) + }) + }, + } + return command +} + +func NewResourceIgnoreResourceUpdatesCommand(cmdCtx commandContext) *cobra.Command { + var command = &cobra.Command{ + Use: "ignore-resource-updates RESOURCE_YAML_PATH", + Short: "Renders fields excluded from resource updates", + Long: "Renders ignored fields using the 'ignoreResourceUpdates' setting specified in the 'resource.customizations' field of 'argocd-cm' ConfigMap", + Example: ` +argocd admin settings resource-overrides ignore-resource-updates ./deploy.yaml --argocd-cm-path ./argocd-cm.yaml`, + Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + + if len(args) < 1 { + c.HelpFunc()(c, args) + os.Exit(1) + } + + executeIgnoreResourceUpdatesOverrideCommand(ctx, cmdCtx, args, func(res unstructured.Unstructured, override v1alpha1.ResourceOverride, overrides map[string]v1alpha1.ResourceOverride) { + gvk := res.GroupVersionKind() + if len(override.IgnoreResourceUpdates.JSONPointers) == 0 && len(override.IgnoreResourceUpdates.JQPathExpressions) == 0 { + _, _ = fmt.Printf("Ignore resource updates are not configured for '%s/%s'\n", gvk.Group, 
gvk.Kind) + return + } + + normalizer, err := normalizers.NewIgnoreNormalizer(nil, overrides) + errors.CheckError(err) + + normalizedRes := res.DeepCopy() + logs := collectLogs(func() { + errors.CheckError(normalizer.Normalize(normalizedRes)) + }) + if logs != "" { + _, _ = fmt.Println(logs) + } + + if reflect.DeepEqual(&res, normalizedRes) { + _, _ = fmt.Printf("No fields are ignored by ignoreResourceUpdates settings: \n%s\n", override.IgnoreResourceUpdates) + return + } + + _, _ = fmt.Printf("Following fields are ignored:\n\n") + _ = cli.PrintDiff(res.GetName(), &res, normalizedRes) + }) + }, + } + return command +} + +func NewResourceHealthCommand(cmdCtx commandContext) *cobra.Command { + var command = &cobra.Command{ + Use: "health RESOURCE_YAML_PATH", + Short: "Assess resource health", + Long: "Assess resource health using the lua script configured in the 'resource.customizations' field of 'argocd-cm' ConfigMap", + Example: ` +argocd admin settings resource-overrides health ./deploy.yaml --argocd-cm-path ./argocd-cm.yaml`, + Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + + if len(args) < 1 { + c.HelpFunc()(c, args) + os.Exit(1) + } + + executeResourceOverrideCommand(ctx, cmdCtx, args, func(res unstructured.Unstructured, override v1alpha1.ResourceOverride, overrides map[string]v1alpha1.ResourceOverride) { + gvk := res.GroupVersionKind() + if override.HealthLua == "" { + _, _ = fmt.Printf("Health script is not configured for '%s/%s'\n", gvk.Group, gvk.Kind) + return + } + + resHealth, err := healthutil.GetResourceHealth(&res, lua.ResourceHealthOverrides(overrides)) + errors.CheckError(err) + + _, _ = fmt.Printf("STATUS: %s\n", resHealth.Status) + _, _ = fmt.Printf("MESSAGE: %s\n", resHealth.Message) + }) + }, + } + return command +} + +func NewResourceActionListCommand(cmdCtx commandContext) *cobra.Command { + var command = &cobra.Command{ + Use: "list-actions RESOURCE_YAML_PATH", + Short: "List available resource actions", + Long: "List 
actions available for given resource action using the lua scripts configured in the 'resource.customizations' field of 'argocd-cm' ConfigMap and outputs updated fields", + Example: ` +argocd admin settings resource-overrides action list /tmp/deploy.yaml --argocd-cm-path ./argocd-cm.yaml`, + Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + + if len(args) < 1 { + c.HelpFunc()(c, args) + os.Exit(1) + } + + executeResourceOverrideCommand(ctx, cmdCtx, args, func(res unstructured.Unstructured, override v1alpha1.ResourceOverride, overrides map[string]v1alpha1.ResourceOverride) { + gvk := res.GroupVersionKind() + if override.Actions == "" { + _, _ = fmt.Printf("Actions are not configured for '%s/%s'\n", gvk.Group, gvk.Kind) + return + } + + luaVM := lua.VM{ResourceOverrides: overrides} + discoveryScript, err := luaVM.GetResourceActionDiscovery(&res) + errors.CheckError(err) + + availableActions, err := luaVM.ExecuteResourceActionDiscovery(&res, discoveryScript) + errors.CheckError(err) + sort.Slice(availableActions, func(i, j int) bool { + return availableActions[i].Name < availableActions[j].Name + }) + + w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) + _, _ = fmt.Fprintf(w, "NAME\tDISABLED\n") + for _, action := range availableActions { + _, _ = fmt.Fprintf(w, "%s\t%s\n", action.Name, strconv.FormatBool(action.Disabled)) + } + _ = w.Flush() + }) + }, + } + return command +} + +func NewResourceActionRunCommand(cmdCtx commandContext) *cobra.Command { + var command = &cobra.Command{ + Use: "run-action RESOURCE_YAML_PATH ACTION", + Aliases: []string{"action"}, + Short: "Executes resource action", + Long: "Executes resource action using the lua script configured in the 'resource.customizations' field of 'argocd-cm' ConfigMap and outputs updated fields", + Example: ` +argocd admin settings resource-overrides action run /tmp/deploy.yaml restart --argocd-cm-path ./argocd-cm.yaml`, + Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + + if 
len(args) < 2 { + c.HelpFunc()(c, args) + os.Exit(1) + } + action := args[1] + + executeResourceOverrideCommand(ctx, cmdCtx, args, func(res unstructured.Unstructured, override v1alpha1.ResourceOverride, overrides map[string]v1alpha1.ResourceOverride) { + gvk := res.GroupVersionKind() + if override.Actions == "" { + _, _ = fmt.Printf("Actions are not configured for '%s/%s'\n", gvk.Group, gvk.Kind) + return + } + + luaVM := lua.VM{ResourceOverrides: overrides} + action, err := luaVM.GetResourceAction(&res, action) + errors.CheckError(err) + + modifiedRes, err := luaVM.ExecuteResourceAction(&res, action.ActionLua) + errors.CheckError(err) + + for _, impactedResource := range modifiedRes { + result := impactedResource.UnstructuredObj + switch impactedResource.K8SOperation { + // No default case since a not supported operation would have failed upon unmarshaling earlier + case lua.PatchOperation: + if reflect.DeepEqual(&res, modifiedRes) { + _, _ = fmt.Printf("No fields had been changed by action: \n%s\n", action.Name) + return + } + + _, _ = fmt.Printf("Following fields have been changed:\n\n") + _ = cli.PrintDiff(res.GetName(), &res, result) + case lua.CreateOperation: + yamlBytes, err := yaml.Marshal(impactedResource.UnstructuredObj) + errors.CheckError(err) + fmt.Println("Following resource was created:") + fmt.Println(bytes.NewBuffer(yamlBytes).String()) + } + } + + }) + }, + } + return command +} diff --git a/cmd/argocd/commands/admin/settings_rbac.go b/cmd/argocd/commands/admin/settings_rbac.go new file mode 100644 index 0000000000000..8d94feeaad466 --- /dev/null +++ b/cmd/argocd/commands/admin/settings_rbac.go @@ -0,0 +1,388 @@ +package admin + +import ( + "context" + "fmt" + "os" + "strings" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + "sigs.k8s.io/yaml" + + "github.com/argoproj/argo-cd/v2/common" + 
"github.com/argoproj/argo-cd/v2/server/rbacpolicy" + "github.com/argoproj/argo-cd/v2/util/assets" + "github.com/argoproj/argo-cd/v2/util/cli" + "github.com/argoproj/argo-cd/v2/util/rbac" +) + +// Provide a mapping of short-hand resource names to their RBAC counterparts +var resourceMap map[string]string = map[string]string{ + "account": rbacpolicy.ResourceAccounts, + "app": rbacpolicy.ResourceApplications, + "apps": rbacpolicy.ResourceApplications, + "application": rbacpolicy.ResourceApplications, + "applicationsets": rbacpolicy.ResourceApplicationSets, + "cert": rbacpolicy.ResourceCertificates, + "certs": rbacpolicy.ResourceCertificates, + "certificate": rbacpolicy.ResourceCertificates, + "cluster": rbacpolicy.ResourceClusters, + "gpgkey": rbacpolicy.ResourceGPGKeys, + "key": rbacpolicy.ResourceGPGKeys, + "log": rbacpolicy.ResourceLogs, + "logs": rbacpolicy.ResourceLogs, + "exec": rbacpolicy.ResourceExec, + "proj": rbacpolicy.ResourceProjects, + "projs": rbacpolicy.ResourceProjects, + "project": rbacpolicy.ResourceProjects, + "repo": rbacpolicy.ResourceRepositories, + "repos": rbacpolicy.ResourceRepositories, + "repository": rbacpolicy.ResourceRepositories, +} + +// List of allowed RBAC resources +var validRBACResources map[string]bool = map[string]bool{ + rbacpolicy.ResourceAccounts: true, + rbacpolicy.ResourceApplications: true, + rbacpolicy.ResourceApplicationSets: true, + rbacpolicy.ResourceCertificates: true, + rbacpolicy.ResourceClusters: true, + rbacpolicy.ResourceGPGKeys: true, + rbacpolicy.ResourceLogs: true, + rbacpolicy.ResourceExec: true, + rbacpolicy.ResourceProjects: true, + rbacpolicy.ResourceRepositories: true, +} + +// List of allowed RBAC actions +var validRBACActions map[string]bool = map[string]bool{ + rbacpolicy.ActionAction: true, + rbacpolicy.ActionCreate: true, + rbacpolicy.ActionDelete: true, + rbacpolicy.ActionGet: true, + rbacpolicy.ActionOverride: true, + rbacpolicy.ActionSync: true, + rbacpolicy.ActionUpdate: true, +} + +// 
NewRBACCommand is the command for 'rbac' +func NewRBACCommand() *cobra.Command { + var command = &cobra.Command{ + Use: "rbac", + Short: "Validate and test RBAC configuration", + Run: func(c *cobra.Command, args []string) { + c.HelpFunc()(c, args) + }, + } + command.AddCommand(NewRBACCanCommand()) + command.AddCommand(NewRBACValidateCommand()) + return command +} + +// NewRBACCanCommand is the command for 'rbac can' +func NewRBACCanCommand() *cobra.Command { + var ( + policyFile string + defaultRole string + useBuiltin bool + strict bool + quiet bool + subject string + action string + resource string + subResource string + clientConfig clientcmd.ClientConfig + ) + var command = &cobra.Command{ + Use: "can ROLE/SUBJECT ACTION RESOURCE [SUB-RESOURCE]", + Short: "Check RBAC permissions for a role or subject", + Long: ` +Check whether a given role or subject has appropriate RBAC permissions to do +something. +`, + Example: ` +# Check whether role some:role has permissions to create an application in the +# 'default' project, using a local policy.csv file +argocd admin settings rbac can some:role create application 'default/app' --policy-file policy.csv + +# Policy file can also be K8s config map with data keys like argocd-rbac-cm, +# i.e. 'policy.csv' and (optionally) 'policy.default' +argocd admin settings rbac can some:role create application 'default/app' --policy-file argocd-rbac-cm.yaml + +# If --policy-file is not given, the ConfigMap 'argocd-rbac-cm' from K8s is +# used.
You need to specify the argocd namespace, and make sure that your +# current Kubernetes context is pointing to the cluster Argo CD is running in +argocd admin settings rbac can some:role create application 'default/app' --namespace argocd + +# You can override a possibly configured default role +argocd admin settings rbac can someuser create application 'default/app' --default-role role:readonly + +`, + Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + + if len(args) < 3 || len(args) > 4 { + c.HelpFunc()(c, args) + os.Exit(1) + } + subject = args[0] + action = args[1] + resource = args[2] + if len(args) > 3 { + subResource = args[3] + } + + userPolicy := "" + builtinPolicy := "" + + var newDefaultRole string + + namespace, nsOverride, err := clientConfig.Namespace() + if err != nil { + log.Fatalf("could not create k8s client: %v", err) + } + + // Exactly one of --namespace or --policy-file must be given. + if (!nsOverride && policyFile == "") || (nsOverride && policyFile != "") { + c.HelpFunc()(c, args) + log.Fatalf("please provide exactly one of --policy-file or --namespace") + } + + restConfig, err := clientConfig.ClientConfig() + if err != nil { + log.Fatalf("could not create k8s client: %v", err) + } + realClientset, err := kubernetes.NewForConfig(restConfig) + if err != nil { + log.Fatalf("could not create k8s client: %v", err) + } + + userPolicy, newDefaultRole, matchMode := getPolicy(ctx, policyFile, realClientset, namespace) + + // Use built-in policy as augmentation if requested + if useBuiltin { + builtinPolicy = assets.BuiltinPolicyCSV + } + + // If no explicit default role was given, but we have one defined from + // a policy, use this to check for enforce. 
+ if newDefaultRole != "" && defaultRole == "" { + defaultRole = newDefaultRole + } + + res := checkPolicy(subject, action, resource, subResource, builtinPolicy, userPolicy, defaultRole, matchMode, strict) + if res { + if !quiet { + fmt.Println("Yes") + } + os.Exit(0) + } else { + if !quiet { + fmt.Println("No") + } + os.Exit(1) + } + }, + } + + clientConfig = cli.AddKubectlFlagsToCmd(command) + command.Flags().StringVar(&policyFile, "policy-file", "", "path to the policy file to use") + command.Flags().StringVar(&defaultRole, "default-role", "", "name of the default role to use") + command.Flags().BoolVar(&useBuiltin, "use-builtin-policy", true, "whether to also use builtin-policy") + command.Flags().BoolVar(&strict, "strict", true, "whether to perform strict check on action and resource names") + command.Flags().BoolVarP(&quiet, "quiet", "q", false, "quiet mode - do not print results to stdout") + return command +} + +// NewRBACValidateCommand returns a new rbac validate command +func NewRBACValidateCommand() *cobra.Command { + var ( + policyFile string + ) + + var command = &cobra.Command{ + Use: "validate --policy-file=POLICYFILE", + Short: "Validate RBAC policy", + Long: ` +Validates an RBAC policy for being syntactically correct. The policy must be +a local file, and in either CSV or K8s ConfigMap format. 
+`, + Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + + if policyFile == "" { + c.HelpFunc()(c, args) + log.Fatalf("Please specify policy to validate using --policy-file") + } + userPolicy, _, _ := getPolicy(ctx, policyFile, nil, "") + if userPolicy != "" { + if err := rbac.ValidatePolicy(userPolicy); err == nil { + fmt.Printf("Policy is valid.\n") + os.Exit(0) + } else { + fmt.Printf("Policy is invalid: %v\n", err) + os.Exit(1) + } + } + }, + } + + command.Flags().StringVar(&policyFile, "policy-file", "", "path to the policy file to use") + return command +} + +// Load user policy file if requested or use Kubernetes client to get the +// appropriate ConfigMap from the current context +func getPolicy(ctx context.Context, policyFile string, kubeClient kubernetes.Interface, namespace string) (userPolicy string, defaultRole string, matchMode string) { + var err error + if policyFile != "" { + // load from file + userPolicy, defaultRole, matchMode, err = getPolicyFromFile(policyFile) + if err != nil { + log.Fatalf("could not read policy file: %v", err) + } + } else { + cm, err := getPolicyConfigMap(ctx, kubeClient, namespace) + if err != nil { + log.Fatalf("could not get configmap: %v", err) + } + userPolicy, defaultRole, matchMode = getPolicyFromConfigMap(cm) + } + + return userPolicy, defaultRole, matchMode +} + +// getPolicyFromFile loads a RBAC policy from given path +func getPolicyFromFile(policyFile string) (string, string, string, error) { + var ( + userPolicy string + defaultRole string + matchMode string + ) + + upol, err := os.ReadFile(policyFile) + if err != nil { + log.Fatalf("error opening policy file: %v", err) + return "", "", "", err + } + + // Try to unmarshal the input file as ConfigMap first. If it succeeds, we + // assume config map input. 
Otherwise, we treat it as + var upolCM *corev1.ConfigMap + err = yaml.Unmarshal(upol, &upolCM) + if err != nil { + userPolicy = string(upol) + } else { + userPolicy, defaultRole, matchMode = getPolicyFromConfigMap(upolCM) + } + + return userPolicy, defaultRole, matchMode, nil +} + +// Retrieve policy information from a ConfigMap +func getPolicyFromConfigMap(cm *corev1.ConfigMap) (string, string, string) { + var ( + userPolicy string + defaultRole string + ok bool + ) + userPolicy, ok = cm.Data[rbac.ConfigMapPolicyCSVKey] + if !ok { + userPolicy = "" + } + defaultRole, ok = cm.Data[rbac.ConfigMapPolicyDefaultKey] + if !ok { + defaultRole = "" + } + + return userPolicy, defaultRole, cm.Data[rbac.ConfigMapMatchModeKey] +} + +// getPolicyConfigMap fetches the RBAC config map from K8s cluster +func getPolicyConfigMap(ctx context.Context, client kubernetes.Interface, namespace string) (*corev1.ConfigMap, error) { + cm, err := client.CoreV1().ConfigMaps(namespace).Get(ctx, common.ArgoCDRBACConfigMapName, v1.GetOptions{}) + if err != nil { + return nil, err + } + return cm, nil +} + +// checkPolicy checks whether given subject is allowed to execute specified +// action against specified resource +func checkPolicy(subject, action, resource, subResource, builtinPolicy, userPolicy, defaultRole, matchMode string, strict bool) bool { + enf := rbac.NewEnforcer(nil, "argocd", "argocd-rbac-cm", nil) + enf.SetDefaultRole(defaultRole) + enf.SetMatchMode(matchMode) + if builtinPolicy != "" { + if err := enf.SetBuiltinPolicy(builtinPolicy); err != nil { + log.Fatalf("could not set built-in policy: %v", err) + return false + } + } + if userPolicy != "" { + if err := rbac.ValidatePolicy(userPolicy); err != nil { + log.Fatalf("invalid user policy: %v", err) + return false + } + if err := enf.SetUserPolicy(userPolicy); err != nil { + log.Fatalf("could not set user policy: %v", err) + return false + } + } + + // User could have used a mutation of the resource name (i.e. 
'cert' for + // 'certificate') - let's resolve it to the valid resource. + realResource := resolveRBACResourceName(resource) + + // If in strict mode, validate that given RBAC resource and action are + // actually valid tokens. + if strict { + if !isValidRBACResource(realResource) { + log.Fatalf("error in RBAC request: '%s' is not a valid resource name", realResource) + } + if !isValidRBACAction(action) { + log.Fatalf("error in RBAC request: '%s' is not a valid action name", action) + } + } + + // Application resources have a special notation - for simplicity's sake, + // if user gives no sub-resource (or specifies simple '*'), we construct + // the required notation by setting subresource to '*/*'. + if realResource == rbacpolicy.ResourceApplications { + if subResource == "*" || subResource == "" { + subResource = "*/*" + } + } + + return enf.Enforce(subject, realResource, action, subResource) +} + +// resolveRBACResourceName resolves a user supplied value to a valid RBAC +// resource name. If no mapping is found, returns the value verbatim. 
+func resolveRBACResourceName(name string) string { + if res, ok := resourceMap[name]; ok { + return res + } else { + return name + } +} + +// isValidRBACAction checks whether a given action is a valid RBAC action +func isValidRBACAction(action string) bool { + if strings.HasPrefix(action, rbacpolicy.ActionAction+"/") { + return true + } + _, ok := validRBACActions[action] + return ok +} + +// isValidRBACResource checks whether a given resource is a valid RBAC resource +func isValidRBACResource(resource string) bool { + _, ok := validRBACResources[resource] + return ok +} diff --git a/cmd/argocd/commands/admin/settings_rbac_test.go b/cmd/argocd/commands/admin/settings_rbac_test.go new file mode 100644 index 0000000000000..a4b4b437e114c --- /dev/null +++ b/cmd/argocd/commands/admin/settings_rbac_test.go @@ -0,0 +1,202 @@ +package admin + +import ( + "context" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" + + "github.com/argoproj/argo-cd/v2/util/assets" +) + +func Test_isValidRBACAction(t *testing.T) { + for k := range validRBACActions { + t.Run(k, func(t *testing.T) { + ok := isValidRBACAction(k) + assert.True(t, ok) + }) + } + t.Run("invalid", func(t *testing.T) { + ok := isValidRBACAction("invalid") + assert.False(t, ok) + }) +} + +func Test_isValidRBACAction_ActionAction(t *testing.T) { + ok := isValidRBACAction("action/apps/Deployment/restart") + assert.True(t, ok) +} + +func Test_isValidRBACResource(t *testing.T) { + for k := range validRBACResources { + t.Run(k, func(t *testing.T) { + ok := isValidRBACResource(k) + assert.True(t, ok) + }) + } + t.Run("invalid", func(t *testing.T) { + ok := isValidRBACResource("invalid") + assert.False(t, ok) + }) +} + +func Test_PolicyFromCSV(t *testing.T) { + ctx := context.Background() + + uPol, dRole, matchMode := getPolicy(ctx, "testdata/rbac/policy.csv", nil, 
"") + require.NotEmpty(t, uPol) + require.Empty(t, dRole) + require.Empty(t, matchMode) +} + +func Test_PolicyFromYAML(t *testing.T) { + ctx := context.Background() + + uPol, dRole, matchMode := getPolicy(ctx, "testdata/rbac/argocd-rbac-cm.yaml", nil, "") + require.NotEmpty(t, uPol) + require.Equal(t, "role:unknown", dRole) + require.Empty(t, matchMode) +} + +func Test_PolicyFromK8s(t *testing.T) { + data, err := os.ReadFile("testdata/rbac/policy.csv") + ctx := context.Background() + + require.NoError(t, err) + kubeclientset := fake.NewSimpleClientset(&v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "argocd-rbac-cm", + Namespace: "argocd", + }, + Data: map[string]string{ + "policy.csv": string(data), + "policy.default": "role:unknown", + }, + }) + uPol, dRole, matchMode := getPolicy(ctx, "", kubeclientset, "argocd") + require.NotEmpty(t, uPol) + require.Equal(t, "role:unknown", dRole) + require.Equal(t, "", matchMode) + + t.Run("get applications", func(t *testing.T) { + ok := checkPolicy("role:user", "get", "applications", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true) + require.True(t, ok) + }) + t.Run("get clusters", func(t *testing.T) { + ok := checkPolicy("role:user", "get", "clusters", "*", assets.BuiltinPolicyCSV, uPol, dRole, "", true) + require.True(t, ok) + }) + t.Run("get certificates", func(t *testing.T) { + ok := checkPolicy("role:user", "get", "certificates", "*", assets.BuiltinPolicyCSV, uPol, dRole, "", true) + require.False(t, ok) + }) + t.Run("get certificates by default role", func(t *testing.T) { + ok := checkPolicy("role:user", "get", "certificates", "*", assets.BuiltinPolicyCSV, uPol, "role:readonly", "glob", true) + require.True(t, ok) + }) + t.Run("get certificates by default role without builtin policy", func(t *testing.T) { + ok := checkPolicy("role:user", "get", "certificates", "*", "", uPol, "role:readonly", "glob", true) + require.False(t, ok) + }) + t.Run("use regex match mode instead of glob", func(t *testing.T) { + ok 
:= checkPolicy("role:user", "get", "certificates", ".*", assets.BuiltinPolicyCSV, uPol, "role:readonly", "regex", true) + require.False(t, ok) + }) + t.Run("get logs", func(t *testing.T) { + ok := checkPolicy("role:test", "get", "logs", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true) + require.True(t, ok) + }) + t.Run("create exec", func(t *testing.T) { + ok := checkPolicy("role:test", "create", "exec", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true) + require.True(t, ok) + }) + t.Run("create applicationsets", func(t *testing.T) { + ok := checkPolicy("role:user", "create", "applicationsets", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true) + require.True(t, ok) + }) + t.Run("delete applicationsets", func(t *testing.T) { + ok := checkPolicy("role:user", "delete", "applicationsets", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true) + require.True(t, ok) + }) +} + +func Test_PolicyFromK8sUsingRegex(t *testing.T) { + ctx := context.Background() + + policy := ` +p, role:user, clusters, get, .+, allow +p, role:user, clusters, get, https://kubernetes.*, deny +p, role:user, applications, get, .*, allow +p, role:user, applications, create, .*/.*, allow +p, role:user, applicationsets, create, .*/.*, allow +p, role:user, applicationsets, delete, .*/.*, allow +p, role:user, logs, get, .*/.*, allow +p, role:user, exec, create, .*/.*, allow +` + + kubeclientset := fake.NewSimpleClientset(&v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "argocd-rbac-cm", + Namespace: "argocd", + }, + Data: map[string]string{ + "policy.csv": policy, + "policy.default": "role:unknown", + "policy.matchMode": "regex", + }, + }) + uPol, dRole, matchMode := getPolicy(ctx, "", kubeclientset, "argocd") + require.NotEmpty(t, uPol) + require.Equal(t, "role:unknown", dRole) + require.Equal(t, "regex", matchMode) + + builtInPolicy := ` +p, role:readonly, certificates, get, .*, allow +p, role:, certificates, get, .*, allow` + + t.Run("get applications", func(t *testing.T) { + 
ok := checkPolicy("role:user", "get", "applications", ".*/.*", builtInPolicy, uPol, dRole, "regex", true) + require.True(t, ok) + }) + t.Run("get clusters", func(t *testing.T) { + ok := checkPolicy("role:user", "get", "clusters", ".*", builtInPolicy, uPol, dRole, "regex", true) + require.True(t, ok) + }) + t.Run("get certificates", func(t *testing.T) { + ok := checkPolicy("role:user", "get", "certificates", ".*", builtInPolicy, uPol, dRole, "regex", true) + require.False(t, ok) + }) + t.Run("get certificates by default role", func(t *testing.T) { + ok := checkPolicy("role:user", "get", "certificates", ".*", builtInPolicy, uPol, "role:readonly", "regex", true) + require.True(t, ok) + }) + t.Run("get certificates by default role without builtin policy", func(t *testing.T) { + ok := checkPolicy("role:user", "get", "certificates", ".*", "", uPol, "role:readonly", "regex", true) + require.False(t, ok) + }) + t.Run("use glob match mode instead of regex", func(t *testing.T) { + ok := checkPolicy("role:user", "get", "certificates", ".+", builtInPolicy, uPol, dRole, "glob", true) + require.False(t, ok) + }) + t.Run("get logs via glob match mode", func(t *testing.T) { + ok := checkPolicy("role:user", "get", "logs", ".*/.*", builtInPolicy, uPol, dRole, "glob", true) + require.True(t, ok) + }) + t.Run("create exec", func(t *testing.T) { + ok := checkPolicy("role:user", "create", "exec", ".*/.*", builtInPolicy, uPol, dRole, "regex", true) + require.True(t, ok) + }) + t.Run("create applicationsets", func(t *testing.T) { + ok := checkPolicy("role:user", "create", "applicationsets", ".*/.*", builtInPolicy, uPol, dRole, "regex", true) + require.True(t, ok) + }) + t.Run("delete applicationsets", func(t *testing.T) { + ok := checkPolicy("role:user", "delete", "applicationsets", ".*/.*", builtInPolicy, uPol, dRole, "regex", true) + require.True(t, ok) + }) +} diff --git a/cmd/argocd-util/commands/settings_test.go b/cmd/argocd/commands/admin/settings_test.go similarity index 80% rename 
from cmd/argocd-util/commands/settings_test.go rename to cmd/argocd/commands/admin/settings_test.go index 3e1fd7bad4f0e..adb18c80ee84e 100644 --- a/cmd/argocd-util/commands/settings_test.go +++ b/cmd/argocd/commands/admin/settings_test.go @@ -1,11 +1,10 @@ -package commands +package admin import ( "bytes" "context" "fmt" "io" - "io/ioutil" "os" "testing" @@ -35,7 +34,7 @@ func captureStdout(callback func()) (string, error) { callback() utils.Close(w) - data, err := ioutil.ReadAll(r) + data, err := io.ReadAll(r) if err != nil { return "", err @@ -44,6 +43,8 @@ func captureStdout(callback func()) (string, error) { } func newSettingsManager(data map[string]string) *settings.SettingsManager { + ctx := context.Background() + clientset := fake.NewSimpleClientset(&v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Namespace: "default", @@ -63,7 +64,7 @@ func newSettingsManager(data map[string]string) *settings.SettingsManager { "server.secretkey": []byte("test"), }, }) - return settings.NewSettingsManager(context.Background(), clientset, "default") + return settings.NewSettingsManager(ctx, clientset, "default") } type fakeCmdContext struct { @@ -76,7 +77,7 @@ func newCmdContext(data map[string]string) *fakeCmdContext { return &fakeCmdContext{mgr: newSettingsManager(data)} } -func (ctx *fakeCmdContext) createSettingsManager() (*settings.SettingsManager, error) { +func (ctx *fakeCmdContext) createSettingsManager(context.Context) (*settings.SettingsManager, error) { return ctx.mgr, nil } @@ -88,6 +89,8 @@ type validatorTestCase struct { } func TestCreateSettingsManager(t *testing.T) { + ctx := context.Background() + f, closer, err := tempFile(`apiVersion: v1 kind: ConfigMap metadata: @@ -100,7 +103,7 @@ data: defer utils.Close(closer) opts := settingsOpts{argocdCMPath: f} - settingsManager, err := opts.createSettingsManager() + settingsManager, err := opts.createSettingsManager(ctx) if !assert.NoError(t, err) { return @@ -148,13 +151,6 @@ clientSecret: aaaabbbbccccddddeee`, }, 
containsSummary: "Dex is configured ('url' field is missing)", }, - "Plugins_ValidConfig": { - validator: "plugins", - data: map[string]string{ - "configManagementPlugins": `[{"name": "test1"}, {"name": "test2"}]`, - }, - containsSummary: "2 plugins", - }, "Kustomize_ModifiedOptions": { validator: "kustomize", containsSummary: "default options", @@ -230,8 +226,19 @@ spec: replicas: 0` ) +const ( + testCronJobYAML = `apiVersion: batch/v1 +kind: CronJob +metadata: + name: hello + namespace: test-ns + uid: "123" +spec: + schedule: "* * * * *"` +) + func tempFile(content string) (string, io.Closer, error) { - f, err := ioutil.TempFile("", "*.yaml") + f, err := os.CreateTemp("", "*.yaml") if err != nil { return "", nil, err } @@ -240,6 +247,11 @@ func tempFile(content string) (string, io.Closer, error) { _ = os.Remove(f.Name()) return "", nil, err } + defer func() { + if err = f.Close(); err != nil { + panic(err) + } + }() return f.Name(), utils.NewCloser(func() error { return os.Remove(f.Name()) }), nil @@ -334,6 +346,12 @@ func TestResourceOverrideAction(t *testing.T) { } defer utils.Close(closer) + cronJobFile, closer, err := tempFile(testCronJobYAML) + if !assert.NoError(t, err) { + return + } + defer utils.Close(closer) + t.Run("NoActions", func(t *testing.T) { cmd := NewResourceOverridesCommand(newCmdContext(map[string]string{ "resource.customizations": `apps/Deployment: {}`})) @@ -346,7 +364,7 @@ func TestResourceOverrideAction(t *testing.T) { assert.Contains(t, out, "Actions are not configured") }) - t.Run("ActionConfigured", func(t *testing.T) { + t.Run("OldStyleActionConfigured", func(t *testing.T) { cmd := NewResourceOverridesCommand(newCmdContext(map[string]string{ "resource.customizations": `apps/Deployment: actions: | @@ -375,9 +393,55 @@ func TestResourceOverrideAction(t *testing.T) { assert.NoError(t, err) }) assert.NoError(t, err) - assert.Contains(t, out, `NAME ENABLED + assert.Contains(t, out, `NAME DISABLED restart false resume false `) }) + + 
t.Run("NewStyleActionConfigured", func(t *testing.T) { + cmd := NewResourceOverridesCommand(newCmdContext(map[string]string{ + "resource.customizations": `batch/CronJob: + actions: | + discovery.lua: | + actions = {} + actions["create-a-job"] = {["disabled"] = false} + return actions + definitions: + - name: test + action.lua: | + job1 = {} + job1.apiVersion = "batch/v1" + job1.kind = "Job" + job1.metadata = {} + job1.metadata.name = "hello-1" + job1.metadata.namespace = "obj.metadata.namespace" + impactedResource1 = {} + impactedResource1.operation = "create" + impactedResource1.resource = job1 + result = {} + result[1] = impactedResource1 + return result +`})) + out, err := captureStdout(func() { + cmd.SetArgs([]string{"run-action", cronJobFile, "test"}) + err := cmd.Execute() + assert.NoError(t, err) + }) + assert.NoError(t, err) + assert.Contains(t, out, "resource was created:") + assert.Contains(t, out, "hello-1") + + out, err = captureStdout(func() { + cmd.SetArgs([]string{"list-actions", cronJobFile}) + err := cmd.Execute() + assert.NoError(t, err) + }) + + assert.NoError(t, err) + assert.Contains(t, out, "NAME") + assert.Contains(t, out, "DISABLED") + assert.Contains(t, out, "create-a-job") + assert.Contains(t, out, "false") + }) } diff --git a/cmd/argocd-util/commands/testdata/rbac/argocd-rbac-cm.yaml b/cmd/argocd/commands/admin/testdata/rbac/argocd-rbac-cm.yaml similarity index 80% rename from cmd/argocd-util/commands/testdata/rbac/argocd-rbac-cm.yaml rename to cmd/argocd/commands/admin/testdata/rbac/argocd-rbac-cm.yaml index a8a38af789f1f..bf947fb8b7110 100644 --- a/cmd/argocd-util/commands/testdata/rbac/argocd-rbac-cm.yaml +++ b/cmd/argocd/commands/admin/testdata/rbac/argocd-rbac-cm.yaml @@ -8,6 +8,9 @@ data: p, role:user, applications, create, */*, allow p, role:user, applications, delete, *, allow p, role:user, applications, delete, */guestbook, deny + p, role:user, applicationsets, create, */*, allow + p, role:user, applicationsets, delete, */*, 
allow + p, role:user, logs, get, */*, allow g, test, role:user policy.default: role:unknown kind: ConfigMap diff --git a/cmd/argocd/commands/admin/testdata/rbac/policy.csv b/cmd/argocd/commands/admin/testdata/rbac/policy.csv new file mode 100644 index 0000000000000..b18d0904f5f60 --- /dev/null +++ b/cmd/argocd/commands/admin/testdata/rbac/policy.csv @@ -0,0 +1,13 @@ +p, role:user, clusters, get, *, allow +p, role:user, clusters, get, https://kubernetes*, deny +p, role:user, projects, get, *, allow +p, role:user, applications, get, *, allow +p, role:user, applications, create, */*, allow +p, role:user, applications, delete, *, allow +p, role:user, applications, delete, */guestbook, deny +p, role:user, applicationsets, create, */*, allow +p, role:user, applicationsets, delete, */*, allow +p, role:test, certificates, get, *, allow +p, role:test, logs, get, */*, allow +p, role:test, exec, create, */*, allow +g, test, role:user diff --git a/cmd/argocd-util/commands/testdata/test_clusterrole.yaml b/cmd/argocd/commands/admin/testdata/test_clusterrole.yaml similarity index 100% rename from cmd/argocd-util/commands/testdata/test_clusterrole.yaml rename to cmd/argocd/commands/admin/testdata/test_clusterrole.yaml diff --git a/cmd/argocd/commands/app.go b/cmd/argocd/commands/app.go index 5f10afcfc68e9..948c35c83e45d 100644 --- a/cmd/argocd/commands/app.go +++ b/cmd/argocd/commands/app.go @@ -14,64 +14,59 @@ import ( "time" "unicode/utf8" - "github.com/argoproj/gitops-engine/pkg/diff" "github.com/argoproj/gitops-engine/pkg/health" + "github.com/argoproj/gitops-engine/pkg/sync/common" "github.com/argoproj/gitops-engine/pkg/sync/hook" "github.com/argoproj/gitops-engine/pkg/sync/ignore" "github.com/argoproj/gitops-engine/pkg/utils/kube" - "github.com/ghodss/yaml" + grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry" "github.com/mattn/go-isatty" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + 
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" "k8s.io/utils/pointer" + "sigs.k8s.io/yaml" + "github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless" cmdutil "github.com/argoproj/argo-cd/v2/cmd/util" - "github.com/argoproj/argo-cd/v2/common" "github.com/argoproj/argo-cd/v2/controller" - "github.com/argoproj/argo-cd/v2/pkg/apiclient" argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient" "github.com/argoproj/argo-cd/v2/pkg/apiclient/application" - applicationpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/application" clusterpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/cluster" projectpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/project" "github.com/argoproj/argo-cd/v2/pkg/apiclient/settings" - settingspkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/settings" argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" repoapiclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient" "github.com/argoproj/argo-cd/v2/reposerver/repository" "github.com/argoproj/argo-cd/v2/util/argo" + argodiff "github.com/argoproj/argo-cd/v2/util/argo/diff" "github.com/argoproj/argo-cd/v2/util/cli" "github.com/argoproj/argo-cd/v2/util/errors" "github.com/argoproj/argo-cd/v2/util/git" + "github.com/argoproj/argo-cd/v2/util/grpc" argoio "github.com/argoproj/argo-cd/v2/util/io" - argokube "github.com/argoproj/argo-cd/v2/util/kube" + "github.com/argoproj/argo-cd/v2/util/manifeststream" "github.com/argoproj/argo-cd/v2/util/templates" "github.com/argoproj/argo-cd/v2/util/text/label" ) -var ( - appExample = templates.Examples(` - # List all the applications. 
- argocd app list - - # Get the details of a application - argocd app get my-app - - # Set an override parameter - argocd app set my-app -p image.tag=v1.0.1`) -) - // NewApplicationCommand returns a new instance of an `argocd app` command func NewApplicationCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { var command = &cobra.Command{ - Use: "app", - Short: "Manage applications", - Example: appExample, + Use: "app", + Short: "Manage applications", + Example: ` # List all the applications. + argocd app list + + # Get the details of a application + argocd app get my-app + + # Set an override parameter + argocd app set my-app -p image.tag=v1.0.1`, Run: func(c *cobra.Command, args []string) { c.HelpFunc()(c, args) os.Exit(1) @@ -93,79 +88,121 @@ func NewApplicationCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman command.AddCommand(NewApplicationEditCommand(clientOpts)) command.AddCommand(NewApplicationPatchCommand(clientOpts)) command.AddCommand(NewApplicationPatchResourceCommand(clientOpts)) + command.AddCommand(NewApplicationDeleteResourceCommand(clientOpts)) command.AddCommand(NewApplicationResourceActionsCommand(clientOpts)) command.AddCommand(NewApplicationListResourcesCommand(clientOpts)) command.AddCommand(NewApplicationLogsCommand(clientOpts)) return command } +type watchOpts struct { + sync bool + health bool + operation bool + suspended bool + degraded bool +} + // NewApplicationCreateCommand returns a new instance of an `argocd app create` command func NewApplicationCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { var ( - appOpts cmdutil.AppOptions - fileURL string - appName string - upsert bool - labels []string + appOpts cmdutil.AppOptions + fileURL string + appName string + upsert bool + labels []string + annotations []string + setFinalizer bool + appNamespace string ) var command = &cobra.Command{ Use: "create APPNAME", Short: "Create an application", - Example: ` - # Create a directory app - argocd app 
create guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --directory-recurse + Example: ` # Create a directory app + argocd app create guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --directory-recurse - # Create a Jsonnet app - argocd app create jsonnet-guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path jsonnet-guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --jsonnet-ext-str replicas=2 + # Create a Jsonnet app + argocd app create jsonnet-guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path jsonnet-guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --jsonnet-ext-str replicas=2 - # Create a Helm app - argocd app create helm-guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path helm-guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --helm-set replicaCount=2 + # Create a Helm app + argocd app create helm-guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path helm-guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --helm-set replicaCount=2 - # Create a Helm app from a Helm repo - argocd app create nginx-ingress --repo https://charts.helm.sh/stable --helm-chart nginx-ingress --revision 1.24.3 --dest-namespace default --dest-server https://kubernetes.default.svc + # Create a Helm app from a Helm repo + argocd app create nginx-ingress --repo https://charts.helm.sh/stable --helm-chart nginx-ingress --revision 1.24.3 --dest-namespace default --dest-server https://kubernetes.default.svc - # Create a Kustomize app - argocd app create kustomize-guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path kustomize-guestbook --dest-namespace 
default --dest-server https://kubernetes.default.svc --kustomize-image gcr.io/heptio-images/ks-guestbook-demo:0.1 + # Create a Kustomize app + argocd app create kustomize-guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path kustomize-guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --kustomize-image gcr.io/heptio-images/ks-guestbook-demo:0.1 - # Create a app using a custom tool: - argocd app create ksane --repo https://github.com/argoproj/argocd-example-apps.git --path plugins/kasane --dest-namespace default --dest-server https://kubernetes.default.svc --config-management-plugin kasane -`, + # Create a app using a custom tool: + argocd app create kasane --repo https://github.com/argoproj/argocd-example-apps.git --path plugins/kasane --dest-namespace default --dest-server https://kubernetes.default.svc --config-management-plugin kasane`, Run: func(c *cobra.Command, args []string) { - argocdClient := argocdclient.NewClientOrDie(clientOpts) + ctx := c.Context() + + argocdClient := headless.NewClientOrDie(clientOpts, c) - app, err := cmdutil.ConstructApp(fileURL, appName, labels, args, appOpts, c.Flags()) + apps, err := cmdutil.ConstructApps(fileURL, appName, labels, annotations, args, appOpts, c.Flags()) errors.CheckError(err) - if app.Name == "" { - c.HelpFunc()(c, args) - os.Exit(1) - } + for _, app := range apps { + if app.Name == "" { + c.HelpFunc()(c, args) + os.Exit(1) + } + if appNamespace != "" { + app.Namespace = appNamespace + } + if setFinalizer { + app.Finalizers = append(app.Finalizers, "resources-finalizer.argocd.argoproj.io") + } + conn, appIf := argocdClient.NewApplicationClientOrDie() + defer argoio.Close(conn) + appCreateRequest := application.ApplicationCreateRequest{ + Application: app, + Upsert: &upsert, + Validate: &appOpts.Validate, + } - conn, appIf := argocdClient.NewApplicationClientOrDie() - defer argoio.Close(conn) - appCreateRequest := applicationpkg.ApplicationCreateRequest{ - 
Application: *app, - Upsert: &upsert, - Validate: &appOpts.Validate, + // Get app before creating to see if it is being updated or no change + existing, err := appIf.Get(ctx, &application.ApplicationQuery{Name: &app.Name}) + unwrappedError := grpc.UnwrapGRPCStatus(err).Code() + // As part of the fix for CVE-2022-41354, the API will return Permission Denied when an app does not exist. + if unwrappedError != codes.NotFound && unwrappedError != codes.PermissionDenied { + errors.CheckError(err) + } + + created, err := appIf.Create(ctx, &appCreateRequest) + errors.CheckError(err) + + var action string + if existing == nil { + action = "created" + } else if !hasAppChanged(existing, created, upsert) { + action = "unchanged" + } else { + action = "updated" + } + + fmt.Printf("application '%s' %s\n", created.ObjectMeta.Name, action) } - created, err := appIf.Create(context.Background(), &appCreateRequest) - errors.CheckError(err) - fmt.Printf("application '%s' created\n", created.ObjectMeta.Name) }, } command.Flags().StringVar(&appName, "name", "", "A name for the app, ignored if a file is set (DEPRECATED)") command.Flags().BoolVar(&upsert, "upsert", false, "Allows to override application with the same name even if supplied application spec is different from existing spec") command.Flags().StringVarP(&fileURL, "file", "f", "", "Filename or URL to Kubernetes manifests for the app") command.Flags().StringArrayVarP(&labels, "label", "l", []string{}, "Labels to apply to the app") + command.Flags().StringArrayVarP(&annotations, "annotations", "", []string{}, "Set metadata annotations (e.g. example=value)") + command.Flags().BoolVar(&setFinalizer, "set-finalizer", false, "Sets deletion finalizer on the application, application resources will be cascaded on deletion") // Only complete files with appropriate extension. 
err := command.Flags().SetAnnotation("file", cobra.BashCompFilenameExt, []string{"json", "yaml", "yml"}) if err != nil { log.Fatal(err) } + command.Flags().StringVarP(&appNamespace, "app-namespace", "N", "", "Namespace where the application will be created in") cmdutil.AddAppFlags(command, &appOpts) return command } +// getInfos converts a list of string key=value pairs to a list of Info objects. func getInfos(infos []string) []*argoappv1.Info { mapInfos, err := label.Parse(infos) errors.CheckError(err) @@ -192,6 +229,83 @@ func getRefreshType(refresh bool, hardRefresh bool) *string { return nil } +func hasAppChanged(appReq, appRes *argoappv1.Application, upsert bool) bool { + // upsert==false, no change occurred from create command + if !upsert { + return false + } + + // If no project, assume default project + if appReq.Spec.Project == "" { + appReq.Spec.Project = "default" + } + // Server will return nils for empty labels, annotations, finalizers + if len(appReq.Labels) == 0 { + appReq.Labels = nil + } + if len(appReq.Annotations) == 0 { + appReq.Annotations = nil + } + if len(appReq.Finalizers) == 0 { + appReq.Finalizers = nil + } + + if reflect.DeepEqual(appRes.Spec, appReq.Spec) && + reflect.DeepEqual(appRes.Labels, appReq.Labels) && + reflect.DeepEqual(appRes.ObjectMeta.Annotations, appReq.Annotations) && + reflect.DeepEqual(appRes.Finalizers, appReq.Finalizers) { + return false + } + + return true +} + +func parentChildDetails(appIf application.ApplicationServiceClient, ctx context.Context, appName string, appNs string) (map[string]argoappv1.ResourceNode, map[string][]string, map[string]struct{}) { + + mapUidToNode := make(map[string]argoappv1.ResourceNode) + mapParentToChild := make(map[string][]string) + parentNode := make(map[string]struct{}) + + resourceTree, err := appIf.ResourceTree(ctx, &application.ResourcesQuery{Name: &appName, AppNamespace: &appNs, ApplicationName: &appName}) + errors.CheckError(err) + + for _, node := range resourceTree.Nodes { + 
mapUidToNode[node.UID] = node + + if len(node.ParentRefs) > 0 { + _, ok := mapParentToChild[node.ParentRefs[0].UID] + if !ok { + var temp []string + mapParentToChild[node.ParentRefs[0].UID] = temp + } + mapParentToChild[node.ParentRefs[0].UID] = append(mapParentToChild[node.ParentRefs[0].UID], node.UID) + } else { + parentNode[node.UID] = struct{}{} + } + } + return mapUidToNode, mapParentToChild, parentNode +} + +func printHeader(acdClient argocdclient.Client, app *argoappv1.Application, ctx context.Context, windows *argoappv1.SyncWindows, showOperation bool, showParams bool) { + aURL := appURL(ctx, acdClient, app.Name) + printAppSummaryTable(app, aURL, windows) + + if len(app.Status.Conditions) > 0 { + fmt.Println() + w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) + printAppConditions(w, app) + _ = w.Flush() + fmt.Println() + } + if showOperation && app.Status.OperationState != nil { + fmt.Println() + printOperationResult(app.Status.OperationState) + } + if showParams { + printParams(app) + } +} + // NewApplicationGetCommand returns a new instance of an `argocd app get` command func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { var ( @@ -205,20 +319,28 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com Use: "get APPNAME", Short: "Get application details", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() if len(args) == 0 { c.HelpFunc()(c, args) os.Exit(1) } - acdClient := argocdclient.NewClientOrDie(clientOpts) + acdClient := headless.NewClientOrDie(clientOpts, c) conn, appIf := acdClient.NewApplicationClientOrDie() defer argoio.Close(conn) - appName := args[0] - app, err := appIf.Get(context.Background(), &applicationpkg.ApplicationQuery{Name: &appName, Refresh: getRefreshType(refresh, hardRefresh)}) + + appName, appNs := argo.ParseFromQualifiedName(args[0], "") + + app, err := appIf.Get(ctx, &application.ApplicationQuery{ + Name: &appName, + Refresh: 
getRefreshType(refresh, hardRefresh), + AppNamespace: &appNs, + }) + errors.CheckError(err) - pConn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + pConn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer argoio.Close(pConn) - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: app.Spec.Project}) + proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: app.Spec.Project}) errors.CheckError(err) windows := proj.Spec.SyncWindows.Matches(app) @@ -228,35 +350,33 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com err := PrintResource(app, output) errors.CheckError(err) case "wide", "": - aURL := appURL(acdClient, app.Name) - printAppSummaryTable(app, aURL, windows) - - if len(app.Status.Conditions) > 0 { + printHeader(acdClient, app, ctx, windows, showOperation, showParams) + if len(app.Status.Resources) > 0 { fmt.Println() w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) - printAppConditions(w, app) + printAppResources(w, app) _ = w.Flush() - fmt.Println() } - if showOperation && app.Status.OperationState != nil { + case "tree": + printHeader(acdClient, app, ctx, windows, showOperation, showParams) + mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState := resourceParentChild(ctx, acdClient, appName, appNs) + if len(mapUidToNode) > 0 { fmt.Println() - printOperationResult(app.Status.OperationState) - } - if showParams { - printParams(app) + printTreeView(mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState) } - if len(app.Status.Resources) > 0 { + case "tree=detailed": + printHeader(acdClient, app, ctx, windows, showOperation, showParams) + mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState := resourceParentChild(ctx, acdClient, appName, appNs) + if len(mapUidToNode) > 0 { fmt.Println() - w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) - printAppResources(w, app) - _ = w.Flush() + 
printTreeViewDetailed(mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState) } default: errors.CheckError(fmt.Errorf("unknown output format: %s", output)) } }, } - command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide") + command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|tree") command.Flags().BoolVar(&showOperation, "show-operation", false, "Show application operation") command.Flags().BoolVar(&showParams, "show-params", false, "Show application parameters and overrides") command.Flags().BoolVar(&refresh, "refresh", false, "Refresh application data when retrieving") @@ -276,34 +396,79 @@ func NewApplicationLogsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co sinceSeconds int64 untilTime string filter string + container string + previous bool ) var command = &cobra.Command{ Use: "logs APPNAME", Short: "Get logs of application pods", + Example: templates.Examples(` + # Get logs of pods associated with the application "my-app" + argocd app logs my-app + + # Get logs of pods associated with the application "my-app" in a specific resource group + argocd app logs my-app --group my-group + + # Get logs of pods associated with the application "my-app" in a specific resource kind + argocd app logs my-app --kind my-kind + + # Get logs of pods associated with the application "my-app" in a specific namespace + argocd app logs my-app --namespace my-namespace + + # Get logs of pods associated with the application "my-app" for a specific resource name + argocd app logs my-app --name my-resource + + # Stream logs in real-time for the application "my-app" + argocd app logs my-app -f + + # Get the last N lines of logs for the application "my-app" + argocd app logs my-app --tail 100 + + # Get logs since a specified number of seconds ago + argocd app logs my-app --since-seconds 3600 + + # Get logs until a specified time (format: "2023-10-10T15:30:00Z") + argocd app 
logs my-app --until-time "2023-10-10T15:30:00Z" + + # Filter logs to show only those containing a specific string + argocd app logs my-app --filter "error" + + # Get logs for a specific container within the pods + argocd app logs my-app -c my-container + + # Get previously terminated container logs + argocd app logs my-app -p + `), + Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) == 0 { c.HelpFunc()(c, args) os.Exit(1) } - acdClient := argocdclient.NewClientOrDie(clientOpts) + acdClient := headless.NewClientOrDie(clientOpts, c) conn, appIf := acdClient.NewApplicationClientOrDie() defer argoio.Close(conn) - appName := args[0] + appName, appNs := argo.ParseFromQualifiedName(args[0], "") retry := true for retry { retry = false - stream, err := appIf.PodLogs(context.Background(), &applicationpkg.ApplicationPodLogsQuery{ + stream, err := appIf.PodLogs(ctx, &application.ApplicationPodLogsQuery{ Name: &appName, Group: &group, - Namespace: namespace, + Namespace: pointer.String(namespace), Kind: &kind, ResourceName: &resourceName, - Follow: follow, - TailLines: tail, - SinceSeconds: sinceSeconds, + Follow: pointer.Bool(follow), + TailLines: pointer.Int64(tail), + SinceSeconds: pointer.Int64(sinceSeconds), UntilTime: &untilTime, Filter: &filter, + Container: pointer.String(container), + Previous: pointer.Bool(previous), + AppNamespace: &appNs, }) if err != nil { log.Fatalf("failed to get pod logs: %v", err) @@ -325,8 +490,8 @@ func NewApplicationLogsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co } log.Fatalf("stream read failed: %v", err) } - if !msg.Last { - fmt.Println(msg.Content) + if !msg.GetLast() { + fmt.Println(msg.GetContent()) } else { return } @@ -339,25 +504,28 @@ func NewApplicationLogsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co command.Flags().StringVar(&kind, "kind", "", "Resource kind") command.Flags().StringVar(&namespace, "namespace", "", "Resource namespace") 
command.Flags().StringVar(&resourceName, "name", "", "Resource name") - command.Flags().BoolVar(&follow, "follow", false, "Specify if the logs should be streamed") + command.Flags().BoolVarP(&follow, "follow", "f", false, "Specify if the logs should be streamed") command.Flags().Int64Var(&tail, "tail", 0, "The number of lines from the end of the logs to show") command.Flags().Int64Var(&sinceSeconds, "since-seconds", 0, "A relative time in seconds before the current time from which to show logs") command.Flags().StringVar(&untilTime, "until-time", "", "Show logs until this time") command.Flags().StringVar(&filter, "filter", "", "Show logs contain this string") + command.Flags().StringVarP(&container, "container", "c", "", "Optional container name") + command.Flags().BoolVarP(&previous, "previous", "p", false, "Specify if the previously terminated container logs should be returned") return command } func printAppSummaryTable(app *argoappv1.Application, appURL string, windows *argoappv1.SyncWindows) { - fmt.Printf(printOpFmtStr, "Name:", app.Name) + source := app.Spec.GetSource() + fmt.Printf(printOpFmtStr, "Name:", app.QualifiedName()) fmt.Printf(printOpFmtStr, "Project:", app.Spec.GetProject()) - fmt.Printf(printOpFmtStr, "Server:", app.Spec.Destination.Server) + fmt.Printf(printOpFmtStr, "Server:", getServer(app)) fmt.Printf(printOpFmtStr, "Namespace:", app.Spec.Destination.Namespace) fmt.Printf(printOpFmtStr, "URL:", appURL) - fmt.Printf(printOpFmtStr, "Repo:", app.Spec.Source.RepoURL) - fmt.Printf(printOpFmtStr, "Target:", app.Spec.Source.TargetRevision) - fmt.Printf(printOpFmtStr, "Path:", app.Spec.Source.Path) - printAppSourceDetails(&app.Spec.Source) + fmt.Printf(printOpFmtStr, "Repo:", source.RepoURL) + fmt.Printf(printOpFmtStr, "Target:", source.TargetRevision) + fmt.Printf(printOpFmtStr, "Path:", source.Path) + printAppSourceDetails(&source) var wds []string var status string var allow, deny, inactiveAllows bool @@ -382,7 +550,6 @@ func 
printAppSummaryTable(app *argoappv1.Application, appURL string, windows *ar status = "Manual Allowed" } else { status = "Sync Denied" - } } else { status = "Sync Allowed" @@ -412,11 +579,11 @@ func printAppSummaryTable(app *argoappv1.Application, appURL string, windows *ar syncStatusStr := string(app.Status.Sync.Status) switch app.Status.Sync.Status { case argoappv1.SyncStatusCodeSynced: - syncStatusStr += fmt.Sprintf(" to %s", app.Spec.Source.TargetRevision) + syncStatusStr += fmt.Sprintf(" to %s", app.Spec.GetSource().TargetRevision) case argoappv1.SyncStatusCodeOutOfSync: - syncStatusStr += fmt.Sprintf(" from %s", app.Spec.Source.TargetRevision) + syncStatusStr += fmt.Sprintf(" from %s", app.Spec.GetSource().TargetRevision) } - if !git.IsCommitSHA(app.Spec.Source.TargetRevision) && !git.IsTruncatedCommitSHA(app.Spec.Source.TargetRevision) && len(app.Status.Sync.Revision) > 7 { + if !git.IsCommitSHA(app.Spec.GetSource().TargetRevision) && !git.IsTruncatedCommitSHA(app.Spec.GetSource().TargetRevision) && len(app.Status.Sync.Revision) > 7 { syncStatusStr += fmt.Sprintf(" (%s)", app.Status.Sync.Revision[0:7]) } fmt.Printf(printOpFmtStr, "Sync Status:", syncStatusStr) @@ -428,9 +595,6 @@ func printAppSummaryTable(app *argoappv1.Application, appURL string, windows *ar } func printAppSourceDetails(appSrc *argoappv1.ApplicationSource) { - if appSrc.Ksonnet != nil && appSrc.Ksonnet.Environment != "" { - fmt.Printf(printOpFmtStr, "Environment:", appSrc.Ksonnet.Environment) - } if appSrc.Helm != nil && len(appSrc.Helm.ValueFiles) > 0 { fmt.Printf(printOpFmtStr, "Helm Values:", strings.Join(appSrc.Helm.ValueFiles, ",")) } @@ -463,10 +627,10 @@ func appURLDefault(acdClient argocdclient.Client, appName string) string { } // appURL returns the URL of an application -func appURL(acdClient argocdclient.Client, appName string) string { +func appURL(ctx context.Context, acdClient argocdclient.Client, appName string) string { conn, settingsIf := acdClient.NewSettingsClientOrDie() 
defer argoio.Close(conn) - argoSettings, err := settingsIf.Get(context.Background(), &settingspkg.SettingsQuery{}) + argoSettings, err := settingsIf.Get(ctx, &settings.SettingsQuery{}) errors.CheckError(err) if argoSettings.URL != "" { @@ -488,25 +652,33 @@ func truncateString(str string, num int) string { // printParams prints parameters and overrides func printParams(app *argoappv1.Application) { + if app.Spec.GetSource().Helm != nil { + printHelmParams(app.Spec.GetSource().Helm) + } +} + +func printHelmParams(helm *argoappv1.ApplicationSourceHelm) { paramLenLimit := 80 fmt.Println() w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) - if app.Spec.Source.Ksonnet != nil { - fmt.Println() - _, _ = fmt.Fprintf(w, "COMPONENT\tNAME\tVALUE\n") - for _, p := range app.Spec.Source.Ksonnet.Parameters { - _, _ = fmt.Fprintf(w, "%s\t%s\t%s\n", p.Component, p.Name, truncateString(p.Value, paramLenLimit)) - } - } else if app.Spec.Source.Helm != nil { + if helm != nil { fmt.Println() _, _ = fmt.Fprintf(w, "NAME\tVALUE\n") - for _, p := range app.Spec.Source.Helm.Parameters { + for _, p := range helm.Parameters { _, _ = fmt.Fprintf(w, "%s\t%s\n", p.Name, truncateString(p.Value, paramLenLimit)) } } _ = w.Flush() } +func getServer(app *argoappv1.Application) string { + if app.Spec.Destination.Server == "" { + return app.Spec.Destination.Name + } + + return app.Spec.Destination.Server +} + // NewApplicationSetCommand returns a new instance of an `argocd app set` command func NewApplicationSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { var ( @@ -515,29 +687,50 @@ func NewApplicationSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com var command = &cobra.Command{ Use: "set APPNAME", Short: "Set application parameters", + Example: templates.Examples(` + # Set application parameters for the application "my-app" + argocd app set my-app --parameter key1=value1 --parameter key2=value2 + + # Set and validate application parameters for "my-app" + argocd app 
set my-app --parameter key1=value1 --parameter key2=value2 --validate + + # Set and override application parameters with JSON or YAML file + argocd app set my-app --from-file path/to/parameters.json + + # Set and override application parameters with a parameter file + argocd app set my-app --parameter-file path/to/parameter-file.yaml + + # Set application parameters and specify the namespace + argocd app set my-app --parameter key1=value1 --parameter key2=value2 --namespace my-namespace + `), + Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 1 { c.HelpFunc()(c, args) os.Exit(1) } - ctx := context.Background() - appName := args[0] - argocdClient := argocdclient.NewClientOrDie(clientOpts) + appName, appNs := argo.ParseFromQualifiedName(args[0], "") + argocdClient := headless.NewClientOrDie(clientOpts, c) conn, appIf := argocdClient.NewApplicationClientOrDie() defer argoio.Close(conn) - app, err := appIf.Get(ctx, &applicationpkg.ApplicationQuery{Name: &appName}) + app, err := appIf.Get(ctx, &application.ApplicationQuery{Name: &appName, AppNamespace: &appNs}) errors.CheckError(err) + visited := cmdutil.SetAppSpecOptions(c.Flags(), &app.Spec, &appOpts) if visited == 0 { log.Error("Please set at least one option to update") c.HelpFunc()(c, args) os.Exit(1) } + setParameterOverrides(app, appOpts.Parameters) - _, err = appIf.UpdateSpec(ctx, &applicationpkg.ApplicationUpdateSpecRequest{ - Name: &app.Name, - Spec: app.Spec, - Validate: &appOpts.Validate, + _, err = appIf.UpdateSpec(ctx, &application.ApplicationUpdateSpecRequest{ + Name: &app.Name, + Spec: &app.Spec, + Validate: &appOpts.Validate, + AppNamespace: &appNs, }) errors.CheckError(err) }, @@ -546,19 +739,37 @@ func NewApplicationSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com return command } +// unsetOpts describe what to unset in an Application. 
+type unsetOpts struct { + namePrefix bool + nameSuffix bool + kustomizeVersion bool + kustomizeNamespace bool + kustomizeImages []string + kustomizeReplicas []string + parameters []string + valuesFiles []string + valuesLiteral bool + ignoreMissingValueFiles bool + pluginEnvs []string + passCredentials bool +} + +// IsZero returns true when the Application options for kustomize are considered empty +func (o *unsetOpts) KustomizeIsZero() bool { + return o == nil || + !o.namePrefix && + !o.nameSuffix && + !o.kustomizeVersion && + !o.kustomizeNamespace && + len(o.kustomizeImages) == 0 && + len(o.kustomizeReplicas) == 0 +} + // NewApplicationUnsetCommand returns a new instance of an `argocd app unset` command func NewApplicationUnsetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { - var ( - parameters []string - valuesLiteral bool - valuesFiles []string - nameSuffix bool - namePrefix bool - kustomizeVersion bool - kustomizeImages []string - pluginEnvs []string - appOpts cmdutil.AppOptions - ) + appOpts := cmdutil.AppOptions{} + opts := unsetOpts{} var command = &cobra.Command{ Use: "unset APPNAME parameters", Short: "Unset application parameters", @@ -572,132 +783,155 @@ func NewApplicationUnsetCommand(clientOpts *argocdclient.ClientOptions) *cobra.C argocd app unset my-app -p COMPONENT=PARAM`, Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 1 { c.HelpFunc()(c, args) os.Exit(1) } - appName := args[0] - conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie() + appName, appNs := argo.ParseFromQualifiedName(args[0], "") + conn, appIf := headless.NewClientOrDie(clientOpts, c).NewApplicationClientOrDie() defer argoio.Close(conn) - app, err := appIf.Get(context.Background(), &applicationpkg.ApplicationQuery{Name: &appName}) + app, err := appIf.Get(ctx, &application.ApplicationQuery{Name: &appName, AppNamespace: &appNs}) errors.CheckError(err) - updated := false - if app.Spec.Source.Kustomize != 
nil { - if namePrefix { - updated = true - app.Spec.Source.Kustomize.NamePrefix = "" - } + source := app.Spec.GetSource() + updated, nothingToUnset := unset(&source, opts) + if nothingToUnset { + c.HelpFunc()(c, args) + os.Exit(1) + } + if !updated { + return + } - if nameSuffix { - updated = true - app.Spec.Source.Kustomize.NameSuffix = "" - } + cmdutil.SetAppSpecOptions(c.Flags(), &app.Spec, &appOpts) + _, err = appIf.UpdateSpec(ctx, &application.ApplicationUpdateSpecRequest{ + Name: &app.Name, + Spec: &app.Spec, + Validate: &appOpts.Validate, + AppNamespace: &appNs, + }) + errors.CheckError(err) + }, + } + command.Flags().StringArrayVarP(&opts.parameters, "parameter", "p", []string{}, "Unset a parameter override (e.g. -p guestbook=image)") + command.Flags().StringArrayVar(&opts.valuesFiles, "values", []string{}, "Unset one or more Helm values files") + command.Flags().BoolVar(&opts.valuesLiteral, "values-literal", false, "Unset literal Helm values block") + command.Flags().BoolVar(&opts.ignoreMissingValueFiles, "ignore-missing-value-files", false, "Unset the helm ignore-missing-value-files option (revert to false)") + command.Flags().BoolVar(&opts.nameSuffix, "namesuffix", false, "Kustomize namesuffix") + command.Flags().BoolVar(&opts.namePrefix, "nameprefix", false, "Kustomize nameprefix") + command.Flags().BoolVar(&opts.kustomizeVersion, "kustomize-version", false, "Kustomize version") + command.Flags().BoolVar(&opts.kustomizeNamespace, "kustomize-namespace", false, "Kustomize namespace") + command.Flags().StringArrayVar(&opts.kustomizeImages, "kustomize-image", []string{}, "Kustomize images name (e.g. --kustomize-image node --kustomize-image mysql)") + command.Flags().StringArrayVar(&opts.kustomizeReplicas, "kustomize-replica", []string{}, "Kustomize replicas name (e.g. 
--kustomize-replica my-deployment --kustomize-replica my-statefulset)") + command.Flags().StringArrayVar(&opts.pluginEnvs, "plugin-env", []string{}, "Unset plugin env variables (e.g --plugin-env name)") + command.Flags().BoolVar(&opts.passCredentials, "pass-credentials", false, "Unset passCredentials") + return command +} + +func unset(source *argoappv1.ApplicationSource, opts unsetOpts) (updated bool, nothingToUnset bool) { + if source.Kustomize != nil { + if opts.KustomizeIsZero() { + return false, true + } + + if opts.namePrefix && source.Kustomize.NamePrefix != "" { + updated = true + source.Kustomize.NamePrefix = "" + } + + if opts.nameSuffix && source.Kustomize.NameSuffix != "" { + updated = true + source.Kustomize.NameSuffix = "" + } + + if opts.kustomizeVersion && source.Kustomize.Version != "" { + updated = true + source.Kustomize.Version = "" + } + + if opts.kustomizeNamespace && source.Kustomize.Namespace != "" { + updated = true + source.Kustomize.Namespace = "" + } - if kustomizeVersion { + for _, kustomizeImage := range opts.kustomizeImages { + for i, item := range source.Kustomize.Images { + if argoappv1.KustomizeImage(kustomizeImage).Match(item) { updated = true - app.Spec.Source.Kustomize.Version = "" + //remove i + a := source.Kustomize.Images + copy(a[i:], a[i+1:]) // Shift a[i+1:] left one index. + a[len(a)-1] = "" // Erase last element (write zero value). + a = a[:len(a)-1] // Truncate slice. + source.Kustomize.Images = a } + } + } - for _, kustomizeImage := range kustomizeImages { - for i, item := range app.Spec.Source.Kustomize.Images { - if argoappv1.KustomizeImage(kustomizeImage).Match(item) { - updated = true - //remove i - a := app.Spec.Source.Kustomize.Images - copy(a[i:], a[i+1:]) // Shift a[i+1:] left one index. - a[len(a)-1] = "" // Erase last element (write zero value). - a = a[:len(a)-1] // Truncate slice. 
- app.Spec.Source.Kustomize.Images = a - } - } + for _, kustomizeReplica := range opts.kustomizeReplicas { + kustomizeReplicas := source.Kustomize.Replicas + for i, item := range kustomizeReplicas { + if kustomizeReplica == item.Name { + source.Kustomize.Replicas = append(kustomizeReplicas[0:i], kustomizeReplicas[i+1:]...) + updated = true + break } } - if app.Spec.Source.Ksonnet != nil { - if len(parameters) == 0 && len(valuesFiles) == 0 { - c.HelpFunc()(c, args) - os.Exit(1) - } - for _, paramStr := range parameters { - parts := strings.SplitN(paramStr, "=", 2) - if len(parts) != 2 { - log.Fatalf("Expected parameter of the form: component=param. Received: %s", paramStr) - } - overrides := app.Spec.Source.Ksonnet.Parameters - for i, override := range overrides { - if override.Component == parts[0] && override.Name == parts[1] { - app.Spec.Source.Ksonnet.Parameters = append(overrides[0:i], overrides[i+1:]...) - updated = true - break - } - } + } + } + if source.Helm != nil { + if len(opts.parameters) == 0 && len(opts.valuesFiles) == 0 && !opts.valuesLiteral && !opts.ignoreMissingValueFiles && !opts.passCredentials { + return false, true + } + for _, paramStr := range opts.parameters { + helmParams := source.Helm.Parameters + for i, p := range helmParams { + if p.Name == paramStr { + source.Helm.Parameters = append(helmParams[0:i], helmParams[i+1:]...) + updated = true + break } } - if app.Spec.Source.Helm != nil { - if len(parameters) == 0 && len(valuesFiles) == 0 && !valuesLiteral { - c.HelpFunc()(c, args) - os.Exit(1) - } - for _, paramStr := range parameters { - helmParams := app.Spec.Source.Helm.Parameters - for i, p := range helmParams { - if p.Name == paramStr { - app.Spec.Source.Helm.Parameters = append(helmParams[0:i], helmParams[i+1:]...) 
- updated = true - break - } - } - } - if valuesLiteral { - app.Spec.Source.Helm.Values = "" + } + if opts.valuesLiteral && !source.Helm.ValuesIsEmpty() { + err := source.Helm.SetValuesString("") + if err == nil { + updated = true + } + } + for _, valuesFile := range opts.valuesFiles { + specValueFiles := source.Helm.ValueFiles + for i, vf := range specValueFiles { + if vf == valuesFile { + source.Helm.ValueFiles = append(specValueFiles[0:i], specValueFiles[i+1:]...) updated = true - } - for _, valuesFile := range valuesFiles { - specValueFiles := app.Spec.Source.Helm.ValueFiles - for i, vf := range specValueFiles { - if vf == valuesFile { - app.Spec.Source.Helm.ValueFiles = append(specValueFiles[0:i], specValueFiles[i+1:]...) - updated = true - break - } - } + break } } - - if app.Spec.Source.Plugin != nil { - if len(pluginEnvs) == 0 { - c.HelpFunc()(c, args) - os.Exit(1) - } - for _, env := range pluginEnvs { - err = app.Spec.Source.Plugin.RemoveEnvEntry(env) - errors.CheckError(err) - } + } + if opts.ignoreMissingValueFiles && source.Helm.IgnoreMissingValueFiles { + source.Helm.IgnoreMissingValueFiles = false + updated = true + } + if opts.passCredentials && source.Helm.PassCredentials { + source.Helm.PassCredentials = false + updated = true + } + } + if source.Plugin != nil { + if len(opts.pluginEnvs) == 0 { + return false, true + } + for _, env := range opts.pluginEnvs { + err := source.Plugin.RemoveEnvEntry(env) + if err == nil { updated = true } - - if !updated { - return - } - - cmdutil.SetAppSpecOptions(c.Flags(), &app.Spec, &appOpts) - _, err = appIf.UpdateSpec(context.Background(), &applicationpkg.ApplicationUpdateSpecRequest{ - Name: &app.Name, - Spec: app.Spec, - Validate: &appOpts.Validate, - }) - errors.CheckError(err) - }, + } } - command.Flags().StringArrayVarP(¶meters, "parameter", "p", []string{}, "Unset a parameter override (e.g. 
-p guestbook=image)") - command.Flags().StringArrayVar(&valuesFiles, "values", []string{}, "Unset one or more Helm values files") - command.Flags().BoolVar(&valuesLiteral, "values-literal", false, "Unset literal Helm values block") - command.Flags().BoolVar(&nameSuffix, "namesuffix", false, "Kustomize namesuffix") - command.Flags().BoolVar(&namePrefix, "nameprefix", false, "Kustomize nameprefix") - command.Flags().BoolVar(&kustomizeVersion, "kustomize-version", false, "Kustomize version") - command.Flags().StringArrayVar(&kustomizeImages, "kustomize-image", []string{}, "Kustomize images name (e.g. --kustomize-image node --kustomize-image mysql)") - command.Flags().StringArrayVar(&pluginEnvs, "plugin-env", []string{}, "Unset plugin env variables (e.g --plugin-env name)") - return command + return updated, false } // targetObjects deserializes the list of target states into unstructured objects @@ -713,22 +947,9 @@ func targetObjects(resources []*argoappv1.ResourceDiff) ([]*unstructured.Unstruc return objs, nil } -// liveObjects deserializes the list of live states into unstructured objects -func liveObjects(resources []*argoappv1.ResourceDiff) ([]*unstructured.Unstructured, error) { - objs := make([]*unstructured.Unstructured, len(resources)) - for i, resState := range resources { - obj, err := resState.LiveObject() - if err != nil { - return nil, err - } - objs[i] = obj - } - return objs, nil -} - -func getLocalObjects(app *argoappv1.Application, local, localRepoRoot, appLabelKey, kubeVersion string, kustomizeOptions *argoappv1.KustomizeOptions, - configManagementPlugins []*argoappv1.ConfigManagementPlugin) []*unstructured.Unstructured { - manifestStrings := getLocalObjectsString(app, local, localRepoRoot, appLabelKey, kubeVersion, kustomizeOptions, configManagementPlugins) +func getLocalObjects(ctx context.Context, app *argoappv1.Application, proj *argoappv1.AppProject, local, localRepoRoot, appLabelKey, kubeVersion string, apiVersions []string, kustomizeOptions 
*argoappv1.KustomizeOptions, + trackingMethod string) []*unstructured.Unstructured { + manifestStrings := getLocalObjectsString(ctx, app, proj, local, localRepoRoot, appLabelKey, kubeVersion, apiVersions, kustomizeOptions, trackingMethod) objs := make([]*unstructured.Unstructured, len(manifestStrings)) for i := range manifestStrings { obj := unstructured.Unstructured{} @@ -739,19 +960,22 @@ func getLocalObjects(app *argoappv1.Application, local, localRepoRoot, appLabelK return objs } -func getLocalObjectsString(app *argoappv1.Application, local, localRepoRoot, appLabelKey, kubeVersion string, kustomizeOptions *argoappv1.KustomizeOptions, - configManagementPlugins []*argoappv1.ConfigManagementPlugin) []string { - - res, err := repository.GenerateManifests(local, localRepoRoot, app.Spec.Source.TargetRevision, &repoapiclient.ManifestRequest{ - Repo: &argoappv1.Repository{Repo: app.Spec.Source.RepoURL}, - AppLabelKey: appLabelKey, - AppName: app.Name, - Namespace: app.Spec.Destination.Namespace, - ApplicationSource: &app.Spec.Source, - KustomizeOptions: kustomizeOptions, - KubeVersion: kubeVersion, - Plugins: configManagementPlugins, - }, true) +func getLocalObjectsString(ctx context.Context, app *argoappv1.Application, proj *argoappv1.AppProject, local, localRepoRoot, appLabelKey, kubeVersion string, apiVersions []string, kustomizeOptions *argoappv1.KustomizeOptions, + trackingMethod string) []string { + source := app.Spec.GetSource() + res, err := repository.GenerateManifests(ctx, local, localRepoRoot, source.TargetRevision, &repoapiclient.ManifestRequest{ + Repo: &argoappv1.Repository{Repo: source.RepoURL}, + AppLabelKey: appLabelKey, + AppName: app.Name, + Namespace: app.Spec.Destination.Namespace, + ApplicationSource: &source, + KustomizeOptions: kustomizeOptions, + KubeVersion: kubeVersion, + ApiVersions: apiVersions, + TrackingMethod: trackingMethod, + ProjectName: proj.Name, + ProjectSourceRepos: proj.Spec.SourceRepos, + }, true, &git.NoopCredsStore{}, 
resource.MustParse("0"), nil) errors.CheckError(err) return res.Manifests @@ -803,6 +1027,8 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co local string revision string localRepoRoot string + serverSideGenerate bool + localIncludes []string ) shortDesc := "Perform a diff against the target and live state." var command = &cobra.Command{ @@ -810,116 +1036,180 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co Short: shortDesc, Long: shortDesc + "\nUses 'diff' to render the difference. KUBECTL_EXTERNAL_DIFF environment variable can be used to select your own diff tool.\nReturns the following exit codes: 2 on general errors, 1 when a diff is found, and 0 when no diff is found", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 1 { c.HelpFunc()(c, args) os.Exit(2) } - - clientset := argocdclient.NewClientOrDie(clientOpts) + clientset := headless.NewClientOrDie(clientOpts, c) conn, appIf := clientset.NewApplicationClientOrDie() defer argoio.Close(conn) - appName := args[0] - app, err := appIf.Get(context.Background(), &applicationpkg.ApplicationQuery{Name: &appName, Refresh: getRefreshType(refresh, hardRefresh)}) - errors.CheckError(err) - resources, err := appIf.ManagedResources(context.Background(), &applicationpkg.ResourcesQuery{ApplicationName: &appName}) - errors.CheckError(err) - liveObjs, err := liveObjects(resources.Items) + appName, appNs := argo.ParseFromQualifiedName(args[0], "") + app, err := appIf.Get(ctx, &application.ApplicationQuery{ + Name: &appName, + Refresh: getRefreshType(refresh, hardRefresh), + AppNamespace: &appNs, + }) errors.CheckError(err) - items := make([]objKeyLiveTarget, 0) + resources, err := appIf.ManagedResources(ctx, &application.ResourcesQuery{ApplicationName: &appName, AppNamespace: &appNs}) + errors.CheckError(err) conn, settingsIf := clientset.NewSettingsClientOrDie() defer argoio.Close(conn) - argoSettings, err := 
settingsIf.Get(context.Background(), &settingspkg.SettingsQuery{}) + argoSettings, err := settingsIf.Get(ctx, &settings.SettingsQuery{}) errors.CheckError(err) - - if local != "" { - conn, clusterIf := clientset.NewClusterClientOrDie() - defer argoio.Close(conn) - cluster, err := clusterIf.Get(context.Background(), &clusterpkg.ClusterQuery{Name: app.Spec.Destination.Name, Server: app.Spec.Destination.Server}) - errors.CheckError(err) - localObjs := groupObjsByKey(getLocalObjects(app, local, localRepoRoot, argoSettings.AppLabelKey, cluster.ServerVersion, argoSettings.KustomizeOptions, argoSettings.ConfigManagementPlugins), liveObjs, app.Spec.Destination.Namespace, includeResourceHook) - items = groupObjsForDiff(resources, localObjs, items, argoSettings, appName) - } else if revision != "" { - var unstructureds []*unstructured.Unstructured - q := applicationpkg.ApplicationManifestQuery{ - Name: &appName, - Revision: revision, + diffOption := &DifferenceOption{} + if revision != "" { + q := application.ApplicationManifestQuery{ + Name: &appName, + Revision: &revision, + AppNamespace: &appNs, } - res, err := appIf.GetManifests(context.Background(), &q) + res, err := appIf.GetManifests(ctx, &q) errors.CheckError(err) - for _, mfst := range res.Manifests { - obj, err := argoappv1.UnmarshalToUnstructured(mfst) + diffOption.res = res + diffOption.revision = revision + } else if local != "" { + if serverSideGenerate { + client, err := appIf.GetManifestsWithFiles(ctx, grpc_retry.Disable()) errors.CheckError(err) - unstructureds = append(unstructureds, obj) - } - groupedObjs := groupObjsByKey(unstructureds, liveObjs, app.Spec.Destination.Namespace, includeResourceHook) - items = groupObjsForDiff(resources, groupedObjs, items, argoSettings, appName) - } else { - for i := range resources.Items { - res := resources.Items[i] - var live = &unstructured.Unstructured{} - err := json.Unmarshal([]byte(res.NormalizedLiveState), &live) + + err = 
manifeststream.SendApplicationManifestQueryWithFiles(ctx, client, appName, appNs, local, localIncludes) errors.CheckError(err) - var target = &unstructured.Unstructured{} - err = json.Unmarshal([]byte(res.TargetState), &target) + res, err := client.CloseAndRecv() errors.CheckError(err) - items = append(items, objKeyLiveTarget{kube.NewResourceKey(res.Group, res.Kind, res.Namespace, res.Name), live, target}) + diffOption.serversideRes = res + } else { + fmt.Fprintf(os.Stderr, "Warning: local diff without --server-side-generate is deprecated and does not work with plugins. Server-side generation will be the default in v2.7.") + conn, clusterIf := clientset.NewClusterClientOrDie() + defer argoio.Close(conn) + cluster, err := clusterIf.Get(ctx, &clusterpkg.ClusterQuery{Name: app.Spec.Destination.Name, Server: app.Spec.Destination.Server}) + errors.CheckError(err) + diffOption.local = local + diffOption.localRepoRoot = localRepoRoot + diffOption.cluster = cluster } } + proj := getProject(c, clientOpts, ctx, app.Spec.Project) + foundDiffs := findandPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption) + if foundDiffs && exitCode { + os.Exit(1) + } + }, + } + command.Flags().BoolVar(&refresh, "refresh", false, "Refresh application data when retrieving") + command.Flags().BoolVar(&hardRefresh, "hard-refresh", false, "Refresh application data as well as target manifests cache") + command.Flags().BoolVar(&exitCode, "exit-code", true, "Return non-zero exit code when there is a diff") + command.Flags().BoolVar(&includeResourceHook, "include-resource-hook", false, "Display the diff of resource hooks. Used together with --local or --revision") + command.Flags().StringVar(&local, "local", "", "Compare live app to a local manifests") + command.Flags().StringVar(&revision, "revision", "", "Compare live app to a particular revision") + command.Flags().StringVar(&localRepoRoot, "local-repo-root", "/", "Path to the repository root. 
Used together with --local allows setting the repository root") + command.Flags().BoolVar(&serverSideGenerate, "server-side-generate", false, "Used with --local, this will send your manifests to the server for diffing") + command.Flags().StringArrayVar(&localIncludes, "local-include", []string{"*.yaml", "*.yml", "*.json"}, "Used with --server-side-generate, specify patterns of filenames to send. Matching is based on filename and not path.") + return command +} - foundDiffs := false - for _, item := range items { - if item.target != nil && hook.IsHook(item.target) && !includeResourceHook || item.live != nil && hook.IsHook(item.live) && !includeResourceHook { - continue - } - overrides := make(map[string]argoappv1.ResourceOverride) - for k := range argoSettings.ResourceOverrides { - val := argoSettings.ResourceOverrides[k] - overrides[k] = *val - } - normalizer, err := argo.NewDiffNormalizer(app.Spec.IgnoreDifferences, overrides) - errors.CheckError(err) +// DifferenceOption struct to store diff options +type DifferenceOption struct { + local string + localRepoRoot string + revision string + cluster *argoappv1.Cluster + res *repoapiclient.ManifestResponse + serversideRes *repoapiclient.ManifestResponse +} - diffRes, err := diff.Diff(item.target, item.live, diff.WithNormalizer(normalizer)) - errors.CheckError(err) +// findandPrintDiff ... 
Prints difference between application current state and state stored in git or locally, returns boolean as true if difference is found else returns false +func findandPrintDiff(ctx context.Context, app *argoappv1.Application, proj *argoappv1.AppProject, resources *application.ManagedResourcesResponse, argoSettings *settings.Settings, diffOptions *DifferenceOption) bool { + var foundDiffs bool + liveObjs, err := cmdutil.LiveObjects(resources.Items) + errors.CheckError(err) + items := make([]objKeyLiveTarget, 0) + if diffOptions.local != "" { + localObjs := groupObjsByKey(getLocalObjects(ctx, app, proj, diffOptions.local, diffOptions.localRepoRoot, argoSettings.AppLabelKey, diffOptions.cluster.Info.ServerVersion, diffOptions.cluster.Info.APIVersions, argoSettings.KustomizeOptions, argoSettings.TrackingMethod), liveObjs, app.Spec.Destination.Namespace) + items = groupObjsForDiff(resources, localObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace), app.Spec.Destination.Namespace) + } else if diffOptions.revision != "" { + var unstructureds []*unstructured.Unstructured + for _, mfst := range diffOptions.res.Manifests { + obj, err := argoappv1.UnmarshalToUnstructured(mfst) + errors.CheckError(err) + unstructureds = append(unstructureds, obj) + } + groupedObjs := groupObjsByKey(unstructureds, liveObjs, app.Spec.Destination.Namespace) + items = groupObjsForDiff(resources, groupedObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace), app.Spec.Destination.Namespace) + } else if diffOptions.serversideRes != nil { + var unstructureds []*unstructured.Unstructured + for _, mfst := range diffOptions.serversideRes.Manifests { + obj, err := argoappv1.UnmarshalToUnstructured(mfst) + errors.CheckError(err) + unstructureds = append(unstructureds, obj) + } + groupedObjs := groupObjsByKey(unstructureds, liveObjs, app.Spec.Destination.Namespace) + items = groupObjsForDiff(resources, groupedObjs, items, argoSettings, 
app.InstanceName(argoSettings.ControllerNamespace), app.Spec.Destination.Namespace) + } else { + for i := range resources.Items { + res := resources.Items[i] + var live = &unstructured.Unstructured{} + err := json.Unmarshal([]byte(res.NormalizedLiveState), &live) + errors.CheckError(err) - if diffRes.Modified || item.target == nil || item.live == nil { - fmt.Printf("===== %s/%s %s/%s ======\n", item.key.Group, item.key.Kind, item.key.Namespace, item.key.Name) - var live *unstructured.Unstructured - var target *unstructured.Unstructured - if item.target != nil && item.live != nil { - target = &unstructured.Unstructured{} - live = item.live - err = json.Unmarshal(diffRes.PredictedLive, target) - errors.CheckError(err) - } else { - live = item.live - target = item.target - } + var target = &unstructured.Unstructured{} + err = json.Unmarshal([]byte(res.TargetState), &target) + errors.CheckError(err) + + items = append(items, objKeyLiveTarget{kube.NewResourceKey(res.Group, res.Kind, res.Namespace, res.Name), live, target}) + } + } + + for _, item := range items { + if item.target != nil && hook.IsHook(item.target) || item.live != nil && hook.IsHook(item.live) { + continue + } + overrides := make(map[string]argoappv1.ResourceOverride) + for k := range argoSettings.ResourceOverrides { + val := argoSettings.ResourceOverrides[k] + overrides[k] = *val + } + + // TODO remove hardcoded IgnoreAggregatedRoles and retrieve the + // compareOptions in the protobuf + ignoreAggregatedRoles := false + diffConfig, err := argodiff.NewDiffConfigBuilder(). + WithDiffSettings(app.Spec.IgnoreDifferences, overrides, ignoreAggregatedRoles). + WithTracking(argoSettings.AppLabelKey, argoSettings.TrackingMethod). + WithNoCache(). 
+ Build() + errors.CheckError(err) + diffRes, err := argodiff.StateDiff(item.live, item.target, diffConfig) + errors.CheckError(err) - foundDiffs = true - _ = cli.PrintDiff(item.key.Name, live, target) - } + if diffRes.Modified || item.target == nil || item.live == nil { + fmt.Printf("\n===== %s/%s %s/%s ======\n", item.key.Group, item.key.Kind, item.key.Namespace, item.key.Name) + var live *unstructured.Unstructured + var target *unstructured.Unstructured + if item.target != nil && item.live != nil { + target = &unstructured.Unstructured{} + live = item.live + err = json.Unmarshal(diffRes.PredictedLive, target) + errors.CheckError(err) + } else { + live = item.live + target = item.target } - if foundDiffs && exitCode { - os.Exit(1) + if !foundDiffs { + foundDiffs = true } - - }, + _ = cli.PrintDiff(item.key.Name, live, target) + } } - command.Flags().BoolVar(&refresh, "refresh", false, "Refresh application data when retrieving") - command.Flags().BoolVar(&hardRefresh, "hard-refresh", false, "Refresh application data as well as target manifests cache") - command.Flags().BoolVar(&exitCode, "exit-code", true, "Return non-zero exit code when there is a diff") - command.Flags().BoolVar(&includeResourceHook, "include-resource-hook", false, "Display the diff of resource hooks. Used together with --local or --revision") - command.Flags().StringVar(&local, "local", "", "Compare live app to a local manifests") - command.Flags().StringVar(&revision, "revision", "", "Compare live app to a particular revision") - command.Flags().StringVar(&localRepoRoot, "local-repo-root", "/", "Path to the repository root. 
Used together with --local allows setting the repository root") - return command + return foundDiffs } -func groupObjsForDiff(resources *application.ManagedResourcesResponse, objs map[kube.ResourceKey]*unstructured.Unstructured, items []objKeyLiveTarget, argoSettings *settings.Settings, appName string) []objKeyLiveTarget { +func groupObjsForDiff(resources *application.ManagedResourcesResponse, objs map[kube.ResourceKey]*unstructured.Unstructured, items []objKeyLiveTarget, argoSettings *settings.Settings, appName, namespace string) []objKeyLiveTarget { + resourceTracking := argo.NewResourceTracking() for _, res := range resources.Items { var live = &unstructured.Unstructured{} err := json.Unmarshal([]byte(res.NormalizedLiveState), &live) @@ -933,7 +1223,7 @@ func groupObjsForDiff(resources *application.ManagedResourcesResponse, objs map[ } if local, ok := objs[key]; ok || live != nil { if local != nil && !kube.IsCRD(local) { - err = argokube.SetAppInstanceLabel(local, argoSettings.AppLabelKey, appName) + err = resourceTracking.SetAppInstance(local, argoSettings.AppLabelKey, appName, namespace, argoappv1.TrackingMethod(argoSettings.GetTrackingMethod())) errors.CheckError(err) } @@ -958,16 +1248,31 @@ func NewApplicationDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra. 
cascade bool noPrompt bool propagationPolicy string + selector string ) var command = &cobra.Command{ Use: "delete APPNAME", Short: "Delete an application", + Example: ` # Delete an app + argocd app delete my-app + + # Delete multiple apps + argocd app delete my-app other-app + + # Delete apps by label + argocd app delete -l app.kubernetes.io/instance=my-app + argocd app delete -l app.kubernetes.io/instance!=my-app + argocd app delete -l app.kubernetes.io/instance + argocd app delete -l '!app.kubernetes.io/instance' + argocd app delete -l 'app.kubernetes.io/instance notin (my-app,other-app)'`, Run: func(c *cobra.Command, args []string) { - if len(args) == 0 { + ctx := c.Context() + + if len(args) == 0 && selector == "" { c.HelpFunc()(c, args) os.Exit(1) } - conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie() + conn, appIf := headless.NewClientOrDie(clientOpts, c).NewApplicationClientOrDie() defer argoio.Close(conn) var isTerminal bool = isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd()) var isConfirmAll bool = false @@ -976,9 +1281,19 @@ func NewApplicationDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra. if promptFlag.Changed && promptFlag.Value.String() == "true" { noPrompt = true } - for _, appName := range args { - appDeleteReq := applicationpkg.ApplicationDeleteRequest{ - Name: &appName, + + appNames, err := getAppNamesBySelector(ctx, appIf, selector) + errors.CheckError(err) + + if len(appNames) == 0 { + appNames = args + } + + for _, appFullName := range appNames { + appName, appNs := argo.ParseFromQualifiedName(appFullName, "") + appDeleteReq := application.ApplicationDeleteRequest{ + Name: &appName, + AppNamespace: &appNs, } if c.Flag("cascade").Changed { appDeleteReq.Cascade = &cascade @@ -987,18 +1302,13 @@ func NewApplicationDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra. 
appDeleteReq.PropagationPolicy = &propagationPolicy } if cascade && isTerminal && !noPrompt { - var confirmAnswer string = "n" var lowercaseAnswer string if numOfApps == 1 { - fmt.Println("Are you sure you want to delete '" + appName + "' and all its resources? [y/n]") - fmt.Scan(&confirmAnswer) - lowercaseAnswer = strings.ToLower(confirmAnswer) + lowercaseAnswer = cli.AskToProceedS("Are you sure you want to delete '" + appFullName + "' and all its resources? [y/n] ") } else { if !isConfirmAll { - fmt.Println("Are you sure you want to delete '" + appName + "' and all its resources? [y/n/A] where 'A' is to delete all specified apps and their resources without prompting") - fmt.Scan(&confirmAnswer) - lowercaseAnswer = strings.ToLower(confirmAnswer) - if lowercaseAnswer == "a" || lowercaseAnswer == "all" { + lowercaseAnswer = cli.AskToProceedS("Are you sure you want to delete '" + appFullName + "' and all its resources? [y/n/A] where 'A' is to delete all specified apps and their resources without prompting ") + if lowercaseAnswer == "a" { lowercaseAnswer = "y" isConfirmAll = true } @@ -1006,14 +1316,15 @@ func NewApplicationDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra. lowercaseAnswer = "y" } } - if lowercaseAnswer == "y" || lowercaseAnswer == "yes" { - _, err := appIf.Delete(context.Background(), &appDeleteReq) + if lowercaseAnswer == "y" { + _, err := appIf.Delete(ctx, &appDeleteReq) errors.CheckError(err) + fmt.Printf("application '%s' deleted\n", appFullName) } else { - fmt.Println("The command to delete '" + appName + "' was cancelled.") + fmt.Println("The command to delete '" + appFullName + "' was cancelled.") } } else { - _, err := appIf.Delete(context.Background(), &appDeleteReq) + _, err := appIf.Delete(ctx, &appDeleteReq) errors.CheckError(err) } } @@ -1022,13 +1333,14 @@ func NewApplicationDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra. 
command.Flags().BoolVar(&cascade, "cascade", true, "Perform a cascaded deletion of all application resources") command.Flags().StringVarP(&propagationPolicy, "propagation-policy", "p", "foreground", "Specify propagation policy for deletion of application's resources. One of: foreground|background") command.Flags().BoolVarP(&noPrompt, "yes", "y", false, "Turn off prompting to confirm cascaded deletion of application resources") + command.Flags().StringVarP(&selector, "selector", "l", "", "Delete all apps with matching label. Supports '=', '==', '!=', in, notin, exists & not exists. Matching apps must satisfy all of the specified label constraints.") return command } // Print simple list of application names func printApplicationNames(apps []argoappv1.Application) { for _, app := range apps { - fmt.Println(app.Name) + fmt.Println(app.QualifiedName()) } } @@ -1046,8 +1358,8 @@ func printApplicationTable(apps []argoappv1.Application, output *string) { _, _ = fmt.Fprintf(w, fmtStr, headers...) for _, app := range apps { vals := []interface{}{ - app.Name, - app.Spec.Destination.Server, + app.QualifiedName(), + getServer(&app), app.Spec.Destination.Namespace, app.Spec.GetProject(), app.Status.Sync.Status, @@ -1056,7 +1368,7 @@ func printApplicationTable(apps []argoappv1.Application, output *string) { formatConditionsSummary(app), } if *output == "wide" { - vals = append(vals, app.Spec.Source.RepoURL, app.Spec.Source.Path, app.Spec.Source.TargetRevision) + vals = append(vals, app.Spec.GetSource().RepoURL, app.Spec.GetSource().Path, app.Spec.GetSource().TargetRevision) } _, _ = fmt.Fprintf(w, fmtStr, vals...) 
} @@ -1066,10 +1378,12 @@ func printApplicationTable(apps []argoappv1.Application, output *string) { // NewApplicationListCommand returns a new instance of an `argocd app list` command func NewApplicationListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { var ( - output string - selector string - projects []string - repo string + output string + selector string + projects []string + repo string + appNamespace string + cluster string ) var command = &cobra.Command{ Use: "list", @@ -1078,19 +1392,34 @@ func NewApplicationListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co argocd app list # List apps by label, in this example we listing apps that are children of another app (aka app-of-apps) - argocd app list -l app.kubernetes.io/instance=my-app`, + argocd app list -l app.kubernetes.io/instance=my-app + argocd app list -l app.kubernetes.io/instance!=my-app + argocd app list -l app.kubernetes.io/instance + argocd app list -l '!app.kubernetes.io/instance' + argocd app list -l 'app.kubernetes.io/instance notin (my-app,other-app)'`, Run: func(c *cobra.Command, args []string) { - conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie() + ctx := c.Context() + + conn, appIf := headless.NewClientOrDie(clientOpts, c).NewApplicationClientOrDie() defer argoio.Close(conn) - apps, err := appIf.List(context.Background(), &applicationpkg.ApplicationQuery{Selector: selector}) + apps, err := appIf.List(ctx, &application.ApplicationQuery{ + Selector: pointer.String(selector), + AppNamespace: &appNamespace, + }) + errors.CheckError(err) appList := apps.Items + if len(projects) != 0 { appList = argo.FilterByProjects(appList, projects) } if repo != "" { appList = argo.FilterByRepo(appList, repo) } + if cluster != "" { + appList = argo.FilterByCluster(appList, cluster) + } + switch output { case "yaml", "json": err := PrintResourceList(appList, output, false) @@ -1105,9 +1434,11 @@ func NewApplicationListCommand(clientOpts 
*argocdclient.ClientOptions) *cobra.Co }, } command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: wide|name|json|yaml") - command.Flags().StringVarP(&selector, "selector", "l", "", "List apps by label") + command.Flags().StringVarP(&selector, "selector", "l", "", "List apps by label. Supports '=', '==', '!=', in, notin, exists & not exists. Matching apps must satisfy all of the specified label constraints.") command.Flags().StringArrayVarP(&projects, "project", "p", []string{}, "Filter by project name") command.Flags().StringVarP(&repo, "repo", "r", "", "List apps by source repo URL") + command.Flags().StringVarP(&appNamespace, "app-namespace", "N", "", "Only list applications in namespace") + command.Flags().StringVarP(&cluster, "cluster", "c", "", "List apps by cluster name or url") return command } @@ -1152,49 +1483,76 @@ const ( resourceFieldCount = 3 resourceFieldNamespaceDelimiter = "/" resourceFieldNameWithNamespaceCount = 2 + resourceExcludeIndicator = "!" 
) -func parseSelectedResources(resources []string) []argoappv1.SyncOperationResource { - var selectedResources []argoappv1.SyncOperationResource - if resources != nil { - selectedResources = []argoappv1.SyncOperationResource{} - for _, r := range resources { - fields := strings.Split(r, resourceFieldDelimiter) - if len(fields) != resourceFieldCount { - log.Fatalf("Resource should have GROUP%sKIND%sNAME, but instead got: %s", resourceFieldDelimiter, resourceFieldDelimiter, r) - } - name := fields[2] - namespace := "" - if strings.Contains(fields[2], resourceFieldNamespaceDelimiter) { - nameFields := strings.Split(fields[2], resourceFieldNamespaceDelimiter) - if len(nameFields) != resourceFieldNameWithNamespaceCount { - log.Fatalf("Resource with namespace should have GROUP%sKIND%sNAMESPACE%sNAME, but instead got: %s", resourceFieldDelimiter, resourceFieldDelimiter, resourceFieldNamespaceDelimiter, r) - } - namespace = nameFields[0] - name = nameFields[1] - } - rsrc := argoappv1.SyncOperationResource{ - Group: fields[0], - Kind: fields[1], - Name: name, - Namespace: namespace, - } - selectedResources = append(selectedResources, rsrc) +// resource is GROUP:KIND:NAMESPACE/NAME or GROUP:KIND:NAME +func parseSelectedResources(resources []string) ([]*argoappv1.SyncOperationResource, error) { + // retrieve name and namespace in case if format is GROUP:KIND:NAMESPACE/NAME, otherwise return name and empty namespace + nameRetriever := func(resourceName, resource string) (string, string, error) { + if !strings.Contains(resourceName, resourceFieldNamespaceDelimiter) { + return resourceName, "", nil + } + nameFields := strings.Split(resourceName, resourceFieldNamespaceDelimiter) + if len(nameFields) != resourceFieldNameWithNamespaceCount { + return "", "", fmt.Errorf("Resource with namespace should have GROUP%sKIND%sNAMESPACE%sNAME, but instead got: %s", resourceFieldDelimiter, resourceFieldDelimiter, resourceFieldNamespaceDelimiter, resource) + } + namespace := nameFields[0] + 
name := nameFields[1] + return name, namespace, nil + } + + var selectedResources []*argoappv1.SyncOperationResource + if resources == nil { + return selectedResources, nil + } + + for _, resource := range resources { + isExcluded := false + // check if the resource flag starts with a '!' + if strings.HasPrefix(resource, resourceExcludeIndicator) { + resource = strings.TrimPrefix(resource, resourceExcludeIndicator) + isExcluded = true + } + fields := strings.Split(resource, resourceFieldDelimiter) + if len(fields) != resourceFieldCount { + return nil, fmt.Errorf("Resource should have GROUP%sKIND%sNAME, but instead got: %s", resourceFieldDelimiter, resourceFieldDelimiter, resource) + } + name, namespace, err := nameRetriever(fields[2], resource) + if err != nil { + return nil, err + } + selectedResources = append(selectedResources, &argoappv1.SyncOperationResource{ + Group: fields[0], + Kind: fields[1], + Name: name, + Namespace: namespace, + Exclude: isExcluded, + }) + } + return selectedResources, nil +} + +func getWatchOpts(watch watchOpts) watchOpts { + // if no opts are defined should wait for sync,health,operation + if (watch == watchOpts{}) { + return watchOpts{ + sync: true, + health: true, + operation: true, } } - return selectedResources + return watch } // NewApplicationWaitCommand returns a new instance of an `argocd app wait` command func NewApplicationWaitCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { var ( - watchSync bool - watchHealth bool - watchSuspended bool - watchOperations bool - timeout uint - selector string - resources []string + watch watchOpts + timeout uint + selector string + resources []string + output string ) var command = &cobra.Command{ Use: "wait [APPNAME.. 
| -l selector]", @@ -1205,44 +1563,58 @@ func NewApplicationWaitCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co # Wait for multiple apps argocd app wait my-app other-app + # Wait for apps by resource + # Resource should be formatted as GROUP:KIND:NAME. If no GROUP is specified then :KIND:NAME. + argocd app wait my-app --resource :Service:my-service + argocd app wait my-app --resource argoproj.io:Rollout:my-rollout + argocd app wait my-app --resource '!apps:Deployment:my-service' + argocd app wait my-app --resource apps:Deployment:my-service --resource :Service:my-service + argocd app wait my-app --resource '!*:Service:*' + # Specify namespace if the application has resources with the same name in different namespaces + argocd app wait my-app --resource argoproj.io:Rollout:my-namespace/my-rollout + # Wait for apps by label, in this example we waiting for apps that are children of another app (aka app-of-apps) - argocd app wait -l app.kubernetes.io/instance=apps`, + argocd app wait -l app.kubernetes.io/instance=my-app + argocd app wait -l app.kubernetes.io/instance!=my-app + argocd app wait -l app.kubernetes.io/instance + argocd app wait -l '!app.kubernetes.io/instance' + argocd app wait -l 'app.kubernetes.io/instance notin (my-app,other-app)'`, Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) == 0 && selector == "" { c.HelpFunc()(c, args) os.Exit(1) } - if !watchSync && !watchHealth && !watchOperations && !watchSuspended { - watchSync = true - watchHealth = true - watchOperations = true - watchSuspended = false - } - selectedResources := parseSelectedResources(resources) + watch = getWatchOpts(watch) + selectedResources, err := parseSelectedResources(resources) + errors.CheckError(err) appNames := args - acdClient := argocdclient.NewClientOrDie(clientOpts) + acdClient := headless.NewClientOrDie(clientOpts, c) closer, appIf := acdClient.NewApplicationClientOrDie() defer argoio.Close(closer) if selector != "" { - list, err := 
appIf.List(context.Background(), &applicationpkg.ApplicationQuery{Selector: selector}) + list, err := appIf.List(ctx, &application.ApplicationQuery{Selector: pointer.String(selector)}) errors.CheckError(err) for _, i := range list.Items { appNames = append(appNames, i.Name) } } for _, appName := range appNames { - _, err := waitOnApplicationStatus(acdClient, appName, timeout, watchSync, watchHealth, watchOperations, watchSuspended, selectedResources) + _, _, err := waitOnApplicationStatus(ctx, acdClient, appName, timeout, watch, selectedResources, output) errors.CheckError(err) } }, } - command.Flags().BoolVar(&watchSync, "sync", false, "Wait for sync") - command.Flags().BoolVar(&watchHealth, "health", false, "Wait for health") - command.Flags().BoolVar(&watchSuspended, "suspended", false, "Wait for suspended") - command.Flags().StringVarP(&selector, "selector", "l", "", "Wait for apps by label") - command.Flags().StringArrayVar(&resources, "resource", []string{}, fmt.Sprintf("Sync only specific resources as GROUP%sKIND%sNAME. Fields may be blank. This option may be specified repeatedly", resourceFieldDelimiter, resourceFieldDelimiter)) - command.Flags().BoolVar(&watchOperations, "operation", false, "Wait for pending operations") + command.Flags().BoolVar(&watch.sync, "sync", false, "Wait for sync") + command.Flags().BoolVar(&watch.health, "health", false, "Wait for health") + command.Flags().BoolVar(&watch.suspended, "suspended", false, "Wait for suspended") + command.Flags().BoolVar(&watch.degraded, "degraded", false, "Wait for degraded") + command.Flags().StringVarP(&selector, "selector", "l", "", "Wait for apps by label. Supports '=', '==', '!=', in, notin, exists & not exists. Matching apps must satisfy all of the specified label constraints.") + command.Flags().StringArrayVar(&resources, "resource", []string{}, fmt.Sprintf("Sync only specific resources as GROUP%[1]sKIND%[1]sNAME or %[2]sGROUP%[1]sKIND%[1]sNAME. Fields may be blank and '*' can be used. 
This option may be specified repeatedly", resourceFieldDelimiter, resourceExcludeIndicator)) + command.Flags().BoolVar(&watch.operation, "operation", false, "Wait for pending operations") command.Flags().UintVar(&timeout, "timeout", defaultCheckTimeoutSeconds, "Time out after this many seconds") + command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|tree|tree=detailed") return command } @@ -1254,6 +1626,24 @@ func printAppResources(w io.Writer, app *argoappv1.Application) { } } +func printTreeView(nodeMapping map[string]argoappv1.ResourceNode, parentChildMapping map[string][]string, parentNodes map[string]struct{}, mapNodeNameToResourceState map[string]*resourceState) { + w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) + _, _ = fmt.Fprintf(w, "KIND/NAME\tSTATUS\tHEALTH\tMESSAGE\n") + for uid := range parentNodes { + treeViewAppGet("", nodeMapping, parentChildMapping, nodeMapping[uid], mapNodeNameToResourceState, w) + } + _ = w.Flush() +} + +func printTreeViewDetailed(nodeMapping map[string]argoappv1.ResourceNode, parentChildMapping map[string][]string, parentNodes map[string]struct{}, mapNodeNameToResourceState map[string]*resourceState) { + w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) + fmt.Fprintf(w, "KIND/NAME\tSTATUS\tHEALTH\tAGE\tMESSAGE\tREASON\n") + for uid := range parentNodes { + detailedTreeViewAppGet("", nodeMapping, parentChildMapping, nodeMapping[uid], mapNodeNameToResourceState, w) + } + _ = w.Flush() +} + // NewApplicationSyncCommand returns a new instance of an `argocd app sync` command func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { var ( @@ -1266,6 +1656,9 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co timeout uint strategy string force bool + replace bool + serverSideApply bool + applyOutOfSyncOnly bool async bool retryLimit int64 retryBackoffDuration time.Duration @@ -1274,9 +1667,13 @@ func 
NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co local string localRepoRoot string infos []string + diffChanges bool + diffChangesConfirm bool + projects []string + output string ) var command = &cobra.Command{ - Use: "sync [APPNAME... | -l selector]", + Use: "sync [APPNAME... | -l selector | --project project-name]", Short: "Sync an application to its target state", Example: ` # Sync an app argocd app sync my-app @@ -1286,19 +1683,30 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co # Sync apps by label, in this example we sync apps that are children of another app (aka app-of-apps) argocd app sync -l app.kubernetes.io/instance=my-app + argocd app sync -l app.kubernetes.io/instance!=my-app + argocd app sync -l app.kubernetes.io/instance + argocd app sync -l '!app.kubernetes.io/instance' + argocd app sync -l 'app.kubernetes.io/instance notin (my-app,other-app)' # Sync a specific resource # Resource should be formatted as GROUP:KIND:NAME. 
If no GROUP is specified then :KIND:NAME argocd app sync my-app --resource :Service:my-service argocd app sync my-app --resource argoproj.io:Rollout:my-rollout + argocd app sync my-app --resource '!apps:Deployment:my-service' + argocd app sync my-app --resource apps:Deployment:my-service --resource :Service:my-service + argocd app sync my-app --resource '!*:Service:*' # Specify namespace if the application has resources with the same name in different namespaces argocd app sync my-app --resource argoproj.io:Rollout:my-namespace/my-rollout`, Run: func(c *cobra.Command, args []string) { - if len(args) == 0 && selector == "" { + ctx := c.Context() + if len(args) == 0 && selector == "" && len(projects) == 0 { c.HelpFunc()(c, args) os.Exit(1) } - acdClient := argocdclient.NewClientOrDie(clientOpts) + if len(args) > 1 && selector != "" { + log.Fatal("Cannot use selector option when application name(s) passed as argument(s)") + } + acdClient := headless.NewClientOrDie(clientOpts, c) conn, appIf := acdClient.NewApplicationClientOrDie() defer argoio.Close(conn) @@ -1306,26 +1714,35 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co errors.CheckError(err) appNames := args - if selector != "" { - list, err := appIf.List(context.Background(), &applicationpkg.ApplicationQuery{Selector: selector}) + if selector != "" || len(projects) > 0 { + list, err := appIf.List(ctx, &application.ApplicationQuery{Selector: pointer.String(selector), Projects: projects}) errors.CheckError(err) + // unlike list, we'd want to fail if nothing was found if len(list.Items) == 0 { - log.Fatalf("no apps match selector %v", selector) + errMsg := "No matching apps found for filter:" + if selector != "" { + errMsg += fmt.Sprintf(" selector %s", selector) + } + if len(projects) != 0 { + errMsg += fmt.Sprintf(" projects %v", projects) + } + log.Fatalf(errMsg) } + for _, i := range list.Items { - appNames = append(appNames, i.Name) + appNames = append(appNames, 
i.QualifiedName()) } } - for _, appName := range appNames { + for _, appQualifiedName := range appNames { + appName, appNs := argo.ParseFromQualifiedName(appQualifiedName, "") if len(selectedLabels) > 0 { - ctx := context.Background() - - q := applicationpkg.ApplicationManifestQuery{ - Name: &appName, - Revision: revision, + q := application.ApplicationManifestQuery{ + Name: &appName, + AppNamespace: &appNs, + Revision: &revision, } res, err := appIf.GetManifests(ctx, &q) @@ -1333,6 +1750,8 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co log.Fatal(err) } + fmt.Println("The name of the app is ", appName) + for _, mfst := range res.Manifests { obj, err := argoappv1.UnmarshalToUnstructured(mfst) errors.CheckError(err) @@ -1352,39 +1771,96 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co } } - selectedResources := parseSelectedResources(resources) + selectedResources, err := parseSelectedResources(resources) + errors.CheckError(err) var localObjsStrings []string + diffOption := &DifferenceOption{} + + app, err := appIf.Get(ctx, &application.ApplicationQuery{ + Name: &appName, + AppNamespace: &appNs, + }) + errors.CheckError(err) + + if app.Spec.HasMultipleSources() { + if revision != "" { + log.Fatal("argocd cli does not work on multi-source app with --revision flag") + return + } + + if local != "" { + log.Fatal("argocd cli does not work on multi-source app with --local flag") + return + } + } + + // filters out only those resources that needs to be synced + filteredResources := filterAppResources(app, selectedResources) + + // if resources are provided and no app resources match, then return error + if len(resources) > 0 && len(filteredResources) == 0 { + log.Fatalf("No matching app resources found for resource filter: %v", strings.Join(resources, ", ")) + } + if local != "" { - app, err := appIf.Get(context.Background(), &applicationpkg.ApplicationQuery{Name: &appName}) - errors.CheckError(err) 
if app.Spec.SyncPolicy != nil && app.Spec.SyncPolicy.Automated != nil && !dryRun { log.Fatal("Cannot use local sync when Automatic Sync Policy is enabled except with --dry-run") } errors.CheckError(err) conn, settingsIf := acdClient.NewSettingsClientOrDie() - argoSettings, err := settingsIf.Get(context.Background(), &settingspkg.SettingsQuery{}) + argoSettings, err := settingsIf.Get(ctx, &settings.SettingsQuery{}) errors.CheckError(err) argoio.Close(conn) conn, clusterIf := acdClient.NewClusterClientOrDie() defer argoio.Close(conn) - cluster, err := clusterIf.Get(context.Background(), &clusterpkg.ClusterQuery{Name: app.Spec.Destination.Name, Server: app.Spec.Destination.Server}) + cluster, err := clusterIf.Get(ctx, &clusterpkg.ClusterQuery{Name: app.Spec.Destination.Name, Server: app.Spec.Destination.Server}) errors.CheckError(err) argoio.Close(conn) - localObjsStrings = getLocalObjectsString(app, local, localRepoRoot, argoSettings.AppLabelKey, cluster.ServerVersion, argoSettings.KustomizeOptions, argoSettings.ConfigManagementPlugins) + + proj := getProject(c, clientOpts, ctx, app.Spec.Project) + localObjsStrings = getLocalObjectsString(ctx, app, proj.Project, local, localRepoRoot, argoSettings.AppLabelKey, cluster.Info.ServerVersion, cluster.Info.APIVersions, argoSettings.KustomizeOptions, argoSettings.TrackingMethod) + errors.CheckError(err) + diffOption.local = local + diffOption.localRepoRoot = localRepoRoot + diffOption.cluster = cluster } - syncReq := applicationpkg.ApplicationSyncRequest{ - Name: &appName, - DryRun: dryRun, - Revision: revision, - Resources: selectedResources, - Prune: prune, - Manifests: localObjsStrings, - Infos: getInfos(infos), + syncOptionsFactory := func() *application.SyncOptions { + syncOptions := application.SyncOptions{} + items := make([]string, 0) + if replace { + items = append(items, common.SyncOptionReplace) + } + if serverSideApply { + items = append(items, common.SyncOptionServerSideApply) + } + if applyOutOfSyncOnly { + 
items = append(items, common.SyncOptionApplyOutOfSyncOnly) + } + + if len(items) == 0 { + // avoid sending an empty options array when no sync options were requested + return nil + } + syncOptions.Items = items + return &syncOptions + } + + syncReq := application.ApplicationSyncRequest{ + Name: &appName, + AppNamespace: &appNs, + DryRun: &dryRun, + Revision: &revision, + Resources: filteredResources, + Prune: &prune, + Manifests: localObjsStrings, + Infos: getInfos(infos), + SyncOptions: syncOptionsFactory(), + } + switch strategy { case "apply": syncReq.Strategy = &argoappv1.SyncStrategy{Apply: &argoappv1.SyncStrategyApply{}} @@ -1405,20 +1881,45 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co }, } } - ctx := context.Background() - _, err := appIf.Sync(ctx, &syncReq) + if diffChanges { + resources, err := appIf.ManagedResources(ctx, &application.ResourcesQuery{ + ApplicationName: &appName, + AppNamespace: &appNs, + }) + errors.CheckError(err) + conn, settingsIf := acdClient.NewSettingsClientOrDie() + defer argoio.Close(conn) + argoSettings, err := settingsIf.Get(ctx, &settings.SettingsQuery{}) + errors.CheckError(err) + foundDiffs := false + fmt.Printf("====== Previewing differences between live and desired state of application %s ======\n", appQualifiedName) + + proj := getProject(c, clientOpts, ctx, app.Spec.Project) + foundDiffs = findandPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption) + if foundDiffs { + if !diffChangesConfirm { + yesno := cli.AskToProceed(fmt.Sprintf("Please review changes to application %s shown above. Do you want to continue the sync process? 
(y/n): ", appQualifiedName)) + if !yesno { + os.Exit(0) + } + } + } else { + fmt.Printf("====== No Differences found ======\n") + } + } + _, err = appIf.Sync(ctx, &syncReq) errors.CheckError(err) if !async { - app, err := waitOnApplicationStatus(acdClient, appName, timeout, false, false, true, false, selectedResources) + app, opState, err := waitOnApplicationStatus(ctx, acdClient, appQualifiedName, timeout, watchOpts{operation: true}, selectedResources, output) errors.CheckError(err) if !dryRun { - if !app.Status.OperationState.Phase.Successful() { - log.Fatalf("Operation has completed with phase: %s", app.Status.OperationState.Phase) + if !opState.Phase.Successful() { + log.Fatalf("Operation has completed with phase: %s", opState.Phase) } else if len(selectedResources) == 0 && app.Status.Sync.Status != argoappv1.SyncStatusCodeSynced { // Only get resources to be pruned if sync was application-wide and final status is not synced - pruningRequired := app.Status.OperationState.SyncResult.Resources.PruningRequired() + pruningRequired := opState.SyncResult.Resources.PruningRequired() if pruningRequired > 0 { log.Fatalf("%d resources require pruning", pruningRequired) } @@ -1431,23 +1932,48 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co command.Flags().BoolVar(&dryRun, "dry-run", false, "Preview apply without affecting cluster") command.Flags().BoolVar(&prune, "prune", false, "Allow deleting unexpected resources") command.Flags().StringVar(&revision, "revision", "", "Sync to a specific revision. Preserves parameter overrides") - command.Flags().StringArrayVar(&resources, "resource", []string{}, fmt.Sprintf("Sync only specific resources as GROUP%sKIND%sNAME. Fields may be blank. 
This option may be specified repeatedly", resourceFieldDelimiter, resourceFieldDelimiter)) - command.Flags().StringVarP(&selector, "selector", "l", "", "Sync apps that match this label") + command.Flags().StringArrayVar(&resources, "resource", []string{}, fmt.Sprintf("Sync only specific resources as GROUP%[1]sKIND%[1]sNAME or %[2]sGROUP%[1]sKIND%[1]sNAME. Fields may be blank and '*' can be used. This option may be specified repeatedly", resourceFieldDelimiter, resourceExcludeIndicator)) + command.Flags().StringVarP(&selector, "selector", "l", "", "Sync apps that match this label. Supports '=', '==', '!=', in, notin, exists & not exists. Matching apps must satisfy all of the specified label constraints.") command.Flags().StringArrayVar(&labels, "label", []string{}, "Sync only specific resources with a label. This option may be specified repeatedly.") command.Flags().UintVar(&timeout, "timeout", defaultCheckTimeoutSeconds, "Time out after this many seconds") command.Flags().Int64Var(&retryLimit, "retry-limit", 0, "Max number of allowed sync retries") - command.Flags().DurationVar(&retryBackoffDuration, "retry-backoff-duration", common.DefaultSyncRetryDuration, "Retry backoff base duration. Input needs to be a duration (e.g. 2m, 1h)") - command.Flags().DurationVar(&retryBackoffMaxDuration, "retry-backoff-max-duration", common.DefaultSyncRetryMaxDuration, "Max retry backoff duration. Input needs to be a duration (e.g. 2m, 1h)") - command.Flags().Int64Var(&retryBackoffFactor, "retry-backoff-factor", common.DefaultSyncRetryFactor, "Factor multiplies the base duration after each failed retry") + command.Flags().DurationVar(&retryBackoffDuration, "retry-backoff-duration", argoappv1.DefaultSyncRetryDuration, "Retry backoff base duration. Input needs to be a duration (e.g. 2m, 1h)") + command.Flags().DurationVar(&retryBackoffMaxDuration, "retry-backoff-max-duration", argoappv1.DefaultSyncRetryMaxDuration, "Max retry backoff duration. Input needs to be a duration (e.g. 
2m, 1h)") + command.Flags().Int64Var(&retryBackoffFactor, "retry-backoff-factor", argoappv1.DefaultSyncRetryFactor, "Factor multiplies the base duration after each failed retry") command.Flags().StringVar(&strategy, "strategy", "", "Sync strategy (one of: apply|hook)") command.Flags().BoolVar(&force, "force", false, "Use a force apply") + command.Flags().BoolVar(&replace, "replace", false, "Use a kubectl create/replace instead apply") + command.Flags().BoolVar(&serverSideApply, "server-side", false, "Use server-side apply while syncing the application") + command.Flags().BoolVar(&applyOutOfSyncOnly, "apply-out-of-sync-only", false, "Sync only out-of-sync resources") command.Flags().BoolVar(&async, "async", false, "Do not wait for application to sync before continuing") command.Flags().StringVar(&local, "local", "", "Path to a local directory. When this flag is present no git queries will be made") command.Flags().StringVar(&localRepoRoot, "local-repo-root", "/", "Path to the repository root. Used together with --local allows setting the repository root") command.Flags().StringArrayVar(&infos, "info", []string{}, "A list of key-value pairs during sync process. These infos will be persisted in app.") + command.Flags().BoolVar(&diffChangesConfirm, "assumeYes", false, "Assume yes as answer for all user queries or prompts") + command.Flags().BoolVar(&diffChanges, "preview-changes", false, "Preview difference against the target and live state before syncing app and wait for user confirmation") + command.Flags().StringArrayVar(&projects, "project", []string{}, "Sync apps that belong to the specified projects. This option may be specified repeatedly.") + command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. 
One of: json|yaml|wide|tree|tree=detailed") return command } +func getAppNamesBySelector(ctx context.Context, appIf application.ApplicationServiceClient, selector string) ([]string, error) { + appNames := []string{} + if selector != "" { + list, err := appIf.List(ctx, &application.ApplicationQuery{Selector: pointer.String(selector)}) + if err != nil { + return []string{}, err + } + // unlike list, we'd want to fail if nothing was found + if len(list.Items) == 0 { + return []string{}, fmt.Errorf("no apps match selector %v", selector) + } + for _, i := range list.Items { + appNames = append(appNames, i.Name) + } + } + return appNames, nil +} + // ResourceDiff tracks the state of a resource when waiting on an application status. type resourceState struct { Group string @@ -1488,7 +2014,7 @@ func (rs *resourceState) Merge(newState *resourceState) bool { return updated } -func getResourceStates(app *argoappv1.Application, selectedResources []argoappv1.SyncOperationResource) []*resourceState { +func getResourceStates(app *argoappv1.Application, selectedResources []*argoappv1.SyncOperationResource) []*resourceState { var states []*resourceState resourceByKey := make(map[kube.ResourceKey]argoappv1.ResourceStatus) for i := range app.Status.Resources { @@ -1535,7 +2061,7 @@ func getResourceStates(app *argoappv1.Application, selectedResources []argoappv1 if len(selectedResources) > 0 { for i := len(states) - 1; i >= 0; i-- { res := states[i] - if !argo.ContainsSyncResource(res.Name, res.Namespace, schema.GroupVersionKind{Group: res.Group, Kind: res.Kind}, selectedResources) { + if !argo.IncludeResource(res.Name, res.Namespace, schema.GroupVersionKind{Group: res.Group, Kind: res.Kind}, selectedResources) { states = append(states[:i], states[i+1:]...) 
} } @@ -1543,7 +2069,27 @@ func getResourceStates(app *argoappv1.Application, selectedResources []argoappv1 return states } -func groupResourceStates(app *argoappv1.Application, selectedResources []argoappv1.SyncOperationResource) map[string]*resourceState { +// filterAppResources selects the app resources that match at least one of the resource filters. +func filterAppResources(app *argoappv1.Application, selectedResources []*argoappv1.SyncOperationResource) []*argoappv1.SyncOperationResource { + var filteredResources []*argoappv1.SyncOperationResource + if app != nil && len(selectedResources) > 0 { + for i := range app.Status.Resources { + appResource := app.Status.Resources[i] + if (argo.IncludeResource(appResource.Name, appResource.Namespace, + schema.GroupVersionKind{Group: appResource.Group, Kind: appResource.Kind}, selectedResources)) { + filteredResources = append(filteredResources, &argoappv1.SyncOperationResource{ + Group: appResource.Group, + Kind: appResource.Kind, + Name: appResource.Name, + Namespace: appResource.Namespace, + }) + } + } + } + return filteredResources +} + +func groupResourceStates(app *argoappv1.Application, selectedResources []*argoappv1.SyncOperationResource) map[string]*resourceState { resStates := make(map[string]*resourceState) for _, result := range getResourceStates(app, selectedResources) { key := result.Key() @@ -1556,26 +2102,58 @@ func groupResourceStates(app *argoappv1.Application, selectedResources []argoapp return resStates } -func checkResourceStatus(watchSync bool, watchHealth bool, watchOperation bool, watchSuspended bool, healthStatus string, syncStatus string, operationStatus *argoappv1.Operation) bool { +// checkResourceStatus reports whether the resource health, sync and operation statuses match the watch options +func checkResourceStatus(watch watchOpts, healthStatus string, syncStatus string, operationStatus *argoappv1.Operation) bool { healthCheckPassed := true - if watchSuspended && watchHealth { + + if watch.suspended && watch.health && 
watch.degraded { healthCheckPassed = healthStatus == string(health.HealthStatusHealthy) || + healthStatus == string(health.HealthStatusSuspended) || + healthStatus == string(health.HealthStatusDegraded) + } else if watch.suspended && watch.degraded { + healthCheckPassed = healthStatus == string(health.HealthStatusDegraded) || healthStatus == string(health.HealthStatusSuspended) - } else if watchSuspended { + } else if watch.degraded && watch.health { + healthCheckPassed = healthStatus == string(health.HealthStatusHealthy) || + healthStatus == string(health.HealthStatusDegraded) + // the remaining branches each match a single watch option + } else if watch.suspended && watch.health { + healthCheckPassed = healthStatus == string(health.HealthStatusHealthy) || + healthStatus == string(health.HealthStatusSuspended) + } else if watch.suspended { healthCheckPassed = healthStatus == string(health.HealthStatusSuspended) - } else if watchHealth { + } else if watch.health { healthCheckPassed = healthStatus == string(health.HealthStatusHealthy) + } else if watch.degraded { + healthCheckPassed = healthStatus == string(health.HealthStatusDegraded) } - synced := !watchSync || syncStatus == string(argoappv1.SyncStatusCodeSynced) - operational := !watchOperation || operationStatus == nil + synced := !watch.sync || syncStatus == string(argoappv1.SyncStatusCodeSynced) + operational := !watch.operation || operationStatus == nil return synced && healthCheckPassed && operational } +// resourceParentChild gets the latest state of the app and the latest state of the app's resource tree and then +// constructs the necessary data structures to print the app as a tree. 
+func resourceParentChild(ctx context.Context, acdClient argocdclient.Client, appName string, appNs string) (map[string]argoappv1.ResourceNode, map[string][]string, map[string]struct{}, map[string]*resourceState) { + _, appIf := acdClient.NewApplicationClientOrDie() + mapUidToNode, mapParentToChild, parentNode := parentChildDetails(appIf, ctx, appName, appNs) + app, err := appIf.Get(ctx, &application.ApplicationQuery{Name: pointer.String(appName), AppNamespace: pointer.String(appNs)}) + errors.CheckError(err) + mapNodeNameToResourceState := make(map[string]*resourceState) + for _, res := range getResourceStates(app, nil) { + mapNodeNameToResourceState[res.Kind+"/"+res.Name] = res + } + return mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState +} + const waitFormatString = "%s\t%5s\t%10s\t%10s\t%20s\t%8s\t%7s\t%10s\t%s\n" -func waitOnApplicationStatus(acdClient apiclient.Client, appName string, timeout uint, watchSync bool, watchHealth bool, watchOperation bool, watchSuspended bool, selectedResources []argoappv1.SyncOperationResource) (*argoappv1.Application, error) { - ctx, cancel := context.WithCancel(context.Background()) +// waitOnApplicationStatus watches an application and blocks until either the desired watch conditions +// are fulfilled or we reach the timeout. Returns the app once desired conditions have been filled. +// Additionally return the operationState at time of fulfilment (which may be different than returned app). +func waitOnApplicationStatus(ctx context.Context, acdClient argocdclient.Client, appName string, timeout uint, watch watchOpts, selectedResources []*argoappv1.SyncOperationResource, output string) (*argoappv1.Application, *argoappv1.OperationState, error) { + ctx, cancel := context.WithCancel(ctx) defer cancel() // refresh controls whether or not we refresh the app before printing the final status. 
@@ -1583,35 +2161,72 @@ func waitOnApplicationStatus(acdClient apiclient.Client, appName string, timeout // time when the sync status lags behind when an operation completes refresh := false + appRealName, appNs := argo.ParseFromQualifiedName(appName, "") + printFinalStatus := func(app *argoappv1.Application) *argoappv1.Application { var err error if refresh { conn, appClient := acdClient.NewApplicationClientOrDie() refreshType := string(argoappv1.RefreshTypeNormal) - app, err = appClient.Get(context.Background(), &applicationpkg.ApplicationQuery{Name: &appName, Refresh: &refreshType}) + app, err = appClient.Get(ctx, &application.ApplicationQuery{ + Name: &appRealName, + Refresh: &refreshType, + AppNamespace: &appNs, + }) errors.CheckError(err) _ = conn.Close() } fmt.Println() - printAppSummaryTable(app, appURL(acdClient, appName), nil) + printAppSummaryTable(app, appURL(ctx, acdClient, appName), nil) fmt.Println() - if watchOperation { + if watch.operation { printOperationResult(app.Status.OperationState) } - if len(app.Status.Resources) > 0 { - fmt.Println() - w := tabwriter.NewWriter(os.Stdout, 5, 0, 2, ' ', 0) - printAppResources(w, app) - _ = w.Flush() + switch output { + case "yaml", "json": + err := PrintResource(app, output) + errors.CheckError(err) + case "wide", "": + if len(app.Status.Resources) > 0 { + fmt.Println() + w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) + printAppResources(w, app) + _ = w.Flush() + } + case "tree": + mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState := resourceParentChild(ctx, acdClient, appName, appNs) + if len(mapUidToNode) > 0 { + fmt.Println() + printTreeView(mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState) + } + case "tree=detailed": + mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState := resourceParentChild(ctx, acdClient, appName, appNs) + if len(mapUidToNode) > 0 { + fmt.Println() + printTreeViewDetailed(mapUidToNode, mapParentToChild, parentNode, 
mapNodeNameToResourceState) + } + default: + errors.CheckError(fmt.Errorf("unknown output format: %s", output)) } return app } if timeout != 0 { time.AfterFunc(time.Duration(timeout)*time.Second, func() { + _, appClient := acdClient.NewApplicationClientOrDie() + app, err := appClient.Get(ctx, &application.ApplicationQuery{ + Name: &appRealName, + AppNamespace: &appNs, + }) + errors.CheckError(err) + fmt.Println() + fmt.Println("This is the state of the app after `wait` timed out:") + printFinalStatus(app) cancel() + fmt.Println() + fmt.Println("The command timed out waiting for the conditions to be met.") }) } @@ -1621,12 +2236,25 @@ func waitOnApplicationStatus(acdClient apiclient.Client, appName string, timeout prevStates := make(map[string]*resourceState) conn, appClient := acdClient.NewApplicationClientOrDie() defer argoio.Close(conn) - app, err := appClient.Get(ctx, &applicationpkg.ApplicationQuery{Name: &appName}) + app, err := appClient.Get(ctx, &application.ApplicationQuery{ + Name: &appRealName, + AppNamespace: &appNs, + }) errors.CheckError(err) + + // printFinalStatus() will refresh and update the app object, potentially causing the app's + // status.operationState to be different than the version when we break out of the event loop. + // This means the app.status is unreliable for determining the final state of the operation. + // finalOperationState captures the operationState as it was seen when we met the conditions of + // the wait, so the caller can rely on it to determine the outcome of the operation. 
+ // See: https://github.com/argoproj/argo-cd/issues/5592 + finalOperationState := app.Status.OperationState + appEventCh := acdClient.WatchApplicationWithRetry(ctx, appName, app.ResourceVersion) for appEvent := range appEventCh { app = &appEvent.Application + finalOperationState = app.Status.OperationState operationInProgress := false // consider the operation is in progress if app.Operation != nil { @@ -1651,7 +2279,7 @@ func waitOnApplicationStatus(acdClient apiclient.Client, appName string, timeout if len(selectedResources) > 0 { selectedResourcesAreReady = true for _, state := range getResourceStates(app, selectedResources) { - resourceIsReady := checkResourceStatus(watchSync, watchHealth, watchOperation, watchSuspended, state.Health, state.Status, appEvent.Application.Operation) + resourceIsReady := checkResourceStatus(watch, state.Health, state.Status, appEvent.Application.Operation) if !resourceIsReady { selectedResourcesAreReady = false break @@ -1659,12 +2287,12 @@ func waitOnApplicationStatus(acdClient apiclient.Client, appName string, timeout } } else { // Wait on the application as a whole - selectedResourcesAreReady = checkResourceStatus(watchSync, watchHealth, watchOperation, watchSuspended, string(app.Status.Health.Status), string(app.Status.Sync.Status), appEvent.Application.Operation) + selectedResourcesAreReady = checkResourceStatus(watch, string(app.Status.Health.Status), string(app.Status.Sync.Status), appEvent.Application.Operation) } - if selectedResourcesAreReady && (!operationInProgress || !watchOperation) { + if selectedResourcesAreReady && (!operationInProgress || !watch.operation) { app = printFinalStatus(app) - return app, nil + return app, finalOperationState, nil } newStates := groupResourceStates(app, selectedResources) @@ -1672,9 +2300,9 @@ func waitOnApplicationStatus(acdClient apiclient.Client, appName string, timeout var doPrint bool stateKey := newState.Key() if prevState, found := prevStates[stateKey]; found { - if watchHealth 
&& prevState.Health != string(health.HealthStatusUnknown) && prevState.Health != string(health.HealthStatusDegraded) && newState.Health == string(health.HealthStatusDegraded) { + if watch.health && prevState.Health != string(health.HealthStatusUnknown) && prevState.Health != string(health.HealthStatusDegraded) && newState.Health == string(health.HealthStatusDegraded) { _ = printFinalStatus(app) - return nil, fmt.Errorf("application '%s' health state has transitioned from %s to %s", appName, prevState.Health, newState.Health) + return nil, finalOperationState, fmt.Errorf("application '%s' health state has transitioned from %s to %s", appName, prevState.Health, newState.Health) } doPrint = prevState.Merge(newState) } else { @@ -1688,65 +2316,32 @@ func waitOnApplicationStatus(acdClient apiclient.Client, appName string, timeout _ = w.Flush() } _ = printFinalStatus(app) - return nil, fmt.Errorf("timed out (%ds) waiting for app %q match desired state", timeout, appName) + return nil, finalOperationState, fmt.Errorf("timed out (%ds) waiting for app %q match desired state", timeout, appName) } // setParameterOverrides updates an existing or appends a new parameter override in the application -// If the app is a ksonnet app, then parameters are expected to be in the form: component=param=value -// Otherwise, the app is assumed to be a helm app and is expected to be in the form: +// the app is assumed to be a helm app and is expected to be in the form: // param=value func setParameterOverrides(app *argoappv1.Application, parameters []string) { if len(parameters) == 0 { return } + source := app.Spec.GetSource() var sourceType argoappv1.ApplicationSourceType - if st, _ := app.Spec.Source.ExplicitType(); st != nil { + if st, _ := source.ExplicitType(); st != nil { sourceType = *st } else if app.Status.SourceType != "" { sourceType = app.Status.SourceType } else { - // HACK: we don't know the source type, so make an educated guess based on the supplied - // parameter string. 
This code handles the corner case where app doesn't exist yet, and the - // command is something like: `argocd app create MYAPP -p foo=bar` - // This logic is not foolproof, but when ksonnet is deprecated, this will no longer matter - // since helm will remain as the only source type which has parameters. - if len(strings.SplitN(parameters[0], "=", 3)) == 3 { - sourceType = argoappv1.ApplicationSourceTypeKsonnet - } else if len(strings.SplitN(parameters[0], "=", 2)) == 2 { + if len(strings.SplitN(parameters[0], "=", 2)) == 2 { sourceType = argoappv1.ApplicationSourceTypeHelm } } switch sourceType { - case argoappv1.ApplicationSourceTypeKsonnet: - if app.Spec.Source.Ksonnet == nil { - app.Spec.Source.Ksonnet = &argoappv1.ApplicationSourceKsonnet{} - } - for _, paramStr := range parameters { - parts := strings.SplitN(paramStr, "=", 3) - if len(parts) != 3 { - log.Fatalf("Expected ksonnet parameter of the form: component=param=value. Received: %s", paramStr) - } - newParam := argoappv1.KsonnetParameter{ - Component: parts[0], - Name: parts[1], - Value: parts[2], - } - found := false - for i, cp := range app.Spec.Source.Ksonnet.Parameters { - if cp.Component == newParam.Component && cp.Name == newParam.Name { - found = true - app.Spec.Source.Ksonnet.Parameters[i] = newParam - break - } - } - if !found { - app.Spec.Source.Ksonnet.Parameters = append(app.Spec.Source.Ksonnet.Parameters, newParam) - } - } case argoappv1.ApplicationSourceTypeHelm: - if app.Spec.Source.Helm == nil { - app.Spec.Source.Helm = &argoappv1.ApplicationSourceHelm{} + if source.Helm == nil { + source.Helm = &argoappv1.ApplicationSourceHelm{} } for _, p := range parameters { newParam, err := argoappv1.NewHelmParameter(p, false) @@ -1754,10 +2349,10 @@ func setParameterOverrides(app *argoappv1.Application, parameters []string) { log.Error(err) continue } - app.Spec.Source.Helm.AddParameter(*newParam) + source.Helm.AddParameter(*newParam) } default: - log.Fatalf("Parameters can only be set against 
Ksonnet or Helm applications") + log.Fatalf("Parameters can only be set against Helm applications") } } @@ -1791,15 +2386,21 @@ func NewApplicationHistoryCommand(clientOpts *argocdclient.ClientOptions) *cobra Use: "history APPNAME", Short: "Show application deployment history", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 1 { c.HelpFunc()(c, args) os.Exit(1) } - conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie() + conn, appIf := headless.NewClientOrDie(clientOpts, c).NewApplicationClientOrDie() defer argoio.Close(conn) - appName := args[0] - app, err := appIf.Get(context.Background(), &applicationpkg.ApplicationQuery{Name: &appName}) + appName, appNs := argo.ParseFromQualifiedName(args[0], "") + app, err := appIf.Get(ctx, &application.ApplicationQuery{ + Name: &appName, + AppNamespace: &appNs, + }) errors.CheckError(err) + if output == "id" { printApplicationHistoryIds(app.Status.History) } else { @@ -1811,53 +2412,75 @@ func NewApplicationHistoryCommand(clientOpts *argocdclient.ClientOptions) *cobra return command } +func findRevisionHistory(application *argoappv1.Application, historyId int64) (*argoappv1.RevisionHistory, error) { + // in case if history id not passed and need fetch previous history revision + if historyId == -1 { + l := len(application.Status.History) + if l < 2 { + return nil, fmt.Errorf("Application '%s' should have at least two successful deployments", application.ObjectMeta.Name) + } + return &application.Status.History[l-2], nil + } + for _, di := range application.Status.History { + if di.ID == historyId { + return &di, nil + } + } + return nil, fmt.Errorf("Application '%s' does not have deployment id '%d' in history\n", application.ObjectMeta.Name, historyId) +} + // NewApplicationRollbackCommand returns a new instance of an `argocd app rollback` command func NewApplicationRollbackCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { var ( prune bool timeout 
uint + output string ) var command = &cobra.Command{ - Use: "rollback APPNAME ID", - Short: "Rollback application to a previous deployed version by History ID", + Use: "rollback APPNAME [ID]", + Short: "Rollback application to a previous deployed version by History ID, omitted will Rollback to the previous version", Run: func(c *cobra.Command, args []string) { - if len(args) != 2 { + ctx := c.Context() + if len(args) == 0 { c.HelpFunc()(c, args) os.Exit(1) } - appName := args[0] - depID, err := strconv.Atoi(args[1]) - errors.CheckError(err) - acdClient := argocdclient.NewClientOrDie(clientOpts) + appName, appNs := argo.ParseFromQualifiedName(args[0], "") + var err error + depID := -1 + if len(args) > 1 { + depID, err = strconv.Atoi(args[1]) + errors.CheckError(err) + } + acdClient := headless.NewClientOrDie(clientOpts, c) conn, appIf := acdClient.NewApplicationClientOrDie() defer argoio.Close(conn) - ctx := context.Background() - app, err := appIf.Get(ctx, &applicationpkg.ApplicationQuery{Name: &appName}) + app, err := appIf.Get(ctx, &application.ApplicationQuery{ + Name: &appName, + AppNamespace: &appNs, + }) + errors.CheckError(err) + + depInfo, err := findRevisionHistory(app, int64(depID)) errors.CheckError(err) - var depInfo *argoappv1.RevisionHistory - for _, di := range app.Status.History { - if di.ID == int64(depID) { - depInfo = &di - break - } - } - if depInfo == nil { - log.Fatalf("Application '%s' does not have deployment id '%d' in history\n", app.ObjectMeta.Name, depID) - } - _, err = appIf.Rollback(ctx, &applicationpkg.ApplicationRollbackRequest{ - Name: &appName, - ID: int64(depID), - Prune: prune, + _, err = appIf.Rollback(ctx, &application.ApplicationRollbackRequest{ + Name: &appName, + AppNamespace: &appNs, + Id: pointer.Int64(depInfo.ID), + Prune: pointer.Bool(prune), }) errors.CheckError(err) - _, err = waitOnApplicationStatus(acdClient, appName, timeout, false, false, true, false, nil) + _, _, err = waitOnApplicationStatus(ctx, acdClient, 
app.QualifiedName(), timeout, watchOpts{ + operation: true, + }, nil, output) errors.CheckError(err) }, } command.Flags().BoolVar(&prune, "prune", false, "Allow deleting unexpected resources") command.Flags().UintVar(&timeout, "timeout", defaultCheckTimeoutSeconds, "Time out after this many seconds") + command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|tree|tree=detailed") return command } @@ -1890,34 +2513,59 @@ func printOperationResult(opState *argoappv1.OperationState) { // NewApplicationManifestsCommand returns a new instance of an `argocd app manifests` command func NewApplicationManifestsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { var ( - source string - revision string + source string + revision string + local string + localRepoRoot string ) var command = &cobra.Command{ Use: "manifests APPNAME", Short: "Print manifests of an application", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 1 { c.HelpFunc()(c, args) os.Exit(1) } - appName := args[0] - conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie() + appName, appNs := argo.ParseFromQualifiedName(args[0], "") + clientset := headless.NewClientOrDie(clientOpts, c) + conn, appIf := clientset.NewApplicationClientOrDie() defer argoio.Close(conn) - ctx := context.Background() - resources, err := appIf.ManagedResources(context.Background(), &applicationpkg.ResourcesQuery{ApplicationName: &appName}) + resources, err := appIf.ManagedResources(ctx, &application.ResourcesQuery{ + ApplicationName: &appName, + AppNamespace: &appNs, + }) errors.CheckError(err) var unstructureds []*unstructured.Unstructured switch source { case "git": - if revision != "" { - q := applicationpkg.ApplicationManifestQuery{ - Name: &appName, - Revision: revision, + if local != "" { + app, err := appIf.Get(context.Background(), &application.ApplicationQuery{Name: &appName}) + errors.CheckError(err) + + 
settingsConn, settingsIf := clientset.NewSettingsClientOrDie() + defer argoio.Close(settingsConn) + argoSettings, err := settingsIf.Get(context.Background(), &settings.SettingsQuery{}) + errors.CheckError(err) + + clusterConn, clusterIf := clientset.NewClusterClientOrDie() + defer argoio.Close(clusterConn) + cluster, err := clusterIf.Get(context.Background(), &clusterpkg.ClusterQuery{Name: app.Spec.Destination.Name, Server: app.Spec.Destination.Server}) + errors.CheckError(err) + + proj := getProject(c, clientOpts, ctx, app.Spec.Project) + unstructureds = getLocalObjects(context.Background(), app, proj.Project, local, localRepoRoot, argoSettings.AppLabelKey, cluster.ServerVersion, cluster.Info.APIVersions, argoSettings.KustomizeOptions, argoSettings.TrackingMethod) + } else if revision != "" { + q := application.ApplicationManifestQuery{ + Name: &appName, + AppNamespace: &appNs, + Revision: pointer.String(revision), } res, err := appIf.GetManifests(ctx, &q) errors.CheckError(err) + for _, mfst := range res.Manifests { obj, err := argoappv1.UnmarshalToUnstructured(mfst) errors.CheckError(err) @@ -1929,7 +2577,7 @@ func NewApplicationManifestsCommand(clientOpts *argocdclient.ClientOptions) *cob unstructureds = targetObjs } case "live": - liveObjs, err := liveObjects(resources.Items) + liveObjs, err := cmdutil.LiveObjects(resources.Items) errors.CheckError(err) unstructureds = liveObjs default: @@ -1946,6 +2594,8 @@ func NewApplicationManifestsCommand(clientOpts *argocdclient.ClientOptions) *cob } command.Flags().StringVar(&source, "source", "git", "Source of manifests. One of: live|git") command.Flags().StringVar(&revision, "revision", "", "Show manifests at a specific revision") + command.Flags().StringVar(&local, "local", "", "If set, show locally-generated manifests. Value is the absolute path to app manifests within the manifest repo. 
Example: '/home/username/apps/env/app-1'.") + command.Flags().StringVar(&localRepoRoot, "local-repo-root", ".", "Path to the local repository root. Used together with --local allows setting the repository root. Example: '/home/username/apps'.") return command } @@ -1955,15 +2605,19 @@ func NewApplicationTerminateOpCommand(clientOpts *argocdclient.ClientOptions) *c Use: "terminate-op APPNAME", Short: "Terminate running operation of an application", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 1 { c.HelpFunc()(c, args) os.Exit(1) } - appName := args[0] - conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie() + appName, appNs := argo.ParseFromQualifiedName(args[0], "") + conn, appIf := headless.NewClientOrDie(clientOpts, c).NewApplicationClientOrDie() defer argoio.Close(conn) - ctx := context.Background() - _, err := appIf.TerminateOperation(ctx, &applicationpkg.OperationTerminateRequest{Name: &appName}) + _, err := appIf.TerminateOperation(ctx, &application.OperationTerminateRequest{ + Name: &appName, + AppNamespace: &appNs, + }) errors.CheckError(err) fmt.Printf("Application '%s' operation terminating\n", appName) }, @@ -1976,15 +2630,21 @@ func NewApplicationEditCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co Use: "edit APPNAME", Short: "Edit application", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 1 { c.HelpFunc()(c, args) os.Exit(1) } - appName := args[0] - conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie() + appName, appNs := argo.ParseFromQualifiedName(args[0], "") + conn, appIf := headless.NewClientOrDie(clientOpts, c).NewApplicationClientOrDie() defer argoio.Close(conn) - app, err := appIf.Get(context.Background(), &applicationpkg.ApplicationQuery{Name: &appName}) + app, err := appIf.Get(ctx, &application.ApplicationQuery{ + Name: &appName, + AppNamespace: &appNs, + }) errors.CheckError(err) + appData, err := 
json.Marshal(app.Spec) errors.CheckError(err) appData, err = yaml.JSONToYAML(appData) @@ -1993,66 +2653,32 @@ func NewApplicationEditCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co cli.InteractiveEdit(fmt.Sprintf("%s-*-edit.yaml", appName), appData, func(input []byte) error { input, err = yaml.YAMLToJSON(input) if err != nil { - return err + return fmt.Errorf("error converting YAML to JSON: %w", err) } updatedSpec := argoappv1.ApplicationSpec{} err = json.Unmarshal(input, &updatedSpec) if err != nil { - return err + return fmt.Errorf("error unmarshaling input into application spec: %w", err) } var appOpts cmdutil.AppOptions cmdutil.SetAppSpecOptions(c.Flags(), &app.Spec, &appOpts) - _, err = appIf.UpdateSpec(context.Background(), &applicationpkg.ApplicationUpdateSpecRequest{Name: &app.Name, Spec: updatedSpec, Validate: &appOpts.Validate}) + _, err = appIf.UpdateSpec(ctx, &application.ApplicationUpdateSpecRequest{ + Name: &appName, + Spec: &updatedSpec, + Validate: &appOpts.Validate, + AppNamespace: &appNs, + }) if err != nil { - return fmt.Errorf("Failed to update application spec:\n%v", err) + return fmt.Errorf("failed to update application spec: %w", err) } - return err + return nil }) }, } return command } -func NewApplicationListResourcesCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { - var orphaned bool - var command = &cobra.Command{ - Use: "resources APPNAME", - Short: "List resource of application", - Run: func(c *cobra.Command, args []string) { - if len(args) != 1 { - c.HelpFunc()(c, args) - os.Exit(1) - } - listAll := !c.Flag("orphaned").Changed - appName := args[0] - conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie() - defer argoio.Close(conn) - appResourceTree, err := appIf.ResourceTree(context.Background(), &applicationpkg.ResourcesQuery{ApplicationName: &appName}) - errors.CheckError(err) - w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) - headers := []interface{}{"GROUP", "KIND", 
"NAMESPACE", "NAME", "ORPHANED"} - fmtStr := "%s\t%s\t%s\t%s\t%s\n" - _, _ = fmt.Fprintf(w, fmtStr, headers...) - if !orphaned || listAll { - for _, res := range appResourceTree.Nodes { - if len(res.ParentRefs) == 0 { - _, _ = fmt.Fprintf(w, fmtStr, res.Group, res.Kind, res.Namespace, res.Name, "No") - } - } - } - if orphaned || listAll { - for _, res := range appResourceTree.OrphanedNodes { - _, _ = fmt.Fprintf(w, fmtStr, res.Group, res.Kind, res.Namespace, res.Name, "Yes") - } - } - _ = w.Flush() - }, - } - command.Flags().BoolVar(&orphaned, "orphaned", false, "Lists only orphaned resources") - return command -} - func NewApplicationPatchCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { var patch string var patchType string @@ -2060,25 +2686,27 @@ func NewApplicationPatchCommand(clientOpts *argocdclient.ClientOptions) *cobra.C command := cobra.Command{ Use: "patch APPNAME", Short: "Patch application", - Long: `Examples: - # Update an application's source path using json patch - argocd app patch myapplication --patch='[{"op": "replace", "path": "/spec/source/path", "value": "newPath"}]' --type json + Example: ` # Update an application's source path using json patch + argocd app patch myapplication --patch='[{"op": "replace", "path": "/spec/source/path", "value": "newPath"}]' --type json - # Update an application's repository target revision using merge patch - argocd app patch myapplication --patch '{"spec": { "source": { "targetRevision": "master" } }}' --type merge`, + # Update an application's repository target revision using merge patch + argocd app patch myapplication --patch '{"spec": { "source": { "targetRevision": "master" } }}' --type merge`, Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 1 { c.HelpFunc()(c, args) os.Exit(1) } - appName := args[0] - conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie() + appName, appNs := argo.ParseFromQualifiedName(args[0], "") + conn, appIf 
:= headless.NewClientOrDie(clientOpts, c).NewApplicationClientOrDie() defer argoio.Close(conn) - patchedApp, err := appIf.Patch(context.Background(), &applicationpkg.ApplicationPatchRequest{ - Name: &appName, - Patch: patch, - PatchType: patchType, + patchedApp, err := appIf.Patch(ctx, &application.ApplicationPatchRequest{ + Name: &appName, + Patch: &patch, + PatchType: &patchType, + AppNamespace: &appNs, }) errors.CheckError(err) @@ -2093,95 +2721,3 @@ func NewApplicationPatchCommand(clientOpts *argocdclient.ClientOptions) *cobra.C command.Flags().StringVar(&patchType, "type", "json", "The type of patch being provided; one of [json merge]") return &command } - -func filterResources(command *cobra.Command, resources []*argoappv1.ResourceDiff, group, kind, namespace, resourceName string, all bool) []*unstructured.Unstructured { - liveObjs, err := liveObjects(resources) - errors.CheckError(err) - filteredObjects := make([]*unstructured.Unstructured, 0) - for i := range liveObjs { - obj := liveObjs[i] - if obj == nil { - continue - } - gvk := obj.GroupVersionKind() - if command.Flags().Changed("group") && group != gvk.Group { - continue - } - if namespace != "" && namespace != obj.GetNamespace() { - continue - } - if resourceName != "" && resourceName != obj.GetName() { - continue - } - if kind != "" && kind != gvk.Kind { - continue - } - deepCopy := obj.DeepCopy() - filteredObjects = append(filteredObjects, deepCopy) - } - if len(filteredObjects) == 0 { - log.Fatal("No matching resource found") - } - if len(filteredObjects) > 1 && !all { - log.Fatal("Multiple resources match inputs. 
Use the --all flag to patch multiple resources") - } - return filteredObjects -} - -func NewApplicationPatchResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { - var patch string - var patchType string - var resourceName string - var namespace string - var kind string - var group string - var all bool - command := &cobra.Command{ - Use: "patch-resource APPNAME", - Short: "Patch resource in an application", - } - - command.Flags().StringVar(&patch, "patch", "", "Patch") - err := command.MarkFlagRequired("patch") - errors.CheckError(err) - command.Flags().StringVar(&patchType, "patch-type", string(types.MergePatchType), "Which Patching strategy to use: 'application/json-patch+json', 'application/merge-patch+json', or 'application/strategic-merge-patch+json'. Defaults to 'application/merge-patch+json'") - command.Flags().StringVar(&resourceName, "resource-name", "", "Name of resource") - command.Flags().StringVar(&kind, "kind", "", "Kind") - err = command.MarkFlagRequired("kind") - errors.CheckError(err) - command.Flags().StringVar(&group, "group", "", "Group") - command.Flags().StringVar(&namespace, "namespace", "", "Namespace") - command.Flags().BoolVar(&all, "all", false, "Indicates whether to patch multiple matching of resources") - command.Run = func(c *cobra.Command, args []string) { - if len(args) != 1 { - c.HelpFunc()(c, args) - os.Exit(1) - } - appName := args[0] - - conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie() - defer argoio.Close(conn) - ctx := context.Background() - resources, err := appIf.ManagedResources(ctx, &applicationpkg.ResourcesQuery{ApplicationName: &appName}) - errors.CheckError(err) - objectsToPatch := filterResources(command, resources.Items, group, kind, namespace, resourceName, all) - for i := range objectsToPatch { - obj := objectsToPatch[i] - gvk := obj.GroupVersionKind() - _, err = appIf.PatchResource(ctx, &applicationpkg.ApplicationResourcePatchRequest{ - Name: &appName, - 
Namespace: obj.GetNamespace(), - ResourceName: obj.GetName(), - Version: gvk.Version, - Group: gvk.Group, - Kind: gvk.Kind, - Patch: patch, - PatchType: patchType, - }) - errors.CheckError(err) - log.Infof("Resource '%s' patched", obj.GetName()) - } - } - - return command -} diff --git a/cmd/argocd/commands/app_actions.go b/cmd/argocd/commands/app_actions.go index af0f36f898848..866aed5ae349e 100644 --- a/cmd/argocd/commands/app_actions.go +++ b/cmd/argocd/commands/app_actions.go @@ -4,16 +4,24 @@ import ( "context" "encoding/json" "fmt" + "github.com/argoproj/argo-cd/v2/util/templates" "os" "strconv" "text/tabwriter" - "github.com/ghodss/yaml" + "github.com/argoproj/argo-cd/v2/cmd/util" + log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "k8s.io/utils/pointer" + "sigs.k8s.io/yaml" + "github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless" argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient" applicationpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/application" + "github.com/argoproj/argo-cd/v2/pkg/apis/application" + v1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/argoproj/argo-cd/v2/util/argo" "github.com/argoproj/argo-cd/v2/util/errors" "github.com/argoproj/argo-cd/v2/util/io" ) @@ -26,11 +34,22 @@ type DisplayedAction struct { Disabled bool } +var ( + appActionExample = templates.Examples(` + # List all the available actions for an application + argocd app actions list APPNAME + + # Run an available action for an application + argocd app actions run APPNAME ACTION --kind KIND [--resource-name RESOURCE] [--namespace NAMESPACE] [--group GROUP] + `) +) + // NewApplicationResourceActionsCommand returns a new instance of an `argocd app actions` command func NewApplicationResourceActionsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { var command = &cobra.Command{ - Use: "actions", - Short: "Manage Resource actions", + Use: "actions", + Short: "Manage Resource actions", + Example: 
appActionExample, Run: func(c *cobra.Command, args []string) { c.HelpFunc()(c, args) os.Exit(1) @@ -51,29 +70,37 @@ func NewApplicationResourceActionsListCommand(clientOpts *argocdclient.ClientOpt var command = &cobra.Command{ Use: "list APPNAME", Short: "Lists available actions on a resource", + Example: templates.Examples(` + # List all the available actions for an application + argocd app actions list APPNAME + `), } command.Run = func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 1 { c.HelpFunc()(c, args) os.Exit(1) } - appName := args[0] - conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie() + appName, appNs := argo.ParseFromQualifiedName(args[0], "") + conn, appIf := headless.NewClientOrDie(clientOpts, c).NewApplicationClientOrDie() defer io.Close(conn) - ctx := context.Background() - resources, err := appIf.ManagedResources(ctx, &applicationpkg.ResourcesQuery{ApplicationName: &appName}) + resources, err := getActionableResourcesForApplication(appIf, ctx, &appNs, &appName) + errors.CheckError(err) + filteredObjects, err := util.FilterResources(command.Flags().Changed("group"), resources, group, kind, namespace, resourceName, true) errors.CheckError(err) - filteredObjects := filterResources(command, resources.Items, group, kind, namespace, resourceName, true) var availableActions []DisplayedAction for i := range filteredObjects { obj := filteredObjects[i] gvk := obj.GroupVersionKind() availActionsForResource, err := appIf.ListResourceActions(ctx, &applicationpkg.ApplicationResourceRequest{ Name: &appName, - Namespace: obj.GetNamespace(), - ResourceName: obj.GetName(), - Group: gvk.Group, - Kind: gvk.Kind, + AppNamespace: &appNs, + Namespace: pointer.String(obj.GetNamespace()), + ResourceName: pointer.String(obj.GetName()), + Group: pointer.String(gvk.Group), + Kind: pointer.String(gvk.Kind), + Version: pointer.String(gvk.Version), }) errors.CheckError(err) for _, action := range 
availActionsForResource.Actions { @@ -125,6 +152,10 @@ func NewApplicationResourceActionsRunCommand(clientOpts *argocdclient.ClientOpti var command = &cobra.Command{ Use: "run APPNAME ACTION", Short: "Runs an available action on resource(s)", + Example: templates.Examples(` + # Run an available action for an application + argocd app actions run APPNAME ACTION --kind KIND [--resource-name RESOURCE] [--namespace NAMESPACE] [--group GROUP] + `), } command.Flags().StringVar(&resourceName, "resource-name", "", "Name of resource") @@ -135,19 +166,21 @@ func NewApplicationResourceActionsRunCommand(clientOpts *argocdclient.ClientOpti command.Flags().BoolVar(&all, "all", false, "Indicates whether to run the action on multiple matching resources") command.Run = func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 2 { c.HelpFunc()(c, args) os.Exit(1) } - appName := args[0] + appName, appNs := argo.ParseFromQualifiedName(args[0], "") actionName := args[1] - conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie() + conn, appIf := headless.NewClientOrDie(clientOpts, c).NewApplicationClientOrDie() defer io.Close(conn) - ctx := context.Background() - resources, err := appIf.ManagedResources(ctx, &applicationpkg.ResourcesQuery{ApplicationName: &appName}) + resources, err := getActionableResourcesForApplication(appIf, ctx, &appNs, &appName) + errors.CheckError(err) + filteredObjects, err := util.FilterResources(command.Flags().Changed("group"), resources, group, kind, namespace, resourceName, all) errors.CheckError(err) - filteredObjects := filterResources(command, resources.Items, group, kind, namespace, resourceName, all) var resGroup = filteredObjects[0].GroupVersionKind().Group for i := range filteredObjects[1:] { if filteredObjects[i].GroupVersionKind().Group != resGroup { @@ -159,16 +192,49 @@ func NewApplicationResourceActionsRunCommand(clientOpts *argocdclient.ClientOpti obj := filteredObjects[i] gvk := 
obj.GroupVersionKind() objResourceName := obj.GetName() - _, err := appIf.RunResourceAction(context.Background(), &applicationpkg.ResourceActionRunRequest{ + _, err := appIf.RunResourceAction(ctx, &applicationpkg.ResourceActionRunRequest{ Name: &appName, - Namespace: obj.GetNamespace(), - ResourceName: objResourceName, - Group: gvk.Group, - Kind: gvk.Kind, - Action: actionName, + AppNamespace: &appNs, + Namespace: pointer.String(obj.GetNamespace()), + ResourceName: pointer.String(objResourceName), + Group: pointer.String(gvk.Group), + Kind: pointer.String(gvk.Kind), + Version: pointer.String(gvk.GroupVersion().Version), + Action: pointer.String(actionName), }) errors.CheckError(err) } } return command } + +func getActionableResourcesForApplication(appIf applicationpkg.ApplicationServiceClient, ctx context.Context, appNs *string, appName *string) ([]*v1alpha1.ResourceDiff, error) { + resources, err := appIf.ManagedResources(ctx, &applicationpkg.ResourcesQuery{ + ApplicationName: appName, + AppNamespace: appNs, + }) + if err != nil { + return nil, err + } + app, err := appIf.Get(ctx, &applicationpkg.ApplicationQuery{ + Name: appName, + AppNamespace: appNs, + }) + if err != nil { + return nil, err + } + app.Kind = application.ApplicationKind + app.APIVersion = "argoproj.io/v1alpha1" + appManifest, err := json.Marshal(app) + if err != nil { + return nil, err + } + appGVK := app.GroupVersionKind() + return append(resources.Items, &v1alpha1.ResourceDiff{ + Group: appGVK.Group, + Kind: appGVK.Kind, + Namespace: app.Namespace, + Name: *appName, + LiveState: string(appManifest), + }), nil +} diff --git a/cmd/argocd/commands/app_resource_test.go b/cmd/argocd/commands/app_resource_test.go new file mode 100644 index 0000000000000..5846065141e15 --- /dev/null +++ b/cmd/argocd/commands/app_resource_test.go @@ -0,0 +1,122 @@ +package commands + +import ( + "bytes" + "testing" + "text/tabwriter" + + "github.com/stretchr/testify/assert" + + 
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +func TestPrintTreeViewAppResources(t *testing.T) { + var nodes [3]v1alpha1.ResourceNode + nodes[0].ResourceRef = v1alpha1.ResourceRef{Group: "", Version: "v1", Kind: "Pod", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5-6trpt", UID: "92c3a5fe-d13e-4ae2-b8ec-c10dd3543b28"} + nodes[0].ParentRefs = []v1alpha1.ResourceRef{{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5", UID: "75c30dce-1b66-414f-a86c-573a74be0f40"}} + nodes[1].ResourceRef = v1alpha1.ResourceRef{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5", UID: "75c30dce-1b66-414f-a86c-573a74be0f40"} + nodes[1].ParentRefs = []v1alpha1.ResourceRef{{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"}} + nodes[2].ResourceRef = v1alpha1.ResourceRef{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"} + var nodeMapping = make(map[string]v1alpha1.ResourceNode) + var mapParentToChild = make(map[string][]string) + var parentNode = make(map[string]struct{}) + for _, node := range nodes { + nodeMapping[node.UID] = node + if len(node.ParentRefs) > 0 { + _, ok := mapParentToChild[node.ParentRefs[0].UID] + if !ok { + var temp []string + mapParentToChild[node.ParentRefs[0].UID] = temp + } + mapParentToChild[node.ParentRefs[0].UID] = append(mapParentToChild[node.ParentRefs[0].UID], node.UID) + } else { + parentNode[node.UID] = struct{}{} + } + } + buf := &bytes.Buffer{} + w := tabwriter.NewWriter(buf, 0, 0, 2, ' ', 0) + + printTreeViewAppResourcesNotOrphaned(nodeMapping, mapParentToChild, parentNode, false, 
false, w) + if err := w.Flush(); err != nil { + t.Fatal(err) + } + output := buf.String() + + assert.Contains(t, output, "Rollout") + assert.Contains(t, output, "argoproj.io") +} + +func TestPrintTreeViewDetailedAppResources(t *testing.T) { + var nodes [3]v1alpha1.ResourceNode + nodes[0].ResourceRef = v1alpha1.ResourceRef{Group: "", Version: "v1", Kind: "Pod", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5-6trpt", UID: "92c3a5fe-d13e-4ae2-b8ec-c10dd3543b28"} + nodes[0].ParentRefs = []v1alpha1.ResourceRef{{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5", UID: "75c30dce-1b66-414f-a86c-573a74be0f40"}} + nodes[1].ResourceRef = v1alpha1.ResourceRef{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5", UID: "75c30dce-1b66-414f-a86c-573a74be0f40"} + nodes[1].ParentRefs = []v1alpha1.ResourceRef{{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"}} + nodes[2].ResourceRef = v1alpha1.ResourceRef{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"} + nodes[2].Health = &v1alpha1.HealthStatus{ + Status: "Degraded", + Message: "Readiness Gate failed", + } + + var nodeMapping = make(map[string]v1alpha1.ResourceNode) + var mapParentToChild = make(map[string][]string) + var parentNode = make(map[string]struct{}) + for _, node := range nodes { + nodeMapping[node.UID] = node + if len(node.ParentRefs) > 0 { + _, ok := mapParentToChild[node.ParentRefs[0].UID] + if !ok { + var temp []string + mapParentToChild[node.ParentRefs[0].UID] = temp + } + mapParentToChild[node.ParentRefs[0].UID] = append(mapParentToChild[node.ParentRefs[0].UID], 
node.UID) + } else { + parentNode[node.UID] = struct{}{} + } + } + buf := &bytes.Buffer{} + w := tabwriter.NewWriter(buf, 0, 0, 2, ' ', 0) + + printDetailedTreeViewAppResourcesNotOrphaned(nodeMapping, mapParentToChild, parentNode, false, false, w) + if err := w.Flush(); err != nil { + t.Fatal(err) + } + output := buf.String() + + assert.Contains(t, output, "Rollout") + assert.Contains(t, output, "Degraded") + assert.Contains(t, output, "Readiness Gate failed") +} + +func TestPrintResourcesTree(t *testing.T) { + tree := v1alpha1.ApplicationTree{ + Nodes: []v1alpha1.ResourceNode{ + { + ResourceRef: v1alpha1.ResourceRef{ + Group: "group", + Kind: "kind", + Namespace: "ns", + Name: "rs1", + }, + }, + }, + OrphanedNodes: []v1alpha1.ResourceNode{ + { + ResourceRef: v1alpha1.ResourceRef{ + Group: "group2", + Kind: "kind2", + Namespace: "ns2", + Name: "rs2", + }, + }, + }, + } + output, _ := captureOutput(func() error { + printResources(true, false, &tree, "") + return nil + }) + + expectation := "GROUP KIND NAMESPACE NAME ORPHANED\ngroup kind ns rs1 No\ngroup2 kind2 ns2 rs2 Yes\n" + + assert.Equal(t, expectation, output) +} diff --git a/cmd/argocd/commands/app_resources.go b/cmd/argocd/commands/app_resources.go new file mode 100644 index 0000000000000..e48465c7e4693 --- /dev/null +++ b/cmd/argocd/commands/app_resources.go @@ -0,0 +1,277 @@ +package commands + +import ( + "fmt" + "os" + + "github.com/argoproj/argo-cd/v2/cmd/util" + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/pointer" + + "github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless" + argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient" + applicationpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/application" + "github.com/argoproj/argo-cd/v2/util/argo" + "github.com/argoproj/argo-cd/v2/util/errors" + argoio "github.com/argoproj/argo-cd/v2/util/io" + + 
"text/tabwriter" +) + +func NewApplicationPatchResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { + var patch string + var patchType string + var resourceName string + var namespace string + var kind string + var group string + var all bool + command := &cobra.Command{ + Use: "patch-resource APPNAME", + Short: "Patch resource in an application", + } + + command.Flags().StringVar(&patch, "patch", "", "Patch") + err := command.MarkFlagRequired("patch") + errors.CheckError(err) + command.Flags().StringVar(&patchType, "patch-type", string(types.MergePatchType), "Which Patching strategy to use: 'application/json-patch+json', 'application/merge-patch+json', or 'application/strategic-merge-patch+json'. Defaults to 'application/merge-patch+json'") + command.Flags().StringVar(&resourceName, "resource-name", "", "Name of resource") + command.Flags().StringVar(&kind, "kind", "", "Kind") + err = command.MarkFlagRequired("kind") + errors.CheckError(err) + command.Flags().StringVar(&group, "group", "", "Group") + command.Flags().StringVar(&namespace, "namespace", "", "Namespace") + command.Flags().BoolVar(&all, "all", false, "Indicates whether to patch multiple matching of resources") + command.Run = func(c *cobra.Command, args []string) { + ctx := c.Context() + + if len(args) != 1 { + c.HelpFunc()(c, args) + os.Exit(1) + } + appName, appNs := argo.ParseFromQualifiedName(args[0], "") + + conn, appIf := headless.NewClientOrDie(clientOpts, c).NewApplicationClientOrDie() + defer argoio.Close(conn) + resources, err := appIf.ManagedResources(ctx, &applicationpkg.ResourcesQuery{ + ApplicationName: &appName, + AppNamespace: &appNs, + }) + errors.CheckError(err) + objectsToPatch, err := util.FilterResources(command.Flags().Changed("group"), resources.Items, group, kind, namespace, resourceName, all) + errors.CheckError(err) + for i := range objectsToPatch { + obj := objectsToPatch[i] + gvk := obj.GroupVersionKind() + _, err = appIf.PatchResource(ctx, 
&applicationpkg.ApplicationResourcePatchRequest{ + Name: &appName, + AppNamespace: &appNs, + Namespace: pointer.String(obj.GetNamespace()), + ResourceName: pointer.String(obj.GetName()), + Version: pointer.String(gvk.Version), + Group: pointer.String(gvk.Group), + Kind: pointer.String(gvk.Kind), + Patch: pointer.String(patch), + PatchType: pointer.String(patchType), + }) + errors.CheckError(err) + log.Infof("Resource '%s' patched", obj.GetName()) + } + } + + return command +} + +func NewApplicationDeleteResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { + var resourceName string + var namespace string + var kind string + var group string + var force bool + var orphan bool + var all bool + command := &cobra.Command{ + Use: "delete-resource APPNAME", + Short: "Delete resource in an application", + } + + command.Flags().StringVar(&resourceName, "resource-name", "", "Name of resource") + command.Flags().StringVar(&kind, "kind", "", "Kind") + err := command.MarkFlagRequired("kind") + errors.CheckError(err) + command.Flags().StringVar(&group, "group", "", "Group") + command.Flags().StringVar(&namespace, "namespace", "", "Namespace") + command.Flags().BoolVar(&force, "force", false, "Indicates whether to orphan the dependents of the deleted resource") + command.Flags().BoolVar(&orphan, "orphan", false, "Indicates whether to force delete the resource") + command.Flags().BoolVar(&all, "all", false, "Indicates whether to patch multiple matching of resources") + command.Run = func(c *cobra.Command, args []string) { + ctx := c.Context() + + if len(args) != 1 { + c.HelpFunc()(c, args) + os.Exit(1) + } + appName, appNs := argo.ParseFromQualifiedName(args[0], "") + + conn, appIf := headless.NewClientOrDie(clientOpts, c).NewApplicationClientOrDie() + defer argoio.Close(conn) + resources, err := appIf.ManagedResources(ctx, &applicationpkg.ResourcesQuery{ + ApplicationName: &appName, + AppNamespace: &appNs, + }) + errors.CheckError(err) + objectsToDelete, err 
:= util.FilterResources(command.Flags().Changed("group"), resources.Items, group, kind, namespace, resourceName, all) + errors.CheckError(err) + for i := range objectsToDelete { + obj := objectsToDelete[i] + gvk := obj.GroupVersionKind() + _, err = appIf.DeleteResource(ctx, &applicationpkg.ApplicationResourceDeleteRequest{ + Name: &appName, + AppNamespace: &appNs, + Namespace: pointer.String(obj.GetNamespace()), + ResourceName: pointer.String(obj.GetName()), + Version: pointer.String(gvk.Version), + Group: pointer.String(gvk.Group), + Kind: pointer.String(gvk.Kind), + Force: &force, + Orphan: &orphan, + }) + errors.CheckError(err) + log.Infof("Resource '%s' deleted", obj.GetName()) + } + } + + return command +} + +func parentChildInfo(nodes []v1alpha1.ResourceNode) (map[string]v1alpha1.ResourceNode, map[string][]string, map[string]struct{}) { + mapUidToNode := make(map[string]v1alpha1.ResourceNode) + mapParentToChild := make(map[string][]string) + parentNode := make(map[string]struct{}) + + for _, node := range nodes { + mapUidToNode[node.UID] = node + + if len(node.ParentRefs) > 0 { + _, ok := mapParentToChild[node.ParentRefs[0].UID] + if !ok { + var temp []string + mapParentToChild[node.ParentRefs[0].UID] = temp + } + mapParentToChild[node.ParentRefs[0].UID] = append(mapParentToChild[node.ParentRefs[0].UID], node.UID) + } else { + parentNode[node.UID] = struct{}{} + } + } + return mapUidToNode, mapParentToChild, parentNode +} + +func printDetailedTreeViewAppResourcesNotOrphaned(nodeMapping map[string]v1alpha1.ResourceNode, parentChildMapping map[string][]string, parentNodes map[string]struct{}, orphaned bool, listAll bool, w *tabwriter.Writer) { + for uid := range parentNodes { + detailedTreeViewAppResourcesNotOrphaned("", nodeMapping, parentChildMapping, nodeMapping[uid], w) + } + +} + +func printDetailedTreeViewAppResourcesOrphaned(nodeMapping map[string]v1alpha1.ResourceNode, parentChildMapping map[string][]string, parentNodes map[string]struct{}, orphaned 
bool, listAll bool, w *tabwriter.Writer) { + for uid := range parentNodes { + detailedTreeViewAppResourcesOrphaned("", nodeMapping, parentChildMapping, nodeMapping[uid], w) + } +} + +func printTreeViewAppResourcesNotOrphaned(nodeMapping map[string]v1alpha1.ResourceNode, parentChildMapping map[string][]string, parentNodes map[string]struct{}, orphaned bool, listAll bool, w *tabwriter.Writer) { + for uid := range parentNodes { + treeViewAppResourcesNotOrphaned("", nodeMapping, parentChildMapping, nodeMapping[uid], w) + } + +} + +func printTreeViewAppResourcesOrphaned(nodeMapping map[string]v1alpha1.ResourceNode, parentChildMapping map[string][]string, parentNodes map[string]struct{}, orphaned bool, listAll bool, w *tabwriter.Writer) { + for uid := range parentNodes { + treeViewAppResourcesOrphaned("", nodeMapping, parentChildMapping, nodeMapping[uid], w) + } +} + +func printResources(listAll bool, orphaned bool, appResourceTree *v1alpha1.ApplicationTree, output string) { + w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) + if output == "tree=detailed" { + fmt.Fprintf(w, "GROUP\tKIND\tNAMESPACE\tNAME\tORPHANED\tAGE\tHEALTH\tREASON\n") + + if !orphaned || listAll { + mapUidToNode, mapParentToChild, parentNode := parentChildInfo(appResourceTree.Nodes) + printDetailedTreeViewAppResourcesNotOrphaned(mapUidToNode, mapParentToChild, parentNode, orphaned, listAll, w) + } + + if orphaned || listAll { + mapUidToNode, mapParentToChild, parentNode := parentChildInfo(appResourceTree.OrphanedNodes) + printDetailedTreeViewAppResourcesOrphaned(mapUidToNode, mapParentToChild, parentNode, orphaned, listAll, w) + } + + } else if output == "tree" { + fmt.Fprintf(w, "GROUP\tKIND\tNAMESPACE\tNAME\tORPHANED\n") + + if !orphaned || listAll { + mapUidToNode, mapParentToChild, parentNode := parentChildInfo(appResourceTree.Nodes) + printTreeViewAppResourcesNotOrphaned(mapUidToNode, mapParentToChild, parentNode, orphaned, listAll, w) + } + + if orphaned || listAll { + mapUidToNode, 
mapParentToChild, parentNode := parentChildInfo(appResourceTree.OrphanedNodes) + printTreeViewAppResourcesOrphaned(mapUidToNode, mapParentToChild, parentNode, orphaned, listAll, w) + } + + } else { + + headers := []interface{}{"GROUP", "KIND", "NAMESPACE", "NAME", "ORPHANED"} + fmtStr := "%s\t%s\t%s\t%s\t%s\n" + _, _ = fmt.Fprintf(w, fmtStr, headers...) + if !orphaned || listAll { + for _, res := range appResourceTree.Nodes { + if len(res.ParentRefs) == 0 { + _, _ = fmt.Fprintf(w, fmtStr, res.Group, res.Kind, res.Namespace, res.Name, "No") + } + } + } + if orphaned || listAll { + for _, res := range appResourceTree.OrphanedNodes { + _, _ = fmt.Fprintf(w, fmtStr, res.Group, res.Kind, res.Namespace, res.Name, "Yes") + } + } + + } + _ = w.Flush() + +} + +func NewApplicationListResourcesCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { + var orphaned bool + var output string + var command = &cobra.Command{ + Use: "resources APPNAME", + Short: "List resource of application", + Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 1 { + c.HelpFunc()(c, args) + os.Exit(1) + } + listAll := !c.Flag("orphaned").Changed + appName, appNs := argo.ParseFromQualifiedName(args[0], "") + conn, appIf := headless.NewClientOrDie(clientOpts, c).NewApplicationClientOrDie() + defer argoio.Close(conn) + appResourceTree, err := appIf.ResourceTree(ctx, &applicationpkg.ResourcesQuery{ + ApplicationName: &appName, + AppNamespace: &appNs, + }) + errors.CheckError(err) + printResources(listAll, orphaned, appResourceTree, output) + }, + } + command.Flags().BoolVar(&orphaned, "orphaned", false, "Lists only orphaned resources") + command.Flags().StringVar(&output, "output", "", "Provides the tree view of the resources") + return command +} diff --git a/cmd/argocd/commands/app_test.go b/cmd/argocd/commands/app_test.go new file mode 100644 index 0000000000000..68983560999c8 --- /dev/null +++ b/cmd/argocd/commands/app_test.go @@ -0,0 +1,1601 @@ +package 
commands + +import ( + "fmt" + "os" + "testing" + "time" + + argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient" + "github.com/argoproj/argo-cd/v2/pkg/apis/application" + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/argoproj/gitops-engine/pkg/health" + "github.com/argoproj/gitops-engine/pkg/utils/kube" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func Test_getInfos(t *testing.T) { + testCases := []struct { + name string + infos []string + expectedInfos []*v1alpha1.Info + }{ + { + name: "empty", + infos: []string{}, + expectedInfos: []*v1alpha1.Info{}, + }, + { + name: "simple key value", + infos: []string{"key1=value1", "key2=value2"}, + expectedInfos: []*v1alpha1.Info{ + {Name: "key1", Value: "value1"}, + {Name: "key2", Value: "value2"}, + }, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + infos := getInfos(testCase.infos) + assert.Len(t, infos, len(testCase.expectedInfos)) + sort := func(a, b *v1alpha1.Info) bool { return a.Name < b.Name } + assert.Empty(t, cmp.Diff(testCase.expectedInfos, infos, cmpopts.SortSlices(sort))) + }) + } +} + +func Test_getRefreshType(t *testing.T) { + refreshTypeNormal := string(v1alpha1.RefreshTypeNormal) + refreshTypeHard := string(v1alpha1.RefreshTypeHard) + testCases := []struct { + refresh bool + hardRefresh bool + expected *string + }{ + {false, false, nil}, + {false, true, &refreshTypeHard}, + {true, false, &refreshTypeNormal}, + {true, true, &refreshTypeHard}, + } + + for _, testCase := range testCases { + t.Run(fmt.Sprintf("hardRefresh=%t refresh=%t", testCase.hardRefresh, testCase.refresh), func(t *testing.T) { + refreshType := getRefreshType(testCase.refresh, testCase.hardRefresh) + if 
testCase.expected == nil { + assert.Nil(t, refreshType) + } else { + assert.NotNil(t, refreshType) + assert.Equal(t, *testCase.expected, *refreshType) + } + }) + } +} + +func TestFindRevisionHistoryWithoutPassedId(t *testing.T) { + + histories := v1alpha1.RevisionHistories{} + + histories = append(histories, v1alpha1.RevisionHistory{ID: 1}) + histories = append(histories, v1alpha1.RevisionHistory{ID: 2}) + histories = append(histories, v1alpha1.RevisionHistory{ID: 3}) + + status := v1alpha1.ApplicationStatus{ + Resources: nil, + Sync: v1alpha1.SyncStatus{}, + Health: v1alpha1.HealthStatus{}, + History: histories, + Conditions: nil, + ReconciledAt: nil, + OperationState: nil, + ObservedAt: nil, + SourceType: "", + Summary: v1alpha1.ApplicationSummary{}, + } + + application := v1alpha1.Application{ + Status: status, + } + + history, err := findRevisionHistory(&application, -1) + + if err != nil { + t.Fatal("Find revision history should fail without errors") + } + + if history == nil { + t.Fatal("History should be found") + } + +} + +func TestPrintTreeViewAppGet(t *testing.T) { + var nodes [3]v1alpha1.ResourceNode + nodes[0].ResourceRef = v1alpha1.ResourceRef{Group: "", Version: "v1", Kind: "Pod", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5-6trpt", UID: "92c3a5fe-d13e-4ae2-b8ec-c10dd3543b28"} + nodes[0].ParentRefs = []v1alpha1.ResourceRef{{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5", UID: "75c30dce-1b66-414f-a86c-573a74be0f40"}} + nodes[1].ResourceRef = v1alpha1.ResourceRef{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5", UID: "75c30dce-1b66-414f-a86c-573a74be0f40"} + nodes[1].ParentRefs = []v1alpha1.ResourceRef{{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: 
"87f3aab0-f634-4b2c-959a-7ddd30675ed0"}} + nodes[2].ResourceRef = v1alpha1.ResourceRef{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"} + + var nodeMapping = make(map[string]v1alpha1.ResourceNode) + var mapParentToChild = make(map[string][]string) + var parentNode = make(map[string]struct{}) + + for _, node := range nodes { + nodeMapping[node.UID] = node + + if len(node.ParentRefs) > 0 { + _, ok := mapParentToChild[node.ParentRefs[0].UID] + if !ok { + var temp []string + mapParentToChild[node.ParentRefs[0].UID] = temp + } + mapParentToChild[node.ParentRefs[0].UID] = append(mapParentToChild[node.ParentRefs[0].UID], node.UID) + } else { + parentNode[node.UID] = struct{}{} + } + } + + output, _ := captureOutput(func() error { + printTreeView(nodeMapping, mapParentToChild, parentNode, nil) + return nil + }) + + assert.Contains(t, output, "Pod") + assert.Contains(t, output, "ReplicaSet") + assert.Contains(t, output, "Rollout") + assert.Contains(t, output, "numalogic-rollout-demo-5dcd5457d5-6trpt") +} + +func TestPrintTreeViewDetailedAppGet(t *testing.T) { + var nodes [3]v1alpha1.ResourceNode + nodes[0].ResourceRef = v1alpha1.ResourceRef{Group: "", Version: "v1", Kind: "Pod", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5-6trpt", UID: "92c3a5fe-d13e-4ae2-b8ec-c10dd3543b28"} + nodes[0].Health = &v1alpha1.HealthStatus{Status: "Degraded", Message: "Readiness Gate failed"} + nodes[0].ParentRefs = []v1alpha1.ResourceRef{{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5", UID: "75c30dce-1b66-414f-a86c-573a74be0f40"}} + nodes[1].ResourceRef = v1alpha1.ResourceRef{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5", UID: 
"75c30dce-1b66-414f-a86c-573a74be0f40"} + nodes[1].ParentRefs = []v1alpha1.ResourceRef{{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"}} + nodes[2].ResourceRef = v1alpha1.ResourceRef{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"} + + var nodeMapping = make(map[string]v1alpha1.ResourceNode) + var mapParentToChild = make(map[string][]string) + var parentNode = make(map[string]struct{}) + + for _, node := range nodes { + nodeMapping[node.UID] = node + + if len(node.ParentRefs) > 0 { + _, ok := mapParentToChild[node.ParentRefs[0].UID] + if !ok { + var temp []string + mapParentToChild[node.ParentRefs[0].UID] = temp + } + mapParentToChild[node.ParentRefs[0].UID] = append(mapParentToChild[node.ParentRefs[0].UID], node.UID) + } else { + parentNode[node.UID] = struct{}{} + } + } + + output, _ := captureOutput(func() error { + printTreeViewDetailed(nodeMapping, mapParentToChild, parentNode, nil) + return nil + }) + + assert.Contains(t, output, "Pod") + assert.Contains(t, output, "ReplicaSet") + assert.Contains(t, output, "Rollout") + assert.Contains(t, output, "numalogic-rollout-demo-5dcd5457d5-6trpt") + assert.Contains(t, output, "Degraded") + assert.Contains(t, output, "Readiness Gate failed") + +} + +func TestDefaultWaitOptions(t *testing.T) { + watch := watchOpts{ + sync: false, + health: false, + operation: false, + suspended: false, + } + opts := getWatchOpts(watch) + assert.Equal(t, true, opts.sync) + assert.Equal(t, true, opts.health) + assert.Equal(t, true, opts.operation) + assert.Equal(t, false, opts.suspended) +} + +func TestOverrideWaitOptions(t *testing.T) { + watch := watchOpts{ + sync: true, + health: false, + operation: false, + suspended: false, + } + opts := getWatchOpts(watch) + assert.Equal(t, true, 
opts.sync) + assert.Equal(t, false, opts.health) + assert.Equal(t, false, opts.operation) + assert.Equal(t, false, opts.suspended) +} + +func TestFindRevisionHistoryWithoutPassedIdAndEmptyHistoryList(t *testing.T) { + + histories := v1alpha1.RevisionHistories{} + + status := v1alpha1.ApplicationStatus{ + Resources: nil, + Sync: v1alpha1.SyncStatus{}, + Health: v1alpha1.HealthStatus{}, + History: histories, + Conditions: nil, + ReconciledAt: nil, + OperationState: nil, + ObservedAt: nil, + SourceType: "", + Summary: v1alpha1.ApplicationSummary{}, + } + + application := v1alpha1.Application{ + Status: status, + } + + history, err := findRevisionHistory(&application, -1) + + if err == nil { + t.Fatal("Find revision history should fail with errors") + } + + if history != nil { + t.Fatal("History should be empty") + } + + if err.Error() != "Application '' should have at least two successful deployments" { + t.Fatal("Find revision history should fail with correct error message") + } + +} + +func TestFindRevisionHistoryWithPassedId(t *testing.T) { + + histories := v1alpha1.RevisionHistories{} + + histories = append(histories, v1alpha1.RevisionHistory{ID: 1}) + histories = append(histories, v1alpha1.RevisionHistory{ID: 2}) + histories = append(histories, v1alpha1.RevisionHistory{ID: 3, Revision: "123"}) + + status := v1alpha1.ApplicationStatus{ + Resources: nil, + Sync: v1alpha1.SyncStatus{}, + Health: v1alpha1.HealthStatus{}, + History: histories, + Conditions: nil, + ReconciledAt: nil, + OperationState: nil, + ObservedAt: nil, + SourceType: "", + Summary: v1alpha1.ApplicationSummary{}, + } + + application := v1alpha1.Application{ + Status: status, + } + + history, err := findRevisionHistory(&application, 3) + + if err != nil { + t.Fatal("Find revision history should fail without errors") + } + + if history == nil { + t.Fatal("History should be found") + } + + if history.Revision != "123" { + t.Fatal("Failed to find correct history with correct revision") + } + +} + +func 
TestFindRevisionHistoryWithPassedIdThatNotExist(t *testing.T) { + + histories := v1alpha1.RevisionHistories{} + + histories = append(histories, v1alpha1.RevisionHistory{ID: 1}) + histories = append(histories, v1alpha1.RevisionHistory{ID: 2}) + histories = append(histories, v1alpha1.RevisionHistory{ID: 3, Revision: "123"}) + + status := v1alpha1.ApplicationStatus{ + Resources: nil, + Sync: v1alpha1.SyncStatus{}, + Health: v1alpha1.HealthStatus{}, + History: histories, + Conditions: nil, + ReconciledAt: nil, + OperationState: nil, + ObservedAt: nil, + SourceType: "", + Summary: v1alpha1.ApplicationSummary{}, + } + + application := v1alpha1.Application{ + Status: status, + } + + history, err := findRevisionHistory(&application, 4) + + if err == nil { + t.Fatal("Find revision history should fail with errors") + } + + if history != nil { + t.Fatal("History should be not found") + } + + if err.Error() != "Application '' does not have deployment id '4' in history\n" { + t.Fatal("Find revision history should fail with correct error message") + } + +} + +func Test_groupObjsByKey(t *testing.T) { + localObjs := []*unstructured.Unstructured{ + { + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": map[string]interface{}{ + "name": "pod-name", + "namespace": "default", + }, + }, + }, + { + Object: map[string]interface{}{ + "apiVersion": "apiextensions.k8s.io/v1", + "kind": "CustomResourceDefinition", + "metadata": map[string]interface{}{ + "name": "certificates.cert-manager.io", + }, + }, + }, + } + liveObjs := []*unstructured.Unstructured{ + { + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": map[string]interface{}{ + "name": "pod-name", + "namespace": "default", + }, + }, + }, + { + Object: map[string]interface{}{ + "apiVersion": "apiextensions.k8s.io/v1", + "kind": "CustomResourceDefinition", + "metadata": map[string]interface{}{ + "name": "certificates.cert-manager.io", + }, + }, + }, + } + + expected := 
map[kube.ResourceKey]*unstructured.Unstructured{ + {Group: "", Kind: "Pod", Namespace: "default", Name: "pod-name"}: localObjs[0], + {Group: "apiextensions.k8s.io", Kind: "CustomResourceDefinition", Namespace: "", Name: "certificates.cert-manager.io"}: localObjs[1], + } + + objByKey := groupObjsByKey(localObjs, liveObjs, "default") + assert.Equal(t, expected, objByKey) +} + +func TestFormatSyncPolicy(t *testing.T) { + + t.Run("Policy not defined", func(t *testing.T) { + app := v1alpha1.Application{} + + policy := formatSyncPolicy(app) + + if policy != "" { + t.Fatalf("Incorrect policy %q, should be ", policy) + } + }) + + t.Run("Auto policy", func(t *testing.T) { + app := v1alpha1.Application{ + Spec: v1alpha1.ApplicationSpec{ + SyncPolicy: &v1alpha1.SyncPolicy{ + Automated: &v1alpha1.SyncPolicyAutomated{}, + }, + }, + } + + policy := formatSyncPolicy(app) + + if policy != "Auto" { + t.Fatalf("Incorrect policy %q, should be Auto", policy) + } + }) + + t.Run("Auto policy with prune", func(t *testing.T) { + app := v1alpha1.Application{ + Spec: v1alpha1.ApplicationSpec{ + SyncPolicy: &v1alpha1.SyncPolicy{ + Automated: &v1alpha1.SyncPolicyAutomated{ + Prune: true, + }, + }, + }, + } + + policy := formatSyncPolicy(app) + + if policy != "Auto-Prune" { + t.Fatalf("Incorrect policy %q, should be Auto-Prune", policy) + } + }) + +} + +func TestFormatConditionSummary(t *testing.T) { + t.Run("No conditions are defined", func(t *testing.T) { + app := v1alpha1.Application{ + Spec: v1alpha1.ApplicationSpec{ + SyncPolicy: &v1alpha1.SyncPolicy{ + Automated: &v1alpha1.SyncPolicyAutomated{ + Prune: true, + }, + }, + }, + } + + summary := formatConditionsSummary(app) + if summary != "" { + t.Fatalf("Incorrect summary %q, should be ", summary) + } + }) + + t.Run("Few conditions are defined", func(t *testing.T) { + app := v1alpha1.Application{ + Status: v1alpha1.ApplicationStatus{ + Conditions: []v1alpha1.ApplicationCondition{ + { + Type: "type1", + }, + { + Type: "type1", + }, + { + 
Type: "type2", + }, + }, + }, + } + + summary := formatConditionsSummary(app) + if summary != "type1(2),type2" && summary != "type2,type1(2)" { + t.Fatalf("Incorrect summary %q, should be type1(2),type2", summary) + } + }) +} + +func TestPrintOperationResult(t *testing.T) { + t.Run("Operation state is empty", func(t *testing.T) { + output, _ := captureOutput(func() error { + printOperationResult(nil) + return nil + }) + + if output != "" { + t.Fatalf("Incorrect print operation output %q, should be ''", output) + } + }) + + t.Run("Operation state sync result is not empty", func(t *testing.T) { + time := metav1.Date(2020, time.November, 10, 23, 0, 0, 0, time.UTC) + output, _ := captureOutput(func() error { + printOperationResult(&v1alpha1.OperationState{ + SyncResult: &v1alpha1.SyncOperationResult{Revision: "revision"}, + FinishedAt: &time, + }) + return nil + }) + + expectation := "Operation: Sync\nSync Revision: revision\nPhase: \nStart: 0001-01-01 00:00:00 +0000 UTC\nFinished: 2020-11-10 23:00:00 +0000 UTC\nDuration: 2333448h16m18.871345152s\n" + if output != expectation { + t.Fatalf("Incorrect print operation output %q, should be %q", output, expectation) + } + }) + + t.Run("Operation state sync result with message is not empty", func(t *testing.T) { + time := metav1.Date(2020, time.November, 10, 23, 0, 0, 0, time.UTC) + output, _ := captureOutput(func() error { + printOperationResult(&v1alpha1.OperationState{ + SyncResult: &v1alpha1.SyncOperationResult{Revision: "revision"}, + FinishedAt: &time, + Message: "test", + }) + return nil + }) + + expectation := "Operation: Sync\nSync Revision: revision\nPhase: \nStart: 0001-01-01 00:00:00 +0000 UTC\nFinished: 2020-11-10 23:00:00 +0000 UTC\nDuration: 2333448h16m18.871345152s\nMessage: test\n" + if output != expectation { + t.Fatalf("Incorrect print operation output %q, should be %q", output, expectation) + } + }) +} + +func TestPrintApplicationHistoryTable(t *testing.T) { + histories := []v1alpha1.RevisionHistory{ + { 
+ ID: 1, + Source: v1alpha1.ApplicationSource{ + TargetRevision: "1", + }, + }, + { + ID: 2, + Source: v1alpha1.ApplicationSource{ + TargetRevision: "2", + }, + }, + { + ID: 3, + Source: v1alpha1.ApplicationSource{ + TargetRevision: "3", + }, + }, + } + + output, _ := captureOutput(func() error { + printApplicationHistoryTable(histories) + return nil + }) + + expectation := "ID DATE REVISION\n1 0001-01-01 00:00:00 +0000 UTC 1\n2 0001-01-01 00:00:00 +0000 UTC 2\n3 0001-01-01 00:00:00 +0000 UTC 3\n" + + if output != expectation { + t.Fatalf("Incorrect print operation output %q, should be %q", output, expectation) + } +} + +func TestPrintAppSummaryTable(t *testing.T) { + output, _ := captureOutput(func() error { + app := &v1alpha1.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "argocd", + }, + Spec: v1alpha1.ApplicationSpec{ + SyncPolicy: &v1alpha1.SyncPolicy{ + Automated: &v1alpha1.SyncPolicyAutomated{ + Prune: true, + }, + }, + Project: "default", + Destination: v1alpha1.ApplicationDestination{Server: "local", Namespace: "argocd"}, + Source: &v1alpha1.ApplicationSource{ + RepoURL: "test", + TargetRevision: "master", + Path: "/test", + Helm: &v1alpha1.ApplicationSourceHelm{ + ValueFiles: []string{"path1", "path2"}, + }, + Kustomize: &v1alpha1.ApplicationSourceKustomize{NamePrefix: "prefix"}, + }, + }, + Status: v1alpha1.ApplicationStatus{ + Sync: v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeOutOfSync, + }, + Health: v1alpha1.HealthStatus{ + Status: health.HealthStatusProgressing, + Message: "health-message", + }, + }, + } + + windows := &v1alpha1.SyncWindows{ + { + Kind: "allow", + Schedule: "0 0 * * *", + Duration: "24h", + Applications: []string{ + "*-prod", + }, + ManualSync: true, + }, + { + Kind: "deny", + Schedule: "0 0 * * *", + Duration: "24h", + Namespaces: []string{ + "default", + }, + }, + { + Kind: "allow", + Schedule: "0 0 * * *", + Duration: "24h", + Clusters: []string{ + "in-cluster", + "cluster1", + }, + }, + } + + 
printAppSummaryTable(app, "url", windows) + return nil + }) + + expectation := `Name: argocd/test +Project: default +Server: local +Namespace: argocd +URL: url +Repo: test +Target: master +Path: /test +Helm Values: path1,path2 +Name Prefix: prefix +SyncWindow: Sync Denied +Assigned Windows: allow:0 0 * * *:24h,deny:0 0 * * *:24h,allow:0 0 * * *:24h +Sync Policy: Automated (Prune) +Sync Status: OutOfSync from master +Health Status: Progressing (health-message) +` + assert.Equalf(t, expectation, output, "Incorrect print app summary output %q, should be %q", output, expectation) +} + +func TestPrintAppConditions(t *testing.T) { + output, _ := captureOutput(func() error { + app := &v1alpha1.Application{ + Status: v1alpha1.ApplicationStatus{ + Conditions: []v1alpha1.ApplicationCondition{ + { + Type: v1alpha1.ApplicationConditionDeletionError, + Message: "test", + }, + { + Type: v1alpha1.ApplicationConditionExcludedResourceWarning, + Message: "test2", + }, + { + Type: v1alpha1.ApplicationConditionRepeatedResourceWarning, + Message: "test3", + }, + }, + }, + } + printAppConditions(os.Stdout, app) + return nil + }) + expectation := "CONDITION\tMESSAGE\tLAST TRANSITION\nDeletionError\ttest\t\nExcludedResourceWarning\ttest2\t\nRepeatedResourceWarning\ttest3\t\n" + if output != expectation { + t.Fatalf("Incorrect print app conditions output %q, should be %q", output, expectation) + } +} + +func TestPrintParams(t *testing.T) { + output, _ := captureOutput(func() error { + app := &v1alpha1.Application{ + Spec: v1alpha1.ApplicationSpec{ + Source: &v1alpha1.ApplicationSource{ + Helm: &v1alpha1.ApplicationSourceHelm{ + Parameters: []v1alpha1.HelmParameter{ + { + Name: "name1", + Value: "value1", + }, + { + Name: "name2", + Value: "value2", + }, + { + Name: "name3", + Value: "value3", + }, + }, + }, + }, + }, + } + printParams(app) + return nil + }) + expectation := "\n\nNAME VALUE\nname1 value1\nname2 value2\nname3 value3\n" + if output != expectation { + t.Fatalf("Incorrect print 
params output %q, should be %q", output, expectation) + } +} + +func TestAppUrlDefault(t *testing.T) { + t.Run("Plain text", func(t *testing.T) { + result := appURLDefault(argocdclient.NewClientOrDie(&argocdclient.ClientOptions{ + ServerAddr: "localhost:80", + PlainText: true, + }), "test") + expectation := "http://localhost:80/applications/test" + if result != expectation { + t.Fatalf("Incorrect url %q, should be %q", result, expectation) + } + }) + t.Run("https", func(t *testing.T) { + result := appURLDefault(argocdclient.NewClientOrDie(&argocdclient.ClientOptions{ + ServerAddr: "localhost:443", + PlainText: false, + }), "test") + expectation := "https://localhost/applications/test" + if result != expectation { + t.Fatalf("Incorrect url %q, should be %q", result, expectation) + } + }) +} + +func TestTruncateString(t *testing.T) { + result := truncateString("argocdtool", 2) + expectation := "ar..." + if result != expectation { + t.Fatalf("Incorrect truncate string %q, should be %q", result, expectation) + } +} + +func TestGetService(t *testing.T) { + t.Run("Server", func(t *testing.T) { + app := &v1alpha1.Application{ + Spec: v1alpha1.ApplicationSpec{ + Destination: v1alpha1.ApplicationDestination{ + Server: "test-server", + }, + }, + } + result := getServer(app) + expectation := "test-server" + if result != expectation { + t.Fatalf("Incorrect server %q, should be %q", result, expectation) + } + }) + t.Run("Name", func(t *testing.T) { + app := &v1alpha1.Application{ + Spec: v1alpha1.ApplicationSpec{ + Destination: v1alpha1.ApplicationDestination{ + Name: "test-name", + }, + }, + } + result := getServer(app) + expectation := "test-name" + if result != expectation { + t.Fatalf("Incorrect server name %q, should be %q", result, expectation) + } + }) +} + +func TestTargetObjects(t *testing.T) { + resources := []*v1alpha1.ResourceDiff{ + { + TargetState: 
"{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"name\":\"test-helm-guestbook\",\"namespace\":\"argocd\"},\"spec\":{\"selector\":{\"app\":\"helm-guestbook\",\"release\":\"test\"},\"sessionAffinity\":\"None\",\"type\":\"ClusterIP\"},\"status\":{\"loadBalancer\":{}}}", + }, + { + TargetState: "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"name\":\"test-helm-guestbook\",\"namespace\":\"ns\"},\"spec\":{\"selector\":{\"app\":\"helm-guestbook\",\"release\":\"test\"},\"sessionAffinity\":\"None\",\"type\":\"ClusterIP\"},\"status\":{\"loadBalancer\":{}}}", + }, + } + objects, err := targetObjects(resources) + if err != nil { + t.Fatal("operation should finish without error") + } + + if len(objects) != 2 { + t.Fatalf("incorrect number of objects %v, should be 2", len(objects)) + } + + if objects[0].GetName() != "test-helm-guestbook" { + t.Fatalf("incorrect name %q, should be %q", objects[0].GetName(), "test-helm-guestbook") + } +} + +func TestTargetObjects_invalid(t *testing.T) { + resources := []*v1alpha1.ResourceDiff{{TargetState: "{"}} + _, err := targetObjects(resources) + assert.Error(t, err) +} + +func TestPrintApplicationNames(t *testing.T) { + output, _ := captureOutput(func() error { + app := &v1alpha1.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + } + printApplicationNames([]v1alpha1.Application{*app, *app}) + return nil + }) + expectation := "test\ntest\n" + if output != expectation { + t.Fatalf("Incorrect print params output %q, should be %q", output, expectation) + } +} + +func Test_unset(t *testing.T) { + kustomizeSource := &v1alpha1.ApplicationSource{ + Kustomize: &v1alpha1.ApplicationSourceKustomize{ + NamePrefix: "some-prefix", + NameSuffix: "some-suffix", + Version: "123", + Images: v1alpha1.KustomizeImages{ + "old1=new:tag", + "old2=new:tag", + }, + Replicas: []v1alpha1.KustomizeReplica{ + { + Name: "my-deployment", + Count: intstr.FromInt(2), + }, + { + Name: "my-statefulset", + Count: intstr.FromInt(4), + 
}, + }, + }, + } + + helmSource := &v1alpha1.ApplicationSource{ + Helm: &v1alpha1.ApplicationSourceHelm{ + IgnoreMissingValueFiles: true, + Parameters: []v1alpha1.HelmParameter{ + { + Name: "name-1", + Value: "value-1", + }, + { + Name: "name-2", + Value: "value-2", + }, + }, + PassCredentials: true, + ValuesObject: &runtime.RawExtension{Raw: []byte("some: yaml")}, + ValueFiles: []string{ + "values-1.yaml", + "values-2.yaml", + }, + }, + } + + pluginSource := &v1alpha1.ApplicationSource{ + Plugin: &v1alpha1.ApplicationSourcePlugin{ + Env: v1alpha1.Env{ + { + Name: "env-1", + Value: "env-value-1", + }, + { + Name: "env-2", + Value: "env-value-2", + }, + }, + }, + } + + assert.Equal(t, "some-prefix", kustomizeSource.Kustomize.NamePrefix) + updated, nothingToUnset := unset(kustomizeSource, unsetOpts{namePrefix: true}) + assert.Equal(t, "", kustomizeSource.Kustomize.NamePrefix) + assert.True(t, updated) + assert.False(t, nothingToUnset) + updated, nothingToUnset = unset(kustomizeSource, unsetOpts{namePrefix: true}) + assert.False(t, updated) + assert.False(t, nothingToUnset) + + assert.Equal(t, "some-suffix", kustomizeSource.Kustomize.NameSuffix) + updated, nothingToUnset = unset(kustomizeSource, unsetOpts{nameSuffix: true}) + assert.Equal(t, "", kustomizeSource.Kustomize.NameSuffix) + assert.True(t, updated) + assert.False(t, nothingToUnset) + updated, nothingToUnset = unset(kustomizeSource, unsetOpts{nameSuffix: true}) + assert.False(t, updated) + assert.False(t, nothingToUnset) + + assert.Equal(t, "123", kustomizeSource.Kustomize.Version) + updated, nothingToUnset = unset(kustomizeSource, unsetOpts{kustomizeVersion: true}) + assert.Equal(t, "", kustomizeSource.Kustomize.Version) + assert.True(t, updated) + assert.False(t, nothingToUnset) + updated, nothingToUnset = unset(kustomizeSource, unsetOpts{kustomizeVersion: true}) + assert.False(t, updated) + assert.False(t, nothingToUnset) + + assert.Equal(t, 2, len(kustomizeSource.Kustomize.Images)) + updated, 
nothingToUnset = unset(kustomizeSource, unsetOpts{kustomizeImages: []string{"old1=new:tag"}}) + assert.Equal(t, 1, len(kustomizeSource.Kustomize.Images)) + assert.True(t, updated) + assert.False(t, nothingToUnset) + updated, nothingToUnset = unset(kustomizeSource, unsetOpts{kustomizeImages: []string{"old1=new:tag"}}) + assert.False(t, updated) + assert.False(t, nothingToUnset) + + assert.Equal(t, 2, len(kustomizeSource.Kustomize.Replicas)) + updated, nothingToUnset = unset(kustomizeSource, unsetOpts{kustomizeReplicas: []string{"my-deployment"}}) + assert.Equal(t, 1, len(kustomizeSource.Kustomize.Replicas)) + assert.True(t, updated) + assert.False(t, nothingToUnset) + updated, nothingToUnset = unset(kustomizeSource, unsetOpts{kustomizeReplicas: []string{"my-deployment"}}) + assert.False(t, updated) + assert.False(t, nothingToUnset) + + assert.Equal(t, 2, len(helmSource.Helm.Parameters)) + updated, nothingToUnset = unset(helmSource, unsetOpts{parameters: []string{"name-1"}}) + assert.Equal(t, 1, len(helmSource.Helm.Parameters)) + assert.True(t, updated) + assert.False(t, nothingToUnset) + updated, nothingToUnset = unset(helmSource, unsetOpts{parameters: []string{"name-1"}}) + assert.False(t, updated) + assert.False(t, nothingToUnset) + + assert.Equal(t, 2, len(helmSource.Helm.ValueFiles)) + updated, nothingToUnset = unset(helmSource, unsetOpts{valuesFiles: []string{"values-1.yaml"}}) + assert.Equal(t, 1, len(helmSource.Helm.ValueFiles)) + assert.True(t, updated) + assert.False(t, nothingToUnset) + updated, nothingToUnset = unset(helmSource, unsetOpts{valuesFiles: []string{"values-1.yaml"}}) + assert.False(t, updated) + assert.False(t, nothingToUnset) + + assert.Equal(t, "some: yaml", helmSource.Helm.ValuesString()) + updated, nothingToUnset = unset(helmSource, unsetOpts{valuesLiteral: true}) + assert.Equal(t, "", helmSource.Helm.ValuesString()) + assert.True(t, updated) + assert.False(t, nothingToUnset) + updated, nothingToUnset = unset(helmSource, 
unsetOpts{valuesLiteral: true}) + assert.False(t, updated) + assert.False(t, nothingToUnset) + + assert.Equal(t, true, helmSource.Helm.IgnoreMissingValueFiles) + updated, nothingToUnset = unset(helmSource, unsetOpts{ignoreMissingValueFiles: true}) + assert.Equal(t, false, helmSource.Helm.IgnoreMissingValueFiles) + assert.True(t, updated) + assert.False(t, nothingToUnset) + updated, nothingToUnset = unset(helmSource, unsetOpts{ignoreMissingValueFiles: true}) + assert.False(t, updated) + assert.False(t, nothingToUnset) + + assert.Equal(t, true, helmSource.Helm.PassCredentials) + updated, nothingToUnset = unset(helmSource, unsetOpts{passCredentials: true}) + assert.Equal(t, false, helmSource.Helm.PassCredentials) + assert.True(t, updated) + assert.False(t, nothingToUnset) + updated, nothingToUnset = unset(helmSource, unsetOpts{passCredentials: true}) + assert.False(t, updated) + assert.False(t, nothingToUnset) + + assert.Equal(t, 2, len(pluginSource.Plugin.Env)) + updated, nothingToUnset = unset(pluginSource, unsetOpts{pluginEnvs: []string{"env-1"}}) + assert.Equal(t, 1, len(pluginSource.Plugin.Env)) + assert.True(t, updated) + assert.False(t, nothingToUnset) + updated, nothingToUnset = unset(pluginSource, unsetOpts{pluginEnvs: []string{"env-1"}}) + assert.False(t, updated) + assert.False(t, nothingToUnset) +} + +func Test_unset_nothingToUnset(t *testing.T) { + testCases := []struct { + name string + source v1alpha1.ApplicationSource + }{ + {"kustomize", v1alpha1.ApplicationSource{Kustomize: &v1alpha1.ApplicationSourceKustomize{}}}, + {"helm", v1alpha1.ApplicationSource{Helm: &v1alpha1.ApplicationSourceHelm{}}}, + {"plugin", v1alpha1.ApplicationSource{Plugin: &v1alpha1.ApplicationSourcePlugin{}}}, + } + + for _, testCase := range testCases { + testCaseCopy := testCase + + t.Run(testCaseCopy.name, func(t *testing.T) { + t.Parallel() + + updated, nothingToUnset := unset(&testCaseCopy.source, unsetOpts{}) + assert.False(t, updated) + assert.True(t, nothingToUnset) + }) + 
} +} + +func TestFilterAppResources(t *testing.T) { + // App resources + var ( + appReplicaSet1 = v1alpha1.ResourceStatus{ + Group: "apps", + Kind: "ReplicaSet", + Namespace: "default", + Name: "replicaSet-name1", + } + appReplicaSet2 = v1alpha1.ResourceStatus{ + Group: "apps", + Kind: "ReplicaSet", + Namespace: "default", + Name: "replicaSet-name2", + } + appJob = v1alpha1.ResourceStatus{ + Group: "batch", + Kind: "Job", + Namespace: "default", + Name: "job-name", + } + appService1 = v1alpha1.ResourceStatus{ + Group: "", + Kind: "Service", + Namespace: "default", + Name: "service-name1", + } + appService2 = v1alpha1.ResourceStatus{ + Group: "", + Kind: "Service", + Namespace: "default", + Name: "service-name2", + } + appDeployment = v1alpha1.ResourceStatus{ + Group: "apps", + Kind: "Deployment", + Namespace: "default", + Name: "deployment-name", + } + ) + app := v1alpha1.Application{ + Status: v1alpha1.ApplicationStatus{ + Resources: []v1alpha1.ResourceStatus{ + appReplicaSet1, appReplicaSet2, appJob, appService1, appService2, appDeployment}, + }, + } + // Resource filters + var ( + blankValues = v1alpha1.SyncOperationResource{ + Group: "", + Kind: "", + Name: "", + Namespace: "", + Exclude: false} + // *:*:* + includeAllResources = v1alpha1.SyncOperationResource{ + Group: "*", + Kind: "*", + Name: "*", + Namespace: "", + Exclude: false} + // !*:*:* + excludeAllResources = v1alpha1.SyncOperationResource{ + Group: "*", + Kind: "*", + Name: "*", + Namespace: "", + Exclude: true} + // *:Service:* + includeAllServiceResources = v1alpha1.SyncOperationResource{ + Group: "*", + Kind: "Service", + Name: "*", + Namespace: "", + Exclude: false} + // !*:Service:* + excludeAllServiceResources = v1alpha1.SyncOperationResource{ + Group: "*", + Kind: "Service", + Name: "*", + Namespace: "", + Exclude: true} + // apps:ReplicaSet:replicaSet-name1 + includeReplicaSet1Resource = v1alpha1.SyncOperationResource{ + Group: "apps", + Kind: "ReplicaSet", + Name: "replicaSet-name1", + 
Namespace: "", + Exclude: false} + // !apps:ReplicaSet:replicaSet-name2 + excludeReplicaSet2Resource = v1alpha1.SyncOperationResource{ + Group: "apps", + Kind: "ReplicaSet", + Name: "replicaSet-name2", + Namespace: "", + Exclude: true} + ) + + // Filtered resources + var ( + replicaSet1 = v1alpha1.SyncOperationResource{ + Group: "apps", + Kind: "ReplicaSet", + Namespace: "default", + Name: "replicaSet-name1", + } + replicaSet2 = v1alpha1.SyncOperationResource{ + Group: "apps", + Kind: "ReplicaSet", + Namespace: "default", + Name: "replicaSet-name2", + } + job = v1alpha1.SyncOperationResource{ + Group: "batch", + Kind: "Job", + Namespace: "default", + Name: "job-name", + } + service1 = v1alpha1.SyncOperationResource{ + Group: "", + Kind: "Service", + Namespace: "default", + Name: "service-name1", + } + service2 = v1alpha1.SyncOperationResource{ + Group: "", + Kind: "Service", + Namespace: "default", + Name: "service-name2", + } + deployment = v1alpha1.SyncOperationResource{ + Group: "apps", + Kind: "Deployment", + Namespace: "default", + Name: "deployment-name", + } + ) + tests := []struct { + testName string + selectedResources []*v1alpha1.SyncOperationResource + expectedResult []*v1alpha1.SyncOperationResource + }{ + // --resource apps:ReplicaSet:replicaSet-name1 --resource *:Service:* + {testName: "Include ReplicaSet replicaSet-name1 resouce and all service resources", + selectedResources: []*v1alpha1.SyncOperationResource{&includeAllServiceResources, &includeReplicaSet1Resource}, + expectedResult: []*v1alpha1.SyncOperationResource{&replicaSet1, &service1, &service2}, + }, + // --resource apps:ReplicaSet:replicaSet-name1 --resource !*:Service:* + {testName: "Include ReplicaSet replicaSet-name1 resouce and exclude all service resources", + selectedResources: []*v1alpha1.SyncOperationResource{&excludeAllServiceResources, &includeReplicaSet1Resource}, + expectedResult: []*v1alpha1.SyncOperationResource{&replicaSet1, &replicaSet2, &job, &deployment}, + }, + // 
--resource !apps:ReplicaSet:replicaSet-name2 --resource !*:Service:* + {testName: "Exclude ReplicaSet replicaSet-name2 resouce and all service resources", + selectedResources: []*v1alpha1.SyncOperationResource{&excludeReplicaSet2Resource, &excludeAllServiceResources}, + expectedResult: []*v1alpha1.SyncOperationResource{&replicaSet1, &replicaSet2, &job, &service1, &service2, &deployment}, + }, + // --resource !apps:ReplicaSet:replicaSet-name2 + {testName: "Exclude ReplicaSet replicaSet-name2 resouce", + selectedResources: []*v1alpha1.SyncOperationResource{&excludeReplicaSet2Resource}, + expectedResult: []*v1alpha1.SyncOperationResource{&replicaSet1, &job, &service1, &service2, &deployment}, + }, + // --resource apps:ReplicaSet:replicaSet-name1 + {testName: "Include ReplicaSet replicaSet-name1 resouce", + selectedResources: []*v1alpha1.SyncOperationResource{&includeReplicaSet1Resource}, + expectedResult: []*v1alpha1.SyncOperationResource{&replicaSet1}, + }, + // --resource !*:Service:* + {testName: "Exclude Service resouces", + selectedResources: []*v1alpha1.SyncOperationResource{&excludeAllServiceResources}, + expectedResult: []*v1alpha1.SyncOperationResource{&replicaSet1, &replicaSet2, &job, &deployment}, + }, + // --resource *:Service:* + {testName: "Include Service resouces", + selectedResources: []*v1alpha1.SyncOperationResource{&includeAllServiceResources}, + expectedResult: []*v1alpha1.SyncOperationResource{&service1, &service2}, + }, + // --resource !*:*:* + {testName: "Exclude all resouces", + selectedResources: []*v1alpha1.SyncOperationResource{&excludeAllResources}, + expectedResult: nil, + }, + // --resource *:*:* + {testName: "Include all resouces", + selectedResources: []*v1alpha1.SyncOperationResource{&includeAllResources}, + expectedResult: []*v1alpha1.SyncOperationResource{&replicaSet1, &replicaSet2, &job, &service1, &service2, &deployment}, + }, + {testName: "No Filters", + selectedResources: []*v1alpha1.SyncOperationResource{&blankValues}, + 
expectedResult: nil, + }, + {testName: "Empty Filter", + selectedResources: []*v1alpha1.SyncOperationResource{}, + expectedResult: nil, + }, + } + + for _, test := range tests { + t.Run(test.testName, func(t *testing.T) { + filteredResources := filterAppResources(&app, test.selectedResources) + assert.Equal(t, test.expectedResult, filteredResources) + }) + } +} + +func TestParseSelectedResources(t *testing.T) { + resources := []string{"v1alpha:Application:test", + "v1alpha:Application:namespace/test", + "!v1alpha:Application:test", + "apps:Deployment:default/test", + "!*:*:*"} + operationResources, err := parseSelectedResources(resources) + assert.NoError(t, err) + assert.Len(t, operationResources, 5) + assert.Equal(t, *operationResources[0], v1alpha1.SyncOperationResource{ + Namespace: "", + Name: "test", + Kind: application.ApplicationKind, + Group: "v1alpha", + }) + assert.Equal(t, *operationResources[1], v1alpha1.SyncOperationResource{ + Namespace: "namespace", + Name: "test", + Kind: application.ApplicationKind, + Group: "v1alpha", + }) + assert.Equal(t, *operationResources[2], v1alpha1.SyncOperationResource{ + Namespace: "", + Name: "test", + Kind: "Application", + Group: "v1alpha", + Exclude: true, + }) + assert.Equal(t, *operationResources[3], v1alpha1.SyncOperationResource{ + Namespace: "default", + Name: "test", + Kind: "Deployment", + Group: "apps", + Exclude: false, + }) + assert.Equal(t, *operationResources[4], v1alpha1.SyncOperationResource{ + Namespace: "", + Name: "*", + Kind: "*", + Group: "*", + Exclude: true, + }) +} + +func TestParseSelectedResourcesIncorrect(t *testing.T) { + resources := []string{"v1alpha:test", "v1alpha:Application:namespace/test"} + _, err := parseSelectedResources(resources) + assert.ErrorContains(t, err, "v1alpha:test") +} + +func TestParseSelectedResourcesIncorrectNamespace(t *testing.T) { + resources := []string{"v1alpha:Application:namespace/test/unknown"} + _, err := parseSelectedResources(resources) + 
assert.ErrorContains(t, err, "v1alpha:Application:namespace/test/unknown") + +} + +func TestParseSelectedResourcesEmptyList(t *testing.T) { + var resources []string + operationResources, err := parseSelectedResources(resources) + assert.NoError(t, err) + assert.Len(t, operationResources, 0) +} + +func TestPrintApplicationTableNotWide(t *testing.T) { + output, err := captureOutput(func() error { + app := &v1alpha1.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "app-name", + }, + Spec: v1alpha1.ApplicationSpec{ + Destination: v1alpha1.ApplicationDestination{ + Server: "http://localhost:8080", + Namespace: "default", + }, + Project: "prj", + }, + Status: v1alpha1.ApplicationStatus{ + Sync: v1alpha1.SyncStatus{ + Status: "OutOfSync", + }, + Health: v1alpha1.HealthStatus{ + Status: "Healthy", + }, + }, + } + output := "table" + printApplicationTable([]v1alpha1.Application{*app, *app}, &output) + return nil + }) + assert.NoError(t, err) + expectation := "NAME CLUSTER NAMESPACE PROJECT STATUS HEALTH SYNCPOLICY CONDITIONS\napp-name http://localhost:8080 default prj OutOfSync Healthy \napp-name http://localhost:8080 default prj OutOfSync Healthy \n" + assert.Equal(t, output, expectation) +} + +func TestPrintApplicationTableWide(t *testing.T) { + output, err := captureOutput(func() error { + app := &v1alpha1.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "app-name", + }, + Spec: v1alpha1.ApplicationSpec{ + Destination: v1alpha1.ApplicationDestination{ + Server: "http://localhost:8080", + Namespace: "default", + }, + Source: &v1alpha1.ApplicationSource{ + RepoURL: "https://github.com/argoproj/argocd-example-apps", + Path: "guestbook", + TargetRevision: "123", + }, + Project: "prj", + }, + Status: v1alpha1.ApplicationStatus{ + Sync: v1alpha1.SyncStatus{ + Status: "OutOfSync", + }, + Health: v1alpha1.HealthStatus{ + Status: "Healthy", + }, + }, + } + output := "wide" + printApplicationTable([]v1alpha1.Application{*app, *app}, &output) + return nil + }) + 
assert.NoError(t, err) + expectation := "NAME CLUSTER NAMESPACE PROJECT STATUS HEALTH SYNCPOLICY CONDITIONS REPO PATH TARGET\napp-name http://localhost:8080 default prj OutOfSync Healthy https://github.com/argoproj/argocd-example-apps guestbook 123\napp-name http://localhost:8080 default prj OutOfSync Healthy https://github.com/argoproj/argocd-example-apps guestbook 123\n" + assert.Equal(t, output, expectation) +} + +func TestResourceStateKey(t *testing.T) { + rst := resourceState{ + Group: "group", + Kind: "kind", + Namespace: "namespace", + Name: "name", + } + + key := rst.Key() + assert.Equal(t, "group/kind/namespace/name", key) +} + +func TestFormatItems(t *testing.T) { + rst := resourceState{ + Group: "group", + Kind: "kind", + Namespace: "namespace", + Name: "name", + Status: "status", + Health: "health", + Hook: "hook", + Message: "message", + } + items := rst.FormatItems() + assert.Equal(t, "group", items[1]) + assert.Equal(t, "kind", items[2]) + assert.Equal(t, "namespace", items[3]) + assert.Equal(t, "name", items[4]) + assert.Equal(t, "status", items[5]) + assert.Equal(t, "health", items[6]) + assert.Equal(t, "hook", items[7]) + assert.Equal(t, "message", items[8]) + +} + +func TestMerge(t *testing.T) { + rst := resourceState{ + Group: "group", + Kind: "kind", + Namespace: "namespace", + Name: "name", + Status: "status", + Health: "health", + Hook: "hook", + Message: "message", + } + + rstNew := resourceState{ + Group: "group", + Kind: "kind", + Namespace: "namespace", + Name: "name", + Status: "status", + Health: "health", + Hook: "hook2", + Message: "message2", + } + + updated := rst.Merge(&rstNew) + assert.True(t, updated) + assert.Equal(t, rstNew.Hook, rst.Hook) + assert.Equal(t, rstNew.Message, rst.Message) + assert.Equal(t, rstNew.Status, rst.Status) +} + +func TestMergeWitoutUpdate(t *testing.T) { + rst := resourceState{ + Group: "group", + Kind: "kind", + Namespace: "namespace", + Name: "name", + Status: "status", + Health: "health", + Hook: 
"hook", + Message: "message", + } + + rstNew := resourceState{ + Group: "group", + Kind: "kind", + Namespace: "namespace", + Name: "name", + Status: "status", + Health: "health", + Hook: "hook", + Message: "message", + } + + updated := rst.Merge(&rstNew) + assert.False(t, updated) +} + +func TestCheckResourceStatus(t *testing.T) { + t.Run("Degraded, Suspended and health status passed", func(t *testing.T) { + res := checkResourceStatus(watchOpts{ + suspended: true, + health: true, + degraded: true, + }, string(health.HealthStatusHealthy), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{}) + assert.True(t, res) + }) + t.Run("Degraded, Suspended and health status failed", func(t *testing.T) { + res := checkResourceStatus(watchOpts{ + suspended: true, + health: true, + degraded: true, + }, string(health.HealthStatusProgressing), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{}) + assert.False(t, res) + }) + t.Run("Suspended and health status passed", func(t *testing.T) { + res := checkResourceStatus(watchOpts{ + suspended: true, + health: true, + }, string(health.HealthStatusHealthy), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{}) + assert.True(t, res) + }) + t.Run("Suspended and health status failed", func(t *testing.T) { + res := checkResourceStatus(watchOpts{ + suspended: true, + health: true, + }, string(health.HealthStatusProgressing), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{}) + assert.False(t, res) + }) + t.Run("Suspended passed", func(t *testing.T) { + res := checkResourceStatus(watchOpts{ + suspended: true, + health: false, + }, string(health.HealthStatusSuspended), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{}) + assert.True(t, res) + }) + t.Run("Suspended failed", func(t *testing.T) { + res := checkResourceStatus(watchOpts{ + suspended: true, + health: false, + }, string(health.HealthStatusProgressing), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{}) + assert.False(t, res) 
+ }) + t.Run("Health passed", func(t *testing.T) { + res := checkResourceStatus(watchOpts{ + suspended: false, + health: true, + }, string(health.HealthStatusHealthy), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{}) + assert.True(t, res) + }) + t.Run("Health failed", func(t *testing.T) { + res := checkResourceStatus(watchOpts{ + suspended: false, + health: true, + }, string(health.HealthStatusProgressing), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{}) + assert.False(t, res) + }) + t.Run("Synced passed", func(t *testing.T) { + res := checkResourceStatus(watchOpts{}, string(health.HealthStatusProgressing), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{}) + assert.True(t, res) + }) + t.Run("Synced failed", func(t *testing.T) { + res := checkResourceStatus(watchOpts{}, string(health.HealthStatusProgressing), string(v1alpha1.SyncStatusCodeOutOfSync), &v1alpha1.Operation{}) + assert.True(t, res) + }) + t.Run("Degraded passed", func(t *testing.T) { + res := checkResourceStatus(watchOpts{ + suspended: false, + health: false, + degraded: true, + }, string(health.HealthStatusDegraded), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{}) + assert.True(t, res) + }) + t.Run("Degraded failed", func(t *testing.T) { + res := checkResourceStatus(watchOpts{ + suspended: false, + health: false, + degraded: true, + }, string(health.HealthStatusProgressing), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{}) + assert.False(t, res) + }) +} + +func Test_hasAppChanged(t *testing.T) { + type args struct { + appReq *v1alpha1.Application + appRes *v1alpha1.Application + upsert bool + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "App has changed - Labels, Annotations, Finalizers empty", + args: args{ + appReq: testApp("foo", "default", map[string]string{}, map[string]string{}, []string{}), + appRes: testApp("foo", "foo", nil, nil, nil), + upsert: true, + }, + want: true, + }, + { + name: "App 
unchanged - Labels, Annotations, Finalizers populated", + args: args{ + appReq: testApp("foo", "default", map[string]string{"foo": "bar"}, map[string]string{"foo": "bar"}, []string{"foo"}), + appRes: testApp("foo", "default", map[string]string{"foo": "bar"}, map[string]string{"foo": "bar"}, []string{"foo"}), + upsert: true, + }, + want: false, + }, + { + name: "Apps unchanged - Using empty maps/list locally versus server returning nil", + args: args{ + appReq: testApp("foo", "default", map[string]string{}, map[string]string{}, []string{}), + appRes: testApp("foo", "default", nil, nil, nil), + upsert: true, + }, + want: false, + }, + { + name: "App unchanged - Using empty project locally versus server returning default", + args: args{ + appReq: testApp("foo", "", map[string]string{}, map[string]string{}, []string{}), + appRes: testApp("foo", "default", nil, nil, nil), + }, + want: false, + }, + { + name: "App unchanged - From upsert=false", + args: args{ + appReq: testApp("foo", "foo", map[string]string{}, map[string]string{}, []string{}), + appRes: testApp("foo", "default", nil, nil, nil), + upsert: false, + }, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := hasAppChanged(tt.args.appReq, tt.args.appRes, tt.args.upsert); got != tt.want { + t.Errorf("hasAppChanged() = %v, want %v", got, tt.want) + } + }) + } +} + +func testApp(name, project string, labels map[string]string, annotations map[string]string, finalizers []string) *v1alpha1.Application { + return &v1alpha1.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: labels, + Annotations: annotations, + Finalizers: finalizers, + }, + Spec: v1alpha1.ApplicationSpec{ + Source: &v1alpha1.ApplicationSource{ + RepoURL: "https://github.com/argoproj/argocd-example-apps.git", + }, + Project: project, + }, + } +} diff --git a/cmd/argocd/commands/applicationset.go b/cmd/argocd/commands/applicationset.go new file mode 100644 index 
0000000000000..b38f8837598fb --- /dev/null +++ b/cmd/argocd/commands/applicationset.go @@ -0,0 +1,406 @@ +package commands + +import ( + "fmt" + "io" + "os" + "reflect" + "text/tabwriter" + + "github.com/mattn/go-isatty" + "github.com/spf13/cobra" + "google.golang.org/grpc/codes" + + "github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless" + cmdutil "github.com/argoproj/argo-cd/v2/cmd/util" + argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient" + "github.com/argoproj/argo-cd/v2/pkg/apiclient/applicationset" + arogappsetv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/argoproj/argo-cd/v2/util/argo" + "github.com/argoproj/argo-cd/v2/util/cli" + "github.com/argoproj/argo-cd/v2/util/errors" + "github.com/argoproj/argo-cd/v2/util/grpc" + argoio "github.com/argoproj/argo-cd/v2/util/io" + "github.com/argoproj/argo-cd/v2/util/templates" +) + +var ( + appSetExample = templates.Examples(` + # Get an ApplicationSet. + argocd appset get APPSETNAME + + # List all the ApplicationSets + argocd appset list + + # Create an ApplicationSet from a YAML stored in a file or at given URL + argocd appset create (...) + + # Delete an ApplicationSet + argocd appset delete APPSETNAME (APPSETNAME...) 
+ `) +) + +// NewAppSetCommand returns a new instance of an `argocd appset` command +func NewAppSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { + var command = &cobra.Command{ + Use: "appset", + Short: "Manage ApplicationSets", + Example: appSetExample, + Run: func(c *cobra.Command, args []string) { + c.HelpFunc()(c, args) + os.Exit(1) + }, + } + command.AddCommand(NewApplicationSetGetCommand(clientOpts)) + command.AddCommand(NewApplicationSetCreateCommand(clientOpts)) + command.AddCommand(NewApplicationSetListCommand(clientOpts)) + command.AddCommand(NewApplicationSetDeleteCommand(clientOpts)) + return command +} + +// NewApplicationSetGetCommand returns a new instance of an `argocd appset get` command +func NewApplicationSetGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { + var ( + output string + showParams bool + ) + var command = &cobra.Command{ + Use: "get APPSETNAME", + Short: "Get ApplicationSet details", + Example: templates.Examples(` + # Get ApplicationSets + argocd appset get APPSETNAME + `), + Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + + if len(args) == 0 { + c.HelpFunc()(c, args) + os.Exit(1) + } + acdClient := headless.NewClientOrDie(clientOpts, c) + conn, appIf := acdClient.NewApplicationSetClientOrDie() + defer argoio.Close(conn) + + appSetName, appSetNs := argo.ParseFromQualifiedName(args[0], "") + + appSet, err := appIf.Get(ctx, &applicationset.ApplicationSetGetQuery{Name: appSetName, AppsetNamespace: appSetNs}) + errors.CheckError(err) + + switch output { + case "yaml", "json": + err := PrintResource(appSet, output) + errors.CheckError(err) + case "wide", "": + printAppSetSummaryTable(appSet) + + if len(appSet.Status.Conditions) > 0 { + fmt.Println() + w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) + printAppSetConditions(w, appSet) + _ = w.Flush() + fmt.Println() + } + if showParams { + printHelmParams(appSet.Spec.Template.Spec.GetSource().Helm) + } + default: + 
errors.CheckError(fmt.Errorf("unknown output format: %s", output)) + } + }, + } + command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide") + command.Flags().BoolVar(&showParams, "show-params", false, "Show ApplicationSet parameters and overrides") + return command +} + +// NewApplicationSetCreateCommand returns a new instance of an `argocd appset create` command +func NewApplicationSetCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { + var upsert bool + var command = &cobra.Command{ + Use: "create", + Short: "Create one or more ApplicationSets", + Example: templates.Examples(` + # Create ApplicationSets + argocd appset create (...) + `), + Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + + if len(args) == 0 { + c.HelpFunc()(c, args) + os.Exit(1) + } + argocdClient := headless.NewClientOrDie(clientOpts, c) + fileUrl := args[0] + appsets, err := cmdutil.ConstructApplicationSet(fileUrl) + errors.CheckError(err) + + if len(appsets) == 0 { + fmt.Printf("No ApplicationSets found while parsing the input file") + os.Exit(1) + } + + for _, appset := range appsets { + if appset.Name == "" { + err := fmt.Errorf("Error creating ApplicationSet %s. 
ApplicationSet does not have Name field set", appset) + errors.CheckError(err) + } + + conn, appIf := argocdClient.NewApplicationSetClientOrDie() + defer argoio.Close(conn) + + // Get app before creating to see if it is being updated or no change + existing, err := appIf.Get(ctx, &applicationset.ApplicationSetGetQuery{Name: appset.Name, AppsetNamespace: appset.Namespace}) + if grpc.UnwrapGRPCStatus(err).Code() != codes.NotFound { + errors.CheckError(err) + } + + appSetCreateRequest := applicationset.ApplicationSetCreateRequest{ + Applicationset: appset, + Upsert: upsert, + } + created, err := appIf.Create(ctx, &appSetCreateRequest) + errors.CheckError(err) + + var action string + if existing == nil { + action = "created" + } else if !hasAppSetChanged(existing, created, upsert) { + action = "unchanged" + } else { + action = "updated" + } + + fmt.Printf("ApplicationSet '%s' %s\n", created.ObjectMeta.Name, action) + } + }, + } + command.Flags().BoolVar(&upsert, "upsert", false, "Allows to override ApplicationSet with the same name even if supplied ApplicationSet spec is different from existing spec") + return command +} + +// NewApplicationSetListCommand returns a new instance of an `argocd appset list` command +func NewApplicationSetListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { + var ( + output string + selector string + projects []string + appSetNamespace string + ) + var command = &cobra.Command{ + Use: "list", + Short: "List ApplicationSets", + Example: templates.Examples(` + # List all ApplicationSets + argocd appset list + `), + Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + + conn, appIf := headless.NewClientOrDie(clientOpts, c).NewApplicationSetClientOrDie() + defer argoio.Close(conn) + appsets, err := appIf.List(ctx, &applicationset.ApplicationSetListQuery{Selector: selector, Projects: projects, AppsetNamespace: appSetNamespace}) + errors.CheckError(err) + + appsetList := appsets.Items + + switch output { + case 
"yaml", "json": + err := PrintResourceList(appsetList, output, false) + errors.CheckError(err) + case "name": + printApplicationSetNames(appsetList) + case "wide", "": + printApplicationSetTable(appsetList, &output) + default: + errors.CheckError(fmt.Errorf("unknown output format: %s", output)) + } + }, + } + command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: wide|name|json|yaml") + command.Flags().StringVarP(&selector, "selector", "l", "", "List applicationsets by label") + command.Flags().StringArrayVarP(&projects, "project", "p", []string{}, "Filter by project name") + command.Flags().StringVarP(&appSetNamespace, "appset-namespace", "N", "", "Only list applicationsets in namespace") + + return command +} + +// NewApplicationSetDeleteCommand returns a new instance of an `argocd appset delete` command +func NewApplicationSetDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { + var ( + noPrompt bool + ) + var command = &cobra.Command{ + Use: "delete", + Short: "Delete one or more ApplicationSets", + Example: templates.Examples(` + # Delete an applicationset + argocd appset delete APPSETNAME (APPSETNAME...) 
+ `), + Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + + if len(args) == 0 { + c.HelpFunc()(c, args) + os.Exit(1) + } + conn, appIf := headless.NewClientOrDie(clientOpts, c).NewApplicationSetClientOrDie() + defer argoio.Close(conn) + var isTerminal bool = isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd()) + var isConfirmAll bool = false + var numOfApps = len(args) + var promptFlag = c.Flag("yes") + if promptFlag.Changed && promptFlag.Value.String() == "true" { + noPrompt = true + } + for _, appSetQualifiedName := range args { + + appSetName, appSetNs := argo.ParseFromQualifiedName(appSetQualifiedName, "") + + appsetDeleteReq := applicationset.ApplicationSetDeleteRequest{ + Name: appSetName, + AppsetNamespace: appSetNs, + } + + if isTerminal && !noPrompt { + var lowercaseAnswer string + if numOfApps == 1 { + lowercaseAnswer = cli.AskToProceedS("Are you sure you want to delete '" + appSetQualifiedName + "' and all its Applications? [y/n] ") + } else { + if !isConfirmAll { + lowercaseAnswer = cli.AskToProceedS("Are you sure you want to delete '" + appSetQualifiedName + "' and all its Applications? 
[y/n/A] where 'A' is to delete all specified ApplicationSets and their Applications without prompting") + if lowercaseAnswer == "a" || lowercaseAnswer == "all" { + lowercaseAnswer = "y" + isConfirmAll = true + } + } else { + lowercaseAnswer = "y" + } + } + if lowercaseAnswer == "y" || lowercaseAnswer == "yes" { + _, err := appIf.Delete(ctx, &appsetDeleteReq) + errors.CheckError(err) + fmt.Printf("applicationset '%s' deleted\n", appSetQualifiedName) + } else { + fmt.Println("The command to delete '" + appSetQualifiedName + "' was cancelled.") + } + } else { + _, err := appIf.Delete(ctx, &appsetDeleteReq) + errors.CheckError(err) + } + } + }, + } + command.Flags().BoolVarP(&noPrompt, "yes", "y", false, "Turn off prompting to confirm cascaded deletion of Application resources") + return command +} + +// Print simple list of application names +func printApplicationSetNames(apps []arogappsetv1.ApplicationSet) { + for _, app := range apps { + fmt.Println(app.QualifiedName()) + } +} + +// Print table of application data +func printApplicationSetTable(apps []arogappsetv1.ApplicationSet, output *string) { + w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) + var fmtStr string + headers := []interface{}{"NAME", "PROJECT", "SYNCPOLICY", "CONDITIONS"} + if *output == "wide" { + fmtStr = "%s\t%s\t%s\t%s\t%s\t%s\t%s\n" + headers = append(headers, "REPO", "PATH", "TARGET") + } else { + fmtStr = "%s\t%s\t%s\t%s\n" + } + _, _ = fmt.Fprintf(w, fmtStr, headers...) 
+ for _, app := range apps { + conditions := make([]arogappsetv1.ApplicationSetCondition, 0) + for _, condition := range app.Status.Conditions { + if condition.Status == arogappsetv1.ApplicationSetConditionStatusTrue { + conditions = append(conditions, condition) + } + } + vals := []interface{}{ + app.QualifiedName(), + app.Spec.Template.Spec.Project, + app.Spec.SyncPolicy, + conditions, + } + if *output == "wide" { + vals = append(vals, app.Spec.Template.Spec.GetSource().RepoURL, app.Spec.Template.Spec.GetSource().Path, app.Spec.Template.Spec.GetSource().TargetRevision) + } + _, _ = fmt.Fprintf(w, fmtStr, vals...) + } + _ = w.Flush() +} + +func getServerForAppSet(appSet *arogappsetv1.ApplicationSet) string { + if appSet.Spec.Template.Spec.Destination.Server == "" { + return appSet.Spec.Template.Spec.Destination.Name + } + + return appSet.Spec.Template.Spec.Destination.Server +} + +func printAppSetSummaryTable(appSet *arogappsetv1.ApplicationSet) { + source := appSet.Spec.Template.Spec.GetSource() + fmt.Printf(printOpFmtStr, "Name:", appSet.QualifiedName()) + fmt.Printf(printOpFmtStr, "Project:", appSet.Spec.Template.Spec.GetProject()) + fmt.Printf(printOpFmtStr, "Server:", getServerForAppSet(appSet)) + fmt.Printf(printOpFmtStr, "Namespace:", appSet.Spec.Template.Spec.Destination.Namespace) + fmt.Printf(printOpFmtStr, "Repo:", source.RepoURL) + fmt.Printf(printOpFmtStr, "Target:", source.TargetRevision) + fmt.Printf(printOpFmtStr, "Path:", source.Path) + printAppSourceDetails(&source) + + var ( + syncPolicyStr string + syncPolicy = appSet.Spec.Template.Spec.SyncPolicy + ) + if syncPolicy != nil && syncPolicy.Automated != nil { + syncPolicyStr = "Automated" + if syncPolicy.Automated.Prune { + syncPolicyStr += " (Prune)" + } + } else { + syncPolicyStr = "" + } + fmt.Printf(printOpFmtStr, "SyncPolicy:", syncPolicyStr) + +} + +func printAppSetConditions(w io.Writer, appSet *arogappsetv1.ApplicationSet) { + _, _ = fmt.Fprintf(w, "CONDITION\tSTATUS\tMESSAGE\tLAST 
TRANSITION\n") + for _, item := range appSet.Status.Conditions { + _, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", item.Type, item.Status, item.Message, item.LastTransitionTime) + } +} + +func hasAppSetChanged(appReq, appRes *arogappsetv1.ApplicationSet, upsert bool) bool { + // upsert==false, no change occurred from create command + if !upsert { + return false + } + + // Server will return nils for empty labels, annotations, finalizers + if len(appReq.Labels) == 0 { + appReq.Labels = nil + } + if len(appReq.Annotations) == 0 { + appReq.Annotations = nil + } + if len(appReq.Finalizers) == 0 { + appReq.Finalizers = nil + } + + if reflect.DeepEqual(appRes.Spec, appReq.Spec) && + reflect.DeepEqual(appRes.Labels, appReq.Labels) && + reflect.DeepEqual(appRes.ObjectMeta.Annotations, appReq.Annotations) && + reflect.DeepEqual(appRes.Finalizers, appReq.Finalizers) { + return false + } + + return true +} diff --git a/cmd/argocd/commands/applicationset_test.go b/cmd/argocd/commands/applicationset_test.go new file mode 100644 index 0000000000000..18e5f85feebbc --- /dev/null +++ b/cmd/argocd/commands/applicationset_test.go @@ -0,0 +1,233 @@ +package commands + +import ( + "io" + "os" + "testing" + + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestPrintApplicationSetNames(t *testing.T) { + output, _ := captureOutput(func() error { + appSet := &v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + } + appSet2 := &v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "team-one", + Name: "test", + }, + } + printApplicationSetNames([]v1alpha1.ApplicationSet{*appSet, *appSet2}) + return nil + }) + expectation := "test\nteam-one/test\n" + if output != expectation { + t.Fatalf("Incorrect print params output %q, should be %q", output, expectation) + } +} + +func TestPrintApplicationSetTable(t *testing.T) { + output, err := 
captureOutput(func() error { + app := &v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "app-name", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Generators: []v1alpha1.ApplicationSetGenerator{ + { + Git: &v1alpha1.GitGenerator{ + RepoURL: "https://github.com/argoproj/argo-cd.git", + Revision: "head", + Directories: []v1alpha1.GitDirectoryGeneratorItem{ + { + Path: "applicationset/examples/git-generator-directory/cluster-addons/*", + }, + }, + }, + }, + }, + Template: v1alpha1.ApplicationSetTemplate{ + Spec: v1alpha1.ApplicationSpec{ + Project: "default", + }, + }, + }, + Status: v1alpha1.ApplicationSetStatus{ + Conditions: []v1alpha1.ApplicationSetCondition{ + { + Status: v1alpha1.ApplicationSetConditionStatusTrue, + Type: v1alpha1.ApplicationSetConditionResourcesUpToDate, + }, + }, + }, + } + + app2 := &v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "app-name", + Namespace: "team-two", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Generators: []v1alpha1.ApplicationSetGenerator{ + { + Git: &v1alpha1.GitGenerator{ + RepoURL: "https://github.com/argoproj/argo-cd.git", + Revision: "head", + Directories: []v1alpha1.GitDirectoryGeneratorItem{ + { + Path: "applicationset/examples/git-generator-directory/cluster-addons/*", + }, + }, + }, + }, + }, + Template: v1alpha1.ApplicationSetTemplate{ + Spec: v1alpha1.ApplicationSpec{ + Project: "default", + }, + }, + }, + Status: v1alpha1.ApplicationSetStatus{ + Conditions: []v1alpha1.ApplicationSetCondition{ + { + Status: v1alpha1.ApplicationSetConditionStatusTrue, + Type: v1alpha1.ApplicationSetConditionResourcesUpToDate, + }, + }, + }, + } + output := "table" + printApplicationSetTable([]v1alpha1.ApplicationSet{*app, *app2}, &output) + return nil + }) + assert.NoError(t, err) + expectation := "NAME PROJECT SYNCPOLICY CONDITIONS\napp-name default nil [{ResourcesUpToDate True }]\nteam-two/app-name default nil [{ResourcesUpToDate True }]\n" + assert.Equal(t, expectation, output) +} + +func 
TestPrintAppSetSummaryTable(t *testing.T) { + baseAppSet := &v1alpha1.ApplicationSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "app-name", + }, + Spec: v1alpha1.ApplicationSetSpec{ + Generators: []v1alpha1.ApplicationSetGenerator{ + { + Git: &v1alpha1.GitGenerator{ + RepoURL: "https://github.com/argoproj/argo-cd.git", + Revision: "head", + Directories: []v1alpha1.GitDirectoryGeneratorItem{ + { + Path: "applicationset/examples/git-generator-directory/cluster-addons/*", + }, + }, + }, + }, + }, + Template: v1alpha1.ApplicationSetTemplate{ + Spec: v1alpha1.ApplicationSpec{ + Project: "default", + }, + }, + }, + Status: v1alpha1.ApplicationSetStatus{ + Conditions: []v1alpha1.ApplicationSetCondition{ + { + Status: v1alpha1.ApplicationSetConditionStatusTrue, + Type: v1alpha1.ApplicationSetConditionResourcesUpToDate, + }, + }, + }, + } + + appsetSpecSyncPolicy := baseAppSet.DeepCopy() + appsetSpecSyncPolicy.Spec.SyncPolicy = &v1alpha1.ApplicationSetSyncPolicy{ + PreserveResourcesOnDeletion: true, + } + + appSetTemplateSpecSyncPolicy := baseAppSet.DeepCopy() + appSetTemplateSpecSyncPolicy.Spec.Template.Spec.SyncPolicy = &v1alpha1.SyncPolicy{ + Automated: &v1alpha1.SyncPolicyAutomated{ + SelfHeal: true, + }, + } + + appSetBothSyncPolicies := baseAppSet.DeepCopy() + appSetBothSyncPolicies.Spec.SyncPolicy = &v1alpha1.ApplicationSetSyncPolicy{ + PreserveResourcesOnDeletion: true, + } + appSetBothSyncPolicies.Spec.Template.Spec.SyncPolicy = &v1alpha1.SyncPolicy{ + Automated: &v1alpha1.SyncPolicyAutomated{ + SelfHeal: true, + }, + } + + for _, tt := range []struct { + name string + appSet *v1alpha1.ApplicationSet + expectedOutput string + }{ + { + name: "appset with only spec.syncPolicy set", + appSet: appsetSpecSyncPolicy, + expectedOutput: `Name: app-name +Project: default +Server: +Namespace: +Repo: +Target: +Path: +SyncPolicy: +`, + }, + { + name: "appset with only spec.template.spec.syncPolicy set", + appSet: appSetTemplateSpecSyncPolicy, + expectedOutput: `Name: app-name 
+Project: default +Server: +Namespace: +Repo: +Target: +Path: +SyncPolicy: Automated +`, + }, + { + name: "appset with both spec.SyncPolicy and spec.template.spec.syncPolicy set", + appSet: appSetBothSyncPolicies, + expectedOutput: `Name: app-name +Project: default +Server: +Namespace: +Repo: +Target: +Path: +SyncPolicy: Automated +`, + }, + } { + t.Run(tt.name, func(t *testing.T) { + oldStdout := os.Stdout + defer func() { + os.Stdout = oldStdout + }() + + r, w, _ := os.Pipe() + os.Stdout = w + + printAppSetSummaryTable(tt.appSet) + w.Close() + + out, err := io.ReadAll(r) + assert.NoError(t, err) + assert.Equal(t, tt.expectedOutput, string(out)) + }) + } +} diff --git a/cmd/argocd/commands/bcrypt.go b/cmd/argocd/commands/bcrypt.go new file mode 100644 index 0000000000000..6d8f87fd447a5 --- /dev/null +++ b/cmd/argocd/commands/bcrypt.go @@ -0,0 +1,38 @@ +package commands + +import ( + "fmt" + "log" + + "github.com/spf13/cobra" + "golang.org/x/crypto/bcrypt" +) + +// NewBcryptCmd represents the bcrypt command +func NewBcryptCmd() *cobra.Command { + var ( + password string + ) + var bcryptCmd = &cobra.Command{ + Use: "bcrypt", + Short: "Generate bcrypt hash for any password", + Example: `# Generate bcrypt hash for any password +argocd account bcrypt --password YOUR_PASSWORD`, + Run: func(cmd *cobra.Command, args []string) { + bytePassword := []byte(password) + // Hashing the password + hash, err := bcrypt.GenerateFromPassword(bytePassword, bcrypt.DefaultCost) + if err != nil { + log.Fatalf("Failed to generate bcrypt hash: %v", err) + } + fmt.Fprint(cmd.OutOrStdout(), string(hash)) + }, + } + + bcryptCmd.Flags().StringVar(&password, "password", "", "Password for which bcrypt hash is generated") + err := bcryptCmd.MarkFlagRequired("password") + if err != nil { + return nil + } + return bcryptCmd +} diff --git a/cmd/argocd/commands/bcrypt_test.go b/cmd/argocd/commands/bcrypt_test.go new file mode 100644 index 0000000000000..ec00a73b0dcba --- /dev/null +++ 
b/cmd/argocd/commands/bcrypt_test.go @@ -0,0 +1,22 @@ +package commands + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + "golang.org/x/crypto/bcrypt" +) + +func TestGeneratePassword(t *testing.T) { + bcryptCmd := NewBcryptCmd() + bcryptCmd.SetArgs([]string{"--password", "abc"}) + output := new(bytes.Buffer) + bcryptCmd.SetOut(output) + err := bcryptCmd.Execute() + if err != nil { + return + } + err = bcrypt.CompareHashAndPassword(output.Bytes(), []byte("abc")) + assert.NoError(t, err) +} diff --git a/cmd/argocd/commands/cert.go b/cmd/argocd/commands/cert.go index 3ab8f8ba57efd..d443d57e337d4 100644 --- a/cmd/argocd/commands/cert.go +++ b/cmd/argocd/commands/cert.go @@ -1,7 +1,6 @@ package commands import ( - "context" "crypto/x509" "fmt" "os" @@ -11,6 +10,7 @@ import ( "github.com/spf13/cobra" + "github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless" argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient" certificatepkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/certificate" appsv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" @@ -64,7 +64,9 @@ func NewCertAddTLSCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command Use: "add-tls SERVERNAME", Short: "Add TLS certificate data for connecting to repository server SERVERNAME", Run: func(c *cobra.Command, args []string) { - conn, certIf := argocdclient.NewClientOrDie(clientOpts).NewCertClientOrDie() + ctx := c.Context() + + conn, certIf := headless.NewClientOrDie(clientOpts, c).NewCertClientOrDie() defer io.Close(conn) if len(args) != 1 { @@ -115,7 +117,7 @@ func NewCertAddTLSCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command CertType: "https", CertData: []byte(strings.Join(certificateArray, "\n")), }) - certificates, err := certIf.CreateCertificate(context.Background(), &certificatepkg.RepositoryCertificateCreateRequest{ + certificates, err := certIf.CreateCertificate(ctx, &certificatepkg.RepositoryCertificateCreateRequest{ Certificates: 
&appsv1.RepositoryCertificateList{ Items: certificateList, }, @@ -128,12 +130,12 @@ func NewCertAddTLSCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command } }, } - command.Flags().StringVar(&fromFile, "from", "", "read TLS certificate data from file (default is to read from stdin)") + command.Flags().StringVar(&fromFile, "from", "", "Read TLS certificate data from file (default is to read from stdin)") command.Flags().BoolVar(&upsert, "upsert", false, "Replace existing TLS certificate if certificate is different in input") return command } -// NewCertAddCommand returns a new instance of an `argocd cert add` command +// NewCertAddSSHCommand returns a new instance of an `argocd cert add` command func NewCertAddSSHCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { var ( fromFile string @@ -146,8 +148,9 @@ func NewCertAddSSHCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command Use: "add-ssh --batch", Short: "Add SSH known host entries for repository servers", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() - conn, certIf := argocdclient.NewClientOrDie(clientOpts).NewCertClientOrDie() + conn, certIf := headless.NewClientOrDie(clientOpts, c).NewCertClientOrDie() defer io.Close(conn) var sshKnownHostsLists []string @@ -190,7 +193,7 @@ func NewCertAddSSHCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command } certList := &appsv1.RepositoryCertificateList{Items: certificates} - response, err := certIf.CreateCertificate(context.Background(), &certificatepkg.RepositoryCertificateCreateRequest{ + response, err := certIf.CreateCertificate(ctx, &certificatepkg.RepositoryCertificateCreateRequest{ Certificates: certList, Upsert: upsert, }) @@ -215,11 +218,13 @@ func NewCertRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command Use: "rm REPOSERVER", Short: "Remove certificate of TYPE for REPOSERVER", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) < 1 { c.HelpFunc()(c, args) 
os.Exit(1) } - conn, certIf := argocdclient.NewClientOrDie(clientOpts).NewCertClientOrDie() + conn, certIf := headless.NewClientOrDie(clientOpts, c).NewCertClientOrDie() defer io.Close(conn) hostNamePattern := args[0] @@ -236,7 +241,7 @@ func NewCertRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command CertType: certType, CertSubType: certSubType, } - removed, err := certIf.DeleteCertificate(context.Background(), &certQuery) + removed, err := certIf.DeleteCertificate(ctx, &certQuery) errors.CheckError(err) if len(removed.Items) > 0 { for _, cert := range removed.Items { @@ -264,6 +269,8 @@ func NewCertListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { Use: "list", Short: "List configured certificates", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if certType != "" { switch certType { case "ssh": @@ -274,9 +281,9 @@ func NewCertListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { } } - conn, certIf := argocdclient.NewClientOrDie(clientOpts).NewCertClientOrDie() + conn, certIf := headless.NewClientOrDie(clientOpts, c).NewCertClientOrDie() defer io.Close(conn) - certificates, err := certIf.ListCertificates(context.Background(), &certificatepkg.RepositoryCertificateQuery{HostNamePattern: hostNamePattern, CertType: certType}) + certificates, err := certIf.ListCertificates(ctx, &certificatepkg.RepositoryCertificateQuery{HostNamePattern: hostNamePattern, CertType: certType}) errors.CheckError(err) switch output { @@ -293,9 +300,9 @@ func NewCertListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { } command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide") - command.Flags().StringVar(&sortOrder, "sort", "", "set display sort order for output format wide. 
One of: hostname|type") - command.Flags().StringVar(&certType, "cert-type", "", "only list certificates of given type, valid: 'ssh','https'") - command.Flags().StringVar(&hostNamePattern, "hostname-pattern", "", "only list certificates for hosts matching given glob-pattern") + command.Flags().StringVar(&sortOrder, "sort", "", "Set display sort order for output format wide. One of: hostname|type") + command.Flags().StringVar(&certType, "cert-type", "", "Only list certificates of given type, valid: 'ssh','https'") + command.Flags().StringVar(&hostNamePattern, "hostname-pattern", "", "Only list certificates for hosts matching given glob-pattern") return command } diff --git a/cmd/argocd/commands/cluster.go b/cmd/argocd/commands/cluster.go index 9539ebbafb715..a1d1589540af0 100644 --- a/cmd/argocd/commands/cluster.go +++ b/cmd/argocd/commands/cluster.go @@ -1,25 +1,41 @@ package commands import ( - "context" "fmt" "os" + "regexp" "strings" "text/tabwriter" + "github.com/mattn/go-isatty" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + "github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless" cmdutil "github.com/argoproj/argo-cd/v2/cmd/util" "github.com/argoproj/argo-cd/v2/common" argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient" clusterpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/cluster" argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/argoproj/argo-cd/v2/util/cli" "github.com/argoproj/argo-cd/v2/util/clusterauth" "github.com/argoproj/argo-cd/v2/util/errors" "github.com/argoproj/argo-cd/v2/util/io" + "github.com/argoproj/argo-cd/v2/util/text/label" +) + +const ( + // type of the cluster ID is 'name' + clusterIdTypeName = "name" + // cluster field is 'name' + clusterFieldName = "name" + // cluster field is 'namespaces' + clusterFieldNamespaces = "namespaces" + // indicates managing all namespaces + allNamespaces 
= "*" ) // NewClusterCommand returns a new instance of an `argocd cluster` command @@ -40,49 +56,54 @@ func NewClusterCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clientc # Get specific details about a cluster in plain text (wide) format: argocd cluster get example-cluster -o wide - # Remove a target cluster context from ArgoCD + # Remove a target cluster context from ArgoCD argocd cluster rm example-cluster -`, + + # Set a target cluster context from ArgoCD + argocd cluster set CLUSTER_NAME --name new-cluster-name --namespace '*' + argocd cluster set CLUSTER_NAME --name new-cluster-name --namespace namespace-one --namespace namespace-two`, } command.AddCommand(NewClusterAddCommand(clientOpts, pathOpts)) command.AddCommand(NewClusterGetCommand(clientOpts)) command.AddCommand(NewClusterListCommand(clientOpts)) - command.AddCommand(NewClusterRemoveCommand(clientOpts)) + command.AddCommand(NewClusterRemoveCommand(clientOpts, pathOpts)) command.AddCommand(NewClusterRotateAuthCommand(clientOpts)) + command.AddCommand(NewClusterSetCommand(clientOpts)) return command } // NewClusterAddCommand returns a new instance of an `argocd cluster add` command func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clientcmd.PathOptions) *cobra.Command { var ( - clusterOpts cmdutil.ClusterOptions + clusterOpts cmdutil.ClusterOptions + skipConfirmation bool + labels []string + annotations []string ) var command = &cobra.Command{ Use: "add CONTEXT", Short: fmt.Sprintf("%s cluster add CONTEXT", cliName), Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + var configAccess clientcmd.ConfigAccess = pathOpts if len(args) == 0 { log.Error("Choose a context name from:") cmdutil.PrintKubeContexts(configAccess) os.Exit(1) } - config, err := configAccess.GetStartingConfig() - errors.CheckError(err) - contextName := args[0] - clstContext := config.Contexts[contextName] - if clstContext == nil { - log.Fatalf("Context %s does not exist in 
kubeconfig", contextName) - } - overrides := clientcmd.ConfigOverrides{ - Context: *clstContext, + if clusterOpts.InCluster && clusterOpts.ClusterEndpoint != "" { + log.Fatal("Can only use one of --in-cluster or --cluster-endpoint") + return } - clientConfig := clientcmd.NewDefaultClientConfig(*config, &overrides) - conf, err := clientConfig.ClientConfig() - errors.CheckError(err) + contextName := args[0] + conf, err := getRestConfig(pathOpts, contextName) + errors.CheckError(err) + clientset, err := kubernetes.NewForConfig(conf) + errors.CheckError(err) managerBearerToken := "" var awsAuthConf *argoappv1.AWSAuthConfig var execProviderConf *argoappv1.ExecProviderConfig @@ -101,32 +122,59 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie } } else { // Install RBAC resources for managing the cluster - clientset, err := kubernetes.NewForConfig(conf) - errors.CheckError(err) if clusterOpts.ServiceAccount != "" { - managerBearerToken, err = clusterauth.GetServiceAccountBearerToken(clientset, clusterOpts.SystemNamespace, clusterOpts.ServiceAccount) + managerBearerToken, err = clusterauth.GetServiceAccountBearerToken(clientset, clusterOpts.SystemNamespace, clusterOpts.ServiceAccount, common.BearerTokenTimeout) } else { - managerBearerToken, err = clusterauth.InstallClusterManagerRBAC(clientset, clusterOpts.SystemNamespace, clusterOpts.Namespaces) + isTerminal := isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd()) + if isTerminal && !skipConfirmation { + accessLevel := "cluster" + if len(clusterOpts.Namespaces) > 0 { + accessLevel = "namespace" + } + message := fmt.Sprintf("WARNING: This will create a service account `argocd-manager` on the cluster referenced by context `%s` with full %s level privileges. Do you want to continue [y/N]? 
", contextName, accessLevel) + if !cli.AskToProceed(message) { + os.Exit(1) + } + } + managerBearerToken, err = clusterauth.InstallClusterManagerRBAC(clientset, clusterOpts.SystemNamespace, clusterOpts.Namespaces, common.BearerTokenTimeout) } errors.CheckError(err) } - conn, clusterIf := argocdclient.NewClientOrDie(clientOpts).NewClusterClientOrDie() + + labelsMap, err := label.Parse(labels) + errors.CheckError(err) + annotationsMap, err := label.Parse(annotations) + errors.CheckError(err) + + conn, clusterIf := headless.NewClientOrDie(clientOpts, c).NewClusterClientOrDie() defer io.Close(conn) if clusterOpts.Name != "" { contextName = clusterOpts.Name } - clst := cmdutil.NewCluster(contextName, clusterOpts.Namespaces, conf, managerBearerToken, awsAuthConf, execProviderConf) - if clusterOpts.InCluster { - clst.Server = common.KubernetesInternalAPIServerAddr + clst := cmdutil.NewCluster(contextName, clusterOpts.Namespaces, clusterOpts.ClusterResources, conf, managerBearerToken, awsAuthConf, execProviderConf, labelsMap, annotationsMap) + if clusterOpts.InClusterEndpoint() { + clst.Server = argoappv1.KubernetesInternalAPIServerAddr + } else if clusterOpts.ClusterEndpoint == string(cmdutil.KubePublicEndpoint) { + endpoint, err := cmdutil.GetKubePublicEndpoint(clientset) + if err != nil || len(endpoint) == 0 { + log.Warnf("Failed to find the cluster endpoint from kube-public data: %v", err) + log.Infof("Falling back to the endpoint '%s' as listed in the kubeconfig context", clst.Server) + endpoint = clst.Server + } + clst.Server = endpoint } + if clusterOpts.Shard >= 0 { clst.Shard = &clusterOpts.Shard } + if clusterOpts.Project != "" { + clst.Project = clusterOpts.Project + } clstCreateReq := clusterpkg.ClusterCreateRequest{ Cluster: clst, Upsert: clusterOpts.Upsert, } - _, err = clusterIf.Create(context.Background(), &clstCreateReq) + _, err = clusterIf.Create(ctx, &clstCreateReq) errors.CheckError(err) fmt.Printf("Cluster '%s' added\n", clst.Server) }, @@ -135,29 
+183,125 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie command.Flags().BoolVar(&clusterOpts.Upsert, "upsert", false, "Override an existing cluster with the same name even if the spec differs") command.Flags().StringVar(&clusterOpts.ServiceAccount, "service-account", "", fmt.Sprintf("System namespace service account to use for kubernetes resource management. If not set then default \"%s\" SA will be created", clusterauth.ArgoCDManagerServiceAccount)) command.Flags().StringVar(&clusterOpts.SystemNamespace, "system-namespace", common.DefaultSystemNamespace, "Use different system namespace") + command.Flags().BoolVarP(&skipConfirmation, "yes", "y", false, "Skip explicit confirmation") + command.Flags().StringArrayVar(&labels, "label", nil, "Set metadata labels (e.g. --label key=value)") + command.Flags().StringArrayVar(&annotations, "annotation", nil, "Set metadata annotations (e.g. --annotation key=value)") cmdutil.AddClusterFlags(command, &clusterOpts) return command } +func getRestConfig(pathOpts *clientcmd.PathOptions, ctxName string) (*rest.Config, error) { + config, err := pathOpts.GetStartingConfig() + if err != nil { + return nil, err + } + + clstContext := config.Contexts[ctxName] + if clstContext == nil { + return nil, fmt.Errorf("Context %s does not exist in kubeconfig", ctxName) + } + + overrides := clientcmd.ConfigOverrides{ + Context: *clstContext, + } + + clientConfig := clientcmd.NewDefaultClientConfig(*config, &overrides) + conf, err := clientConfig.ClientConfig() + if err != nil { + return nil, err + } + + return conf, nil +} + +// NewClusterSetCommand returns a new instance of an `argocd cluster set` command +func NewClusterSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { + var ( + clusterOptions cmdutil.ClusterOptions + clusterName string + ) + var command = &cobra.Command{ + Use: "set NAME", + Short: "Set cluster information", + Example: ` # Set cluster information + argocd cluster set 
CLUSTER_NAME --name new-cluster-name --namespace '*' + argocd cluster set CLUSTER_NAME --name new-cluster-name --namespace namespace-one --namespace namespace-two`, + Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 1 { + c.HelpFunc()(c, args) + os.Exit(1) + } + // name of the cluster whose fields have to be updated. + clusterName = args[0] + conn, clusterIf := headless.NewClientOrDie(clientOpts, c).NewClusterClientOrDie() + defer io.Close(conn) + // checks the fields that needs to be updated + updatedFields := checkFieldsToUpdate(clusterOptions) + namespaces := clusterOptions.Namespaces + // check if all namespaces have to be considered + if len(namespaces) == 1 && strings.EqualFold(namespaces[0], allNamespaces) { + namespaces[0] = "" + } + if updatedFields != nil { + clusterUpdateRequest := clusterpkg.ClusterUpdateRequest{ + Cluster: &argoappv1.Cluster{ + Name: clusterOptions.Name, + Namespaces: namespaces, + }, + UpdatedFields: updatedFields, + Id: &clusterpkg.ClusterID{ + Type: clusterIdTypeName, + Value: clusterName, + }, + } + _, err := clusterIf.Update(ctx, &clusterUpdateRequest) + errors.CheckError(err) + fmt.Printf("Cluster '%s' updated.\n", clusterName) + } else { + fmt.Print("Specify the cluster field to be updated.\n") + } + }, + } + command.Flags().StringVar(&clusterOptions.Name, "name", "", "Overwrite the cluster name") + command.Flags().StringArrayVar(&clusterOptions.Namespaces, "namespace", nil, "List of namespaces which are allowed to manage. 
Specify '*' to manage all namespaces") + return command +} + +// checkFieldsToUpdate returns the fields that needs to be updated +func checkFieldsToUpdate(clusterOptions cmdutil.ClusterOptions) []string { + var updatedFields []string + if clusterOptions.Name != "" { + updatedFields = append(updatedFields, clusterFieldName) + } + if clusterOptions.Namespaces != nil { + updatedFields = append(updatedFields, clusterFieldNamespaces) + } + return updatedFields +} + // NewClusterGetCommand returns a new instance of an `argocd cluster get` command func NewClusterGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { var ( output string ) var command = &cobra.Command{ - Use: "get SERVER", - Short: "Get cluster information", - Example: `argocd cluster get https://12.34.567.89`, + Use: "get SERVER/NAME", + Short: "Get cluster information", + Example: `argocd cluster get https://12.34.567.89 +argocd cluster get in-cluster`, Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) == 0 { c.HelpFunc()(c, args) os.Exit(1) } - conn, clusterIf := argocdclient.NewClientOrDie(clientOpts).NewClusterClientOrDie() + conn, clusterIf := headless.NewClientOrDie(clientOpts, c).NewClusterClientOrDie() defer io.Close(conn) clusters := make([]argoappv1.Cluster, 0) - for _, clusterName := range args { - clst, err := clusterIf.Get(context.Background(), &clusterpkg.ClusterQuery{Server: clusterName}) + for _, clusterSelector := range args { + clst, err := clusterIf.Get(ctx, getQueryBySelector(clusterSelector)) errors.CheckError(err) clusters = append(clusters, *clst) } @@ -211,50 +355,102 @@ func printClusterDetails(clusters []argoappv1.Cluster) { } } -// NewClusterRemoveCommand returns a new instance of an `argocd cluster list` command -func NewClusterRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { +// NewClusterRemoveCommand returns a new instance of an `argocd cluster rm` command +func NewClusterRemoveCommand(clientOpts 
*argocdclient.ClientOptions, pathOpts *clientcmd.PathOptions) *cobra.Command { + var noPrompt bool var command = &cobra.Command{ - Use: "rm SERVER", - Short: "Remove cluster credentials", - Example: `argocd cluster rm https://12.34.567.89`, + Use: "rm SERVER/NAME", + Short: "Remove cluster credentials", + Example: `argocd cluster rm https://12.34.567.89 +argocd cluster rm cluster-name`, Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) == 0 { c.HelpFunc()(c, args) os.Exit(1) } - conn, clusterIf := argocdclient.NewClientOrDie(clientOpts).NewClusterClientOrDie() + conn, clusterIf := headless.NewClientOrDie(clientOpts, c).NewClusterClientOrDie() defer io.Close(conn) + var numOfClusters = len(args) + var isConfirmAll bool = false - // clientset, err := kubernetes.NewForConfig(conf) - // errors.CheckError(err) + for _, clusterSelector := range args { + clusterQuery := getQueryBySelector(clusterSelector) + var lowercaseAnswer string + if !noPrompt { + if numOfClusters == 1 { + lowercaseAnswer = cli.AskToProceedS("Are you sure you want to remove '" + clusterSelector + "'? Any Apps deploying to this cluster will go to health status Unknown.[y/n] ") + } else { + if !isConfirmAll { + lowercaseAnswer = cli.AskToProceedS("Are you sure you want to remove '" + clusterSelector + "'? Any Apps deploying to this cluster will go to health status Unknown.[y/n/A] where 'A' is to remove all specified clusters without prompting. Any Apps deploying to these clusters will go to health status Unknown. 
") + if lowercaseAnswer == "a" { + lowercaseAnswer = "y" + isConfirmAll = true + } + } else { + lowercaseAnswer = "y" + } + } + } else { + lowercaseAnswer = "y" + } - for _, clusterName := range args { - // TODO(jessesuen): find the right context and remove manager RBAC artifacts - // err := clusterauth.UninstallClusterManagerRBAC(clientset) - // errors.CheckError(err) - _, err := clusterIf.Delete(context.Background(), &clusterpkg.ClusterQuery{Server: clusterName}) - errors.CheckError(err) - fmt.Printf("Cluster '%s' removed\n", clusterName) + if lowercaseAnswer == "y" { + // get the cluster name to use as context to delete RBAC on cluster + clst, err := clusterIf.Get(ctx, clusterQuery) + errors.CheckError(err) + + // remove cluster + _, err = clusterIf.Delete(ctx, clusterQuery) + errors.CheckError(err) + fmt.Printf("Cluster '%s' removed\n", clusterSelector) + + // remove RBAC from cluster + conf, err := getRestConfig(pathOpts, clst.Name) + errors.CheckError(err) + + clientset, err := kubernetes.NewForConfig(conf) + errors.CheckError(err) + + err = clusterauth.UninstallClusterManagerRBAC(clientset) + errors.CheckError(err) + } else { + fmt.Println("The command to remove '" + clusterSelector + "' was cancelled.") + } } }, } + command.Flags().BoolVarP(&noPrompt, "yes", "y", false, "Turn off prompting to confirm remove of cluster resources") return command } // Print table of cluster information func printClusterTable(clusters []argoappv1.Cluster) { w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) - _, _ = fmt.Fprintf(w, "SERVER\tNAME\tVERSION\tSTATUS\tMESSAGE\n") + _, _ = fmt.Fprintf(w, "SERVER\tNAME\tVERSION\tSTATUS\tMESSAGE\tPROJECT\n") for _, c := range clusters { server := c.Server if len(c.Namespaces) > 0 { server = fmt.Sprintf("%s (%d namespaces)", c.Server, len(c.Namespaces)) } - _, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", server, c.Name, c.ServerVersion, c.ConnectionState.Status, c.ConnectionState.Message) + _, _ = fmt.Fprintf(w, 
"%s\t%s\t%s\t%s\t%s\t%s\n", server, c.Name, c.ServerVersion, c.ConnectionState.Status, c.ConnectionState.Message, c.Project) } _ = w.Flush() } +// Returns cluster query for getting cluster depending on the cluster selector +func getQueryBySelector(clusterSelector string) *clusterpkg.ClusterQuery { + var query clusterpkg.ClusterQuery + isServer, err := regexp.MatchString(`^https?://`, clusterSelector) + if isServer || err != nil { + query.Server = clusterSelector + } else { + query.Name = clusterSelector + } + return &query +} + // Print list of cluster servers func printClusterServers(clusters []argoappv1.Cluster) { for _, c := range clusters { @@ -271,9 +467,11 @@ func NewClusterListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman Use: "list", Short: "List configured clusters", Run: func(c *cobra.Command, args []string) { - conn, clusterIf := argocdclient.NewClientOrDie(clientOpts).NewClusterClientOrDie() + ctx := c.Context() + + conn, clusterIf := headless.NewClientOrDie(clientOpts, c).NewClusterClientOrDie() defer io.Close(conn) - clusters, err := clusterIf.List(context.Background(), &clusterpkg.ClusterQuery{}) + clusters, err := clusterIf.List(ctx, &clusterpkg.ClusterQuery{}) errors.CheckError(err) switch output { case "yaml", "json": @@ -295,22 +493,26 @@ func NewClusterListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman // NewClusterRotateAuthCommand returns a new instance of an `argocd cluster rotate-auth` command func NewClusterRotateAuthCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { var command = &cobra.Command{ - Use: "rotate-auth SERVER", - Short: fmt.Sprintf("%s cluster rotate-auth SERVER", cliName), - Example: fmt.Sprintf("%s cluster rotate-auth https://12.34.567.89", cliName), + Use: "rotate-auth SERVER/NAME", + Short: fmt.Sprintf("%s cluster rotate-auth SERVER/NAME", cliName), + Example: `argocd cluster rotate-auth https://12.34.567.89 +argocd cluster rotate-auth cluster-name`, Run: func(c *cobra.Command, 
args []string) { + ctx := c.Context() + if len(args) != 1 { c.HelpFunc()(c, args) os.Exit(1) } - conn, clusterIf := argocdclient.NewClientOrDie(clientOpts).NewClusterClientOrDie() + conn, clusterIf := headless.NewClientOrDie(clientOpts, c).NewClusterClientOrDie() defer io.Close(conn) - clusterQuery := clusterpkg.ClusterQuery{ - Server: args[0], - } - _, err := clusterIf.RotateAuth(context.Background(), &clusterQuery) + + cluster := args[0] + clusterQuery := getQueryBySelector(cluster) + _, err := clusterIf.RotateAuth(ctx, clusterQuery) errors.CheckError(err) - fmt.Printf("Cluster '%s' rotated auth\n", clusterQuery.Server) + + fmt.Printf("Cluster '%s' rotated auth\n", cluster) }, } return command diff --git a/cmd/argocd/commands/cluster_test.go b/cmd/argocd/commands/cluster_test.go index ebed69059a8e6..24e54ea122fc4 100644 --- a/cmd/argocd/commands/cluster_test.go +++ b/cmd/argocd/commands/cluster_test.go @@ -3,11 +3,28 @@ package commands import ( "testing" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" ) +func Test_getQueryBySelector(t *testing.T) { + query := getQueryBySelector("my-cluster") + assert.Equal(t, query.Name, "my-cluster") + assert.Equal(t, query.Server, "") + + query = getQueryBySelector("http://my-server") + assert.Equal(t, query.Name, "") + assert.Equal(t, query.Server, "http://my-server") + + query = getQueryBySelector("https://my-server") + assert.Equal(t, query.Name, "") + assert.Equal(t, query.Server, "https://my-server") +} + func Test_printClusterTable(t *testing.T) { printClusterTable([]v1alpha1.Cluster{ { @@ -29,3 +46,63 @@ func Test_printClusterTable(t *testing.T) { }, }) } + +func Test_getRestConfig(t *testing.T) { + type args struct { + pathOpts *clientcmd.PathOptions + ctxName string + 
} + pathOpts := &clientcmd.PathOptions{ + GlobalFile: "./testdata/config", + LoadingRules: clientcmd.NewDefaultClientConfigLoadingRules(), + } + tests := []struct { + name string + args args + expected *rest.Config + wantErr bool + expectedErr string + }{ + { + "Load config for context successfully", + args{ + pathOpts, + "argocd2.example.com:443", + }, + &rest.Config{Host: "argocd2.example.com:443"}, + false, + "", + }, + { + "Load config for current-context successfully", + args{ + pathOpts, + "localhost:8080", + }, + &rest.Config{Host: "localhost:8080"}, + false, + "", + }, + { + "Context not found", + args{ + pathOpts, + "not-exist", + }, + nil, + true, + "Context not-exist does not exist in kubeconfig", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got, err := getRestConfig(tt.args.pathOpts, tt.args.ctxName); err == nil { + require.Equal(t, got, tt.expected) + } else if tt.wantErr { + require.Equal(t, err.Error(), tt.expectedErr) + } else { + t.Errorf("An unexpected error occurred during test %s:\n%s", tt.name, err.Error()) + } + }) + } +} diff --git a/cmd/argocd/commands/common.go b/cmd/argocd/commands/common.go index e88c033fe0b9a..849b9a48f02b6 100644 --- a/cmd/argocd/commands/common.go +++ b/cmd/argocd/commands/common.go @@ -5,7 +5,7 @@ import ( "fmt" "reflect" - "github.com/ghodss/yaml" + "sigs.k8s.io/yaml" ) const ( @@ -22,13 +22,13 @@ func PrintResource(resource interface{}, output string) error { case "json": jsonBytes, err := json.MarshalIndent(resource, "", " ") if err != nil { - return err + return fmt.Errorf("unable to marshal resource to json: %w", err) } fmt.Println(string(jsonBytes)) case "yaml": yamlBytes, err := yaml.Marshal(resource) if err != nil { - return err + return fmt.Errorf("unable to marshal resource to yaml: %w", err) } fmt.Print(string(yamlBytes)) default: @@ -56,13 +56,13 @@ func PrintResourceList(resources interface{}, output string, single bool) error case "json": jsonBytes, err := 
json.MarshalIndent(resources, "", " ") if err != nil { - return err + return fmt.Errorf("unable to marshal resources to json: %w", err) } fmt.Println(string(jsonBytes)) case "yaml": yamlBytes, err := yaml.Marshal(resources) if err != nil { - return err + return fmt.Errorf("unable to marshal resources to yaml: %w", err) } fmt.Print(string(yamlBytes)) default: diff --git a/cmd/argocd/commands/common_test.go b/cmd/argocd/commands/common_test.go index ab8db5b0f0c35..c86429b32e0c8 100644 --- a/cmd/argocd/commands/common_test.go +++ b/cmd/argocd/commands/common_test.go @@ -1,7 +1,7 @@ package commands import ( - "io/ioutil" + "io" "os" "testing" @@ -59,7 +59,7 @@ func captureOutput(f func() error) (string, error) { os.Stdout = stdout return "", err } - str, err := ioutil.ReadAll(r) + str, err := io.ReadAll(r) os.Stdout = stdout if err != nil { return "", err diff --git a/cmd/argocd/commands/completion.go b/cmd/argocd/commands/completion.go index 3a698146f4e1c..7d3f5675ee95e 100644 --- a/cmd/argocd/commands/completion.go +++ b/cmd/argocd/commands/completion.go @@ -146,6 +146,7 @@ __argocd_custom_func() { ;; argocd_cluster_get | \ argocd_cluster_rm | \ + argocd_cluster_set | \ argocd_login | \ argocd_cluster_add) __argocd_list_servers @@ -203,8 +204,20 @@ To access completions in your current shell, run $ source <(argocd completion bash) Alternatively, write it to a file and source in .bash_profile -For zsh, output to a file in a directory referenced by the $fpath shell -variable. 
+For zsh, add the following to your ~/.zshrc file: +source <(argocd completion zsh) +compdef _argocd argocd + +Optionally, also add the following, in case you are getting errors involving compdef & compinit such as command not found: compdef: +autoload -Uz compinit +compinit +`, + Example: `# For bash +$ source <(argocd completion bash) + +# For zsh +$ argocd completion zsh > _argocd +$ source _argocd `, Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { diff --git a/cmd/argocd/commands/context.go b/cmd/argocd/commands/context.go index e4fcabef4cad3..51d003b4df9df 100644 --- a/cmd/argocd/commands/context.go +++ b/cmd/argocd/commands/context.go @@ -2,7 +2,6 @@ package commands import ( "fmt" - "io/ioutil" "os" "path" "strings" @@ -23,6 +22,14 @@ func NewContextCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { Use: "context [CONTEXT]", Aliases: []string{"ctx"}, Short: "Switch between contexts", + Example: `# List Argo CD Contexts +argocd context + +# Switch Argo CD context +argocd context cd.argoproj.io + +# Delete Argo CD context +argocd context cd.argoproj.io --delete`, Run: func(c *cobra.Command, args []string) { localCfg, err := localconfig.ReadLocalConfig(clientOpts.ConfigPath) @@ -50,7 +57,7 @@ func NewContextCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { prevCtxFile := path.Join(argoCDDir, ".prev-ctx") if ctxName == "-" { - prevCtxBytes, err := ioutil.ReadFile(prevCtxFile) + prevCtxBytes, err := os.ReadFile(prevCtxFile) errors.CheckError(err) ctxName = string(prevCtxBytes) } @@ -66,7 +73,7 @@ func NewContextCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { err = localconfig.WriteLocalConfig(*localCfg, clientOpts.ConfigPath) errors.CheckError(err) - err = ioutil.WriteFile(prevCtxFile, []byte(prevCtx), 0644) + err = os.WriteFile(prevCtxFile, []byte(prevCtx), 0644) errors.CheckError(err) fmt.Printf("Switched to context '%s'\n", localCfg.CurrentContext) }, diff --git 
a/cmd/argocd/commands/context_test.go b/cmd/argocd/commands/context_test.go index 70241d14554a4..c258485b8181f 100644 --- a/cmd/argocd/commands/context_test.go +++ b/cmd/argocd/commands/context_test.go @@ -1,13 +1,12 @@ package commands import ( - "io/ioutil" "os" "testing" - "github.com/stretchr/testify/assert" - "github.com/argoproj/argo-cd/v2/util/localconfig" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) const testConfig = `contexts: @@ -36,14 +35,16 @@ users: - auth-token: vErrYS3c3tReFRe$hToken name: localhost:8080` -const testConfigFilePath = "./testdata/config" +const testConfigFilePath = "./testdata/local.config" func TestContextDelete(t *testing.T) { - // Write the test config file - err := ioutil.WriteFile(testConfigFilePath, []byte(testConfig), os.ModePerm) + err := os.WriteFile(testConfigFilePath, []byte(testConfig), os.ModePerm) assert.NoError(t, err) + defer os.Remove(testConfigFilePath) + err = os.Chmod(testConfigFilePath, 0600) + require.NoError(t, err, "Could not change the file permission to 0600 %v", err) localConfig, err := localconfig.ReadLocalConfig(testConfigFilePath) assert.NoError(t, err) assert.Equal(t, localConfig.CurrentContext, "localhost:8080") @@ -73,9 +74,4 @@ func TestContextDelete(t *testing.T) { assert.NotContains(t, localConfig.Servers, localconfig.Server{PlainText: true, Server: "localhost:8080"}) assert.NotContains(t, localConfig.Users, localconfig.User{AuthToken: "vErrYS3c3tReFRe$hToken", Name: "localhost:8080"}) assert.Contains(t, localConfig.Contexts, localconfig.ContextRef{Name: "argocd2.example.com:443", Server: "argocd2.example.com:443", User: "argocd2.example.com:443"}) - - // Write the file again so that no conflicts are made in git - err = ioutil.WriteFile(testConfigFilePath, []byte(testConfig), os.ModePerm) - assert.NoError(t, err) - } diff --git a/cmd/argocd/commands/gpg.go b/cmd/argocd/commands/gpg.go index feb7502f9ed5c..7a48a915bebec 100644 --- a/cmd/argocd/commands/gpg.go +++ 
b/cmd/argocd/commands/gpg.go @@ -1,15 +1,14 @@ package commands import ( - "context" "fmt" - "io/ioutil" "os" "strings" "text/tabwriter" "github.com/spf13/cobra" + "github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless" argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient" gpgkeypkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/gpgkey" appsv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" @@ -44,9 +43,11 @@ func NewGPGListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { Use: "list", Short: "List configured GPG public keys", Run: func(c *cobra.Command, args []string) { - conn, gpgIf := argocdclient.NewClientOrDie(clientOpts).NewGPGKeyClientOrDie() + ctx := c.Context() + + conn, gpgIf := headless.NewClientOrDie(clientOpts, c).NewGPGKeyClientOrDie() defer argoio.Close(conn) - keys, err := gpgIf.List(context.Background(), &gpgkeypkg.GnuPGPublicKeyQuery{}) + keys, err := gpgIf.List(ctx, &gpgkeypkg.GnuPGPublicKeyQuery{}) errors.CheckError(err) switch output { case "yaml", "json": @@ -72,12 +73,14 @@ func NewGPGGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { Use: "get KEYID", Short: "Get the GPG public key with ID from the server", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 1 { errors.CheckError(fmt.Errorf("Missing KEYID argument")) } - conn, gpgIf := argocdclient.NewClientOrDie(clientOpts).NewGPGKeyClientOrDie() + conn, gpgIf := headless.NewClientOrDie(clientOpts, c).NewGPGKeyClientOrDie() defer argoio.Close(conn) - key, err := gpgIf.Get(context.Background(), &gpgkeypkg.GnuPGPublicKeyQuery{KeyID: args[0]}) + key, err := gpgIf.Get(ctx, &gpgkeypkg.GnuPGPublicKeyQuery{KeyID: args[0]}) errors.CheckError(err) switch output { case "yaml", "json": @@ -107,16 +110,18 @@ func NewGPGAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { Use: "add", Short: "Adds a GPG public key to the server's keyring", Run: func(c *cobra.Command, args []string) { + ctx := 
c.Context() + if fromFile == "" { errors.CheckError(fmt.Errorf("--from is mandatory")) } - keyData, err := ioutil.ReadFile(fromFile) + keyData, err := os.ReadFile(fromFile) if err != nil { errors.CheckError(err) } - conn, gpgIf := argocdclient.NewClientOrDie(clientOpts).NewGPGKeyClientOrDie() + conn, gpgIf := headless.NewClientOrDie(clientOpts, c).NewGPGKeyClientOrDie() defer argoio.Close(conn) - resp, err := gpgIf.Create(context.Background(), &gpgkeypkg.GnuPGPublicKeyCreateRequest{Publickey: &appsv1.GnuPGPublicKey{KeyData: string(keyData)}}) + resp, err := gpgIf.Create(ctx, &gpgkeypkg.GnuPGPublicKeyCreateRequest{Publickey: &appsv1.GnuPGPublicKey{KeyData: string(keyData)}}) errors.CheckError(err) fmt.Printf("Created %d key(s) from input file", len(resp.Created.Items)) if len(resp.Skipped) > 0 { @@ -136,12 +141,14 @@ func NewGPGDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command Use: "rm KEYID", Short: "Removes a GPG public key from the server's keyring", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 1 { errors.CheckError(fmt.Errorf("Missing KEYID argument")) } - conn, gpgIf := argocdclient.NewClientOrDie(clientOpts).NewGPGKeyClientOrDie() + conn, gpgIf := headless.NewClientOrDie(clientOpts, c).NewGPGKeyClientOrDie() defer argoio.Close(conn) - _, err := gpgIf.Delete(context.Background(), &gpgkeypkg.GnuPGPublicKeyQuery{KeyID: args[0]}) + _, err := gpgIf.Delete(ctx, &gpgkeypkg.GnuPGPublicKeyQuery{KeyID: args[0]}) errors.CheckError(err) fmt.Printf("Deleted key with key ID %s\n", args[0]) }, diff --git a/cmd/argocd/commands/headless/headless.go b/cmd/argocd/commands/headless/headless.go new file mode 100644 index 0000000000000..070d9c9c83bcb --- /dev/null +++ b/cmd/argocd/commands/headless/headless.go @@ -0,0 +1,277 @@ +package headless + +import ( + "context" + "fmt" + "net" + "os" + "sync" + "time" + + "github.com/spf13/cobra" + + "github.com/argoproj/argo-cd/v2/cmd/argocd/commands/initialize" + 
"github.com/argoproj/argo-cd/v2/common" + + "github.com/alicebob/miniredis/v2" + "github.com/golang/protobuf/ptypes/empty" + "github.com/redis/go-redis/v9" + log "github.com/sirupsen/logrus" + "github.com/spf13/pflag" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/kubernetes" + cache2 "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/utils/pointer" + + "github.com/argoproj/argo-cd/v2/pkg/apiclient" + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned" + repoapiclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient" + "github.com/argoproj/argo-cd/v2/server" + servercache "github.com/argoproj/argo-cd/v2/server/cache" + "github.com/argoproj/argo-cd/v2/util/cache" + appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate" + "github.com/argoproj/argo-cd/v2/util/cli" + "github.com/argoproj/argo-cd/v2/util/io" + kubeutil "github.com/argoproj/argo-cd/v2/util/kube" + "github.com/argoproj/argo-cd/v2/util/localconfig" +) + +type forwardCacheClient struct { + namespace string + context string + init sync.Once + client cache.CacheClient + compression cache.RedisCompressionType + err error + redisHaProxyName string + redisName string +} + +func (c *forwardCacheClient) doLazy(action func(client cache.CacheClient) error) error { + c.init.Do(func() { + overrides := clientcmd.ConfigOverrides{ + CurrentContext: c.context, + } + redisHaProxyPodLabelSelector := common.LabelKeyAppName + "=" + c.redisHaProxyName + redisPodLabelSelector := common.LabelKeyAppName + "=" + c.redisName + redisPort, err := kubeutil.PortForward(6379, c.namespace, &overrides, + redisHaProxyPodLabelSelector, redisPodLabelSelector) + if err != nil { + c.err = err + return + } + + redisClient := redis.NewClient(&redis.Options{Addr: fmt.Sprintf("localhost:%d", redisPort)}) + c.client = cache.NewRedisCache(redisClient, time.Hour, c.compression) + }) + if c.err != nil { + 
return c.err + } + return action(c.client) +} + +func (c *forwardCacheClient) Set(item *cache.Item) error { + return c.doLazy(func(client cache.CacheClient) error { + return client.Set(item) + }) +} + +func (c *forwardCacheClient) Get(key string, obj interface{}) error { + return c.doLazy(func(client cache.CacheClient) error { + return client.Get(key, obj) + }) +} + +func (c *forwardCacheClient) Delete(key string) error { + return c.doLazy(func(client cache.CacheClient) error { + return client.Delete(key) + }) +} + +func (c *forwardCacheClient) OnUpdated(ctx context.Context, key string, callback func() error) error { + return c.doLazy(func(client cache.CacheClient) error { + return client.OnUpdated(ctx, key, callback) + }) +} + +func (c *forwardCacheClient) NotifyUpdated(key string) error { + return c.doLazy(func(client cache.CacheClient) error { + return client.NotifyUpdated(key) + }) +} + +type forwardRepoClientset struct { + namespace string + context string + init sync.Once + repoClientset repoapiclient.Clientset + err error + repoServerName string +} + +func (c *forwardRepoClientset) NewRepoServerClient() (io.Closer, repoapiclient.RepoServerServiceClient, error) { + c.init.Do(func() { + overrides := clientcmd.ConfigOverrides{ + CurrentContext: c.context, + } + repoServerPodLabelSelector := common.LabelKeyAppName + "=" + c.repoServerName + repoServerPort, err := kubeutil.PortForward(8081, c.namespace, &overrides, repoServerPodLabelSelector) + if err != nil { + c.err = err + return + } + c.repoClientset = repoapiclient.NewRepoServerClientset(fmt.Sprintf("localhost:%d", repoServerPort), 60, repoapiclient.TLSConfiguration{ + DisableTLS: false, StrictValidation: false}) + }) + if c.err != nil { + return nil, nil, c.err + } + return c.repoClientset.NewRepoServerClient() +} + +func testAPI(ctx context.Context, clientOpts *apiclient.ClientOptions) error { + apiClient, err := apiclient.NewClient(clientOpts) + if err != nil { + return fmt.Errorf("failed to create API 
client: %w", err) + } + closer, versionClient, err := apiClient.NewVersionClient() + if err != nil { + return fmt.Errorf("failed to create version client: %w", err) + } + defer io.Close(closer) + _, err = versionClient.Version(ctx, &empty.Empty{}) + if err != nil { + return fmt.Errorf("failed to get version: %w", err) + } + return nil +} + +// MaybeStartLocalServer allows executing command in a headless mode. If we're in core mode, starts the Argo CD API +// server on the fly and changes provided client options to use started API server port. +// +// If neither the clientOpts nor the local config enables core mode, this function will +// not start the local server. +func MaybeStartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions, ctxStr string, port *int, address *string, compression cache.RedisCompressionType) error { + flags := pflag.NewFlagSet("tmp", pflag.ContinueOnError) + clientConfig := cli.AddKubectlFlagsToSet(flags) + startInProcessAPI := clientOpts.Core + if !startInProcessAPI { + // Core mode is not enabled on client options. Check the local config to see if we should start the API server. + localCfg, err := localconfig.ReadLocalConfig(clientOpts.ConfigPath) + if err != nil { + return fmt.Errorf("error reading local config: %w", err) + } + if localCfg != nil { + configCtx, err := localCfg.ResolveContext(clientOpts.Context) + if err != nil { + return fmt.Errorf("error resolving context: %w", err) + } + // There was a local config file, so determine whether core mode is enabled per the config file. + startInProcessAPI = configCtx.Server.Core + } + } + // If we're in core mode, start the API server on the fly. 
+ if !startInProcessAPI { + return nil + } + + // get rid of logging error handler + runtime.ErrorHandlers = runtime.ErrorHandlers[1:] + cli.SetLogLevel(log.ErrorLevel.String()) + log.SetLevel(log.ErrorLevel) + os.Setenv(v1alpha1.EnvVarFakeInClusterConfig, "true") + if address == nil { + address = pointer.String("localhost") + } + if port == nil || *port == 0 { + addr := fmt.Sprintf("%s:0", *address) + ln, err := net.Listen("tcp", addr) + if err != nil { + return fmt.Errorf("failed to listen on %q: %w", addr, err) + } + port = &ln.Addr().(*net.TCPAddr).Port + io.Close(ln) + } + + restConfig, err := clientConfig.ClientConfig() + if err != nil { + return fmt.Errorf("error creating client config: %w", err) + } + appClientset, err := appclientset.NewForConfig(restConfig) + if err != nil { + return fmt.Errorf("error creating app clientset: %w", err) + } + kubeClientset, err := kubernetes.NewForConfig(restConfig) + if err != nil { + return fmt.Errorf("error creating kubernetes clientset: %w", err) + } + + namespace, _, err := clientConfig.Namespace() + if err != nil { + return fmt.Errorf("error getting namespace: %w", err) + } + + mr, err := miniredis.Run() + if err != nil { + return fmt.Errorf("error running miniredis: %w", err) + } + appstateCache := appstatecache.NewCache(cache.NewCache(&forwardCacheClient{namespace: namespace, context: ctxStr, compression: compression, redisHaProxyName: clientOpts.RedisHaProxyName, redisName: clientOpts.RedisName}), time.Hour) + srv := server.NewServer(ctx, server.ArgoCDServerOpts{ + EnableGZip: false, + Namespace: namespace, + ListenPort: *port, + AppClientset: appClientset, + DisableAuth: true, + RedisClient: redis.NewClient(&redis.Options{Addr: mr.Addr()}), + Cache: servercache.NewCache(appstateCache, 0, 0, 0), + KubeClientset: kubeClientset, + Insecure: true, + ListenHost: *address, + RepoClientset: &forwardRepoClientset{namespace: namespace, context: ctxStr, repoServerName: clientOpts.RepoServerName}, + EnableProxyExtension: 
false, + }) + srv.Init(ctx) + + lns, err := srv.Listen() + if err != nil { + return fmt.Errorf("failed to listen: %w", err) + } + go srv.Run(ctx, lns) + clientOpts.ServerAddr = fmt.Sprintf("%s:%d", *address, *port) + clientOpts.PlainText = true + if !cache2.WaitForCacheSync(ctx.Done(), srv.Initialized) { + log.Fatal("Timed out waiting for project cache to sync") + } + tries := 5 + for i := 0; i < tries; i++ { + err = testAPI(ctx, clientOpts) + if err == nil { + break + } + time.Sleep(time.Second) + } + if err != nil { + return fmt.Errorf("all retries failed: %w", err) + } + return nil +} + +// NewClientOrDie creates a new API client from a set of config options, or fails fatally if the new client creation fails. +func NewClientOrDie(opts *apiclient.ClientOptions, c *cobra.Command) apiclient.Client { + ctx := c.Context() + + ctxStr := initialize.RetrieveContextIfChanged(c.Flag("context")) + // If we're in core mode, start the API server on the fly and configure the client `opts` to use it. + // If we're not in core mode, this function call will do nothing. 
+ err := MaybeStartLocalServer(ctx, opts, ctxStr, nil, nil, cache.RedisCompressionNone) + if err != nil { + log.Fatal(err) + } + client, err := apiclient.NewClient(opts) + if err != nil { + log.Fatal(err) + } + return client +} diff --git a/cmd/argocd/commands/initialize/cmd.go b/cmd/argocd/commands/initialize/cmd.go new file mode 100644 index 0000000000000..8f9da9f68783f --- /dev/null +++ b/cmd/argocd/commands/initialize/cmd.go @@ -0,0 +1,31 @@ +package initialize + +import ( + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/argoproj/argo-cd/v2/util/cli" +) + +func RetrieveContextIfChanged(contextFlag *pflag.Flag) string { + if contextFlag != nil && contextFlag.Changed { + return contextFlag.Value.String() + } + return "" +} + +// InitCommand allows executing command in a headless mode: on the fly starts Argo CD API server and +// changes provided client options to use started API server port +func InitCommand(cmd *cobra.Command) *cobra.Command { + flags := pflag.NewFlagSet("tmp", pflag.ContinueOnError) + cli.AddKubectlFlagsToSet(flags) + // copy k8s persistent flags into argocd command flags + flags.VisitAll(func(flag *pflag.Flag) { + // skip Kubernetes server flags since argocd has its own server flag + if flag.Name == "server" { + return + } + cmd.Flags().AddFlag(flag) + }) + return cmd +} diff --git a/cmd/argocd/commands/initialize/cmd_test.go b/cmd/argocd/commands/initialize/cmd_test.go new file mode 100644 index 0000000000000..59f2efb55a12e --- /dev/null +++ b/cmd/argocd/commands/initialize/cmd_test.go @@ -0,0 +1,80 @@ +package initialize + +import ( + "testing" + + flag "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" +) + +type StringFlag struct { + // The exact value provided on the flag + value string +} + +func (f StringFlag) String() string { + return f.value +} + +func (f *StringFlag) Set(value string) error { + f.value = value + return nil +} + +func (f *StringFlag) Type() string { + return "string" +} + +func 
Test_FlagContextNotChanged(t *testing.T) { + res := RetrieveContextIfChanged(&flag.Flag{ + Name: "", + Shorthand: "", + Usage: "", + Value: &StringFlag{value: "test"}, + DefValue: "", + Changed: false, + NoOptDefVal: "", + Deprecated: "", + Hidden: false, + ShorthandDeprecated: "", + Annotations: nil, + }) + + assert.Equal(t, "", res) +} + +func Test_FlagContextChanged(t *testing.T) { + res := RetrieveContextIfChanged(&flag.Flag{ + Name: "", + Shorthand: "", + Usage: "", + Value: &StringFlag{value: "test"}, + DefValue: "", + Changed: true, + NoOptDefVal: "", + Deprecated: "", + Hidden: false, + ShorthandDeprecated: "", + Annotations: nil, + }) + + assert.Equal(t, "test", res) +} + +func Test_FlagContextNil(t *testing.T) { + res := RetrieveContextIfChanged(&flag.Flag{ + Name: "", + Shorthand: "", + Usage: "", + Value: nil, + DefValue: "", + Changed: false, + NoOptDefVal: "", + Deprecated: "", + Hidden: false, + ShorthandDeprecated: "", + Annotations: nil, + }) + + assert.Equal(t, "", res) +} diff --git a/cmd/argocd/commands/login.go b/cmd/argocd/commands/login.go index 8aa5e314be6af..3e2ad4e7d1b73 100644 --- a/cmd/argocd/commands/login.go +++ b/cmd/argocd/commands/login.go @@ -12,13 +12,14 @@ import ( "strings" "time" - "github.com/coreos/go-oidc" - "github.com/dgrijalva/jwt-go/v4" + "github.com/coreos/go-oidc/v3/oidc" + "github.com/golang-jwt/jwt/v4" log "github.com/sirupsen/logrus" "github.com/skratchdot/open-golang/open" "github.com/spf13/cobra" "golang.org/x/oauth2" + "github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless" argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient" sessionpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/session" settingspkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/settings" @@ -35,43 +36,60 @@ import ( // NewLoginCommand returns a new instance of `argocd login` command func NewLoginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Command { var ( - ctxName string - username string - password string - sso 
bool - ssoPort int + ctxName string + username string + password string + sso bool + ssoPort int + skipTestTLS bool ) var command = &cobra.Command{ Use: "login SERVER", Short: "Log in to Argo CD", Long: "Log in to Argo CD", + Example: `# Login to Argo CD using a username and password +argocd login cd.argoproj.io + +# Login to Argo CD using SSO +argocd login cd.argoproj.io --sso + +# Configure direct access using Kubernetes API server +argocd login cd.argoproj.io --core`, Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + var server string - if len(args) != 1 && !globalClientOpts.PortForward { + if len(args) != 1 && !globalClientOpts.PortForward && !globalClientOpts.Core { c.HelpFunc()(c, args) os.Exit(1) } if globalClientOpts.PortForward { server = "port-forward" + } else if globalClientOpts.Core { + server = "kubernetes" } else { server = args[0] - tlsTestResult, err := grpc_util.TestTLS(server) - errors.CheckError(err) - if !tlsTestResult.TLS { - if !globalClientOpts.PlainText { - if !cli.AskToProceed("WARNING: server is not configured with TLS. Proceed (y/n)? ") { - os.Exit(1) + + if !skipTestTLS { + dialTime := 30 * time.Second + tlsTestResult, err := grpc_util.TestTLS(server, dialTime) + errors.CheckError(err) + if !tlsTestResult.TLS { + if !globalClientOpts.PlainText { + if !cli.AskToProceed("WARNING: server is not configured with TLS. Proceed (y/n)? ") { + os.Exit(1) + } + globalClientOpts.PlainText = true } - globalClientOpts.PlainText = true - } - } else if tlsTestResult.InsecureErr != nil { - if !globalClientOpts.Insecure { - if !cli.AskToProceed(fmt.Sprintf("WARNING: server certificate had error: %s. Proceed insecurely (y/n)? ", tlsTestResult.InsecureErr)) { - os.Exit(1) + } else if tlsTestResult.InsecureErr != nil { + if !globalClientOpts.Insecure { + if !cli.AskToProceed(fmt.Sprintf("WARNING: server certificate had error: %s. Proceed insecurely (y/n)? 
", tlsTestResult.InsecureErr)) { + os.Exit(1) + } + globalClientOpts.Insecure = true } - globalClientOpts.Insecure = true } } } @@ -80,15 +98,15 @@ func NewLoginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comman ServerAddr: server, Insecure: globalClientOpts.Insecure, PlainText: globalClientOpts.PlainText, + ClientCertFile: globalClientOpts.ClientCertFile, + ClientCertKeyFile: globalClientOpts.ClientCertKeyFile, GRPCWeb: globalClientOpts.GRPCWeb, GRPCWebRootPath: globalClientOpts.GRPCWebRootPath, PortForward: globalClientOpts.PortForward, PortForwardNamespace: globalClientOpts.PortForwardNamespace, Headers: globalClientOpts.Headers, + KubeOverrides: globalClientOpts.KubeOverrides, } - acdClient := argocdclient.NewClientOrDie(&clientOpts) - setConn, setIf := acdClient.NewSettingsClientOrDie() - defer io.Close(setConn) if ctxName == "" { ctxName = server @@ -101,28 +119,29 @@ func NewLoginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comman // Perform the login var tokenString string var refreshToken string - if !sso { - tokenString = passwordLogin(acdClient, username, password) - } else { - ctx := context.Background() - httpClient, err := acdClient.HTTPClient() - errors.CheckError(err) - ctx = oidc.ClientContext(ctx, httpClient) - acdSet, err := setIf.Get(ctx, &settingspkg.SettingsQuery{}) - errors.CheckError(err) - oauth2conf, provider, err := acdClient.OIDCConfig(ctx, acdSet) + if !globalClientOpts.Core { + acdClient := headless.NewClientOrDie(&clientOpts, c) + setConn, setIf := acdClient.NewSettingsClientOrDie() + defer io.Close(setConn) + if !sso { + tokenString = passwordLogin(ctx, acdClient, username, password) + } else { + httpClient, err := acdClient.HTTPClient() + errors.CheckError(err) + ctx = oidc.ClientContext(ctx, httpClient) + acdSet, err := setIf.Get(ctx, &settingspkg.SettingsQuery{}) + errors.CheckError(err) + oauth2conf, provider, err := acdClient.OIDCConfig(ctx, acdSet) + errors.CheckError(err) + tokenString, 
refreshToken = oauth2Login(ctx, ssoPort, acdSet.GetOIDCConfig(), oauth2conf, provider) + } + parser := jwt.NewParser(jwt.WithoutClaimsValidation()) + claims := jwt.MapClaims{} + _, _, err := parser.ParseUnverified(tokenString, &claims) errors.CheckError(err) - tokenString, refreshToken = oauth2Login(ctx, ssoPort, acdSet.GetOIDCConfig(), oauth2conf, provider) - } - - parser := &jwt.Parser{ - ValidationHelper: jwt.NewValidationHelper(jwt.WithoutClaimsValidation(), jwt.WithoutAudienceValidation()), + fmt.Printf("'%s' logged in successfully\n", userDisplayName(claims)) } - claims := jwt.MapClaims{} - _, _, err := parser.ParseUnverified(tokenString, &claims) - errors.CheckError(err) - fmt.Printf("'%s' logged in successfully\n", userDisplayName(claims)) // login successful. Persist the config localCfg, err := localconfig.ReadLocalConfig(globalClientOpts.ConfigPath) errors.CheckError(err) @@ -135,6 +154,7 @@ func NewLoginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comman Insecure: globalClientOpts.Insecure, GRPCWeb: globalClientOpts.GRPCWeb, GRPCWebRootPath: globalClientOpts.GRPCWebRootPath, + Core: globalClientOpts.Core, }) localCfg.UpsertUser(localconfig.User{ Name: ctxName, @@ -155,11 +175,13 @@ func NewLoginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comman fmt.Printf("Context '%s' updated\n", ctxName) }, } - command.Flags().StringVar(&ctxName, "name", "", "name to use for the context") - command.Flags().StringVar(&username, "username", "", "the username of an account to authenticate") - command.Flags().StringVar(&password, "password", "", "the password of an account to authenticate") - command.Flags().BoolVar(&sso, "sso", false, "perform SSO login") - command.Flags().IntVar(&ssoPort, "sso-port", DefaultSSOLocalPort, "port to run local OAuth2 login application") + command.Flags().StringVar(&ctxName, "name", "", "Name to use for the context") + command.Flags().StringVar(&username, "username", "", "The username of an account to 
authenticate") + command.Flags().StringVar(&password, "password", "", "The password of an account to authenticate") + command.Flags().BoolVar(&sso, "sso", false, "Perform SSO login") + command.Flags().IntVar(&ssoPort, "sso-port", DefaultSSOLocalPort, "Port to run local OAuth2 login application") + command.Flags(). + BoolVar(&skipTestTLS, "skip-test-tls", false, "Skip testing whether the server is configured with TLS (this can help when the command hangs for no apparent reason)") return command } @@ -175,7 +197,13 @@ func userDisplayName(claims jwt.MapClaims) string { // oauth2Login opens a browser, runs a temporary HTTP server to delegate OAuth2 login flow and // returns the JWT token and a refresh token (if supported) -func oauth2Login(ctx context.Context, port int, oidcSettings *settingspkg.OIDCConfig, oauth2conf *oauth2.Config, provider *oidc.Provider) (string, string) { +func oauth2Login( + ctx context.Context, + port int, + oidcSettings *settingspkg.OIDCConfig, + oauth2conf *oauth2.Config, + provider *oidc.Provider, +) (string, string) { oauth2conf.RedirectURL = fmt.Sprintf("http://localhost:%d/auth/callback", port) oidcConf, err := oidcutil.ParseConfig(provider) errors.CheckError(err) @@ -188,7 +216,10 @@ func oauth2Login(ctx context.Context, port int, oidcSettings *settingspkg.OIDCCo // completionChan is to signal flow completed. Non-empty string indicates error completionChan := make(chan string) // stateNonce is an OAuth2 state nonce - stateNonce := rand.RandString(10) + // According to the spec (https://www.rfc-editor.org/rfc/rfc6749#section-10.10), this must be guessable with + // probability <= 2^(-128). The following call generates one of 52^24 random strings, ~= 2^136 possibilities. 
+ stateNonce, err := rand.String(24) + errors.CheckError(err) var tokenString string var refreshToken string @@ -198,7 +229,11 @@ func oauth2Login(ctx context.Context, port int, oidcSettings *settingspkg.OIDCCo } // PKCE implementation of https://tools.ietf.org/html/rfc7636 - codeVerifier := rand.RandStringCharset(43, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._~") + codeVerifier, err := rand.StringFromCharset( + 43, + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._~", + ) + errors.CheckError(err) codeChallengeHash := sha256.Sum256([]byte(codeVerifier)) codeChallenge := base64.RawURLEncoding.EncodeToString(codeChallengeHash[:]) @@ -282,7 +317,8 @@ func oauth2Login(ctx context.Context, port int, oidcSettings *settingspkg.OIDCCo opts = append(opts, oauth2.SetAuthURLParam("code_challenge_method", "S256")) url = oauth2conf.AuthCodeURL(stateNonce, opts...) case oidcutil.GrantTypeImplicit: - url = oidcutil.ImplicitFlowURL(oauth2conf, stateNonce, opts...) + url, err = oidcutil.ImplicitFlowURL(oauth2conf, stateNonce, opts...) 
+ errors.CheckError(err) default: log.Fatalf("Unsupported grant type: %v", grantType) } @@ -309,7 +345,7 @@ func oauth2Login(ctx context.Context, port int, oidcSettings *settingspkg.OIDCCo return tokenString, refreshToken } -func passwordLogin(acdClient argocdclient.Client, username, password string) string { +func passwordLogin(ctx context.Context, acdClient argocdclient.Client, username, password string) string { username, password = cli.PromptCredentials(username, password) sessConn, sessionIf := acdClient.NewSessionClientOrDie() defer io.Close(sessConn) @@ -317,7 +353,7 @@ func passwordLogin(acdClient argocdclient.Client, username, password string) str Username: username, Password: password, } - createdSession, err := sessionIf.Create(context.Background(), &sessionRequest) + createdSession, err := sessionIf.Create(ctx, &sessionRequest) errors.CheckError(err) return createdSession.Token } diff --git a/cmd/argocd/commands/login_test.go b/cmd/argocd/commands/login_test.go index 1d5a205d982b7..3a7411b4b7fa3 100644 --- a/cmd/argocd/commands/login_test.go +++ b/cmd/argocd/commands/login_test.go @@ -3,12 +3,10 @@ package commands import ( "testing" - "github.com/dgrijalva/jwt-go/v4" + "github.com/golang-jwt/jwt/v4" "github.com/stretchr/testify/assert" ) -// - func Test_userDisplayName_email(t *testing.T) { claims := jwt.MapClaims{"iss": "qux", "sub": "foo", "email": "firstname.lastname@example.com", "groups": []string{"baz"}} actualName := userDisplayName(claims) diff --git a/cmd/argocd/commands/logout.go b/cmd/argocd/commands/logout.go index e15450299347f..f64c57ccc89cc 100644 --- a/cmd/argocd/commands/logout.go +++ b/cmd/argocd/commands/logout.go @@ -18,6 +18,10 @@ func NewLogoutCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comma Use: "logout CONTEXT", Short: "Log out from Argo CD", Long: "Log out from Argo CD", + Example: `# To log out of argocd +$ argocd logout +# This can be helpful for security reasons or when you want to switch between different 
Argo CD contexts or accounts. +`, Run: func(c *cobra.Command, args []string) { if len(args) == 0 { c.HelpFunc()(c, args) diff --git a/cmd/argocd/commands/logout_test.go b/cmd/argocd/commands/logout_test.go index 6fa17aacebebd..f70992c17bb93 100644 --- a/cmd/argocd/commands/logout_test.go +++ b/cmd/argocd/commands/logout_test.go @@ -1,29 +1,32 @@ package commands import ( - "io/ioutil" "os" "testing" - "github.com/argoproj/argo-cd/v2/pkg/apiclient" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient" "github.com/argoproj/argo-cd/v2/util/localconfig" ) func TestLogout(t *testing.T) { // Write the test config file - err := ioutil.WriteFile(testConfigFilePath, []byte(testConfig), os.ModePerm) + err := os.WriteFile(testConfigFilePath, []byte(testConfig), os.ModePerm) assert.NoError(t, err) + defer os.Remove(testConfigFilePath) + + err = os.Chmod(testConfigFilePath, 0600) + require.NoError(t, err) localConfig, err := localconfig.ReadLocalConfig(testConfigFilePath) assert.NoError(t, err) assert.Equal(t, localConfig.CurrentContext, "localhost:8080") assert.Contains(t, localConfig.Contexts, localconfig.ContextRef{Name: "localhost:8080", Server: "localhost:8080", User: "localhost:8080"}) - command := NewLogoutCommand(&apiclient.ClientOptions{ConfigPath: testConfigFilePath}) + command := NewLogoutCommand(&argocdclient.ClientOptions{ConfigPath: testConfigFilePath}) command.Run(nil, []string{"localhost:8080"}) localConfig, err = localconfig.ReadLocalConfig(testConfigFilePath) @@ -33,9 +36,4 @@ func TestLogout(t *testing.T) { assert.Contains(t, localConfig.Contexts, localconfig.ContextRef{Name: "argocd1.example.com:443", Server: "argocd1.example.com:443", User: "argocd1.example.com:443"}) assert.Contains(t, localConfig.Contexts, localconfig.ContextRef{Name: "argocd2.example.com:443", Server: "argocd2.example.com:443", User: "argocd2.example.com:443"}) assert.Contains(t, localConfig.Contexts, 
localconfig.ContextRef{Name: "localhost:8080", Server: "localhost:8080", User: "localhost:8080"}) - - // Write the file again so that no conflicts are made in git - err = ioutil.WriteFile(testConfigFilePath, []byte(testConfig), os.ModePerm) - assert.NoError(t, err) - } diff --git a/cmd/argocd/commands/project.go b/cmd/argocd/commands/project.go index 3adddda5cc221..dc894b4a79f27 100644 --- a/cmd/argocd/commands/project.go +++ b/cmd/argocd/commands/project.go @@ -11,12 +11,12 @@ import ( "time" humanize "github.com/dustin/go-humanize" - "github.com/ghodss/yaml" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/yaml" + "github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless" cmdutil "github.com/argoproj/argo-cd/v2/cmd/util" argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient" projectpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/project" @@ -26,6 +26,7 @@ import ( "github.com/argoproj/argo-cd/v2/util/git" "github.com/argoproj/argo-cd/v2/util/gpg" argoio "github.com/argoproj/argo-cd/v2/util/io" + "github.com/argoproj/argo-cd/v2/util/templates" ) type policyOpts struct { @@ -39,6 +40,19 @@ func NewProjectCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { var command = &cobra.Command{ Use: "proj", Short: "Manage projects", + Example: templates.Examples(` + # List all available projects + argocd proj list + + # Create a new project with name PROJECT + argocd proj create PROJECT + + # Delete the project with name PROJECT + argocd proj delete PROJECT + + # Edit the information on project with name PROJECT + argocd proj edit PROJECT + `), Run: func(c *cobra.Command, args []string) { c.HelpFunc()(c, args) os.Exit(1) @@ -88,13 +102,22 @@ func NewProjectCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm var command = &cobra.Command{ Use: "create PROJECT", Short: "Create a project", + Example: templates.Examples(` + # 
Create a new project with name PROJECT + argocd proj create PROJECT + + # Create a new project with name PROJECT from a file or URL to a kubernetes manifest + argocd proj create PROJECT -f FILE|URL + `), Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + proj, err := cmdutil.ConstructAppProj(fileURL, args, opts, c) errors.CheckError(err) - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer argoio.Close(conn) - _, err = projIf.Create(context.Background(), &projectpkg.ProjectCreateRequest{Project: proj, Upsert: upsert}) + _, err = projIf.Create(ctx, &projectpkg.ProjectCreateRequest{Project: proj, Upsert: upsert}) errors.CheckError(err) }, } @@ -116,16 +139,25 @@ func NewProjectSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command var command = &cobra.Command{ Use: "set PROJECT", Short: "Set project parameters", + Example: templates.Examples(` + # Set project parameters with some allowed cluster resources [RES1,RES2,...] for project with name PROJECT + argocd proj set PROJECT --allow-cluster-resource [RES1,RES2,...] + + # Set project parameters with some denied namespaced resources [RES1,RES2,...] for project with name PROJECT + argocd proj set PROJECT ---deny-namespaced-resource [RES1,RES2,...] 
+ `), Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) == 0 { c.HelpFunc()(c, args) os.Exit(1) } projName := args[0] - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer argoio.Close(conn) - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) + proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) errors.CheckError(err) if visited := cmdutil.SetProjSpecOptions(c.Flags(), &proj.Spec, &opts); visited == 0 { @@ -134,7 +166,7 @@ func NewProjectSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command os.Exit(1) } - _, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj}) + _, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj}) errors.CheckError(err) }, } @@ -147,7 +179,13 @@ func NewProjectAddSignatureKeyCommand(clientOpts *argocdclient.ClientOptions) *c var command = &cobra.Command{ Use: "add-signature-key PROJECT KEY-ID", Short: "Add GnuPG signature key to project", + Example: templates.Examples(` + # Add GnuPG signature key KEY-ID to project PROJECT + argocd proj add-signature-key PROJECT KEY-ID + `), Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 2 { c.HelpFunc()(c, args) os.Exit(1) @@ -159,10 +197,10 @@ func NewProjectAddSignatureKeyCommand(clientOpts *argocdclient.ClientOptions) *c log.Fatalf("%s is not a valid GnuPG key ID", signatureKey) } - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer argoio.Close(conn) - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) + proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) errors.CheckError(err) for _, key := range proj.Spec.SignatureKeys { @@ -171,7 
+209,7 @@ func NewProjectAddSignatureKeyCommand(clientOpts *argocdclient.ClientOptions) *c } } proj.Spec.SignatureKeys = append(proj.Spec.SignatureKeys, v1alpha1.SignatureKey{KeyID: signatureKey}) - _, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj}) + _, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj}) errors.CheckError(err) }, } @@ -183,7 +221,13 @@ func NewProjectRemoveSignatureKeyCommand(clientOpts *argocdclient.ClientOptions) var command = &cobra.Command{ Use: "remove-signature-key PROJECT KEY-ID", Short: "Remove GnuPG signature key from project", + Example: templates.Examples(` + # Remove GnuPG signature key KEY-ID from project PROJECT + argocd proj remove-signature-key PROJECT KEY-ID + `), Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 2 { c.HelpFunc()(c, args) os.Exit(1) @@ -191,10 +235,10 @@ func NewProjectRemoveSignatureKeyCommand(clientOpts *argocdclient.ClientOptions) projName := args[0] signatureKey := args[1] - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer argoio.Close(conn) - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) + proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) errors.CheckError(err) index := -1 @@ -208,7 +252,7 @@ func NewProjectRemoveSignatureKeyCommand(clientOpts *argocdclient.ClientOptions) log.Fatal("Specified signature key is not configured for project") } else { proj.Spec.SignatureKeys = append(proj.Spec.SignatureKeys[:index], proj.Spec.SignatureKeys[index+1:]...) 
- _, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj}) + _, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj}) errors.CheckError(err) } }, @@ -219,33 +263,54 @@ func NewProjectRemoveSignatureKeyCommand(clientOpts *argocdclient.ClientOptions) // NewProjectAddDestinationCommand returns a new instance of an `argocd proj add-destination` command func NewProjectAddDestinationCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { + var nameInsteadServer bool + + buildApplicationDestination := func(destination string, namespace string, nameInsteadServer bool) v1alpha1.ApplicationDestination { + if nameInsteadServer { + return v1alpha1.ApplicationDestination{Name: destination, Namespace: namespace} + } + return v1alpha1.ApplicationDestination{Server: destination, Namespace: namespace} + } + var command = &cobra.Command{ - Use: "add-destination PROJECT SERVER NAMESPACE", + Use: "add-destination PROJECT SERVER/NAME NAMESPACE", Short: "Add project destination", + Example: templates.Examples(` + # Add project destination using a server URL (SERVER) in the specified namespace (NAMESPACE) on the project with name PROJECT + argocd proj add-destination PROJECT SERVER NAMESPACE + + # Add project destination using a server name (NAME) in the specified namespace (NAMESPACE) on the project with name PROJECT + argocd proj add-destination PROJECT NAME NAMESPACE --name + `), Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 3 { c.HelpFunc()(c, args) os.Exit(1) } projName := args[0] - server := args[1] namespace := args[2] - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + destination := buildApplicationDestination(args[1], namespace, nameInsteadServer) + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer argoio.Close(conn) - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) + 
proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) errors.CheckError(err) for _, dest := range proj.Spec.Destinations { - if dest.Namespace == namespace && dest.Server == server { + dstServerExist := destination.Server != "" && dest.Server == destination.Server + dstNameExist := destination.Name != "" && dest.Name == destination.Name + if dest.Namespace == namespace && (dstServerExist || dstNameExist) { log.Fatal("Specified destination is already defined in project") } } - proj.Spec.Destinations = append(proj.Spec.Destinations, v1alpha1.ApplicationDestination{Server: server, Namespace: namespace}) - _, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj}) + proj.Spec.Destinations = append(proj.Spec.Destinations, destination) + _, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj}) errors.CheckError(err) }, } + command.Flags().BoolVar(&nameInsteadServer, "name", false, "Use name as destination instead server") return command } @@ -254,7 +319,13 @@ func NewProjectRemoveDestinationCommand(clientOpts *argocdclient.ClientOptions) var command = &cobra.Command{ Use: "remove-destination PROJECT SERVER NAMESPACE", Short: "Remove project destination", + Example: templates.Examples(` + # Remove the destination (SERVER) from the specified namespace (NAMESPACE) on the project with name PROJECT + argocd proj remove-destination PROJECT SERVER NAMESPACE + `), Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 3 { c.HelpFunc()(c, args) os.Exit(1) @@ -262,10 +333,10 @@ func NewProjectRemoveDestinationCommand(clientOpts *argocdclient.ClientOptions) projName := args[0] server := args[1] namespace := args[2] - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer argoio.Close(conn) - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: 
projName}) + proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) errors.CheckError(err) index := -1 @@ -279,7 +350,7 @@ func NewProjectRemoveDestinationCommand(clientOpts *argocdclient.ClientOptions) log.Fatal("Specified destination does not exist in project") } else { proj.Spec.Destinations = append(proj.Spec.Destinations[:index], proj.Spec.Destinations[index+1:]...) - _, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj}) + _, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj}) errors.CheckError(err) } }, @@ -296,7 +367,16 @@ func NewProjectAddOrphanedIgnoreCommand(clientOpts *argocdclient.ClientOptions) var command = &cobra.Command{ Use: "add-orphaned-ignore PROJECT GROUP KIND", Short: "Add a resource to orphaned ignore list", + Example: templates.Examples(` + # Add a resource of the specified GROUP and KIND to orphaned ignore list on the project with name PROJECT + argocd proj add-orphaned-ignore PROJECT GROUP KIND + + # Add resources of the specified GROUP and KIND using a NAME pattern to orphaned ignore list on the project with name PROJECT + argocd proj add-orphaned-ignore PROJECT GROUP KIND --name NAME + `), Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 3 { c.HelpFunc()(c, args) os.Exit(1) @@ -304,10 +384,10 @@ func NewProjectAddOrphanedIgnoreCommand(clientOpts *argocdclient.ClientOptions) projName := args[0] group := args[1] kind := args[2] - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer argoio.Close(conn) - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) + proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) errors.CheckError(err) if proj.Spec.OrphanedResources == nil { @@ -323,7 +403,7 @@ func NewProjectAddOrphanedIgnoreCommand(clientOpts 
*argocdclient.ClientOptions) } proj.Spec.OrphanedResources.Ignore = append(proj.Spec.OrphanedResources.Ignore, v1alpha1.OrphanedResourceKey{Group: group, Kind: kind, Name: name}) } - _, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj}) + _, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj}) errors.CheckError(err) }, } @@ -337,9 +417,18 @@ func NewProjectRemoveOrphanedIgnoreCommand(clientOpts *argocdclient.ClientOption name string ) var command = &cobra.Command{ - Use: "remove-orphaned-ignore PROJECT GROUP KIND NAME", + Use: "remove-orphaned-ignore PROJECT GROUP KIND", Short: "Remove a resource from orphaned ignore list", + Example: templates.Examples(` + # Remove a resource of the specified GROUP and KIND from orphaned ignore list on the project with name PROJECT + argocd proj remove-orphaned-ignore PROJECT GROUP KIND + + # Remove resources of the specified GROUP and KIND using a NAME pattern from orphaned ignore list on the project with name PROJECT + argocd proj remove-orphaned-ignore PROJECT GROUP KIND --name NAME + `), Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 3 { c.HelpFunc()(c, args) os.Exit(1) @@ -347,10 +436,10 @@ func NewProjectRemoveOrphanedIgnoreCommand(clientOpts *argocdclient.ClientOption projName := args[0] group := args[1] kind := args[2] - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer argoio.Close(conn) - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) + proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) errors.CheckError(err) if proj.Spec.OrphanedResources == nil { @@ -369,7 +458,7 @@ func NewProjectRemoveOrphanedIgnoreCommand(clientOpts *argocdclient.ClientOption log.Fatal("Specified resource does not exist in the orphaned ignore of project") } else { 
proj.Spec.OrphanedResources.Ignore = append(proj.Spec.OrphanedResources.Ignore[:index], proj.Spec.OrphanedResources.Ignore[index+1:]...) - _, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj}) + _, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj}) errors.CheckError(err) } }, @@ -383,17 +472,23 @@ func NewProjectAddSourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.C var command = &cobra.Command{ Use: "add-source PROJECT URL", Short: "Add project source repository", + Example: templates.Examples(` + # Add a source repository (URL) to the project with name PROJECT + argocd proj add-source PROJECT URL + `), Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 2 { c.HelpFunc()(c, args) os.Exit(1) } projName := args[0] url := args[1] - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer argoio.Close(conn) - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) + proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) errors.CheckError(err) for _, item := range proj.Spec.SourceRepos { @@ -407,7 +502,7 @@ func NewProjectAddSourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.C } } proj.Spec.SourceRepos = append(proj.Spec.SourceRepos, url) - _, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj}) + _, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj}) errors.CheckError(err) }, } @@ -423,7 +518,7 @@ func modifyResourcesList(list *[]metav1.GroupKind, add bool, listDesc string, gr } } fmt.Printf("Group '%s' and kind '%s' is added to %s resources\n", group, kind, listDesc) - *list = append(*list, v1.GroupKind{Group: group, Kind: kind}) + *list = append(*list, metav1.GroupKind{Group: group, Kind: kind}) return true } else { index := -1 @@ -443,7 
+538,7 @@ func modifyResourcesList(list *[]metav1.GroupKind, add bool, listDesc string, gr } } -func modifyResourceListCmd(cmdUse, cmdDesc string, clientOpts *argocdclient.ClientOptions, allow bool, namespacedList bool) *cobra.Command { +func modifyResourceListCmd(cmdUse, cmdDesc, examples string, clientOpts *argocdclient.ClientOptions, allow bool, namespacedList bool) *cobra.Command { var ( listType string defaultList string @@ -454,18 +549,21 @@ func modifyResourceListCmd(cmdUse, cmdDesc string, clientOpts *argocdclient.Clie defaultList = "allow" } var command = &cobra.Command{ - Use: cmdUse, - Short: cmdDesc, + Use: cmdUse, + Short: cmdDesc, + Example: templates.Examples(examples), Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 3 { c.HelpFunc()(c, args) os.Exit(1) } projName, group, kind := args[0], args[1], args[2] - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer argoio.Close(conn) - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) + proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) errors.CheckError(err) var list, allowList, denyList *[]metav1.GroupKind var listAction, listDesc string @@ -489,7 +587,7 @@ func modifyResourceListCmd(cmdUse, cmdDesc string, clientOpts *argocdclient.Clie } if modifyResourcesList(list, add, listAction+" "+listDesc, group, kind) { - _, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj}) + _, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj}) errors.CheckError(err) } }, @@ -502,28 +600,44 @@ func modifyResourceListCmd(cmdUse, cmdDesc string, clientOpts *argocdclient.Clie func NewProjectAllowNamespaceResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { use := "allow-namespace-resource PROJECT GROUP KIND" desc := "Removes a namespaced API 
resource from the deny list or add a namespaced API resource to the allow list" - return modifyResourceListCmd(use, desc, clientOpts, true, true) + examples := ` + # Removes a namespaced API resource with specified GROUP and KIND from the deny list or add a namespaced API resource to the allow list for project PROJECT + argocd proj allow-namespace-resource PROJECT GROUP KIND + ` + return modifyResourceListCmd(use, desc, examples, clientOpts, true, true) } // NewProjectDenyNamespaceResourceCommand returns a new instance of an `argocd proj deny-namespace-resource` command func NewProjectDenyNamespaceResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { use := "deny-namespace-resource PROJECT GROUP KIND" desc := "Adds a namespaced API resource to the deny list or removes a namespaced API resource from the allow list" - return modifyResourceListCmd(use, desc, clientOpts, false, true) + examples := ` + # Adds a namespaced API resource with specified GROUP and KIND from the deny list or removes a namespaced API resource from the allow list for project PROJECT + argocd proj deny-namespace-resource PROJECT GROUP KIND + ` + return modifyResourceListCmd(use, desc, examples, clientOpts, false, true) } // NewProjectDenyClusterResourceCommand returns a new instance of an `deny-cluster-resource` command func NewProjectDenyClusterResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { use := "deny-cluster-resource PROJECT GROUP KIND" desc := "Removes a cluster-scoped API resource from the allow list and adds it to deny list" - return modifyResourceListCmd(use, desc, clientOpts, false, false) + examples := ` + # Removes a cluster-scoped API resource with specified GROUP and KIND from the allow list and adds it to deny list for project PROJECT + argocd proj deny-cluster-resource PROJECT GROUP KIND + ` + return modifyResourceListCmd(use, desc, examples, clientOpts, false, false) } // NewProjectAllowClusterResourceCommand returns a new instance of 
an `argocd proj allow-cluster-resource` command func NewProjectAllowClusterResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { use := "allow-cluster-resource PROJECT GROUP KIND" desc := "Adds a cluster-scoped API resource to the allow list and removes it from deny list" - return modifyResourceListCmd(use, desc, clientOpts, true, false) + examples := ` + # Adds a cluster-scoped API resource with specified GROUP and KIND to the allow list and removes it from deny list for project PROJECT + argocd proj allow-cluster-resource PROJECT GROUP KIND + ` + return modifyResourceListCmd(use, desc, examples, clientOpts, true, false) } // NewProjectRemoveSourceCommand returns a new instance of an `argocd proj remove-src` command @@ -531,17 +645,23 @@ func NewProjectRemoveSourceCommand(clientOpts *argocdclient.ClientOptions) *cobr var command = &cobra.Command{ Use: "remove-source PROJECT URL", Short: "Remove project source repository", + Example: templates.Examples(` + # Remove URL source repository to project PROJECT + argocd proj remove-source PROJECT URL + `), Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 2 { c.HelpFunc()(c, args) os.Exit(1) } projName := args[0] url := args[1] - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer argoio.Close(conn) - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) + proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) errors.CheckError(err) index := -1 @@ -555,7 +675,7 @@ func NewProjectRemoveSourceCommand(clientOpts *argocdclient.ClientOptions) *cobr fmt.Printf("Source repository '%s' does not exist in project\n", url) } else { proj.Spec.SourceRepos = append(proj.Spec.SourceRepos[:index], proj.Spec.SourceRepos[index+1:]...) 
- _, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj}) + _, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj}) errors.CheckError(err) } }, @@ -569,15 +689,21 @@ func NewProjectDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm var command = &cobra.Command{ Use: "delete PROJECT", Short: "Delete project", + Example: templates.Examples(` + # Delete the project with name PROJECT + argocd proj delete PROJECT + `), Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) == 0 { c.HelpFunc()(c, args) os.Exit(1) } - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer argoio.Close(conn) for _, name := range args { - _, err := projIf.Delete(context.Background(), &projectpkg.ProjectQuery{Name: name}) + _, err := projIf.Delete(ctx, &projectpkg.ProjectQuery{Name: name}) errors.CheckError(err) } }, @@ -610,10 +736,19 @@ func NewProjectListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman var command = &cobra.Command{ Use: "list", Short: "List projects", + Example: templates.Examples(` + # List all available projects + argocd proj list + + # List all available projects in yaml format + argocd proj list -o yaml + `), Run: func(c *cobra.Command, args []string) { - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + ctx := c.Context() + + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer argoio.Close(conn) - projects, err := projIf.List(context.Background(), &projectpkg.ProjectQuery{}) + projects, err := projIf.List(ctx, &projectpkg.ProjectQuery{}) errors.CheckError(err) switch output { case "yaml", "json": @@ -684,7 +819,7 @@ func printProjectLine(w io.Writer, p *v1alpha1.AppProject) { fmt.Fprintf(w, "%s\t%s\t%v\t%v\t%v\t%v\t%v\t%v\n", p.Name, p.Spec.Description, destinations, sourceRepos, 
clusterWhitelist, namespaceBlacklist, signatureKeys, formatOrphanedResources(p)) } -func printProject(p *v1alpha1.AppProject) { +func printProject(p *v1alpha1.AppProject, scopedRepositories []*v1alpha1.Repository, scopedClusters []*v1alpha1.Cluster) { const printProjFmtStr = "%-29s%s\n" fmt.Printf(printProjFmtStr, "Name:", p.Name) @@ -710,6 +845,16 @@ func printProject(p *v1alpha1.AppProject) { fmt.Printf(printProjFmtStr, "", p.Spec.SourceRepos[i]) } + //Print scoped repositories + scr0 := "" + if len(scopedRepositories) > 0 { + scr0 = scopedRepositories[0].Repo + } + fmt.Printf(printProjFmtStr, "Scoped Repositories:", scr0) + for i := 1; i < len(scopedRepositories); i++ { + fmt.Printf(printProjFmtStr, "", scopedRepositories[i].Repo) + } + // Print allowed cluster resources cwl0 := "" if len(p.Spec.ClusterResourceWhitelist) > 0 { @@ -720,6 +865,16 @@ func printProject(p *v1alpha1.AppProject) { fmt.Printf(printProjFmtStr, "", fmt.Sprintf("%s/%s", p.Spec.ClusterResourceWhitelist[i].Group, p.Spec.ClusterResourceWhitelist[i].Kind)) } + //Print scoped clusters + scc0 := "" + if len(scopedClusters) > 0 { + scc0 = scopedClusters[0].Server + } + fmt.Printf(printProjFmtStr, "Scoped Clusters:", scc0) + for i := 1; i < len(scopedClusters); i++ { + fmt.Printf(printProjFmtStr, "", scopedClusters[i].Server) + } + // Print denied namespaced resources rbl0 := "" if len(p.Spec.NamespaceResourceBlacklist) > 0 { @@ -753,23 +908,30 @@ func NewProjectGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command var command = &cobra.Command{ Use: "get PROJECT", Short: "Get project details", + Example: templates.Examples(` + # Get details from project PROJECT + argocd proj get PROJECT + + # Get details from project PROJECT in yaml format + argocd proj get PROJECT -o yaml + + `), Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 1 { c.HelpFunc()(c, args) os.Exit(1) } projName := args[0] - conn, projIf := 
argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() - defer argoio.Close(conn) - p, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) - errors.CheckError(err) + detailedProject := getProject(c, clientOpts, ctx, projName) switch output { case "yaml", "json": - err := PrintResource(p, output) + err := PrintResource(detailedProject.Project, output) errors.CheckError(err) case "wide", "": - printProject(p) + printProject(detailedProject.Project, detailedProject.Repositories, detailedProject.Clusters) default: errors.CheckError(fmt.Errorf("unknown output format: %s", output)) } @@ -779,19 +941,33 @@ func NewProjectGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command return command } +func getProject(c *cobra.Command, clientOpts *argocdclient.ClientOptions, ctx context.Context, projName string) *projectpkg.DetailedProjectsResponse { + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() + defer argoio.Close(conn) + detailedProject, err := projIf.GetDetailedProject(ctx, &projectpkg.ProjectQuery{Name: projName}) + errors.CheckError(err) + return detailedProject +} + func NewProjectEditCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { var command = &cobra.Command{ Use: "edit PROJECT", Short: "Edit project", + Example: templates.Examples(` + # Edit the information on project with name PROJECT + argocd proj edit PROJECT + `), Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 1 { c.HelpFunc()(c, args) os.Exit(1) } projName := args[0] - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer argoio.Close(conn) - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) + proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) errors.CheckError(err) projData, err := json.Marshal(proj.Spec) 
errors.CheckError(err) @@ -801,23 +977,23 @@ func NewProjectEditCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman cli.InteractiveEdit(fmt.Sprintf("%s-*-edit.yaml", projName), projData, func(input []byte) error { input, err = yaml.YAMLToJSON(input) if err != nil { - return err + return fmt.Errorf("error converting YAML to JSON: %w", err) } updatedSpec := v1alpha1.AppProjectSpec{} err = json.Unmarshal(input, &updatedSpec) if err != nil { - return err + return fmt.Errorf("error unmarshaling input into application spec: %w", err) } - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) + proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) if err != nil { - return err + return fmt.Errorf("could not get project by project name: %w", err) } proj.Spec = updatedSpec - _, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj}) + _, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj}) if err != nil { - return fmt.Errorf("Failed to update project:\n%v", err) + return fmt.Errorf("failed to update project:\n%w", err) } - return err + return nil }) }, } diff --git a/cmd/argocd/commands/project_role.go b/cmd/argocd/commands/project_role.go index 5442c0d8833d8..987e61914d858 100644 --- a/cmd/argocd/commands/project_role.go +++ b/cmd/argocd/commands/project_role.go @@ -1,7 +1,6 @@ package commands import ( - "context" "fmt" "os" "strconv" @@ -9,9 +8,10 @@ import ( "time" timeutil "github.com/argoproj/pkg/time" - jwtgo "github.com/dgrijalva/jwt-go/v4" + jwtgo "github.com/golang-jwt/jwt/v4" "github.com/spf13/cobra" + "github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless" argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient" projectpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/project" "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" @@ -57,16 +57,18 @@ func NewProjectRoleAddPolicyCommand(clientOpts *argocdclient.ClientOptions) *cob 
Use: "add-policy PROJECT ROLE-NAME", Short: "Add a policy to a project role", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 2 { c.HelpFunc()(c, args) os.Exit(1) } projName := args[0] roleName := args[1] - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer io.Close(conn) - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) + proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) errors.CheckError(err) role, roleIndex, err := proj.GetRoleByName(roleName) @@ -75,7 +77,7 @@ func NewProjectRoleAddPolicyCommand(clientOpts *argocdclient.ClientOptions) *cob policy := fmt.Sprintf(policyTemplate, proj.Name, role.Name, opts.action, proj.Name, opts.object, opts.permission) proj.Spec.Roles[roleIndex].Policies = append(role.Policies, policy) - _, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj}) + _, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj}) errors.CheckError(err) }, } @@ -92,16 +94,18 @@ func NewProjectRoleRemovePolicyCommand(clientOpts *argocdclient.ClientOptions) * Use: "remove-policy PROJECT ROLE-NAME", Short: "Remove a policy from a role within a project", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 2 { c.HelpFunc()(c, args) os.Exit(1) } projName := args[0] roleName := args[1] - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer io.Close(conn) - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) + proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) errors.CheckError(err) role, roleIndex, err := proj.GetRoleByName(roleName) @@ -120,7 +124,7 @@ func NewProjectRoleRemovePolicyCommand(clientOpts 
*argocdclient.ClientOptions) * } role.Policies[duplicateIndex] = role.Policies[len(role.Policies)-1] proj.Spec.Roles[roleIndex].Policies = role.Policies[:len(role.Policies)-1] - _, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj}) + _, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj}) errors.CheckError(err) }, } @@ -137,16 +141,18 @@ func NewProjectRoleCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra. Use: "create PROJECT ROLE-NAME", Short: "Create a project role", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 2 { c.HelpFunc()(c, args) os.Exit(1) } projName := args[0] roleName := args[1] - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer io.Close(conn) - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) + proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) errors.CheckError(err) _, _, err = proj.GetRoleByName(roleName) @@ -156,7 +162,7 @@ func NewProjectRoleCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra. } proj.Spec.Roles = append(proj.Spec.Roles, v1alpha1.ProjectRole{Name: roleName, Description: description}) - _, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj}) + _, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj}) errors.CheckError(err) fmt.Printf("Role '%s' created\n", roleName) }, @@ -171,16 +177,18 @@ func NewProjectRoleDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra. 
Use: "delete PROJECT ROLE-NAME", Short: "Delete a project role", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 2 { c.HelpFunc()(c, args) os.Exit(1) } projName := args[0] roleName := args[1] - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer io.Close(conn) - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) + proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) errors.CheckError(err) _, index, err := proj.GetRoleByName(roleName) @@ -191,7 +199,7 @@ func NewProjectRoleDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra. proj.Spec.Roles[index] = proj.Spec.Roles[len(proj.Spec.Roles)-1] proj.Spec.Roles = proj.Spec.Roles[:len(proj.Spec.Roles)-1] - _, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj}) + _, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj}) errors.CheckError(err) fmt.Printf("Role '%s' deleted\n", roleName) }, @@ -219,20 +227,22 @@ func NewProjectRoleCreateTokenCommand(clientOpts *argocdclient.ClientOptions) *c Short: "Create a project token", Aliases: []string{"token-create"}, Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 2 { c.HelpFunc()(c, args) os.Exit(1) } projName := args[0] roleName := args[1] - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer io.Close(conn) if expiresIn == "" { expiresIn = "0s" } duration, err := timeutil.ParseDuration(expiresIn) errors.CheckError(err) - tokenResponse, err := projIf.CreateToken(context.Background(), &projectpkg.ProjectTokenCreateRequest{ + tokenResponse, err := projIf.CreateToken(ctx, &projectpkg.ProjectTokenCreateRequest{ Project: projName, Role: roleName, ExpiresIn: 
int64(duration.Seconds()), @@ -265,7 +275,7 @@ func NewProjectRoleCreateTokenCommand(clientOpts *argocdclient.ClientOptions) *c }, } command.Flags().StringVarP(&expiresIn, "expires-in", "e", "", - "Duration before the token will expire, eg \"12h\", \"7d\". (Default: No expiration)", + "Duration before the token will expire, e.g. \"12h\", \"7d\". (Default: No expiration)", ) command.Flags().StringVarP(&tokenID, "id", "i", "", "Token unique identifier. (Default: Random UUID)") command.Flags().BoolVarP(&outputTokenOnly, "token-only", "t", false, "Output token only - for use in scripts.") @@ -282,6 +292,8 @@ func NewProjectRoleListTokensCommand(clientOpts *argocdclient.ClientOptions) *co Short: "List tokens for a given role.", Aliases: []string{"list-token", "token-list"}, Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 2 { c.HelpFunc()(c, args) os.Exit(1) @@ -289,10 +301,10 @@ func NewProjectRoleListTokensCommand(clientOpts *argocdclient.ClientOptions) *co projName := args[0] roleName := args[1] - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer io.Close(conn) - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) + proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) errors.CheckError(err) role, _, err := proj.GetRoleByName(roleName) errors.CheckError(err) @@ -331,6 +343,8 @@ func NewProjectRoleDeleteTokenCommand(clientOpts *argocdclient.ClientOptions) *c Short: "Delete a project token", Aliases: []string{"token-delete", "remove-token"}, Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 3 { c.HelpFunc()(c, args) os.Exit(1) @@ -340,10 +354,10 @@ func NewProjectRoleDeleteTokenCommand(clientOpts *argocdclient.ClientOptions) *c issuedAt, err := strconv.ParseInt(args[2], 10, 64) errors.CheckError(err) - conn, projIf := 
argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer io.Close(conn) - _, err = projIf.DeleteToken(context.Background(), &projectpkg.ProjectTokenDeleteRequest{Project: projName, Role: roleName, Iat: issuedAt}) + _, err = projIf.DeleteToken(ctx, &projectpkg.ProjectTokenDeleteRequest{Project: projName, Role: roleName, Iat: issuedAt}) errors.CheckError(err) }, } @@ -376,15 +390,17 @@ func NewProjectRoleListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co Use: "list PROJECT", Short: "List all the roles in a project", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 1 { c.HelpFunc()(c, args) os.Exit(1) } projName := args[0] - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer io.Close(conn) - project, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) + project, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) errors.CheckError(err) switch output { case "json", "yaml": @@ -409,16 +425,18 @@ func NewProjectRoleGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com Use: "get PROJECT ROLE-NAME", Short: "Get the details of a specific role", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 2 { c.HelpFunc()(c, args) os.Exit(1) } projName := args[0] roleName := args[1] - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer io.Close(conn) - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) + proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) errors.CheckError(err) role, _, err := proj.GetRoleByName(roleName) @@ -452,14 +470,16 @@ func 
NewProjectRoleAddGroupCommand(clientOpts *argocdclient.ClientOptions) *cobr Use: "add-group PROJECT ROLE-NAME GROUP-CLAIM", Short: "Add a group claim to a project role", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 3 { c.HelpFunc()(c, args) os.Exit(1) } projName, roleName, groupName := args[0], args[1], args[2] - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer io.Close(conn) - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) + proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) errors.CheckError(err) updated, err := proj.AddGroupToRole(roleName, groupName) errors.CheckError(err) @@ -467,7 +487,7 @@ func NewProjectRoleAddGroupCommand(clientOpts *argocdclient.ClientOptions) *cobr fmt.Printf("Group '%s' already present in role '%s'\n", groupName, roleName) return } - _, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj}) + _, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj}) errors.CheckError(err) fmt.Printf("Group '%s' added to role '%s'\n", groupName, roleName) }, @@ -481,14 +501,16 @@ func NewProjectRoleRemoveGroupCommand(clientOpts *argocdclient.ClientOptions) *c Use: "remove-group PROJECT ROLE-NAME GROUP-CLAIM", Short: "Remove a group claim from a role within a project", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 3 { c.HelpFunc()(c, args) os.Exit(1) } projName, roleName, groupName := args[0], args[1], args[2] - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer io.Close(conn) - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) + proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) 
errors.CheckError(err) updated, err := proj.RemoveGroupFromRole(roleName, groupName) errors.CheckError(err) @@ -496,7 +518,7 @@ func NewProjectRoleRemoveGroupCommand(clientOpts *argocdclient.ClientOptions) *c fmt.Printf("Group '%s' not present in role '%s'\n", groupName, roleName) return } - _, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj}) + _, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj}) errors.CheckError(err) fmt.Printf("Group '%s' removed from role '%s'\n", groupName, roleName) }, diff --git a/cmd/argocd/commands/projectwindows.go b/cmd/argocd/commands/projectwindows.go index 939263acc2e63..76679b5dc3eae 100644 --- a/cmd/argocd/commands/projectwindows.go +++ b/cmd/argocd/commands/projectwindows.go @@ -1,7 +1,6 @@ package commands import ( - "context" "fmt" "os" "strconv" @@ -10,6 +9,7 @@ import ( "github.com/spf13/cobra" + "github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless" argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient" projectpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/project" "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" @@ -36,13 +36,15 @@ func NewProjectWindowsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com return roleCommand } -// NewProjectSyncWindowsDisableManualSyncCommand returns a new instance of an `argocd proj windows disable-manual-sync` command +// NewProjectWindowsDisableManualSyncCommand returns a new instance of an `argocd proj windows disable-manual-sync` command func NewProjectWindowsDisableManualSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { var command = &cobra.Command{ Use: "disable-manual-sync PROJECT ID", Short: "Disable manual sync for a sync window", Long: "Disable manual sync for a sync window. 
Requires ID which can be found by running \"argocd proj windows list PROJECT\"", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 2 { c.HelpFunc()(c, args) os.Exit(1) @@ -52,10 +54,10 @@ func NewProjectWindowsDisableManualSyncCommand(clientOpts *argocdclient.ClientOp id, err := strconv.Atoi(args[1]) errors.CheckError(err) - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer io.Close(conn) - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) + proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) errors.CheckError(err) for i, window := range proj.Spec.SyncWindows { @@ -64,7 +66,7 @@ func NewProjectWindowsDisableManualSyncCommand(clientOpts *argocdclient.ClientOp } } - _, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj}) + _, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj}) errors.CheckError(err) }, } @@ -78,6 +80,8 @@ func NewProjectWindowsEnableManualSyncCommand(clientOpts *argocdclient.ClientOpt Short: "Enable manual sync for a sync window", Long: "Enable manual sync for a sync window. 
Requires ID which can be found by running \"argocd proj windows list PROJECT\"", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 2 { c.HelpFunc()(c, args) os.Exit(1) @@ -87,10 +91,10 @@ func NewProjectWindowsEnableManualSyncCommand(clientOpts *argocdclient.ClientOpt id, err := strconv.Atoi(args[1]) errors.CheckError(err) - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer io.Close(conn) - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) + proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) errors.CheckError(err) for i, window := range proj.Spec.SyncWindows { @@ -99,7 +103,7 @@ func NewProjectWindowsEnableManualSyncCommand(clientOpts *argocdclient.ClientOpt } } - _, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj}) + _, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj}) errors.CheckError(err) }, } @@ -116,26 +120,46 @@ func NewProjectWindowsAddWindowCommand(clientOpts *argocdclient.ClientOptions) * namespaces []string clusters []string manualSync bool + timeZone string ) var command = &cobra.Command{ Use: "add PROJECT", Short: "Add a sync window to a project", + Example: `# Add a 1 hour allow sync window +argocd proj windows add PROJECT \ + --kind allow \ + --schedule "0 22 * * *" \ + --duration 1h \ + --applications "*" + +# Add a deny sync window with the ability to manually sync. 
+argocd proj windows add PROJECT \ + --kind deny \ + --schedule "30 10 * * *" \ + --duration 30m \ + --applications "prod-\\*,website" \ + --namespaces "default,\\*-prod" \ + --clusters "prod,staging" \ + --manual-sync + `, Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 1 { c.HelpFunc()(c, args) os.Exit(1) } projName := args[0] - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer io.Close(conn) - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) + proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) errors.CheckError(err) - err = proj.Spec.AddWindow(kind, schedule, duration, applications, namespaces, clusters, manualSync) + err = proj.Spec.AddWindow(kind, schedule, duration, applications, namespaces, clusters, manualSync, timeZone) errors.CheckError(err) - _, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj}) + _, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj}) errors.CheckError(err) }, } @@ -146,16 +170,19 @@ func NewProjectWindowsAddWindowCommand(clientOpts *argocdclient.ClientOptions) * command.Flags().StringSliceVar(&namespaces, "namespaces", []string{}, "Namespaces that the schedule will be applied to. Comma separated, wildcards supported (e.g. --namespaces default,\\*-prod)") command.Flags().StringSliceVar(&clusters, "clusters", []string{}, "Clusters that the schedule will be applied to. Comma separated, wildcards supported (e.g. 
--clusters prod,staging)") command.Flags().BoolVar(&manualSync, "manual-sync", false, "Allow manual syncs for both deny and allow windows") + command.Flags().StringVar(&timeZone, "time-zone", "UTC", "Time zone of the sync window") return command } -// NewProjectWindowsAddWindowCommand returns a new instance of an `argocd proj windows delete` command +// NewProjectWindowsDeleteCommand returns a new instance of an `argocd proj windows delete` command func NewProjectWindowsDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { var command = &cobra.Command{ Use: "delete PROJECT ID", Short: "Delete a sync window from a project. Requires ID which can be found by running \"argocd proj windows list PROJECT\"", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 2 { c.HelpFunc()(c, args) os.Exit(1) @@ -165,16 +192,16 @@ func NewProjectWindowsDeleteCommand(clientOpts *argocdclient.ClientOptions) *cob id, err := strconv.Atoi(args[1]) errors.CheckError(err) - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer io.Close(conn) - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) + proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) errors.CheckError(err) err = proj.Spec.DeleteWindow(id) errors.CheckError(err) - _, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj}) + _, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj}) errors.CheckError(err) }, } @@ -189,12 +216,19 @@ func NewProjectWindowsUpdateCommand(clientOpts *argocdclient.ClientOptions) *cob applications []string namespaces []string clusters []string + timeZone string ) var command = &cobra.Command{ Use: "update PROJECT ID", Short: "Update a project sync window", Long: "Update a project sync window. 
Requires ID which can be found by running \"argocd proj windows list PROJECT\"", + Example: `# Change a sync window's schedule +argocd proj windows update PROJECT ID \ + --schedule "0 20 * * *" +`, Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 2 { c.HelpFunc()(c, args) os.Exit(1) @@ -204,22 +238,22 @@ func NewProjectWindowsUpdateCommand(clientOpts *argocdclient.ClientOptions) *cob id, err := strconv.Atoi(args[1]) errors.CheckError(err) - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer io.Close(conn) - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) + proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) errors.CheckError(err) for i, window := range proj.Spec.SyncWindows { if id == i { - err := window.Update(schedule, duration, applications, namespaces, clusters) + err := window.Update(schedule, duration, applications, namespaces, clusters, timeZone) if err != nil { errors.CheckError(err) } } } - _, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj}) + _, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj}) errors.CheckError(err) }, } @@ -228,6 +262,7 @@ func NewProjectWindowsUpdateCommand(clientOpts *argocdclient.ClientOptions) *cob command.Flags().StringSliceVar(&applications, "applications", []string{}, "Applications that the schedule will be applied to. Comma separated, wildcards supported (e.g. --applications prod-\\*,website)") command.Flags().StringSliceVar(&namespaces, "namespaces", []string{}, "Namespaces that the schedule will be applied to. Comma separated, wildcards supported (e.g. --namespaces default,\\*-prod)") command.Flags().StringSliceVar(&clusters, "clusters", []string{}, "Clusters that the schedule will be applied to. Comma separated, wildcards supported (e.g. 
--clusters prod,staging)") + command.Flags().StringVar(&timeZone, "time-zone", "UTC", "Time zone of the sync window. (e.g. --time-zone \"America/New_York\")") return command } @@ -240,15 +275,17 @@ func NewProjectWindowsListCommand(clientOpts *argocdclient.ClientOptions) *cobra Use: "list PROJECT", Short: "List project sync windows", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 1 { c.HelpFunc()(c, args) os.Exit(1) } projName := args[0] - conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie() + conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie() defer io.Close(conn) - proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName}) + proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName}) errors.CheckError(err) switch output { case "yaml", "json": diff --git a/cmd/argocd/commands/relogin.go b/cmd/argocd/commands/relogin.go index 59b10fd9bf26b..92affe05b2e5b 100644 --- a/cmd/argocd/commands/relogin.go +++ b/cmd/argocd/commands/relogin.go @@ -1,14 +1,14 @@ package commands import ( - "context" "fmt" "os" - "github.com/coreos/go-oidc" + "github.com/coreos/go-oidc/v3/oidc" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless" argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient" settingspkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/settings" "github.com/argoproj/argo-cd/v2/util/errors" @@ -28,6 +28,8 @@ func NewReloginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comm Short: "Refresh an expired authenticate token", Long: "Refresh an expired authenticate token", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 0 { c.HelpFunc()(c, args) os.Exit(1) @@ -43,25 +45,26 @@ func NewReloginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comm var tokenString string var refreshToken string clientOpts := 
argocdclient.ClientOptions{ - ConfigPath: "", - ServerAddr: configCtx.Server.Server, - Insecure: configCtx.Server.Insecure, - GRPCWeb: globalClientOpts.GRPCWeb, - GRPCWebRootPath: globalClientOpts.GRPCWebRootPath, - PlainText: configCtx.Server.PlainText, - Headers: globalClientOpts.Headers, + ConfigPath: "", + ServerAddr: configCtx.Server.Server, + Insecure: configCtx.Server.Insecure, + ClientCertFile: globalClientOpts.ClientCertFile, + ClientCertKeyFile: globalClientOpts.ClientCertKeyFile, + GRPCWeb: globalClientOpts.GRPCWeb, + GRPCWebRootPath: globalClientOpts.GRPCWebRootPath, + PlainText: configCtx.Server.PlainText, + Headers: globalClientOpts.Headers, } - acdClient := argocdclient.NewClientOrDie(&clientOpts) + acdClient := headless.NewClientOrDie(&clientOpts, c) claims, err := configCtx.User.Claims() errors.CheckError(err) if claims.Issuer == session.SessionManagerClaimsIssuer { fmt.Printf("Relogging in as '%s'\n", localconfig.GetUsername(claims.Subject)) - tokenString = passwordLogin(acdClient, localconfig.GetUsername(claims.Subject), password) + tokenString = passwordLogin(ctx, acdClient, localconfig.GetUsername(claims.Subject), password) } else { fmt.Println("Reinitiating SSO login") setConn, setIf := acdClient.NewSettingsClientOrDie() defer argoio.Close(setConn) - ctx := context.Background() httpClient, err := acdClient.HTTPClient() errors.CheckError(err) ctx = oidc.ClientContext(ctx, httpClient) @@ -81,8 +84,20 @@ func NewReloginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comm errors.CheckError(err) fmt.Printf("Context '%s' updated\n", localCfg.CurrentContext) }, + Example: ` +# Reinitiates the login with previous contexts +argocd relogin + +# Reinitiates the login with password +argocd relogin --password YOUR_PASSWORD + +# Configure direct access using Kubernetes API server +argocd login cd.argoproj.io --core + +# If user logged in with - "argocd login cd.argoproj.io" with sso login +# The command - "argocd relogin" will Reinitiates SSO 
login and updates the server context`, } - command.Flags().StringVar(&password, "password", "", "the password of an account to authenticate") - command.Flags().IntVar(&ssoPort, "sso-port", DefaultSSOLocalPort, "port to run local OAuth2 login application") + command.Flags().StringVar(&password, "password", "", "The password of an account to authenticate") + command.Flags().IntVar(&ssoPort, "sso-port", DefaultSSOLocalPort, "Port to run local OAuth2 login application") return command } diff --git a/cmd/argocd/commands/relogin_test.go b/cmd/argocd/commands/relogin_test.go new file mode 100644 index 0000000000000..eb6c4cd2d2f2d --- /dev/null +++ b/cmd/argocd/commands/relogin_test.go @@ -0,0 +1,64 @@ +package commands + +import ( + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + + argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient" +) + +func TestNewReloginCommand(t *testing.T) { + globalClientOpts := argocdclient.ClientOptions{ + ConfigPath: "/path/to/config", + } + + cmd := NewReloginCommand(&globalClientOpts) + + assert.Equal(t, "relogin", cmd.Use, "Unexpected command Use") + assert.Equal(t, "Refresh an expired authenticate token", cmd.Short, "Unexpected command Short") + assert.Equal(t, "Refresh an expired authenticate token", cmd.Long, "Unexpected command Long") + + // Assert command flags + passwordFlag := cmd.Flags().Lookup("password") + assert.NotNil(t, passwordFlag, "Expected flag --password to be defined") + assert.Equal(t, "", passwordFlag.Value.String(), "Unexpected default value for --password flag") + + ssoPortFlag := cmd.Flags().Lookup("sso-port") + port, err := strconv.Atoi(ssoPortFlag.Value.String()) + assert.NotNil(t, ssoPortFlag, "Expected flag --sso-port to be defined") + assert.NoError(t, err, "Failed to convert sso-port flag value to integer") + assert.Equal(t, 8085, port, "Unexpected default value for --sso-port flag") +} + +func TestNewReloginCommandWithGlobalClientOptions(t *testing.T) { + globalClientOpts := 
argocdclient.ClientOptions{ + ConfigPath: "/path/to/config", + ServerAddr: "https://argocd-server.example.com", + Insecure: true, + ClientCertFile: "/path/to/client-cert", + ClientCertKeyFile: "/path/to/client-cert-key", + GRPCWeb: true, + GRPCWebRootPath: "/path/to/grpc-web-root-path", + PlainText: true, + Headers: []string{"header1", "header2"}, + } + + cmd := NewReloginCommand(&globalClientOpts) + + assert.Equal(t, "relogin", cmd.Use, "Unexpected command Use") + assert.Equal(t, "Refresh an expired authenticate token", cmd.Short, "Unexpected command Short") + assert.Equal(t, "Refresh an expired authenticate token", cmd.Long, "Unexpected command Long") + + // Assert command flags + passwordFlag := cmd.Flags().Lookup("password") + assert.NotNil(t, passwordFlag, "Expected flag --password to be defined") + assert.Equal(t, "", passwordFlag.Value.String(), "Unexpected default value for --password flag") + + ssoPortFlag := cmd.Flags().Lookup("sso-port") + port, err := strconv.Atoi(ssoPortFlag.Value.String()) + assert.NotNil(t, ssoPortFlag, "Expected flag --sso-port to be defined") + assert.NoError(t, err, "Failed to convert sso-port flag value to integer") + assert.Equal(t, 8085, port, "Unexpected default value for --sso-port flag") +} diff --git a/cmd/argocd/commands/repo.go b/cmd/argocd/commands/repo.go index a36e3500d2104..2bf9714a06f11 100644 --- a/cmd/argocd/commands/repo.go +++ b/cmd/argocd/commands/repo.go @@ -1,15 +1,15 @@ package commands import ( - "context" "fmt" - "io/ioutil" "os" + "strconv" "text/tabwriter" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless" cmdutil "github.com/argoproj/argo-cd/v2/cmd/util" argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient" repositorypkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/repository" @@ -29,6 +29,19 @@ func NewRepoCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { c.HelpFunc()(c, args) os.Exit(1) }, + Example: ` +# Add 
git repository connection parameters +argocd repo add git@git.example.com:repos/repo + +# Get a Configured Repository by URL +argocd repo get https://github.com/yourusername/your-repo.git + +# List Configured Repositories +argocd repo list + +# Remove Repository Credentials +argocd repo rm https://github.com/yourusername/your-repo.git +`, } command.AddCommand(NewRepoAddCommand(clientOpts)) @@ -71,6 +84,9 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { # Add a private Git repository on GitHub Enterprise via GitHub App argocd repo add https://ghe.example.com/repos/repo --github-app-id 1 --github-app-installation-id 2 --github-app-private-key-path test.private-key.pem --github-app-enterprise-base-url https://ghe.example.com/api/v3 + + # Add a private Git repository on Google Cloud Sources via GCP service account credentials + argocd repo add https://source.developers.google.com/p/my-google-cloud-project/r/my-repo --gcp-service-account-key-path service-account-key.json ` var command = &cobra.Command{ @@ -78,6 +94,8 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { Short: "Add git repository connection parameters", Example: repoAddExamples, Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 1 { c.HelpFunc()(c, args) os.Exit(1) @@ -89,7 +107,7 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { // Specifying ssh-private-key-path is only valid for SSH repositories if repoOpts.SshPrivateKeyPath != "" { if ok, _ := git.IsSSHURL(repoOpts.Repo.Repo); ok { - keyData, err := ioutil.ReadFile(repoOpts.SshPrivateKeyPath) + keyData, err := os.ReadFile(repoOpts.SshPrivateKeyPath) if err != nil { log.Fatal(err) } @@ -110,9 +128,9 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { // Specifying tls-client-cert-path is only valid for HTTPS repositories if repoOpts.TlsClientCertPath != "" { if 
git.IsHTTPSURL(repoOpts.Repo.Repo) { - tlsCertData, err := ioutil.ReadFile(repoOpts.TlsClientCertPath) + tlsCertData, err := os.ReadFile(repoOpts.TlsClientCertPath) errors.CheckError(err) - tlsCertKey, err := ioutil.ReadFile(repoOpts.TlsClientCertKeyPath) + tlsCertKey, err := os.ReadFile(repoOpts.TlsClientCertKeyPath) errors.CheckError(err) repoOpts.Repo.TLSClientCertData = string(tlsCertData) repoOpts.Repo.TLSClientCertKey = string(tlsCertKey) @@ -125,7 +143,7 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { // Specifying github-app-private-key-path is only valid for HTTPS repositories if repoOpts.GithubAppPrivateKeyPath != "" { if git.IsHTTPSURL(repoOpts.Repo.Repo) { - githubAppPrivateKey, err := ioutil.ReadFile(repoOpts.GithubAppPrivateKeyPath) + githubAppPrivateKey, err := os.ReadFile(repoOpts.GithubAppPrivateKeyPath) errors.CheckError(err) repoOpts.Repo.GithubAppPrivateKey = string(githubAppPrivateKey) } else { @@ -134,6 +152,17 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { } } + if repoOpts.GCPServiceAccountKeyPath != "" { + if git.IsHTTPSURL(repoOpts.Repo.Repo) { + gcpServiceAccountKey, err := os.ReadFile(repoOpts.GCPServiceAccountKeyPath) + errors.CheckError(err) + repoOpts.Repo.GCPServiceAccountKey = string(gcpServiceAccountKey) + } else { + err := fmt.Errorf("--gcp-service-account-key-path is only supported for HTTPS repositories") + errors.CheckError(err) + } + } + // Set repository connection properties only when creating repository, not // when creating repository credentials. 
// InsecureIgnoreHostKey is deprecated and only here for backwards compat @@ -144,12 +173,14 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { repoOpts.Repo.GithubAppId = repoOpts.GithubAppId repoOpts.Repo.GithubAppInstallationId = repoOpts.GithubAppInstallationId repoOpts.Repo.GitHubAppEnterpriseBaseURL = repoOpts.GitHubAppEnterpriseBaseURL + repoOpts.Repo.Proxy = repoOpts.Proxy + repoOpts.Repo.ForceHttpBasicAuth = repoOpts.ForceHttpBasicAuth if repoOpts.Repo.Type == "helm" && repoOpts.Repo.Name == "" { errors.CheckError(fmt.Errorf("Must specify --name for repos of type 'helm'")) } - conn, repoIf := argocdclient.NewClientOrDie(clientOpts).NewRepoClientOrDie() + conn, repoIf := headless.NewClientOrDie(clientOpts, c).NewRepoClientOrDie() defer io.Close(conn) // If the user set a username, but didn't supply password via --password, @@ -180,8 +211,12 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { GithubAppID: repoOpts.Repo.GithubAppId, GithubAppInstallationID: repoOpts.Repo.GithubAppInstallationId, GithubAppEnterpriseBaseUrl: repoOpts.Repo.GitHubAppEnterpriseBaseURL, + Proxy: repoOpts.Proxy, + Project: repoOpts.Repo.Project, + GcpServiceAccountKey: repoOpts.Repo.GCPServiceAccountKey, + ForceHttpBasicAuth: repoOpts.Repo.ForceHttpBasicAuth, } - _, err := repoIf.ValidateAccess(context.Background(), &repoAccessReq) + _, err := repoIf.ValidateAccess(ctx, &repoAccessReq) errors.CheckError(err) repoCreateReq := repositorypkg.RepoCreateRequest{ @@ -189,7 +224,7 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { Upsert: repoOpts.Upsert, } - createdRepo, err := repoIf.Create(context.Background(), &repoCreateReq) + createdRepo, err := repoIf.CreateRepository(ctx, &repoCreateReq) errors.CheckError(err) fmt.Printf("Repository '%s' added\n", createdRepo.Repo) }, @@ -199,20 +234,22 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { return command } -// 
NewRepoRemoveCommand returns a new instance of an `argocd repo list` command +// NewRepoRemoveCommand returns a new instance of an `argocd repo remove` command func NewRepoRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { var command = &cobra.Command{ Use: "rm REPO", Short: "Remove repository credentials", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) == 0 { c.HelpFunc()(c, args) os.Exit(1) } - conn, repoIf := argocdclient.NewClientOrDie(clientOpts).NewRepoClientOrDie() + conn, repoIf := headless.NewClientOrDie(clientOpts, c).NewRepoClientOrDie() defer io.Close(conn) for _, repoURL := range args { - _, err := repoIf.Delete(context.Background(), &repositorypkg.RepoQuery{Repo: repoURL}) + _, err := repoIf.DeleteRepository(ctx, &repositorypkg.RepoQuery{Repo: repoURL}) errors.CheckError(err) fmt.Printf("Repository '%s' removed\n", repoURL) } @@ -224,19 +261,16 @@ func NewRepoRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command // Print table of repo info func printRepoTable(repos appsv1.Repositories) { w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) - _, _ = fmt.Fprintf(w, "TYPE\tNAME\tREPO\tINSECURE\tOCI\tLFS\tCREDS\tSTATUS\tMESSAGE\n") + _, _ = fmt.Fprintf(w, "TYPE\tNAME\tREPO\tINSECURE\tOCI\tLFS\tCREDS\tSTATUS\tMESSAGE\tPROJECT\n") for _, r := range repos { var hasCreds string - if !r.HasCredentials() { - hasCreds = "false" + if r.InheritedCreds { + hasCreds = "inherited" } else { - if r.InheritedCreds { - hasCreds = "inherited" - } else { - hasCreds = "true" - } + hasCreds = strconv.FormatBool(r.HasCredentials()) } - _, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%v\t%v\t%v\t%s\t%s\t%s\n", r.Type, r.Name, r.Repo, r.IsInsecure(), r.EnableOCI, r.EnableLFS, hasCreds, r.ConnectionState.Status, r.ConnectionState.Message) + + _, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%v\t%v\t%v\t%s\t%s\t%s\t%s\n", r.Type, r.Name, r.Repo, r.IsInsecure(), r.EnableOCI, r.EnableLFS, hasCreds, r.ConnectionState.Status, 
r.ConnectionState.Message, r.Project) } _ = w.Flush() } @@ -248,7 +282,7 @@ func printRepoUrls(repos appsv1.Repositories) { } } -// NewRepoListCommand returns a new instance of an `argocd repo rm` command +// NewRepoListCommand returns a new instance of an `argocd repo list` command func NewRepoListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { var ( output string @@ -258,7 +292,9 @@ func NewRepoListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { Use: "list", Short: "List configured repositories", Run: func(c *cobra.Command, args []string) { - conn, repoIf := argocdclient.NewClientOrDie(clientOpts).NewRepoClientOrDie() + ctx := c.Context() + + conn, repoIf := headless.NewClientOrDie(clientOpts, c).NewRepoClientOrDie() defer io.Close(conn) forceRefresh := false switch refresh { @@ -269,7 +305,7 @@ func NewRepoListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { err := fmt.Errorf("--refresh must be one of: 'hard'") errors.CheckError(err) } - repos, err := repoIf.List(context.Background(), &repositorypkg.RepoQuery{ForceRefresh: forceRefresh}) + repos, err := repoIf.ListRepositories(ctx, &repositorypkg.RepoQuery{ForceRefresh: forceRefresh}) errors.CheckError(err) switch output { case "yaml", "json": @@ -286,11 +322,11 @@ func NewRepoListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { }, } command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. 
One of: json|yaml|wide|url") - command.Flags().StringVar(&refresh, "refresh", "", "Force a cache refresh on connection status") + command.Flags().StringVar(&refresh, "refresh", "", "Force a cache refresh on connection status , must be one of: 'hard'") return command } -// NewRepoGetCommand returns a new instance of an `argocd repo rm` command +// NewRepoGetCommand returns a new instance of an `argocd repo get` command func NewRepoGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { var ( output string @@ -300,6 +336,8 @@ func NewRepoGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { Use: "get", Short: "Get a configured repository by URL", Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 1 { c.HelpFunc()(c, args) os.Exit(1) @@ -307,7 +345,7 @@ func NewRepoGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { // Repository URL repoURL := args[0] - conn, repoIf := argocdclient.NewClientOrDie(clientOpts).NewRepoClientOrDie() + conn, repoIf := headless.NewClientOrDie(clientOpts, c).NewRepoClientOrDie() defer io.Close(conn) forceRefresh := false switch refresh { @@ -318,7 +356,7 @@ func NewRepoGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { err := fmt.Errorf("--refresh must be one of: 'hard'") errors.CheckError(err) } - repo, err := repoIf.Get(context.Background(), &repositorypkg.RepoQuery{Repo: repoURL, ForceRefresh: forceRefresh}) + repo, err := repoIf.Get(ctx, &repositorypkg.RepoQuery{Repo: repoURL, ForceRefresh: forceRefresh}) errors.CheckError(err) switch output { case "yaml", "json": @@ -335,6 +373,6 @@ func NewRepoGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { }, } command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. 
One of: json|yaml|wide|url") - command.Flags().StringVar(&refresh, "refresh", "", "Force a cache refresh on connection status") + command.Flags().StringVar(&refresh, "refresh", "", "Force a cache refresh on connection status , must be one of: 'hard'") return command } diff --git a/cmd/argocd/commands/repocreds.go b/cmd/argocd/commands/repocreds.go index e007bcac05689..cf764e7d84de9 100644 --- a/cmd/argocd/commands/repocreds.go +++ b/cmd/argocd/commands/repocreds.go @@ -1,15 +1,15 @@ package commands import ( - "context" "fmt" - "io/ioutil" "os" "text/tabwriter" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless" + "github.com/argoproj/argo-cd/v2/common" argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient" repocredspkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/repocreds" appsv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" @@ -17,6 +17,7 @@ import ( "github.com/argoproj/argo-cd/v2/util/errors" "github.com/argoproj/argo-cd/v2/util/git" "github.com/argoproj/argo-cd/v2/util/io" + "github.com/argoproj/argo-cd/v2/util/templates" ) // NewRepoCredsCommand returns a new instance of an `argocd repocreds` command @@ -24,6 +25,16 @@ func NewRepoCredsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command var command = &cobra.Command{ Use: "repocreds", Short: "Manage repository connection parameters", + Example: templates.Examples(` + # Add credentials with user/pass authentication to use for all repositories under the specified URL + argocd repocreds add URL --username USERNAME --password PASSWORD + + # List all the configured repository credentials + argocd repocreds list + + # Remove credentials for the repositories with specified URL + argocd repocreds rm URL + `), Run: func(c *cobra.Command, args []string) { c.HelpFunc()(c, args) os.Exit(1) @@ -39,12 +50,13 @@ func NewRepoCredsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command // NewRepoCredsAddCommand 
returns a new instance of an `argocd repocreds add` command func NewRepoCredsAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command { var ( - repo appsv1.RepoCreds - upsert bool - sshPrivateKeyPath string - tlsClientCertPath string - tlsClientCertKeyPath string - githubAppPrivateKeyPath string + repo appsv1.RepoCreds + upsert bool + sshPrivateKeyPath string + tlsClientCertPath string + tlsClientCertKeyPath string + githubAppPrivateKeyPath string + gcpServiceAccountKeyPath string ) // For better readability and easier formatting @@ -59,6 +71,12 @@ func NewRepoCredsAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comma # Add credentials with GitHub App authentication to use for all repositories under https://ghe.example.com/repos argocd repocreds add https://ghe.example.com/repos/ --github-app-id 1 --github-app-installation-id 2 --github-app-private-key-path test.private-key.pem --github-app-enterprise-base-url https://ghe.example.com/api/v3 + + # Add credentials with helm oci registry so that these oci registry urls do not need to be added as repos individually. 
+ argocd repocreds add localhost:5000/myrepo --enable-oci --type helm + + # Add credentials with GCP credentials for all repositories under https://source.developers.google.com/p/my-google-cloud-project/r/ + argocd repocreds add https://source.developers.google.com/p/my-google-cloud-project/r/ --gcp-service-account-key-path service-account-key.json ` var command = &cobra.Command{ @@ -66,6 +84,8 @@ func NewRepoCredsAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comma Short: "Add git repository connection parameters", Example: repocredsAddExamples, Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) != 1 { c.HelpFunc()(c, args) os.Exit(1) @@ -77,7 +97,7 @@ func NewRepoCredsAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comma // Specifying ssh-private-key-path is only valid for SSH repositories if sshPrivateKeyPath != "" { if ok, _ := git.IsSSHURL(repo.URL); ok { - keyData, err := ioutil.ReadFile(sshPrivateKeyPath) + keyData, err := os.ReadFile(sshPrivateKeyPath) if err != nil { log.Fatal(err) } @@ -98,9 +118,9 @@ func NewRepoCredsAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comma // Specifying tls-client-cert-path is only valid for HTTPS repositories if tlsClientCertPath != "" { if git.IsHTTPSURL(repo.URL) { - tlsCertData, err := ioutil.ReadFile(tlsClientCertPath) + tlsCertData, err := os.ReadFile(tlsClientCertPath) errors.CheckError(err) - tlsCertKey, err := ioutil.ReadFile(tlsClientCertKeyPath) + tlsCertKey, err := os.ReadFile(tlsClientCertKeyPath) errors.CheckError(err) repo.TLSClientCertData = string(tlsCertData) repo.TLSClientCertKey = string(tlsCertKey) @@ -113,7 +133,7 @@ func NewRepoCredsAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comma // Specifying github-app-private-key-path is only valid for HTTPS repositories if githubAppPrivateKeyPath != "" { if git.IsHTTPSURL(repo.URL) { - githubAppPrivateKey, err := ioutil.ReadFile(githubAppPrivateKeyPath) + githubAppPrivateKey, err := 
os.ReadFile(githubAppPrivateKeyPath) errors.CheckError(err) repo.GithubAppPrivateKey = string(githubAppPrivateKey) } else { @@ -122,7 +142,19 @@ func NewRepoCredsAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comma } } - conn, repoIf := argocdclient.NewClientOrDie(clientOpts).NewRepoCredsClientOrDie() + // Specifying gcpServiceAccountKeyPath is only valid for HTTPS repositories + if gcpServiceAccountKeyPath != "" { + if git.IsHTTPSURL(repo.URL) { + gcpServiceAccountKey, err := os.ReadFile(gcpServiceAccountKeyPath) + errors.CheckError(err) + repo.GCPServiceAccountKey = string(gcpServiceAccountKey) + } else { + err := fmt.Errorf("--gcp-service-account-key-path is only supported for HTTPS repositories") + errors.CheckError(err) + } + } + + conn, repoIf := headless.NewClientOrDie(clientOpts, c).NewRepoCredsClientOrDie() defer io.Close(conn) // If the user set a username, but didn't supply password via --password, @@ -136,7 +168,7 @@ func NewRepoCredsAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comma Upsert: upsert, } - createdRepo, err := repoIf.CreateRepositoryCredentials(context.Background(), &repoCreateReq) + createdRepo, err := repoIf.CreateRepositoryCredentials(ctx, &repoCreateReq) errors.CheckError(err) fmt.Printf("Repository credentials for '%s' added\n", createdRepo.URL) }, @@ -151,6 +183,10 @@ func NewRepoCredsAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comma command.Flags().StringVar(&githubAppPrivateKeyPath, "github-app-private-key-path", "", "private key of the GitHub Application") command.Flags().StringVar(&repo.GitHubAppEnterpriseBaseURL, "github-app-enterprise-base-url", "", "base url to use when using GitHub Enterprise (e.g. 
https://ghe.example.com/api/v3") command.Flags().BoolVar(&upsert, "upsert", false, "Override an existing repository with the same name even if the spec differs") + command.Flags().BoolVar(&repo.EnableOCI, "enable-oci", false, "Specifies whether helm-oci support should be enabled for this repo") + command.Flags().StringVar(&repo.Type, "type", common.DefaultRepoType, "type of the repository, \"git\" or \"helm\"") + command.Flags().StringVar(&gcpServiceAccountKeyPath, "gcp-service-account-key-path", "", "service account key for the Google Cloud Platform") + command.Flags().BoolVar(&repo.ForceHttpBasicAuth, "force-http-basic-auth", false, "whether to force basic auth when connecting via HTTP") return command } @@ -159,15 +195,21 @@ func NewRepoCredsRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co var command = &cobra.Command{ Use: "rm CREDSURL", Short: "Remove repository credentials", + Example: templates.Examples(` + # Remove credentials for the repositories with URL https://git.example.com/repos + argocd repocreds rm https://git.example.com/repos/ + `), Run: func(c *cobra.Command, args []string) { + ctx := c.Context() + if len(args) == 0 { c.HelpFunc()(c, args) os.Exit(1) } - conn, repoIf := argocdclient.NewClientOrDie(clientOpts).NewRepoCredsClientOrDie() + conn, repoIf := headless.NewClientOrDie(clientOpts, c).NewRepoCredsClientOrDie() defer io.Close(conn) for _, repoURL := range args { - _, err := repoIf.DeleteRepositoryCredentials(context.Background(), &repocredspkg.RepoCredsDeleteRequest{Url: repoURL}) + _, err := repoIf.DeleteRepositoryCredentials(ctx, &repocredspkg.RepoCredsDeleteRequest{Url: repoURL}) errors.CheckError(err) fmt.Printf("Repository credentials for '%s' removed\n", repoURL) } @@ -204,10 +246,19 @@ func NewRepoCredsListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm var command = &cobra.Command{ Use: "list", Short: "List configured repository credentials", + Example: templates.Examples(` + # List all the configured 
repository credentials + argocd repocreds list + + # List all the configured repository credentials in json format + argocd repocreds list -o json + `), Run: func(c *cobra.Command, args []string) { - conn, repoIf := argocdclient.NewClientOrDie(clientOpts).NewRepoCredsClientOrDie() + ctx := c.Context() + + conn, repoIf := headless.NewClientOrDie(clientOpts, c).NewRepoCredsClientOrDie() defer io.Close(conn) - repos, err := repoIf.ListRepositoryCredentials(context.Background(), &repocredspkg.RepoCredsQuery{}) + repos, err := repoIf.ListRepositoryCredentials(ctx, &repocredspkg.RepoCredsQuery{}) errors.CheckError(err) switch output { case "yaml", "json": diff --git a/cmd/argocd/commands/root.go b/cmd/argocd/commands/root.go index 42c274090bbd1..5c3b984e5bff5 100644 --- a/cmd/argocd/commands/root.go +++ b/cmd/argocd/commands/root.go @@ -1,13 +1,19 @@ package commands import ( + "fmt" + "github.com/spf13/cobra" "k8s.io/client-go/tools/clientcmd" + "github.com/argoproj/argo-cd/v2/cmd/argocd/commands/admin" + "github.com/argoproj/argo-cd/v2/cmd/argocd/commands/initialize" cmdutil "github.com/argoproj/argo-cd/v2/cmd/util" + "github.com/argoproj/argo-cd/v2/common" argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient" "github.com/argoproj/argo-cd/v2/util/cli" "github.com/argoproj/argo-cd/v2/util/config" + "github.com/argoproj/argo-cd/v2/util/env" "github.com/argoproj/argo-cd/v2/util/errors" "github.com/argoproj/argo-cd/v2/util/localconfig" ) @@ -35,22 +41,25 @@ func NewCommand() *cobra.Command { c.HelpFunc()(c, args) }, DisableAutoGenTag: true, + SilenceUsage: true, } command.AddCommand(NewCompletionCommand()) - command.AddCommand(NewVersionCmd(&clientOpts)) - command.AddCommand(NewClusterCommand(&clientOpts, pathOpts)) - command.AddCommand(NewApplicationCommand(&clientOpts)) + command.AddCommand(initialize.InitCommand(NewVersionCmd(&clientOpts, nil))) + command.AddCommand(initialize.InitCommand(NewClusterCommand(&clientOpts, pathOpts))) + 
command.AddCommand(initialize.InitCommand(NewApplicationCommand(&clientOpts))) + command.AddCommand(initialize.InitCommand(NewAppSetCommand(&clientOpts))) command.AddCommand(NewLoginCommand(&clientOpts)) command.AddCommand(NewReloginCommand(&clientOpts)) - command.AddCommand(NewRepoCommand(&clientOpts)) - command.AddCommand(NewRepoCredsCommand(&clientOpts)) + command.AddCommand(initialize.InitCommand(NewRepoCommand(&clientOpts))) + command.AddCommand(initialize.InitCommand(NewRepoCredsCommand(&clientOpts))) command.AddCommand(NewContextCommand(&clientOpts)) - command.AddCommand(NewProjectCommand(&clientOpts)) - command.AddCommand(NewAccountCommand(&clientOpts)) + command.AddCommand(initialize.InitCommand(NewProjectCommand(&clientOpts))) + command.AddCommand(initialize.InitCommand(NewAccountCommand(&clientOpts))) command.AddCommand(NewLogoutCommand(&clientOpts)) - command.AddCommand(NewCertCommand(&clientOpts)) - command.AddCommand(NewGPGCommand(&clientOpts)) + command.AddCommand(initialize.InitCommand(NewCertCommand(&clientOpts))) + command.AddCommand(initialize.InitCommand(NewGPGCommand(&clientOpts))) + command.AddCommand(admin.NewAdminCommand(&clientOpts)) defaultLocalConfigPath, err := localconfig.DefaultLocalConfigPath() errors.CheckError(err) @@ -69,5 +78,16 @@ func NewCommand() *cobra.Command { command.PersistentFlags().StringSliceVarP(&clientOpts.Headers, "header", "H", []string{}, "Sets additional header to all requests made by Argo CD CLI. 
(Can be repeated multiple times to add multiple headers, also supports comma separated headers)") command.PersistentFlags().BoolVar(&clientOpts.PortForward, "port-forward", config.GetBoolFlag("port-forward"), "Connect to a random argocd-server port using port forwarding") command.PersistentFlags().StringVar(&clientOpts.PortForwardNamespace, "port-forward-namespace", config.GetFlag("port-forward-namespace", ""), "Namespace name which should be used for port forwarding") + command.PersistentFlags().IntVar(&clientOpts.HttpRetryMax, "http-retry-max", 0, "Maximum number of retries to establish http connection to Argo CD server") + command.PersistentFlags().BoolVar(&clientOpts.Core, "core", false, "If set to true then CLI talks directly to Kubernetes instead of talking to Argo CD API server") + command.PersistentFlags().StringVar(&clientOpts.ServerName, "server-name", env.StringFromEnv(common.EnvServerName, common.DefaultServerName), fmt.Sprintf("Name of the Argo CD API server; set this or the %s environment variable when the server's name label differs from the default, for example when installing via the Helm chart", common.EnvServerName)) + command.PersistentFlags().StringVar(&clientOpts.AppControllerName, "controller-name", env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName), fmt.Sprintf("Name of the Argo CD Application controller; set this or the %s environment variable when the controller's name label differs from the default, for example when installing via the Helm chart", common.EnvAppControllerName)) + command.PersistentFlags().StringVar(&clientOpts.RedisHaProxyName, "redis-haproxy-name", env.StringFromEnv(common.EnvRedisHaProxyName, common.DefaultRedisHaProxyName), fmt.Sprintf("Name of the Redis HA Proxy; set this or the %s environment variable when the HA Proxy's name label differs from the default, for example when installing via the Helm chart", common.EnvRedisHaProxyName)) + 
command.PersistentFlags().StringVar(&clientOpts.RedisName, "redis-name", env.StringFromEnv(common.EnvRedisName, common.DefaultRedisName), fmt.Sprintf("Name of the Redis deployment; set this or the %s environment variable when the Redis's name label differs from the default, for example when installing via the Helm chart", common.EnvRedisName)) + command.PersistentFlags().StringVar(&clientOpts.RepoServerName, "repo-server-name", env.StringFromEnv(common.EnvRepoServerName, common.DefaultRepoServerName), fmt.Sprintf("Name of the Argo CD Repo server; set this or the %s environment variable when the server's name label differs from the default, for example when installing via the Helm chart", common.EnvRepoServerName)) + + clientOpts.KubeOverrides = &clientcmd.ConfigOverrides{} + command.PersistentFlags().StringVar(&clientOpts.KubeOverrides.CurrentContext, "kube-context", "", "Directs the command to the given kube-context") + return command } diff --git a/cmd/argocd/commands/testdata/config b/cmd/argocd/commands/testdata/config index 5c35cd1e4af12..0df9325b6d339 100644 --- a/cmd/argocd/commands/testdata/config +++ b/cmd/argocd/commands/testdata/config @@ -1,25 +1,31 @@ +clusters: +- cluster: + server: argocd1.example.com:443 + name: argocd1.example.com:443 +- cluster: + server: argocd2.example.com:443 + name: argocd2.example.com:443 +- cluster: + server: localhost:8080 + name: localhost:8080 contexts: -- name: argocd1.example.com:443 - server: argocd1.example.com:443 - user: argocd1.example.com:443 -- name: argocd2.example.com:443 - server: argocd2.example.com:443 - user: argocd2.example.com:443 -- name: localhost:8080 - server: localhost:8080 - user: localhost:8080 -current-context: localhost:8080 -servers: -- server: argocd1.example.com:443 -- server: argocd2.example.com:443 -- plain-text: true - server: localhost:8080 -users: -- auth-token: vErrYS3c3tReFRe$hToken +- context: + server: argocd1.example.com:443 + user: argocd1.example.com:443 + cluster: 
argocd1.example.com:443 name: argocd1.example.com:443 - refresh-token: vErrYS3c3tReFRe$hToken -- auth-token: vErrYS3c3tReFRe$hToken +- context: + server: argocd2.example.com:443 + user: argocd2.example.com:443 + cluster: argocd2.example.com:443 name: argocd2.example.com:443 - refresh-token: vErrYS3c3tReFRe$hToken -- auth-token: vErrYS3c3tReFRe$hToken - name: localhost:8080 \ No newline at end of file +- context: + server: localhost:8080 + user: localhost:8080 + cluster: localhost:8080 + name: localhost:8080 +current-context: localhost:8080 +users: +- name: argocd1.example.com:443 +- name: argocd2.example.com:443 +- name: localhost:8080 \ No newline at end of file diff --git a/cmd/argocd/commands/tree.go b/cmd/argocd/commands/tree.go new file mode 100644 index 0000000000000..5261adb5b7f4a --- /dev/null +++ b/cmd/argocd/commands/tree.go @@ -0,0 +1,168 @@ +package commands + +import ( + "fmt" + "strings" + "text/tabwriter" + "time" + + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/argoproj/gitops-engine/pkg/health" + "k8s.io/apimachinery/pkg/util/duration" +) + +const ( + firstElemPrefix = `├─` + lastElemPrefix = `└─` + indent = " " + pipe = `│ ` +) + +func extractHealthStatusAndReason(node v1alpha1.ResourceNode) (healthStatus health.HealthStatusCode, reason string) { + if node.Health != nil { + healthStatus = node.Health.Status + reason = node.Health.Message + } + return +} + +func treeViewAppGet(prefix string, uidToNodeMap map[string]v1alpha1.ResourceNode, parentToChildMap map[string][]string, parent v1alpha1.ResourceNode, mapNodeNameToResourceState map[string]*resourceState, w *tabwriter.Writer) { + healthStatus, _ := extractHealthStatusAndReason(parent) + if mapNodeNameToResourceState[parent.Kind+"/"+parent.Name] != nil { + value := mapNodeNameToResourceState[parent.Kind+"/"+parent.Name] + _, _ = fmt.Fprintf(w, "%s%s\t%s\t%s\t%s\n", printPrefix(prefix), parent.Kind+"/"+value.Name, value.Status, value.Health, value.Message) + } else { 
+ _, _ = fmt.Fprintf(w, "%s%s\t%s\t%s\t%s\n", printPrefix(prefix), parent.Kind+"/"+parent.Name, "", healthStatus, "") + } + chs := parentToChildMap[parent.UID] + for i, childUid := range chs { + var p string + switch i { + case len(chs) - 1: + p = prefix + lastElemPrefix + default: + p = prefix + firstElemPrefix + } + treeViewAppGet(p, uidToNodeMap, parentToChildMap, uidToNodeMap[childUid], mapNodeNameToResourceState, w) + } + +} + +func detailedTreeViewAppGet(prefix string, uidToNodeMap map[string]v1alpha1.ResourceNode, parentChildMap map[string][]string, parent v1alpha1.ResourceNode, mapNodeNameToResourceState map[string]*resourceState, w *tabwriter.Writer) { + healthStatus, reason := extractHealthStatusAndReason(parent) + var age = "" + if parent.CreatedAt != nil { + age = duration.HumanDuration(time.Since(parent.CreatedAt.Time)) + } + + if mapNodeNameToResourceState[parent.Kind+"/"+parent.Name] != nil { + value := mapNodeNameToResourceState[parent.Kind+"/"+parent.Name] + _, _ = fmt.Fprintf(w, "%s%s\t%s\t%s\t%s\t%s\t%s\n", printPrefix(prefix), parent.Kind+"/"+value.Name, value.Status, value.Health, age, value.Message, reason) + } else { + _, _ = fmt.Fprintf(w, "%s%s\t%s\t%s\t%s\t%s\t%s\n", printPrefix(prefix), parent.Kind+"/"+parent.Name, "", healthStatus, age, "", reason) + + } + chs := parentChildMap[parent.UID] + for i, child := range chs { + var p string + switch i { + case len(chs) - 1: + p = prefix + lastElemPrefix + default: + p = prefix + firstElemPrefix + } + detailedTreeViewAppGet(p, uidToNodeMap, parentChildMap, uidToNodeMap[child], mapNodeNameToResourceState, w) + } +} + +func treeViewAppResourcesNotOrphaned(prefix string, uidToNodeMap map[string]v1alpha1.ResourceNode, parentChildMap map[string][]string, parent v1alpha1.ResourceNode, w *tabwriter.Writer) { + if len(parent.ParentRefs) == 0 { + _, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", parent.Group, parent.Kind, parent.Namespace, parent.Name, "No") + } + chs := parentChildMap[parent.UID] + for i, 
child := range chs { + var p string + switch i { + case len(chs) - 1: + p = prefix + lastElemPrefix + default: + p = prefix + firstElemPrefix + } + treeViewAppResourcesNotOrphaned(p, uidToNodeMap, parentChildMap, uidToNodeMap[child], w) + } +} + +func treeViewAppResourcesOrphaned(prefix string, uidToNodeMap map[string]v1alpha1.ResourceNode, parentChildMap map[string][]string, parent v1alpha1.ResourceNode, w *tabwriter.Writer) { + _, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", parent.Group, parent.Kind, parent.Namespace, parent.Name, "Yes") + chs := parentChildMap[parent.UID] + for i, child := range chs { + var p string + switch i { + case len(chs) - 1: + p = prefix + lastElemPrefix + default: + p = prefix + firstElemPrefix + } + treeViewAppResourcesOrphaned(p, uidToNodeMap, parentChildMap, uidToNodeMap[child], w) + } +} + +func detailedTreeViewAppResourcesNotOrphaned(prefix string, uidToNodeMap map[string]v1alpha1.ResourceNode, parentChildMap map[string][]string, parent v1alpha1.ResourceNode, w *tabwriter.Writer) { + + if len(parent.ParentRefs) == 0 { + healthStatus, reason := extractHealthStatusAndReason(parent) + var age = "" + if parent.CreatedAt != nil { + age = duration.HumanDuration(time.Since(parent.CreatedAt.Time)) + } + _, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n", parent.Group, parent.Kind, parent.Namespace, parent.Name, "No", age, healthStatus, reason) + } + chs := parentChildMap[parent.UID] + for i, child := range chs { + var p string + switch i { + case len(chs) - 1: + p = prefix + lastElemPrefix + default: + p = prefix + firstElemPrefix + } + detailedTreeViewAppResourcesNotOrphaned(p, uidToNodeMap, parentChildMap, uidToNodeMap[child], w) + } +} + +func detailedTreeViewAppResourcesOrphaned(prefix string, uidToNodeMap map[string]v1alpha1.ResourceNode, parentChildMap map[string][]string, parent v1alpha1.ResourceNode, w *tabwriter.Writer) { + healthStatus, reason := extractHealthStatusAndReason(parent) + var age = "" + if parent.CreatedAt != 
nil { + age = duration.HumanDuration(time.Since(parent.CreatedAt.Time)) + } + _, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n", parent.Group, parent.Kind, parent.Namespace, parent.Name, "Yes", age, healthStatus, reason) + + chs := parentChildMap[parent.UID] + for i, child := range chs { + var p string + switch i { + case len(chs) - 1: + p = prefix + lastElemPrefix + default: + p = prefix + firstElemPrefix + } + detailedTreeViewAppResourcesOrphaned(p, uidToNodeMap, parentChildMap, uidToNodeMap[child], w) + } +} + +func printPrefix(p string) string { + + if strings.HasSuffix(p, firstElemPrefix) { + p = strings.Replace(p, firstElemPrefix, pipe, strings.Count(p, firstElemPrefix)-1) + } else { + p = strings.ReplaceAll(p, firstElemPrefix, pipe) + } + + if strings.HasSuffix(p, lastElemPrefix) { + p = strings.Replace(p, lastElemPrefix, strings.Repeat(" ", len([]rune(lastElemPrefix))), strings.Count(p, lastElemPrefix)-1) + } else { + p = strings.ReplaceAll(p, lastElemPrefix, strings.Repeat(" ", len([]rune(lastElemPrefix)))) + } + return p +} diff --git a/cmd/argocd/commands/tree_test.go b/cmd/argocd/commands/tree_test.go new file mode 100644 index 0000000000000..91ffb9b963d01 --- /dev/null +++ b/cmd/argocd/commands/tree_test.go @@ -0,0 +1,216 @@ +package commands + +import ( + "bytes" + "testing" + "text/tabwriter" + + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/stretchr/testify/assert" +) + +func TestTreeViewAppGet(t *testing.T) { + var parent v1alpha1.ResourceNode + parent.ResourceRef = v1alpha1.ResourceRef{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"} + objs := make(map[string]v1alpha1.ResourceNode) + objs["87f3aab0-f634-4b2c-959a-7ddd30675ed0"] = parent + var child v1alpha1.ResourceNode + child.ResourceRef = v1alpha1.ResourceRef{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: 
"sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5", UID: "75c30dce-1b66-414f-a86c-573a74be0f40"} + child.ParentRefs = []v1alpha1.ResourceRef{{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"}} + + objs["75c30dce-1b66-414f-a86c-573a74be0f40"] = child + + childMapping := make(map[string][]string) + childMapping["87f3aab0-f634-4b2c-959a-7ddd30675ed0"] = []string{"75c30dce-1b66-414f-a86c-573a74be0f40"} + + stateMap := make(map[string]*resourceState) + stateMap["Rollout/numalogic-rollout-demo"] = &resourceState{ + Status: "Running", + Health: "Healthy", + Hook: "", + Message: "No Issues", + Name: "sandbox-rollout-numalogic-demo", + Kind: "Rollout", + Group: "argoproj.io", + } + + buf := &bytes.Buffer{} + w := tabwriter.NewWriter(buf, 0, 0, 2, ' ', 0) + treeViewAppGet("", objs, childMapping, parent, stateMap, w) + if err := w.Flush(); err != nil { + t.Fatal(err) + } + output := buf.String() + assert.Contains(t, output, "ReplicaSet") + assert.Contains(t, output, "Rollout") + assert.Contains(t, output, "numalogic-rollout") + assert.Contains(t, output, "Healthy") + assert.Contains(t, output, "No Issues") +} + +func TestTreeViewDetailedAppGet(t *testing.T) { + var parent v1alpha1.ResourceNode + parent.ResourceRef = v1alpha1.ResourceRef{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"} + objs := make(map[string]v1alpha1.ResourceNode) + objs["87f3aab0-f634-4b2c-959a-7ddd30675ed0"] = parent + var child v1alpha1.ResourceNode + child.ResourceRef = v1alpha1.ResourceRef{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5", UID: "75c30dce-1b66-414f-a86c-573a74be0f40"} + child.ParentRefs = []v1alpha1.ResourceRef{{Group: 
"argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"}} + child.Health = &v1alpha1.HealthStatus{Status: "Degraded", Message: "Readiness Gate failed"} + objs["75c30dce-1b66-414f-a86c-573a74be0f40"] = child + + childMapping := make(map[string][]string) + childMapping["87f3aab0-f634-4b2c-959a-7ddd30675ed0"] = []string{"75c30dce-1b66-414f-a86c-573a74be0f40"} + + stateMap := make(map[string]*resourceState) + stateMap["Rollout/numalogic-rollout-demo"] = &resourceState{ + Status: "Running", + Health: "Healthy", + Hook: "", + Message: "No Issues", + Name: "sandbox-rollout-numalogic-demo", + Kind: "Rollout", + Group: "argoproj.io", + } + + buf := &bytes.Buffer{} + w := tabwriter.NewWriter(buf, 0, 0, 2, ' ', 0) + detailedTreeViewAppGet("", objs, childMapping, parent, stateMap, w) + if err := w.Flush(); err != nil { + t.Fatal(err) + } + + output := buf.String() + + assert.Contains(t, output, "ReplicaSet") + assert.Contains(t, output, "Rollout") + assert.Contains(t, output, "numalogic-rollout") + assert.Contains(t, output, "Healthy") + assert.Contains(t, output, "No Issues") + assert.Contains(t, output, "Degraded") + assert.Contains(t, output, "Readiness Gate failed") +} + +func TestTreeViewAppResources(t *testing.T) { + var parent v1alpha1.ResourceNode + parent.ResourceRef = v1alpha1.ResourceRef{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"} + objs := make(map[string]v1alpha1.ResourceNode) + objs["87f3aab0-f634-4b2c-959a-7ddd30675ed0"] = parent + var child v1alpha1.ResourceNode + child.ResourceRef = v1alpha1.ResourceRef{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5", UID: "75c30dce-1b66-414f-a86c-573a74be0f40"} + child.ParentRefs = 
[]v1alpha1.ResourceRef{{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"}} + + objs["75c30dce-1b66-414f-a86c-573a74be0f40"] = child + + childMapping := make(map[string][]string) + childMapping["87f3aab0-f634-4b2c-959a-7ddd30675ed0"] = []string{"75c30dce-1b66-414f-a86c-573a74be0f40"} + + buf := &bytes.Buffer{} + w := tabwriter.NewWriter(buf, 0, 0, 2, ' ', 0) + + treeViewAppResourcesNotOrphaned("", objs, childMapping, parent, w) + + var orphan v1alpha1.ResourceNode + orphan.ResourceRef = v1alpha1.ResourceRef{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcdnk457d5", UID: "75c30dce-1b66-41hf-a86c-573a74be0f40"} + objsOrphan := make(map[string]v1alpha1.ResourceNode) + objsOrphan["75c30dce-1b66-41hf-a86c-573a74be0f40"] = orphan + orphanchildMapping := make(map[string][]string) + orphanParent := orphan + + treeViewAppResourcesOrphaned("", objsOrphan, orphanchildMapping, orphanParent, w) + if err := w.Flush(); err != nil { + t.Fatal(err) + } + output := buf.String() + + assert.Contains(t, output, "ReplicaSet") + assert.Contains(t, output, "Rollout") + assert.Contains(t, output, "numalogic-rollout") + assert.Contains(t, output, "argoproj.io") + assert.Contains(t, output, "No") + assert.Contains(t, output, "Yes") + assert.Contains(t, output, "numalogic-rollout-demo-5dcdnk457d5") +} + +func TestTreeViewDetailedAppResources(t *testing.T) { + var parent v1alpha1.ResourceNode + parent.ResourceRef = v1alpha1.ResourceRef{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"} + objs := make(map[string]v1alpha1.ResourceNode) + objs["87f3aab0-f634-4b2c-959a-7ddd30675ed0"] = parent + var child v1alpha1.ResourceNode + child.ResourceRef = v1alpha1.ResourceRef{Group: 
"apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5", UID: "75c30dce-1b66-414f-a86c-573a74be0f40"} + child.ParentRefs = []v1alpha1.ResourceRef{{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"}} + objs["75c30dce-1b66-414f-a86c-573a74be0f40"] = child + childMapping := make(map[string][]string) + childMapping["87f3aab0-f634-4b2c-959a-7ddd30675ed0"] = []string{"75c30dce-1b66-414f-a86c-573a74be0f40"} + buf := &bytes.Buffer{} + w := tabwriter.NewWriter(buf, 0, 0, 2, ' ', 0) + detailedTreeViewAppResourcesNotOrphaned("", objs, childMapping, parent, w) + var orphan v1alpha1.ResourceNode + orphan.ResourceRef = v1alpha1.ResourceRef{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcdnk457d5", UID: "75c30dce-1b66-41hf-a86c-573a74be0f40"} + orphan.Health = &v1alpha1.HealthStatus{ + Status: "Degraded", + Message: "Readiness Gate failed", + } + objsOrphan := make(map[string]v1alpha1.ResourceNode) + objsOrphan["75c30dce-1b66-41hf-a86c-573a74be0f40"] = orphan + + orphanchildMapping := make(map[string][]string) + orphanParent := orphan + detailedTreeViewAppResourcesOrphaned("", objsOrphan, orphanchildMapping, orphanParent, w) + if err := w.Flush(); err != nil { + t.Fatal(err) + } + output := buf.String() + + assert.Contains(t, output, "ReplicaSet") + assert.Contains(t, output, "Rollout") + assert.Contains(t, output, "numalogic-rollout") + assert.Contains(t, output, "argoproj.io") + assert.Contains(t, output, "No") + assert.Contains(t, output, "Yes") + assert.Contains(t, output, "numalogic-rollout-demo-5dcdnk457d5") + assert.Contains(t, output, "Degraded") + assert.Contains(t, output, "Readiness Gate failed") +} + +func TestPrintPrefix(t *testing.T) { + tests := []struct { + input string + expected string + 
name string + }{ + { + input: "", + expected: "", + name: "empty string", + }, + { + input: firstElemPrefix, + expected: firstElemPrefix, + name: "only first element prefix", + }, + { + input: lastElemPrefix, + expected: lastElemPrefix, + name: "only last element prefix", + }, + { + input: firstElemPrefix + firstElemPrefix, + expected: pipe + firstElemPrefix, + name: "double first element prefix", + }, + { + input: firstElemPrefix + lastElemPrefix, + expected: pipe + lastElemPrefix, + name: "first then last element prefix", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := printPrefix(test.input) + assert.Equal(t, test.expected, got) + }) + } +} diff --git a/cmd/argocd/commands/version.go b/cmd/argocd/commands/version.go index 7efbd20e3916f..8c69c4195c3ad 100644 --- a/cmd/argocd/commands/version.go +++ b/cmd/argocd/commands/version.go @@ -8,6 +8,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless" "github.com/argoproj/argo-cd/v2/common" argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient" "github.com/argoproj/argo-cd/v2/pkg/apiclient/version" @@ -16,7 +17,7 @@ import ( ) // NewVersionCmd returns a new `version` command to be used as a sub-command to root -func NewVersionCmd(clientOpts *argocdclient.ClientOptions) *cobra.Command { +func NewVersionCmd(clientOpts *argocdclient.ClientOptions, serverVersion *version.VersionMessage) *cobra.Command { var ( short bool client bool @@ -39,8 +40,9 @@ func NewVersionCmd(clientOpts *argocdclient.ClientOptions) *cobra.Command { argocd version --short -o yaml `, Run: func(cmd *cobra.Command, args []string) { - cv := common.GetVersion() + ctx := cmd.Context() + cv := common.GetVersion() switch output { case "yaml", "json": v := make(map[string]interface{}) @@ -52,7 +54,12 @@ func NewVersionCmd(clientOpts *argocdclient.ClientOptions) *cobra.Command { } if !client { - sv := getServerVersion(clientOpts) + 
var sv *version.VersionMessage + if serverVersion == nil { + sv = getServerVersion(ctx, clientOpts, cmd) + } else { + sv = serverVersion + } if short { v["server"] = map[string]string{"argocd-server": sv.Version} @@ -64,11 +71,15 @@ func NewVersionCmd(clientOpts *argocdclient.ClientOptions) *cobra.Command { err := PrintResource(v, output) errors.CheckError(err) case "wide", "short", "": - printClientVersion(&cv, short || (output == "short")) - + fmt.Fprint(cmd.OutOrStdout(), printClientVersion(&cv, short || (output == "short"))) if !client { - sv := getServerVersion(clientOpts) - printServerVersion(sv, short || (output == "short")) + var sv *version.VersionMessage + if serverVersion == nil { + sv = getServerVersion(ctx, clientOpts, cmd) + } else { + sv = serverVersion + } + fmt.Fprint(cmd.OutOrStdout(), printServerVersion(sv, short || (output == "short"))) } default: log.Fatalf("unknown output format: %s", output) @@ -81,75 +92,78 @@ func NewVersionCmd(clientOpts *argocdclient.ClientOptions) *cobra.Command { return &versionCmd } -func getServerVersion(options *argocdclient.ClientOptions) *version.VersionMessage { - conn, versionIf := argocdclient.NewClientOrDie(options).NewVersionClientOrDie() +func getServerVersion(ctx context.Context, options *argocdclient.ClientOptions, c *cobra.Command) *version.VersionMessage { + conn, versionIf := headless.NewClientOrDie(options, c).NewVersionClientOrDie() defer argoio.Close(conn) - v, err := versionIf.Version(context.Background(), &empty.Empty{}) + v, err := versionIf.Version(ctx, &empty.Empty{}) errors.CheckError(err) return v } -func printClientVersion(version *common.Version, short bool) { - fmt.Printf("%s: %s\n", cliName, version) - +func printClientVersion(version *common.Version, short bool) string { + output := fmt.Sprintf("%s: %s\n", cliName, version) if short { - return + return output } - - fmt.Printf(" BuildDate: %s\n", version.BuildDate) - fmt.Printf(" GitCommit: %s\n", version.GitCommit) - fmt.Printf(" 
GitTreeState: %s\n", version.GitTreeState) + output += fmt.Sprintf(" BuildDate: %s\n", version.BuildDate) + output += fmt.Sprintf(" GitCommit: %s\n", version.GitCommit) + output += fmt.Sprintf(" GitTreeState: %s\n", version.GitTreeState) if version.GitTag != "" { - fmt.Printf(" GitTag: %s\n", version.GitTag) + output += fmt.Sprintf(" GitTag: %s\n", version.GitTag) + } + output += fmt.Sprintf(" GoVersion: %s\n", version.GoVersion) + output += fmt.Sprintf(" Compiler: %s\n", version.Compiler) + output += fmt.Sprintf(" Platform: %s\n", version.Platform) + if version.ExtraBuildInfo != "" { + output += fmt.Sprintf(" ExtraBuildInfo: %s\n", version.ExtraBuildInfo) } - fmt.Printf(" GoVersion: %s\n", version.GoVersion) - fmt.Printf(" Compiler: %s\n", version.Compiler) - fmt.Printf(" Platform: %s\n", version.Platform) + return output } -func printServerVersion(version *version.VersionMessage, short bool) { - fmt.Printf("%s: %s\n", "argocd-server", version.Version) +func printServerVersion(version *version.VersionMessage, short bool) string { + output := fmt.Sprintf("%s: %s\n", "argocd-server", version.Version) if short { - return + return output } if version.BuildDate != "" { - fmt.Printf(" BuildDate: %s\n", version.BuildDate) + output += fmt.Sprintf(" BuildDate: %s\n", version.BuildDate) } if version.GitCommit != "" { - fmt.Printf(" GitCommit: %s\n", version.GitCommit) + output += fmt.Sprintf(" GitCommit: %s\n", version.GitCommit) } if version.GitTreeState != "" { - fmt.Printf(" GitTreeState: %s\n", version.GitTreeState) + output += fmt.Sprintf(" GitTreeState: %s\n", version.GitTreeState) } if version.GitTag != "" { - fmt.Printf(" GitTag: %s\n", version.GitTag) + output += fmt.Sprintf(" GitTag: %s\n", version.GitTag) } if version.GoVersion != "" { - fmt.Printf(" GoVersion: %s\n", version.GoVersion) + output += fmt.Sprintf(" GoVersion: %s\n", version.GoVersion) } if version.Compiler != "" { - fmt.Printf(" Compiler: %s\n", version.Compiler) + output += fmt.Sprintf(" Compiler: 
%s\n", version.Compiler) } if version.Platform != "" { - fmt.Printf(" Platform: %s\n", version.Platform) + output += fmt.Sprintf(" Platform: %s\n", version.Platform) } - if version.KsonnetVersion != "" { - fmt.Printf(" Ksonnet Version: %s\n", version.KsonnetVersion) + if version.ExtraBuildInfo != "" { + output += fmt.Sprintf(" ExtraBuildInfo: %s\n", version.ExtraBuildInfo) } if version.KustomizeVersion != "" { - fmt.Printf(" Kustomize Version: %s\n", version.KustomizeVersion) + output += fmt.Sprintf(" Kustomize Version: %s\n", version.KustomizeVersion) } if version.HelmVersion != "" { - fmt.Printf(" Helm Version: %s\n", version.HelmVersion) + output += fmt.Sprintf(" Helm Version: %s\n", version.HelmVersion) } if version.KubectlVersion != "" { - fmt.Printf(" Kubectl Version: %s\n", version.KubectlVersion) + output += fmt.Sprintf(" Kubectl Version: %s\n", version.KubectlVersion) } if version.JsonnetVersion != "" { - fmt.Printf(" Jsonnet Version: %s\n", version.JsonnetVersion) + output += fmt.Sprintf(" Jsonnet Version: %s\n", version.JsonnetVersion) } + return output } diff --git a/cmd/argocd/commands/version_test.go b/cmd/argocd/commands/version_test.go new file mode 100644 index 0000000000000..3312e5ad958b6 --- /dev/null +++ b/cmd/argocd/commands/version_test.go @@ -0,0 +1,37 @@ +package commands + +import ( + "bytes" + "testing" + + argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient" + "github.com/argoproj/argo-cd/v2/pkg/apiclient/version" + "github.com/stretchr/testify/assert" +) + +func TestShortVersionClient(t *testing.T) { + buf := new(bytes.Buffer) + cmd := NewVersionCmd(&argocdclient.ClientOptions{}, nil) + cmd.SetOut(buf) + cmd.SetArgs([]string{"version", "--short", "--client"}) + err := cmd.Execute() + if err != nil { + t.Fatal("Failed to execute short version command") + } + output := buf.String() + assert.Equal(t, output, "argocd: v99.99.99+unknown\n") +} + +func TestShortVersion(t *testing.T) { + serverVersion := &version.VersionMessage{Version: 
"v99.99.99+unknown"} + buf := new(bytes.Buffer) + cmd := NewVersionCmd(&argocdclient.ClientOptions{}, serverVersion) + cmd.SetOut(buf) + cmd.SetArgs([]string{"argocd", "version", "--short"}) + err := cmd.Execute() + if err != nil { + t.Fatal("Failed to execute short version command") + } + output := buf.String() + assert.Equal(t, output, "argocd: v99.99.99+unknown\nargocd-server: v99.99.99+unknown\n") +} diff --git a/cmd/main.go b/cmd/main.go index 842715c6c8a80..d972863992bce 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -1,17 +1,20 @@ package main import ( - "fmt" "os" "path/filepath" "github.com/spf13/cobra" appcontroller "github.com/argoproj/argo-cd/v2/cmd/argocd-application-controller/commands" + applicationset "github.com/argoproj/argo-cd/v2/cmd/argocd-applicationset-controller/commands" + cmpserver "github.com/argoproj/argo-cd/v2/cmd/argocd-cmp-server/commands" dex "github.com/argoproj/argo-cd/v2/cmd/argocd-dex/commands" + gitaskpass "github.com/argoproj/argo-cd/v2/cmd/argocd-git-ask-pass/commands" + k8sauth "github.com/argoproj/argo-cd/v2/cmd/argocd-k8s-auth/commands" + notification "github.com/argoproj/argo-cd/v2/cmd/argocd-notification/commands" reposerver "github.com/argoproj/argo-cd/v2/cmd/argocd-repo-server/commands" apiserver "github.com/argoproj/argo-cd/v2/cmd/argocd-server/commands" - util "github.com/argoproj/argo-cd/v2/cmd/argocd-util/commands" cli "github.com/argoproj/argo-cd/v2/cmd/argocd/commands" ) @@ -29,36 +32,29 @@ func main() { switch binaryName { case "argocd", "argocd-linux-amd64", "argocd-darwin-amd64", "argocd-windows-amd64.exe": command = cli.NewCommand() - case "argocd-util", "argocd-util-linux-amd64", "argocd-util-darwin-amd64", "argocd-util-windows-amd64.exe": - command = util.NewCommand() case "argocd-server": command = apiserver.NewCommand() case "argocd-application-controller": command = appcontroller.NewCommand() case "argocd-repo-server": command = reposerver.NewCommand() + case "argocd-cmp-server": + command = 
cmpserver.NewCommand() case "argocd-dex": command = dex.NewCommand() + case "argocd-notifications": + command = notification.NewCommand() + case "argocd-git-ask-pass": + command = gitaskpass.NewCommand() + case "argocd-applicationset-controller": + command = applicationset.NewCommand() + case "argocd-k8s-auth": + command = k8sauth.NewCommand() default: - if len(os.Args[1:]) > 0 { - // trying to guess between argocd and argocd-util by matching sub command - for _, cmd := range []*cobra.Command{cli.NewCommand(), util.NewCommand()} { - if _, _, err := cmd.Find(os.Args[1:]); err == nil { - command = cmd - break - } - } - } - - if command == nil { - fmt.Printf("Unknown binary name '%s'.Use '%s' environment variable to specify required binary name "+ - "(possible values 'argocd' or 'argocd-util').\n", binaryName, binaryNameEnv) - os.Exit(1) - } + command = cli.NewCommand() } if err := command.Execute(); err != nil { - fmt.Println(err) os.Exit(1) } } diff --git a/cmd/util/app.go b/cmd/util/app.go index 5513845f457da..d64c5ed02e6cb 100644 --- a/cmd/util/app.go +++ b/cmd/util/app.go @@ -3,71 +3,82 @@ package util import ( "bufio" "fmt" - "io/ioutil" + "io" "net/url" "os" "strings" "time" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "github.com/argoproj/gitops-engine/pkg/utils/kube" + log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/pflag" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/pointer" - "github.com/argoproj/argo-cd/v2/common" "github.com/argoproj/argo-cd/v2/pkg/apis/application" argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/argoproj/argo-cd/v2/util/argo" "github.com/argoproj/argo-cd/v2/util/config" "github.com/argoproj/argo-cd/v2/util/errors" "github.com/argoproj/argo-cd/v2/util/text/label" ) type AppOptions struct { - repoURL string - appPath string - chart string - env string - revision string - revisionHistoryLimit int - destName string - destServer string - destNamespace 
string - Parameters []string - valuesFiles []string - values string - releaseName string - helmSets []string - helmSetStrings []string - helmSetFiles []string - helmVersion string - project string - syncPolicy string - syncOptions []string - autoPrune bool - selfHeal bool - allowEmpty bool - namePrefix string - nameSuffix string - directoryRecurse bool - configManagementPlugin string - jsonnetTlaStr []string - jsonnetTlaCode []string - jsonnetExtVarStr []string - jsonnetExtVarCode []string - jsonnetLibs []string - kustomizeImages []string - kustomizeVersion string - kustomizeCommonLabels []string - kustomizeCommonAnnotations []string - pluginEnvs []string - Validate bool - directoryExclude string - directoryInclude string - retryLimit int64 - retryBackoffDuration time.Duration - retryBackoffMaxDuration time.Duration - retryBackoffFactor int64 + repoURL string + appPath string + chart string + env string + revision string + revisionHistoryLimit int + destName string + destServer string + destNamespace string + Parameters []string + valuesFiles []string + ignoreMissingValueFiles bool + values string + releaseName string + helmSets []string + helmSetStrings []string + helmSetFiles []string + helmVersion string + helmPassCredentials bool + helmSkipCrds bool + project string + syncPolicy string + syncOptions []string + autoPrune bool + selfHeal bool + allowEmpty bool + namePrefix string + nameSuffix string + directoryRecurse bool + configManagementPlugin string + jsonnetTlaStr []string + jsonnetTlaCode []string + jsonnetExtVarStr []string + jsonnetExtVarCode []string + jsonnetLibs []string + kustomizeImages []string + kustomizeReplicas []string + kustomizeVersion string + kustomizeCommonLabels []string + kustomizeCommonAnnotations []string + kustomizeForceCommonLabels bool + kustomizeForceCommonAnnotations bool + kustomizeNamespace string + pluginEnvs []string + Validate bool + directoryExclude string + directoryInclude string + retryLimit int64 + retryBackoffDuration 
time.Duration + retryBackoffMaxDuration time.Duration + retryBackoffFactor int64 } func AddAppFlags(command *cobra.Command, opts *AppOptions) { @@ -76,18 +87,21 @@ func AddAppFlags(command *cobra.Command, opts *AppOptions) { command.Flags().StringVar(&opts.chart, "helm-chart", "", "Helm Chart name") command.Flags().StringVar(&opts.env, "env", "", "Application environment to monitor") command.Flags().StringVar(&opts.revision, "revision", "", "The tracking source branch, tag, commit or Helm chart version the application will sync to") - command.Flags().IntVar(&opts.revisionHistoryLimit, "revision-history-limit", common.RevisionHistoryLimit, "How many items to keep in revision history") + command.Flags().IntVar(&opts.revisionHistoryLimit, "revision-history-limit", argoappv1.RevisionHistoryLimit, "How many items to keep in revision history") command.Flags().StringVar(&opts.destServer, "dest-server", "", "K8s cluster URL (e.g. https://kubernetes.default.svc)") command.Flags().StringVar(&opts.destName, "dest-name", "", "K8s cluster Name (e.g. minikube)") - command.Flags().StringVar(&opts.destNamespace, "dest-namespace", "", "K8s target namespace (overrides the namespace specified in the ksonnet app.yaml)") + command.Flags().StringVar(&opts.destNamespace, "dest-namespace", "", "K8s target namespace") command.Flags().StringArrayVarP(&opts.Parameters, "parameter", "p", []string{}, "set a parameter override (e.g. 
-p guestbook=image=example/guestbook:latest)") command.Flags().StringArrayVar(&opts.valuesFiles, "values", []string{}, "Helm values file(s) to use") + command.Flags().BoolVar(&opts.ignoreMissingValueFiles, "ignore-missing-value-files", false, "Ignore locally missing valueFiles when setting helm template --values") command.Flags().StringVar(&opts.values, "values-literal-file", "", "Filename or URL to import as a literal Helm values block") command.Flags().StringVar(&opts.releaseName, "release-name", "", "Helm release-name") command.Flags().StringVar(&opts.helmVersion, "helm-version", "", "Helm version") + command.Flags().BoolVar(&opts.helmPassCredentials, "helm-pass-credentials", false, "Pass credentials to all domain") command.Flags().StringArrayVar(&opts.helmSets, "helm-set", []string{}, "Helm set values on the command line (can be repeated to set several values: --helm-set key1=val1 --helm-set key2=val2)") command.Flags().StringArrayVar(&opts.helmSetStrings, "helm-set-string", []string{}, "Helm set STRING values on the command line (can be repeated to set several values: --helm-set-string key1=val1 --helm-set-string key2=val2)") command.Flags().StringArrayVar(&opts.helmSetFiles, "helm-set-file", []string{}, "Helm set values from respective files specified via the command line (can be repeated to set several values: --helm-set-file key1=path1 --helm-set-file key2=path2)") + command.Flags().BoolVar(&opts.helmSkipCrds, "helm-skip-crds", false, "Skip helm crd installation step") command.Flags().StringVar(&opts.project, "project", "", "Application project name") command.Flags().StringVar(&opts.syncPolicy, "sync-policy", "", "Set the sync policy (one of: none, automated (aliases of automated: auto, automatic))") command.Flags().StringArrayVar(&opts.syncOptions, "sync-option", []string{}, "Add or remove a sync option, e.g add `Prune=false`. Remove using `!` prefix, e.g. 
`!Prune=false`") @@ -105,80 +119,95 @@ func AddAppFlags(command *cobra.Command, opts *AppOptions) { command.Flags().StringArrayVar(&opts.jsonnetExtVarCode, "jsonnet-ext-var-code", []string{}, "Jsonnet ext var") command.Flags().StringArrayVar(&opts.jsonnetLibs, "jsonnet-libs", []string{}, "Additional jsonnet libs (prefixed by repoRoot)") command.Flags().StringArrayVar(&opts.kustomizeImages, "kustomize-image", []string{}, "Kustomize images (e.g. --kustomize-image node:8.15.0 --kustomize-image mysql=mariadb,alpine@sha256:24a0c4b4a4c0eb97a1aabb8e29f18e917d05abfe1b7a7c07857230879ce7d3d)") + command.Flags().StringArrayVar(&opts.kustomizeReplicas, "kustomize-replica", []string{}, "Kustomize replicas (e.g. --kustomize-replica my-development=2 --kustomize-replica my-statefulset=4)") command.Flags().StringArrayVar(&opts.pluginEnvs, "plugin-env", []string{}, "Additional plugin envs") command.Flags().BoolVar(&opts.Validate, "validate", true, "Validation of repo and cluster") command.Flags().StringArrayVar(&opts.kustomizeCommonLabels, "kustomize-common-label", []string{}, "Set common labels in Kustomize") command.Flags().StringArrayVar(&opts.kustomizeCommonAnnotations, "kustomize-common-annotation", []string{}, "Set common labels in Kustomize") + command.Flags().BoolVar(&opts.kustomizeForceCommonLabels, "kustomize-force-common-label", false, "Force common labels in Kustomize") + command.Flags().BoolVar(&opts.kustomizeForceCommonAnnotations, "kustomize-force-common-annotation", false, "Force common annotations in Kustomize") + command.Flags().StringVar(&opts.kustomizeNamespace, "kustomize-namespace", "", "Kustomize namespace") command.Flags().StringVar(&opts.directoryExclude, "directory-exclude", "", "Set glob expression used to exclude files from application source path") command.Flags().StringVar(&opts.directoryInclude, "directory-include", "", "Set glob expression used to include files from application source path") command.Flags().Int64Var(&opts.retryLimit, 
"sync-retry-limit", 0, "Max number of allowed sync retries") - command.Flags().DurationVar(&opts.retryBackoffDuration, "sync-retry-backoff-duration", common.DefaultSyncRetryDuration, "Sync retry backoff base duration. Input needs to be a duration (e.g. 2m, 1h)") - command.Flags().DurationVar(&opts.retryBackoffMaxDuration, "sync-retry-backoff-max-duration", common.DefaultSyncRetryMaxDuration, "Max sync retry backoff duration. Input needs to be a duration (e.g. 2m, 1h)") - command.Flags().Int64Var(&opts.retryBackoffFactor, "sync-retry-backoff-factor", common.DefaultSyncRetryFactor, "Factor multiplies the base duration after each failed sync retry") + command.Flags().DurationVar(&opts.retryBackoffDuration, "sync-retry-backoff-duration", argoappv1.DefaultSyncRetryDuration, "Sync retry backoff base duration. Input needs to be a duration (e.g. 2m, 1h)") + command.Flags().DurationVar(&opts.retryBackoffMaxDuration, "sync-retry-backoff-max-duration", argoappv1.DefaultSyncRetryMaxDuration, "Max sync retry backoff duration. Input needs to be a duration (e.g. 
2m, 1h)") + command.Flags().Int64Var(&opts.retryBackoffFactor, "sync-retry-backoff-factor", argoappv1.DefaultSyncRetryFactor, "Factor multiplies the base duration after each failed sync retry") } func SetAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, appOpts *AppOptions) int { visited := 0 + if flags == nil { + return visited + } flags.Visit(func(f *pflag.Flag) { visited++ + source := spec.GetSourcePtr() + if source == nil { + source = &argoappv1.ApplicationSource{} + } switch f.Name { case "repo": - spec.Source.RepoURL = appOpts.repoURL + source.RepoURL = appOpts.repoURL case "path": - spec.Source.Path = appOpts.appPath + source.Path = appOpts.appPath case "helm-chart": - spec.Source.Chart = appOpts.chart - case "env": - setKsonnetOpt(&spec.Source, &appOpts.env) + source.Chart = appOpts.chart case "revision": - spec.Source.TargetRevision = appOpts.revision + source.TargetRevision = appOpts.revision case "revision-history-limit": i := int64(appOpts.revisionHistoryLimit) spec.RevisionHistoryLimit = &i case "values": - setHelmOpt(&spec.Source, helmOpts{valueFiles: appOpts.valuesFiles}) + setHelmOpt(source, helmOpts{valueFiles: appOpts.valuesFiles}) + case "ignore-missing-value-files": + setHelmOpt(source, helmOpts{ignoreMissingValueFiles: appOpts.ignoreMissingValueFiles}) case "values-literal-file": var data []byte // read uri parsedURL, err := url.ParseRequestURI(appOpts.values) if err != nil || !(parsedURL.Scheme == "http" || parsedURL.Scheme == "https") { - data, err = ioutil.ReadFile(appOpts.values) + data, err = os.ReadFile(appOpts.values) } else { data, err = config.ReadRemoteFile(appOpts.values) } errors.CheckError(err) - setHelmOpt(&spec.Source, helmOpts{values: string(data)}) + setHelmOpt(source, helmOpts{values: string(data)}) case "release-name": - setHelmOpt(&spec.Source, helmOpts{releaseName: appOpts.releaseName}) + setHelmOpt(source, helmOpts{releaseName: appOpts.releaseName}) case "helm-version": - setHelmOpt(&spec.Source, 
helmOpts{version: appOpts.helmVersion}) + setHelmOpt(source, helmOpts{version: appOpts.helmVersion}) + case "helm-pass-credentials": + setHelmOpt(source, helmOpts{passCredentials: appOpts.helmPassCredentials}) case "helm-set": - setHelmOpt(&spec.Source, helmOpts{helmSets: appOpts.helmSets}) + setHelmOpt(source, helmOpts{helmSets: appOpts.helmSets}) case "helm-set-string": - setHelmOpt(&spec.Source, helmOpts{helmSetStrings: appOpts.helmSetStrings}) + setHelmOpt(source, helmOpts{helmSetStrings: appOpts.helmSetStrings}) case "helm-set-file": - setHelmOpt(&spec.Source, helmOpts{helmSetFiles: appOpts.helmSetFiles}) + setHelmOpt(source, helmOpts{helmSetFiles: appOpts.helmSetFiles}) + case "helm-skip-crds": + setHelmOpt(source, helmOpts{skipCrds: appOpts.helmSkipCrds}) case "directory-recurse": - if spec.Source.Directory != nil { - spec.Source.Directory.Recurse = appOpts.directoryRecurse + if source.Directory != nil { + source.Directory.Recurse = appOpts.directoryRecurse } else { - spec.Source.Directory = &argoappv1.ApplicationSourceDirectory{Recurse: appOpts.directoryRecurse} + source.Directory = &argoappv1.ApplicationSourceDirectory{Recurse: appOpts.directoryRecurse} } case "directory-exclude": - if spec.Source.Directory != nil { - spec.Source.Directory.Exclude = appOpts.directoryExclude + if source.Directory != nil { + source.Directory.Exclude = appOpts.directoryExclude } else { - spec.Source.Directory = &argoappv1.ApplicationSourceDirectory{Exclude: appOpts.directoryExclude} + source.Directory = &argoappv1.ApplicationSourceDirectory{Exclude: appOpts.directoryExclude} } case "directory-include": - if spec.Source.Directory != nil { - spec.Source.Directory.Include = appOpts.directoryInclude + if source.Directory != nil { + source.Directory.Include = appOpts.directoryInclude } else { - spec.Source.Directory = &argoappv1.ApplicationSourceDirectory{Include: appOpts.directoryInclude} + source.Directory = &argoappv1.ApplicationSourceDirectory{Include: 
appOpts.directoryInclude} } case "config-management-plugin": - spec.Source.Plugin = &argoappv1.ApplicationSourcePlugin{Name: appOpts.configManagementPlugin} + source.Plugin = &argoappv1.ApplicationSourcePlugin{Name: appOpts.configManagementPlugin} case "dest-name": spec.Destination.Name = appOpts.destName case "dest-server": @@ -188,33 +217,41 @@ func SetAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, ap case "project": spec.Project = appOpts.project case "nameprefix": - setKustomizeOpt(&spec.Source, kustomizeOpts{namePrefix: appOpts.namePrefix}) + setKustomizeOpt(source, kustomizeOpts{namePrefix: appOpts.namePrefix}) case "namesuffix": - setKustomizeOpt(&spec.Source, kustomizeOpts{nameSuffix: appOpts.nameSuffix}) + setKustomizeOpt(source, kustomizeOpts{nameSuffix: appOpts.nameSuffix}) case "kustomize-image": - setKustomizeOpt(&spec.Source, kustomizeOpts{images: appOpts.kustomizeImages}) + setKustomizeOpt(source, kustomizeOpts{images: appOpts.kustomizeImages}) + case "kustomize-replica": + setKustomizeOpt(source, kustomizeOpts{replicas: appOpts.kustomizeReplicas}) case "kustomize-version": - setKustomizeOpt(&spec.Source, kustomizeOpts{version: appOpts.kustomizeVersion}) + setKustomizeOpt(source, kustomizeOpts{version: appOpts.kustomizeVersion}) + case "kustomize-namespace": + setKustomizeOpt(source, kustomizeOpts{namespace: appOpts.kustomizeNamespace}) case "kustomize-common-label": parsedLabels, err := label.Parse(appOpts.kustomizeCommonLabels) errors.CheckError(err) - setKustomizeOpt(&spec.Source, kustomizeOpts{commonLabels: parsedLabels}) + setKustomizeOpt(source, kustomizeOpts{commonLabels: parsedLabels}) case "kustomize-common-annotation": parsedAnnotations, err := label.Parse(appOpts.kustomizeCommonAnnotations) errors.CheckError(err) - setKustomizeOpt(&spec.Source, kustomizeOpts{commonAnnotations: parsedAnnotations}) + setKustomizeOpt(source, kustomizeOpts{commonAnnotations: parsedAnnotations}) + case "kustomize-force-common-label": + 
setKustomizeOpt(source, kustomizeOpts{forceCommonLabels: appOpts.kustomizeForceCommonLabels}) + case "kustomize-force-common-annotation": + setKustomizeOpt(source, kustomizeOpts{forceCommonAnnotations: appOpts.kustomizeForceCommonAnnotations}) case "jsonnet-tla-str": - setJsonnetOpt(&spec.Source, appOpts.jsonnetTlaStr, false) + setJsonnetOpt(source, appOpts.jsonnetTlaStr, false) case "jsonnet-tla-code": - setJsonnetOpt(&spec.Source, appOpts.jsonnetTlaCode, true) + setJsonnetOpt(source, appOpts.jsonnetTlaCode, true) case "jsonnet-ext-var-str": - setJsonnetOptExtVar(&spec.Source, appOpts.jsonnetExtVarStr, false) + setJsonnetOptExtVar(source, appOpts.jsonnetExtVarStr, false) case "jsonnet-ext-var-code": - setJsonnetOptExtVar(&spec.Source, appOpts.jsonnetExtVarCode, true) + setJsonnetOptExtVar(source, appOpts.jsonnetExtVarCode, true) case "jsonnet-libs": - setJsonnetOptLibs(&spec.Source, appOpts.jsonnetLibs) + setJsonnetOptLibs(source, appOpts.jsonnetLibs) case "plugin-env": - setPluginOptEnvs(&spec.Source, appOpts.pluginEnvs) + setPluginOptEnvs(source, appOpts.pluginEnvs) case "sync-policy": switch appOpts.syncPolicy { case "none": @@ -271,6 +308,7 @@ func SetAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, ap log.Fatalf("Invalid sync-retry-limit [%d]", appOpts.retryLimit) } } + spec.Source = source }) if flags.Changed("auto-prune") { if spec.SyncPolicy == nil || spec.SyncPolicy.Automated == nil { @@ -294,25 +332,17 @@ func SetAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, ap return visited } -func setKsonnetOpt(src *argoappv1.ApplicationSource, env *string) { - if src.Ksonnet == nil { - src.Ksonnet = &argoappv1.ApplicationSourceKsonnet{} - } - if env != nil { - src.Ksonnet.Environment = *env - } - if src.Ksonnet.IsZero() { - src.Ksonnet = nil - } -} - type kustomizeOpts struct { - namePrefix string - nameSuffix string - images []string - version string - commonLabels map[string]string - commonAnnotations map[string]string + 
namePrefix string + nameSuffix string + images []string + replicas []string + version string + commonLabels map[string]string + commonAnnotations map[string]string + forceCommonLabels bool + forceCommonAnnotations bool + namespace string } func setKustomizeOpt(src *argoappv1.ApplicationSource, opts kustomizeOpts) { @@ -328,15 +358,32 @@ func setKustomizeOpt(src *argoappv1.ApplicationSource, opts kustomizeOpts) { if opts.nameSuffix != "" { src.Kustomize.NameSuffix = opts.nameSuffix } + if opts.namespace != "" { + src.Kustomize.Namespace = opts.namespace + } if opts.commonLabels != nil { src.Kustomize.CommonLabels = opts.commonLabels } if opts.commonAnnotations != nil { src.Kustomize.CommonAnnotations = opts.commonAnnotations } + if opts.forceCommonLabels { + src.Kustomize.ForceCommonLabels = opts.forceCommonLabels + } + if opts.forceCommonAnnotations { + src.Kustomize.ForceCommonAnnotations = opts.forceCommonAnnotations + } for _, image := range opts.images { src.Kustomize.MergeImage(argoappv1.KustomizeImage(image)) } + for _, replica := range opts.replicas { + r, err := argoappv1.NewKustomizeReplica(replica) + if err != nil { + log.Fatal(err) + } + src.Kustomize.MergeReplica(*r) + } + if src.Kustomize.IsZero() { src.Kustomize = nil } @@ -357,13 +404,16 @@ func setPluginOptEnvs(src *argoappv1.ApplicationSource, envs []string) { } type helmOpts struct { - valueFiles []string - values string - releaseName string - version string - helmSets []string - helmSetStrings []string - helmSetFiles []string + valueFiles []string + ignoreMissingValueFiles bool + values string + releaseName string + version string + helmSets []string + helmSetStrings []string + helmSetFiles []string + passCredentials bool + skipCrds bool } func setHelmOpt(src *argoappv1.ApplicationSource, opts helmOpts) { @@ -373,8 +423,14 @@ func setHelmOpt(src *argoappv1.ApplicationSource, opts helmOpts) { if len(opts.valueFiles) > 0 { src.Helm.ValueFiles = opts.valueFiles } + if opts.ignoreMissingValueFiles { 
+ src.Helm.IgnoreMissingValueFiles = opts.ignoreMissingValueFiles + } if len(opts.values) > 0 { - src.Helm.Values = opts.values + err := src.Helm.SetValuesString(opts.values) + if err != nil { + log.Fatal(err) + } } if opts.releaseName != "" { src.Helm.ReleaseName = opts.releaseName @@ -382,6 +438,12 @@ func setHelmOpt(src *argoappv1.ApplicationSource, opts helmOpts) { if opts.version != "" { src.Helm.Version = opts.version } + if opts.passCredentials { + src.Helm.PassCredentials = opts.passCredentials + } + if opts.skipCrds { + src.Helm.SkipCrds = opts.skipCrds + } for _, text := range opts.helmSets { p, err := argoappv1.NewHelmParameter(text, false) if err != nil { @@ -434,61 +496,28 @@ func setJsonnetOptLibs(src *argoappv1.ApplicationSource, libs []string) { } // SetParameterOverrides updates an existing or appends a new parameter override in the application -// If the app is a ksonnet app, then parameters are expected to be in the form: component=param=value -// Otherwise, the app is assumed to be a helm app and is expected to be in the form: +// The app is assumed to be a helm app and is expected to be in the form: // param=value func SetParameterOverrides(app *argoappv1.Application, parameters []string) { if len(parameters) == 0 { return } + source := app.Spec.GetSource() var sourceType argoappv1.ApplicationSourceType - if st, _ := app.Spec.Source.ExplicitType(); st != nil { + if st, _ := source.ExplicitType(); st != nil { sourceType = *st } else if app.Status.SourceType != "" { sourceType = app.Status.SourceType } else { - // HACK: we don't know the source type, so make an educated guess based on the supplied - // parameter string. This code handles the corner case where app doesn't exist yet, and the - // command is something like: `argocd app create MYAPP -p foo=bar` - // This logic is not foolproof, but when ksonnet is deprecated, this will no longer matter - // since helm will remain as the only source type which has parameters. 
- if len(strings.SplitN(parameters[0], "=", 3)) == 3 { - sourceType = argoappv1.ApplicationSourceTypeKsonnet - } else if len(strings.SplitN(parameters[0], "=", 2)) == 2 { + if len(strings.SplitN(parameters[0], "=", 2)) == 2 { sourceType = argoappv1.ApplicationSourceTypeHelm } } switch sourceType { - case argoappv1.ApplicationSourceTypeKsonnet: - if app.Spec.Source.Ksonnet == nil { - app.Spec.Source.Ksonnet = &argoappv1.ApplicationSourceKsonnet{} - } - for _, paramStr := range parameters { - parts := strings.SplitN(paramStr, "=", 3) - if len(parts) != 3 { - log.Fatalf("Expected ksonnet parameter of the form: component=param=value. Received: %s", paramStr) - } - newParam := argoappv1.KsonnetParameter{ - Component: parts[0], - Name: parts[1], - Value: parts[2], - } - found := false - for i, cp := range app.Spec.Source.Ksonnet.Parameters { - if cp.Component == newParam.Component && cp.Name == newParam.Name { - found = true - app.Spec.Source.Ksonnet.Parameters[i] = newParam - break - } - } - if !found { - app.Spec.Source.Ksonnet.Parameters = append(app.Spec.Source.Ksonnet.Parameters, newParam) - } - } case argoappv1.ApplicationSourceTypeHelm: - if app.Spec.Source.Helm == nil { - app.Spec.Source.Helm = &argoappv1.ApplicationSourceHelm{} + if source.Helm == nil { + source.Helm = &argoappv1.ApplicationSourceHelm{} } for _, p := range parameters { newParam, err := argoappv1.NewHelmParameter(p, false) @@ -496,46 +525,112 @@ func SetParameterOverrides(app *argoappv1.Application, parameters []string) { log.Error(err) continue } - app.Spec.Source.Helm.AddParameter(*newParam) + source.Helm.AddParameter(*newParam) } default: - log.Fatalf("Parameters can only be set against Ksonnet or Helm applications") + log.Fatalf("Parameters can only be set against Helm applications") + } +} + +func readApps(yml []byte, apps *[]*argoappv1.Application) error { + yamls, _ := kube.SplitYAMLToString(yml) + + var err error + + for _, yml := range yamls { + var app argoappv1.Application + err = 
config.Unmarshal([]byte(yml), &app) + *apps = append(*apps, &app) + if err != nil { + return err + } } + + return err } -func readAppFromStdin(app *argoappv1.Application) error { +func readAppsFromStdin(apps *[]*argoappv1.Application) error { reader := bufio.NewReader(os.Stdin) - err := config.UnmarshalReader(reader, &app) + data, err := io.ReadAll(reader) + if err != nil { + return err + } + err = readApps(data, apps) if err != nil { return fmt.Errorf("unable to read manifest from stdin: %v", err) } return nil } -func readAppFromURI(fileURL string, app *argoappv1.Application) error { - parsedURL, err := url.ParseRequestURI(fileURL) - if err != nil || !(parsedURL.Scheme == "http" || parsedURL.Scheme == "https") { - err = config.UnmarshalLocalFile(fileURL, &app) - } else { - err = config.UnmarshalRemoteFile(fileURL, &app) +func readAppsFromURI(fileURL string, apps *[]*argoappv1.Application) error { + + readFilePayload := func() ([]byte, error) { + parsedURL, err := url.ParseRequestURI(fileURL) + if err != nil || !(parsedURL.Scheme == "http" || parsedURL.Scheme == "https") { + return os.ReadFile(fileURL) + } + return config.ReadRemoteFile(fileURL) } - return err + + yml, err := readFilePayload() + if err != nil { + return err + } + + return readApps(yml, apps) } -func ConstructApp(fileURL, appName string, labels, args []string, appOpts AppOptions, flags *pflag.FlagSet) (*argoappv1.Application, error) { - var app argoappv1.Application - if fileURL == "-" { - // read stdin - err := readAppFromStdin(&app) - if err != nil { - return nil, err - } - } else if fileURL != "" { - // read uri - err := readAppFromURI(fileURL, &app) - if err != nil { - return nil, err +func constructAppsFromStdin() ([]*argoappv1.Application, error) { + apps := make([]*argoappv1.Application, 0) + // read stdin + err := readAppsFromStdin(&apps) + if err != nil { + return nil, err + } + return apps, nil +} + +func constructAppsBaseOnName(appName string, labels, annotations, args []string, appOpts 
AppOptions, flags *pflag.FlagSet) ([]*argoappv1.Application, error) { + var app *argoappv1.Application + + // read arguments + if len(args) == 1 { + if appName != "" && appName != args[0] { + return nil, fmt.Errorf("--name argument '%s' does not match app name %s", appName, args[0]) } + appName = args[0] + } + appName, appNs := argo.ParseFromQualifiedName(appName, "") + app = &argoappv1.Application{ + TypeMeta: v1.TypeMeta{ + Kind: application.ApplicationKind, + APIVersion: application.Group + "/v1alpha1", + }, + ObjectMeta: v1.ObjectMeta{ + Name: appName, + Namespace: appNs, + }, + Spec: argoappv1.ApplicationSpec{ + Source: &argoappv1.ApplicationSource{}, + }, + } + SetAppSpecOptions(flags, &app.Spec, &appOpts) + SetParameterOverrides(app, appOpts.Parameters) + mergeLabels(app, labels) + setAnnotations(app, annotations) + return []*argoappv1.Application{ + app, + }, nil +} + +func constructAppsFromFileUrl(fileURL, appName string, labels, annotations, args []string, appOpts AppOptions, flags *pflag.FlagSet) ([]*argoappv1.Application, error) { + apps := make([]*argoappv1.Application, 0) + // read uri + err := readAppsFromURI(fileURL, &apps) + if err != nil { + return nil, err + } + for _, app := range apps { if len(args) == 1 && args[0] != app.Name { return nil, fmt.Errorf("app name '%s' does not match app spec metadata.name '%s'", args[0], app.Name) } @@ -546,30 +641,20 @@ func ConstructApp(fileURL, appName string, labels, args []string, appOpts AppOpt return nil, fmt.Errorf("app.Name is empty. 
--name argument can be used to provide app.Name") } SetAppSpecOptions(flags, &app.Spec, &appOpts) - SetParameterOverrides(&app, appOpts.Parameters) - mergeLabels(&app, labels) - } else { - // read arguments - if len(args) == 1 { - if appName != "" && appName != args[0] { - return nil, fmt.Errorf("--name argument '%s' does not match app name %s", appName, args[0]) - } - appName = args[0] - } - app = argoappv1.Application{ - TypeMeta: v1.TypeMeta{ - Kind: application.ApplicationKind, - APIVersion: application.Group + "/v1alpha1", - }, - ObjectMeta: v1.ObjectMeta{ - Name: appName, - }, - } - SetAppSpecOptions(flags, &app.Spec, &appOpts) - SetParameterOverrides(&app, appOpts.Parameters) - mergeLabels(&app, labels) + SetParameterOverrides(app, appOpts.Parameters) + mergeLabels(app, labels) + setAnnotations(app, annotations) + } + return apps, nil +} + +func ConstructApps(fileURL, appName string, labels, annotations, args []string, appOpts AppOptions, flags *pflag.FlagSet) ([]*argoappv1.Application, error) { + if fileURL == "-" { + return constructAppsFromStdin() + } else if fileURL != "" { + return constructAppsFromFileUrl(fileURL, appName, labels, annotations, args, appOpts, flags) } - return &app, nil + return constructAppsBaseOnName(appName, labels, annotations, args, appOpts, flags) } func mergeLabels(app *argoappv1.Application, labels []string) { @@ -588,3 +673,64 @@ func mergeLabels(app *argoappv1.Application, labels []string) { app.SetLabels(mergedLabels) } + +func setAnnotations(app *argoappv1.Application, annotations []string) { + if len(annotations) > 0 && app.Annotations == nil { + app.Annotations = map[string]string{} + } + for _, a := range annotations { + annotation := strings.SplitN(a, "=", 2) + if len(annotation) == 2 { + app.Annotations[annotation[0]] = annotation[1] + } else { + app.Annotations[annotation[0]] = "" + } + } +} + +// LiveObjects deserializes the list of live states into unstructured objects +func LiveObjects(resources 
[]*argoappv1.ResourceDiff) ([]*unstructured.Unstructured, error) { + objs := make([]*unstructured.Unstructured, len(resources)) + for i, resState := range resources { + obj, err := resState.LiveObject() + if err != nil { + return nil, err + } + objs[i] = obj + } + return objs, nil +} + +func FilterResources(groupChanged bool, resources []*argoappv1.ResourceDiff, group, kind, namespace, resourceName string, all bool) ([]*unstructured.Unstructured, error) { + liveObjs, err := LiveObjects(resources) + errors.CheckError(err) + filteredObjects := make([]*unstructured.Unstructured, 0) + for i := range liveObjs { + obj := liveObjs[i] + if obj == nil { + continue + } + gvk := obj.GroupVersionKind() + if groupChanged && group != gvk.Group { + continue + } + if namespace != "" && namespace != obj.GetNamespace() { + continue + } + if resourceName != "" && resourceName != obj.GetName() { + continue + } + if kind != "" && kind != gvk.Kind { + continue + } + deepCopy := obj.DeepCopy() + filteredObjects = append(filteredObjects, deepCopy) + } + if len(filteredObjects) == 0 { + return nil, fmt.Errorf("No matching resource found") + } + if len(filteredObjects) > 1 && !all { + return nil, fmt.Errorf("Multiple resources match inputs. 
Use the --all flag to patch multiple resources") + } + return filteredObjects, nil +} diff --git a/cmd/util/app_test.go b/cmd/util/app_test.go index 211e836d2c3e5..2f49a3cc4c8c4 100644 --- a/cmd/util/app_test.go +++ b/cmd/util/app_test.go @@ -1,12 +1,16 @@ package util import ( + "os" "testing" + log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/stretchr/testify/assert" "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + + "k8s.io/apimachinery/pkg/util/intstr" ) func Test_setHelmOpt(t *testing.T) { @@ -20,6 +24,11 @@ func Test_setHelmOpt(t *testing.T) { setHelmOpt(&src, helmOpts{valueFiles: []string{"foo"}}) assert.Equal(t, []string{"foo"}, src.Helm.ValueFiles) }) + t.Run("IgnoreMissingValueFiles", func(t *testing.T) { + src := v1alpha1.ApplicationSource{} + setHelmOpt(&src, helmOpts{ignoreMissingValueFiles: true}) + assert.Equal(t, true, src.Helm.IgnoreMissingValueFiles) + }) t.Run("ReleaseName", func(t *testing.T) { src := v1alpha1.ApplicationSource{} setHelmOpt(&src, helmOpts{releaseName: "foo"}) @@ -45,6 +54,16 @@ func Test_setHelmOpt(t *testing.T) { setHelmOpt(&src, helmOpts{version: "v3"}) assert.Equal(t, "v3", src.Helm.Version) }) + t.Run("HelmPassCredentials", func(t *testing.T) { + src := v1alpha1.ApplicationSource{} + setHelmOpt(&src, helmOpts{passCredentials: true}) + assert.Equal(t, true, src.Helm.PassCredentials) + }) + t.Run("HelmSkipCrds", func(t *testing.T) { + src := v1alpha1.ApplicationSource{} + setHelmOpt(&src, helmOpts{skipCrds: true}) + assert.Equal(t, true, src.Helm.SkipCrds) + }) } func Test_setKustomizeOpt(t *testing.T) { @@ -68,11 +87,32 @@ func Test_setKustomizeOpt(t *testing.T) { setKustomizeOpt(&src, kustomizeOpts{images: []string{"org/image:v1", "org/image:v2"}}) assert.Equal(t, &v1alpha1.ApplicationSourceKustomize{Images: v1alpha1.KustomizeImages{v1alpha1.KustomizeImage("org/image:v2")}}, src.Kustomize) }) + t.Run("Replicas", func(t *testing.T) { + src := v1alpha1.ApplicationSource{} + 
testReplicasString := []string{"my-deployment=2", "my-statefulset=4"} + testReplicas := v1alpha1.KustomizeReplicas{ + { + Name: "my-deployment", + Count: intstr.FromInt(2), + }, + { + Name: "my-statefulset", + Count: intstr.FromInt(4), + }, + } + setKustomizeOpt(&src, kustomizeOpts{replicas: testReplicasString}) + assert.Equal(t, &v1alpha1.ApplicationSourceKustomize{Replicas: testReplicas}, src.Kustomize) + }) t.Run("Version", func(t *testing.T) { src := v1alpha1.ApplicationSource{} setKustomizeOpt(&src, kustomizeOpts{version: "v0.1"}) assert.Equal(t, &v1alpha1.ApplicationSourceKustomize{Version: "v0.1"}, src.Kustomize) }) + t.Run("Namespace", func(t *testing.T) { + src := v1alpha1.ApplicationSource{} + setKustomizeOpt(&src, kustomizeOpts{namespace: "custom-namespace"}) + assert.Equal(t, &v1alpha1.ApplicationSourceKustomize{Namespace: "custom-namespace"}, src.Kustomize) + }) t.Run("Common labels", func(t *testing.T) { src := v1alpha1.ApplicationSource{} setKustomizeOpt(&src, kustomizeOpts{commonLabels: map[string]string{"foo1": "bar1", "foo2": "bar2"}}) @@ -131,7 +171,9 @@ func (f *appOptionsFixture) SetFlag(key, value string) error { func newAppOptionsFixture() *appOptionsFixture { fixture := &appOptionsFixture{ - spec: &v1alpha1.ApplicationSpec{}, + spec: &v1alpha1.ApplicationSpec{ + Source: &v1alpha1.ApplicationSource{}, + }, command: &cobra.Command{}, options: &AppOptions{}, } @@ -171,4 +213,211 @@ func Test_setAppSpecOptions(t *testing.T) { assert.NoError(t, f.SetFlag("sync-retry-limit", "0")) assert.Nil(t, f.spec.SyncPolicy.Retry) }) + t.Run("Kustomize", func(t *testing.T) { + assert.NoError(t, f.SetFlag("kustomize-replica", "my-deployment=2")) + assert.NoError(t, f.SetFlag("kustomize-replica", "my-statefulset=4")) + assert.Equal(t, f.spec.Source.Kustomize.Replicas, v1alpha1.KustomizeReplicas{{Name: "my-deployment", Count: intstr.FromInt(2)}, {Name: "my-statefulset", Count: intstr.FromInt(4)}}) + }) +} + +func Test_setAnnotations(t *testing.T) { + 
t.Run("Annotations", func(t *testing.T) { + app := v1alpha1.Application{} + setAnnotations(&app, []string{"hoge=foo", "huga=bar"}) + assert.Equal(t, map[string]string{"hoge": "foo", "huga": "bar"}, app.Annotations) + }) + t.Run("Annotations value contains equal", func(t *testing.T) { + app := v1alpha1.Application{} + setAnnotations(&app, []string{"hoge=foo=bar"}) + assert.Equal(t, map[string]string{"hoge": "foo=bar"}, app.Annotations) + }) + t.Run("Annotations empty value", func(t *testing.T) { + app := v1alpha1.Application{} + setAnnotations(&app, []string{"hoge"}) + assert.Equal(t, map[string]string{"hoge": ""}, app.Annotations) + }) +} + +const appsYaml = `--- +# Source: apps/templates/helm.yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: sth1 + namespace: argocd + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + destination: + namespace: sth + server: 'https://kubernetes.default.svc' + project: default + source: + repoURL: 'https://github.com/pasha-codefresh/argocd-example-apps' + targetRevision: HEAD + path: apps + helm: + valueFiles: + - values.yaml +--- +# Source: apps/templates/helm.yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: sth2 + namespace: argocd + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + destination: + namespace: sth + server: 'https://kubernetes.default.svc' + project: default + source: + repoURL: 'https://github.com/pasha-codefresh/argocd-example-apps' + targetRevision: HEAD + path: apps + helm: + valueFiles: + - values.yaml` + +func TestReadAppsFromURI(t *testing.T) { + file, err := os.CreateTemp(os.TempDir(), "") + if err != nil { + panic(err) + } + defer func() { + _ = os.Remove(file.Name()) + }() + + _, _ = file.WriteString(appsYaml) + _ = file.Sync() + + apps := make([]*v1alpha1.Application, 0) + err = readAppsFromURI(file.Name(), &apps) + assert.NoError(t, err) + assert.Equal(t, 2, len(apps)) + + assert.Equal(t, "sth1", apps[0].Name) + 
assert.Equal(t, "sth2", apps[1].Name) + +} + +func TestConstructAppFromStdin(t *testing.T) { + file, err := os.CreateTemp(os.TempDir(), "") + if err != nil { + panic(err) + } + defer func() { + _ = os.Remove(file.Name()) + }() + + _, _ = file.WriteString(appsYaml) + _ = file.Sync() + + if _, err := file.Seek(0, 0); err != nil { + log.Fatal(err) + } + + os.Stdin = file + + apps, err := ConstructApps("-", "test", []string{}, []string{}, []string{}, AppOptions{}, nil) + + if err := file.Close(); err != nil { + log.Fatal(err) + } + assert.NoError(t, err) + assert.Equal(t, 2, len(apps)) + assert.Equal(t, "sth1", apps[0].Name) + assert.Equal(t, "sth2", apps[1].Name) + +} + +func TestConstructBasedOnName(t *testing.T) { + apps, err := ConstructApps("", "test", []string{}, []string{}, []string{}, AppOptions{}, nil) + + assert.NoError(t, err) + assert.Equal(t, 1, len(apps)) + assert.Equal(t, "test", apps[0].Name) +} + +func TestFilterResources(t *testing.T) { + + t.Run("Filter by ns", func(t *testing.T) { + + resources := []*v1alpha1.ResourceDiff{ + { + LiveState: "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"name\":\"test-helm-guestbook\",\"namespace\":\"argocd\"},\"spec\":{\"selector\":{\"app\":\"helm-guestbook\",\"release\":\"test\"},\"sessionAffinity\":\"None\",\"type\":\"ClusterIP\"},\"status\":{\"loadBalancer\":{}}}", + }, + { + LiveState: "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"name\":\"test-helm-guestbook\",\"namespace\":\"ns\"},\"spec\":{\"selector\":{\"app\":\"helm-guestbook\",\"release\":\"test\"},\"sessionAffinity\":\"None\",\"type\":\"ClusterIP\"},\"status\":{\"loadBalancer\":{}}}", + }, + } + + filteredResources, err := FilterResources(false, resources, "g", "Service", "ns", "test-helm-guestbook", true) + assert.NoError(t, err) + assert.Len(t, filteredResources, 1) + }) + + t.Run("Filter by kind", func(t *testing.T) { + + resources := []*v1alpha1.ResourceDiff{ + { + LiveState: 
"{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"name\":\"test-helm-guestbook\",\"namespace\":\"argocd\"},\"spec\":{\"selector\":{\"app\":\"helm-guestbook\",\"release\":\"test\"},\"sessionAffinity\":\"None\",\"type\":\"ClusterIP\"},\"status\":{\"loadBalancer\":{}}}", + }, + { + LiveState: "{\"apiVersion\":\"v1\",\"kind\":\"Deployment\",\"metadata\":{\"name\":\"test-helm-guestbook\",\"namespace\":\"argocd\"},\"spec\":{\"selector\":{\"app\":\"helm-guestbook\",\"release\":\"test\"},\"sessionAffinity\":\"None\",\"type\":\"ClusterIP\"},\"status\":{\"loadBalancer\":{}}}", + }, + } + + filteredResources, err := FilterResources(false, resources, "g", "Deployment", "argocd", "test-helm-guestbook", true) + assert.NoError(t, err) + assert.Len(t, filteredResources, 1) + }) + + t.Run("Filter by name", func(t *testing.T) { + + resources := []*v1alpha1.ResourceDiff{ + { + LiveState: "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"name\":\"test-helm-guestbook\",\"namespace\":\"argocd\"},\"spec\":{\"selector\":{\"app\":\"helm-guestbook\",\"release\":\"test\"},\"sessionAffinity\":\"None\",\"type\":\"ClusterIP\"},\"status\":{\"loadBalancer\":{}}}", + }, + { + LiveState: "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"name\":\"test-helm\",\"namespace\":\"argocd\"},\"spec\":{\"selector\":{\"app\":\"helm-guestbook\",\"release\":\"test\"},\"sessionAffinity\":\"None\",\"type\":\"ClusterIP\"},\"status\":{\"loadBalancer\":{}}}", + }, + } + + filteredResources, err := FilterResources(false, resources, "g", "Service", "argocd", "test-helm", true) + assert.NoError(t, err) + assert.Len(t, filteredResources, 1) + }) + + t.Run("Filter no result", func(t *testing.T) { + resources := []*v1alpha1.ResourceDiff{ + { + LiveState: 
"{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"name\":\"test-helm-guestbook\",\"namespace\":\"argocd\"},\"spec\":{\"selector\":{\"app\":\"helm-guestbook\",\"release\":\"test\"},\"sessionAffinity\":\"None\",\"type\":\"ClusterIP\"},\"status\":{\"loadBalancer\":{}}}", + }, + { + LiveState: "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"name\":\"test-helm\",\"namespace\":\"argocd\"},\"spec\":{\"selector\":{\"app\":\"helm-guestbook\",\"release\":\"test\"},\"sessionAffinity\":\"None\",\"type\":\"ClusterIP\"},\"status\":{\"loadBalancer\":{}}}", + }, + } + + filteredResources, err := FilterResources(false, resources, "g", "Service", "argocd-unknown", "test-helm", true) + assert.ErrorContains(t, err, "No matching resource found") + assert.Nil(t, filteredResources) + }) + + t.Run("Filter multiple results", func(t *testing.T) { + resources := []*v1alpha1.ResourceDiff{ + { + LiveState: "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"name\":\"test-helm\",\"namespace\":\"argocd\"},\"spec\":{\"selector\":{\"app\":\"helm-guestbook\",\"release\":\"test\"},\"sessionAffinity\":\"None\",\"type\":\"ClusterIP\"},\"status\":{\"loadBalancer\":{}}}", + }, + { + LiveState: "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"name\":\"test-helm\",\"namespace\":\"argocd\"},\"spec\":{\"selector\":{\"app\":\"helm-guestbook\",\"release\":\"test\"},\"sessionAffinity\":\"None\",\"type\":\"ClusterIP\"},\"status\":{\"loadBalancer\":{}}}", + }, + } + + filteredResources, err := FilterResources(false, resources, "g", "Service", "argocd", "test-helm", false) + assert.ErrorContains(t, err, "Use the --all flag") + assert.Nil(t, filteredResources) + }) } diff --git a/cmd/util/applicationset.go b/cmd/util/applicationset.go new file mode 100644 index 0000000000000..2b096aa6aa036 --- /dev/null +++ b/cmd/util/applicationset.go @@ -0,0 +1,66 @@ +package util + +import ( + "fmt" + "net/url" + "os" + + argoprojiov1alpha1 
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/argoproj/argo-cd/v2/util/config" + "github.com/argoproj/gitops-engine/pkg/utils/kube" +) + +func ConstructApplicationSet(fileURL string) ([]*argoprojiov1alpha1.ApplicationSet, error) { + if fileURL != "" { + return constructAppsetFromFileUrl(fileURL) + } + return nil, nil +} + +func constructAppsetFromFileUrl(fileURL string) ([]*argoprojiov1alpha1.ApplicationSet, error) { + appset := make([]*argoprojiov1alpha1.ApplicationSet, 0) + // read uri + err := readAppsetFromURI(fileURL, &appset) + if err != nil { + return nil, fmt.Errorf("error reading applicationset from file %s: %s", fileURL, err) + } + + return appset, nil +} + +func readAppsetFromURI(fileURL string, appset *[]*argoprojiov1alpha1.ApplicationSet) error { + + readFilePayload := func() ([]byte, error) { + parsedURL, err := url.ParseRequestURI(fileURL) + if err != nil || !(parsedURL.Scheme == "http" || parsedURL.Scheme == "https") { + return os.ReadFile(fileURL) + } + return config.ReadRemoteFile(fileURL) + } + + yml, err := readFilePayload() + if err != nil { + return fmt.Errorf("error reading file payload: %w", err) + } + + return readAppset(yml, appset) +} + +func readAppset(yml []byte, appsets *[]*argoprojiov1alpha1.ApplicationSet) error { + yamls, err := kube.SplitYAMLToString(yml) + if err != nil { + return fmt.Errorf("error splitting YAML to string: %w", err) + } + + for _, yml := range yamls { + var appset argoprojiov1alpha1.ApplicationSet + err = config.Unmarshal([]byte(yml), &appset) + if err != nil { + return fmt.Errorf("error unmarshalling appset: %w", err) + } + *appsets = append(*appsets, &appset) + + } + // we reach here if there is no error found while reading the Application Set + return nil +} diff --git a/cmd/util/applicationset_test.go b/cmd/util/applicationset_test.go new file mode 100644 index 0000000000000..c15e58a61af14 --- /dev/null +++ b/cmd/util/applicationset_test.go @@ -0,0 +1,40 @@ +package util + 
+import ( + "testing" + + argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/stretchr/testify/assert" +) + +var appSet = `apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + generators: + - list: + elements: + - cluster: engineering-dev + url: https://1.2.3.4 + template: + metadata: + name: '{{cluster}}-guestbook' + spec: + source: + repoURL: https://github.com/infra-team/cluster-deployments.git + targetRevision: HEAD + path: guestbook/{{cluster}} + destination: + server: '{{url}}' + namespace: guestbook +` + +func TestReadAppSet(t *testing.T) { + var appSets []*argoprojiov1alpha1.ApplicationSet + err := readAppset([]byte(appSet), &appSets) + if err != nil { + t.Logf("Failed reading appset file") + } + assert.Equal(t, len(appSets), 1) +} diff --git a/cmd/util/cluster.go b/cmd/util/cluster.go index 4b2687cf4f4bd..95c071c882b12 100644 --- a/cmd/util/cluster.go +++ b/cmd/util/cluster.go @@ -1,21 +1,33 @@ package util import ( + "context" "fmt" - "io/ioutil" "os" "sort" "strings" "text/tabwriter" "github.com/spf13/cobra" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + clientcmdapiv1 "k8s.io/client-go/tools/clientcmd/api/v1" + "sigs.k8s.io/yaml" argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" "github.com/argoproj/argo-cd/v2/util/errors" ) +type ClusterEndpoint string + +const ( + KubeConfigEndpoint ClusterEndpoint = "kubeconfig" + KubePublicEndpoint ClusterEndpoint = "kube-public" + KubeInternalEndpoint ClusterEndpoint = "internal" +) + func PrintKubeContexts(ca clientcmd.ConfigAccess) { config, err := ca.GetStartingConfig() errors.CheckError(err) @@ -55,7 +67,7 @@ func PrintKubeContexts(ca clientcmd.ConfigAccess) { } } -func NewCluster(name string, namespaces []string, conf *rest.Config, managerBearerToken string, awsAuthConf *argoappv1.AWSAuthConfig, execProviderConf 
*argoappv1.ExecProviderConfig) *argoappv1.Cluster { +func NewCluster(name string, namespaces []string, clusterResources bool, conf *rest.Config, managerBearerToken string, awsAuthConf *argoappv1.AWSAuthConfig, execProviderConf *argoappv1.ExecProviderConfig, labels, annotations map[string]string) *argoappv1.Cluster { tlsClientConfig := argoappv1.TLSClientConfig{ Insecure: conf.TLSClientConfig.Insecure, ServerName: conf.TLSClientConfig.ServerName, @@ -64,30 +76,33 @@ func NewCluster(name string, namespaces []string, conf *rest.Config, managerBear KeyData: conf.TLSClientConfig.KeyData, } if len(conf.TLSClientConfig.CAData) == 0 && conf.TLSClientConfig.CAFile != "" { - data, err := ioutil.ReadFile(conf.TLSClientConfig.CAFile) + data, err := os.ReadFile(conf.TLSClientConfig.CAFile) errors.CheckError(err) tlsClientConfig.CAData = data } if len(conf.TLSClientConfig.CertData) == 0 && conf.TLSClientConfig.CertFile != "" { - data, err := ioutil.ReadFile(conf.TLSClientConfig.CertFile) + data, err := os.ReadFile(conf.TLSClientConfig.CertFile) errors.CheckError(err) tlsClientConfig.CertData = data } if len(conf.TLSClientConfig.KeyData) == 0 && conf.TLSClientConfig.KeyFile != "" { - data, err := ioutil.ReadFile(conf.TLSClientConfig.KeyFile) + data, err := os.ReadFile(conf.TLSClientConfig.KeyFile) errors.CheckError(err) tlsClientConfig.KeyData = data } clst := argoappv1.Cluster{ - Server: conf.Host, - Name: name, - Namespaces: namespaces, + Server: conf.Host, + Name: name, + Namespaces: namespaces, + ClusterResources: clusterResources, Config: argoappv1.ClusterConfig{ TLSClientConfig: tlsClientConfig, AWSAuthConfig: awsAuthConf, ExecProviderConfig: execProviderConf, }, + Labels: labels, + Annotations: annotations, } // Bearer token will preferentially be used for auth if present, @@ -100,6 +115,30 @@ func NewCluster(name string, namespaces []string, conf *rest.Config, managerBear return &clst } +// GetKubePublicEndpoint returns the kubernetes apiserver endpoint as published +// 
in the kube-public. +func GetKubePublicEndpoint(client kubernetes.Interface) (string, error) { + clusterInfo, err := client.CoreV1().ConfigMaps("kube-public").Get(context.TODO(), "cluster-info", metav1.GetOptions{}) + if err != nil { + return "", err + } + kubeconfig, ok := clusterInfo.Data["kubeconfig"] + if !ok { + return "", fmt.Errorf("cluster-info does not contain a public kubeconfig") + } + // Parse Kubeconfig and get server address + config := &clientcmdapiv1.Config{} + err = yaml.Unmarshal([]byte(kubeconfig), config) + if err != nil { + return "", fmt.Errorf("failed to parse cluster-info kubeconfig: %v", err) + } + if len(config.Clusters) == 0 { + return "", fmt.Errorf("cluster-info kubeconfig does not have any clusters") + } + + return config.Clusters[0].Cluster.Server, nil +} + type ClusterOptions struct { InCluster bool Upsert bool @@ -108,13 +147,22 @@ type ClusterOptions struct { AwsClusterName string SystemNamespace string Namespaces []string + ClusterResources bool Name string + Project string Shard int64 ExecProviderCommand string ExecProviderArgs []string ExecProviderEnv map[string]string ExecProviderAPIVersion string ExecProviderInstallHint string + ClusterEndpoint string +} + +// InClusterEndpoint returns true if ArgoCD should reference the in-cluster +// endpoint when registering the target cluster. +func (o ClusterOptions) InClusterEndpoint() bool { + return o.InCluster || o.ClusterEndpoint == string(KubeInternalEndpoint) } func AddClusterFlags(command *cobra.Command, opts *ClusterOptions) { @@ -122,11 +170,14 @@ func AddClusterFlags(command *cobra.Command, opts *ClusterOptions) { command.Flags().StringVar(&opts.AwsClusterName, "aws-cluster-name", "", "AWS Cluster name if set then aws cli eks token command will be used to access cluster") command.Flags().StringVar(&opts.AwsRoleArn, "aws-role-arn", "", "Optional AWS role arn. 
If set then AWS IAM Authenticator assumes a role to perform cluster operations instead of the default AWS credential provider chain.") command.Flags().StringArrayVar(&opts.Namespaces, "namespace", nil, "List of namespaces which are allowed to manage") + command.Flags().BoolVar(&opts.ClusterResources, "cluster-resources", false, "Indicates if cluster level resources should be managed. The setting is used only if list of managed namespaces is not empty.") command.Flags().StringVar(&opts.Name, "name", "", "Overwrite the cluster name") + command.Flags().StringVar(&opts.Project, "project", "", "project of the cluster") command.Flags().Int64Var(&opts.Shard, "shard", -1, "Cluster shard number; inferred from hostname if not set") command.Flags().StringVar(&opts.ExecProviderCommand, "exec-command", "", "Command to run to provide client credentials to the cluster. You may need to build a custom ArgoCD image to ensure the command is available at runtime.") command.Flags().StringArrayVar(&opts.ExecProviderArgs, "exec-command-args", nil, "Arguments to supply to the --exec-command executable") command.Flags().StringToStringVar(&opts.ExecProviderEnv, "exec-command-env", nil, "Environment vars to set when running the --exec-command executable") command.Flags().StringVar(&opts.ExecProviderAPIVersion, "exec-command-api-version", "", "Preferred input version of the ExecInfo for the --exec-command executable") command.Flags().StringVar(&opts.ExecProviderInstallHint, "exec-command-install-hint", "", "Text shown to the user when the --exec-command executable doesn't seem to be present") + command.Flags().StringVar(&opts.ClusterEndpoint, "cluster-endpoint", "", "Cluster endpoint to use. 
Can be one of the following: 'kubeconfig', 'kube-public', or 'internal'.") } diff --git a/cmd/util/cluster_test.go b/cmd/util/cluster_test.go index 80cbbe60c2948..37e05bf6e58cb 100644 --- a/cmd/util/cluster_test.go +++ b/cmd/util/cluster_test.go @@ -5,13 +5,21 @@ import ( "testing" "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/rest" + clientcmdapiv1 "k8s.io/client-go/tools/clientcmd/api/v1" + "sigs.k8s.io/yaml" "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" ) func Test_newCluster(t *testing.T) { - clusterWithData := NewCluster("test-cluster", []string{"test-namespace"}, &rest.Config{ + labels := map[string]string{"key1": "val1"} + annotations := map[string]string{"key2": "val2"} + clusterWithData := NewCluster("test-cluster", []string{"test-namespace"}, false, &rest.Config{ TLSClientConfig: rest.TLSClientConfig{ Insecure: false, ServerName: "test-endpoint.example.com", @@ -23,13 +31,15 @@ func Test_newCluster(t *testing.T) { }, "test-bearer-token", &v1alpha1.AWSAuthConfig{}, - &v1alpha1.ExecProviderConfig{}) + &v1alpha1.ExecProviderConfig{}, labels, annotations) assert.Equal(t, "test-cert-data", string(clusterWithData.Config.CertData)) assert.Equal(t, "test-key-data", string(clusterWithData.Config.KeyData)) assert.Equal(t, "", clusterWithData.Config.BearerToken) + assert.Equal(t, labels, clusterWithData.Labels) + assert.Equal(t, annotations, clusterWithData.Annotations) - clusterWithFiles := NewCluster("test-cluster", []string{"test-namespace"}, &rest.Config{ + clusterWithFiles := NewCluster("test-cluster", []string{"test-namespace"}, false, &rest.Config{ TLSClientConfig: rest.TLSClientConfig{ Insecure: false, ServerName: "test-endpoint.example.com", @@ -41,13 +51,15 @@ func Test_newCluster(t *testing.T) { }, "test-bearer-token", &v1alpha1.AWSAuthConfig{}, - &v1alpha1.ExecProviderConfig{}) + 
&v1alpha1.ExecProviderConfig{}, labels, nil) assert.True(t, strings.Contains(string(clusterWithFiles.Config.CertData), "test-cert-data")) assert.True(t, strings.Contains(string(clusterWithFiles.Config.KeyData), "test-key-data")) assert.Equal(t, "", clusterWithFiles.Config.BearerToken) + assert.Equal(t, labels, clusterWithFiles.Labels) + assert.Nil(t, clusterWithFiles.Annotations) - clusterWithBearerToken := NewCluster("test-cluster", []string{"test-namespace"}, &rest.Config{ + clusterWithBearerToken := NewCluster("test-cluster", []string{"test-namespace"}, false, &rest.Config{ TLSClientConfig: rest.TLSClientConfig{ Insecure: false, ServerName: "test-endpoint.example.com", @@ -57,7 +69,115 @@ func Test_newCluster(t *testing.T) { }, "test-bearer-token", &v1alpha1.AWSAuthConfig{}, - &v1alpha1.ExecProviderConfig{}) + &v1alpha1.ExecProviderConfig{}, nil, nil) assert.Equal(t, "test-bearer-token", clusterWithBearerToken.Config.BearerToken) + assert.Nil(t, clusterWithBearerToken.Labels) + assert.Nil(t, clusterWithBearerToken.Annotations) +} + +func TestGetKubePublicEndpoint(t *testing.T) { + cases := []struct { + name string + clusterInfo *corev1.ConfigMap + expectedEndpoint string + expectError bool + }{ + { + name: "has public endpoint", + clusterInfo: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "kube-public", + Name: "cluster-info", + }, + Data: map[string]string{ + "kubeconfig": kubeconfigFixture("https://test-cluster:6443"), + }, + }, + expectedEndpoint: "https://test-cluster:6443", + }, + { + name: "no cluster-info", + expectError: true, + }, + { + name: "no kubeconfig in cluster-info", + clusterInfo: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "kube-public", + Name: "cluster-info", + }, + Data: map[string]string{ + "argo": "the project, not the movie", + }, + }, + expectError: true, + }, + { + name: "no clusters in cluster-info kubeconfig", + clusterInfo: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: 
"kube-public", + Name: "cluster-info", + }, + Data: map[string]string{ + "kubeconfig": kubeconfigFixture(""), + }, + }, + expectError: true, + }, + { + name: "can't parse kubeconfig", + clusterInfo: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "kube-public", + Name: "cluster-info", + }, + Data: map[string]string{ + "kubeconfig": "this is not valid YAML", + }, + }, + expectError: true, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + objects := []runtime.Object{} + if tc.clusterInfo != nil { + objects = append(objects, tc.clusterInfo) + } + clientset := fake.NewSimpleClientset(objects...) + endpoint, err := GetKubePublicEndpoint(clientset) + if err != nil && !tc.expectError { + t.Fatalf("unexpected error: %v", err) + } + if err == nil && tc.expectError { + t.Error("expected error to be returned, received none") + } + if endpoint != tc.expectedEndpoint { + t.Errorf("expected endpoint %s, got %s", tc.expectedEndpoint, endpoint) + } + }) + } + +} + +func kubeconfigFixture(endpoint string) string { + kubeconfig := &clientcmdapiv1.Config{} + if len(endpoint) > 0 { + kubeconfig.Clusters = []clientcmdapiv1.NamedCluster{ + { + Name: "test-kube", + Cluster: clientcmdapiv1.Cluster{ + Server: endpoint, + }, + }, + } + } + configYAML, err := yaml.Marshal(kubeconfig) + if err != nil { + return "" + } + return string(configYAML) } diff --git a/cmd/util/common.go b/cmd/util/common.go index 1210b0ad9c1ec..7c7b629ab4c98 100644 --- a/cmd/util/common.go +++ b/cmd/util/common.go @@ -1,85 +1,6 @@ package util -import ( - "encoding/json" - "fmt" - - "github.com/ghodss/yaml" - v1 "k8s.io/api/core/v1" - - "github.com/argoproj/gitops-engine/pkg/utils/kube" -) - var ( LogFormat string LogLevel string ) - -// PrintResource prints a single resource in YAML or JSON format to stdout according to the output format -func PrintResources(resources []interface{}, output string) error { - for i, resource := range resources { - filteredResource, err := 
omitFields(resource) - if err != nil { - return err - } - resources[i] = filteredResource - } - var obj interface{} = resources - if len(resources) == 1 { - obj = resources[0] - } - - switch output { - case "json": - jsonBytes, err := json.MarshalIndent(obj, "", " ") - if err != nil { - return err - } - - fmt.Println(string(jsonBytes)) - case "yaml": - yamlBytes, err := yaml.Marshal(obj) - if err != nil { - return err - } - // marshaled YAML already ends with the new line character - fmt.Print(string(yamlBytes)) - default: - return fmt.Errorf("unknown output format: %s", output) - } - return nil -} - -// omit fields such as status, creationTimestamp and metadata.namespace in k8s objects -func omitFields(resource interface{}) (interface{}, error) { - jsonBytes, err := json.Marshal(resource) - if err != nil { - return nil, err - } - - toMap := make(map[string]interface{}) - err = json.Unmarshal([]byte(string(jsonBytes)), &toMap) - if err != nil { - return nil, err - } - - delete(toMap, "status") - if v, ok := toMap["metadata"]; ok { - if metadata, ok := v.(map[string]interface{}); ok { - delete(metadata, "creationTimestamp") - delete(metadata, "namespace") - } - } - return toMap, nil -} - -// ConvertSecretData converts kubernetes secret's data to stringData -func ConvertSecretData(secret *v1.Secret) { - secret.Kind = kube.SecretKind - secret.APIVersion = "v1" - secret.StringData = map[string]string{} - for k, v := range secret.Data { - secret.StringData[k] = string(v) - } - secret.Data = map[string][]byte{} -} diff --git a/cmd/util/project.go b/cmd/util/project.go index 09e90caf5b5b8..ef157f6873081 100644 --- a/cmd/util/project.go +++ b/cmd/util/project.go @@ -20,10 +20,11 @@ import ( ) type ProjectOpts struct { - Description string - destinations []string - Sources []string - SignatureKeys []string + Description string + destinations []string + Sources []string + SignatureKeys []string + SourceNamespaces []string orphanedResourcesEnabled bool orphanedResourcesWarn 
bool @@ -45,6 +46,7 @@ func AddProjFlags(command *cobra.Command, opts *ProjectOpts) { command.Flags().StringArrayVar(&opts.deniedClusterResources, "deny-cluster-resource", []string{}, "List of denied cluster level resources") command.Flags().StringArrayVar(&opts.allowedNamespacedResources, "allow-namespaced-resource", []string{}, "List of allowed namespaced resources") command.Flags().StringArrayVar(&opts.deniedNamespacedResources, "deny-namespaced-resource", []string{}, "List of denied namespaced resources") + command.Flags().StringSliceVar(&opts.SourceNamespaces, "source-namespaces", []string{}, "List of source namespaces for applications") } @@ -92,7 +94,7 @@ func (opts *ProjectOpts) GetDestinations() []v1alpha1.ApplicationDestination { return destinations } -// TODO: Get configured keys and emit warning when a key is specified that is not configured +// GetSignatureKeys TODO: Get configured keys and emit warning when a key is specified that is not configured func (opts *ProjectOpts) GetSignatureKeys() []v1alpha1.SignatureKey { signatureKeys := make([]v1alpha1.SignatureKey, 0) for _, keyStr := range opts.SignatureKeys { @@ -104,6 +106,10 @@ func (opts *ProjectOpts) GetSignatureKeys() []v1alpha1.SignatureKey { return signatureKeys } +func (opts *ProjectOpts) GetSourceNamespaces() []string { + return opts.SourceNamespaces +} + func GetOrphanedResourcesSettings(flagSet *pflag.FlagSet, opts ProjectOpts) *v1alpha1.OrphanedResourcesMonitorSettings { warnChanged := flagSet.Changed("orphaned-resources-warn") if opts.orphanedResourcesEnabled || warnChanged { @@ -132,7 +138,10 @@ func readProjFromURI(fileURL string, proj *v1alpha1.AppProject) error { } else { err = config.UnmarshalRemoteFile(fileURL, &proj) } - return err + if err != nil { + return fmt.Errorf("error reading proj from uri: %w", err) + } + return nil } func SetProjSpecOptions(flags *pflag.FlagSet, spec *v1alpha1.AppProjectSpec, projOpts *ProjectOpts) int { @@ -156,6 +165,8 @@ func SetProjSpecOptions(flags 
*pflag.FlagSet, spec *v1alpha1.AppProjectSpec, pro spec.NamespaceResourceWhitelist = projOpts.GetAllowedNamespacedResources() case "deny-namespaced-resource": spec.NamespaceResourceBlacklist = projOpts.GetDeniedNamespacedResources() + case "source-namespaces": + spec.SourceNamespaces = projOpts.GetSourceNamespaces() } }) if flags.Changed("orphaned-resources") || flags.Changed("orphaned-resources-warn") { @@ -197,6 +208,5 @@ func ConstructAppProj(fileURL string, args []string, opts ProjectOpts, c *cobra. proj.Name = args[0] } SetProjSpecOptions(c.Flags(), &proj.Spec, &opts) - return &proj, nil } diff --git a/cmd/util/repo.go b/cmd/util/repo.go index 69d89ad93db45..b60c30a071311 100644 --- a/cmd/util/repo.go +++ b/cmd/util/repo.go @@ -21,11 +21,15 @@ type RepoOptions struct { GithubAppInstallationId int64 GithubAppPrivateKeyPath string GitHubAppEnterpriseBaseURL string + Proxy string + GCPServiceAccountKeyPath string + ForceHttpBasicAuth bool } func AddRepoFlags(command *cobra.Command, opts *RepoOptions) { command.Flags().StringVar(&opts.Repo.Type, "type", common.DefaultRepoType, "type of the repository, \"git\" or \"helm\"") command.Flags().StringVar(&opts.Repo.Name, "name", "", "name of the repository, mandatory for repositories of type helm") + command.Flags().StringVar(&opts.Repo.Project, "project", "", "project of the repository") command.Flags().StringVar(&opts.Repo.Username, "username", "", "username to the repository") command.Flags().StringVar(&opts.Repo.Password, "password", "", "password to the repository") command.Flags().StringVar(&opts.SshPrivateKeyPath, "ssh-private-key-path", "", "path to the private ssh key (e.g. 
~/.ssh/id_rsa)") @@ -39,4 +43,7 @@ func AddRepoFlags(command *cobra.Command, opts *RepoOptions) { command.Flags().Int64Var(&opts.GithubAppInstallationId, "github-app-installation-id", 0, "installation id of the GitHub Application") command.Flags().StringVar(&opts.GithubAppPrivateKeyPath, "github-app-private-key-path", "", "private key of the GitHub Application") command.Flags().StringVar(&opts.GitHubAppEnterpriseBaseURL, "github-app-enterprise-base-url", "", "base url to use when using GitHub Enterprise (e.g. https://ghe.example.com/api/v3") + command.Flags().StringVar(&opts.Proxy, "proxy", "", "use proxy to access repository") + command.Flags().StringVar(&opts.GCPServiceAccountKeyPath, "gcp-service-account-key-path", "", "service account key for the Google Cloud Platform") + command.Flags().BoolVar(&opts.ForceHttpBasicAuth, "force-http-basic-auth", false, "whether to force use of basic auth when connecting repository via HTTP") } diff --git a/cmpserver/apiclient/clientset.go b/cmpserver/apiclient/clientset.go new file mode 100644 index 0000000000000..025625ff8092e --- /dev/null +++ b/cmpserver/apiclient/clientset.go @@ -0,0 +1,65 @@ +package apiclient + +import ( + "context" + "time" + + grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" + grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry" + log "github.com/sirupsen/logrus" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + grpc_util "github.com/argoproj/argo-cd/v2/util/grpc" + "github.com/argoproj/argo-cd/v2/util/io" +) + +const ( + // MaxGRPCMessageSize contains max grpc message size + MaxGRPCMessageSize = 100 * 1024 * 1024 +) + +// Clientset represents config management plugin server api clients +type Clientset interface { + NewConfigManagementPluginClient() (io.Closer, ConfigManagementPluginServiceClient, error) +} + +type clientSet struct { + address string +} + +func (c *clientSet) NewConfigManagementPluginClient() (io.Closer, 
ConfigManagementPluginServiceClient, error) { + conn, err := NewConnection(c.address) + if err != nil { + return nil, nil, err + } + return conn, NewConfigManagementPluginServiceClient(conn), nil +} + +func NewConnection(address string) (*grpc.ClientConn, error) { + retryOpts := []grpc_retry.CallOption{ + grpc_retry.WithMax(3), + grpc_retry.WithBackoff(grpc_retry.BackoffLinear(1000 * time.Millisecond)), + } + unaryInterceptors := []grpc.UnaryClientInterceptor{grpc_retry.UnaryClientInterceptor(retryOpts...)} + dialOpts := []grpc.DialOption{ + grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(retryOpts...)), + grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(unaryInterceptors...)), + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(MaxGRPCMessageSize), grpc.MaxCallSendMsgSize(MaxGRPCMessageSize)), + grpc.WithUnaryInterceptor(grpc_util.OTELUnaryClientInterceptor()), + grpc.WithStreamInterceptor(grpc_util.OTELStreamClientInterceptor()), + } + + dialOpts = append(dialOpts, grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc_util.BlockingDial(context.Background(), "unix", address, nil, dialOpts...) + if err != nil { + log.Errorf("Unable to connect to config management plugin service with address %s", address) + return nil, err + } + return conn, nil +} + +// NewConfigManagementPluginClientSet creates new instance of config management plugin server Clientset +func NewConfigManagementPluginClientSet(address string) Clientset { + return &clientSet{address: address} +} diff --git a/cmpserver/apiclient/plugin.pb.go b/cmpserver/apiclient/plugin.pb.go new file mode 100644 index 0000000000000..29ebca3ae3afc --- /dev/null +++ b/cmpserver/apiclient/plugin.pb.go @@ -0,0 +1,2213 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: cmpserver/plugin/plugin.proto + +package apiclient + +import ( + context "context" + fmt "fmt" + apiclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// AppStreamRequest is the request object used to send the application's +// files over a stream. +type AppStreamRequest struct { + // Types that are valid to be assigned to Request: + // *AppStreamRequest_Metadata + // *AppStreamRequest_File + Request isAppStreamRequest_Request `protobuf_oneof:"request"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AppStreamRequest) Reset() { *m = AppStreamRequest{} } +func (m *AppStreamRequest) String() string { return proto.CompactTextString(m) } +func (*AppStreamRequest) ProtoMessage() {} +func (*AppStreamRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b21875a7079a06ed, []int{0} +} +func (m *AppStreamRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AppStreamRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AppStreamRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m 
*AppStreamRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AppStreamRequest.Merge(m, src) +} +func (m *AppStreamRequest) XXX_Size() int { + return m.Size() +} +func (m *AppStreamRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AppStreamRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AppStreamRequest proto.InternalMessageInfo + +type isAppStreamRequest_Request interface { + isAppStreamRequest_Request() + MarshalTo([]byte) (int, error) + Size() int +} + +type AppStreamRequest_Metadata struct { + Metadata *ManifestRequestMetadata `protobuf:"bytes,1,opt,name=metadata,proto3,oneof" json:"metadata,omitempty"` +} +type AppStreamRequest_File struct { + File *File `protobuf:"bytes,2,opt,name=file,proto3,oneof" json:"file,omitempty"` +} + +func (*AppStreamRequest_Metadata) isAppStreamRequest_Request() {} +func (*AppStreamRequest_File) isAppStreamRequest_Request() {} + +func (m *AppStreamRequest) GetRequest() isAppStreamRequest_Request { + if m != nil { + return m.Request + } + return nil +} + +func (m *AppStreamRequest) GetMetadata() *ManifestRequestMetadata { + if x, ok := m.GetRequest().(*AppStreamRequest_Metadata); ok { + return x.Metadata + } + return nil +} + +func (m *AppStreamRequest) GetFile() *File { + if x, ok := m.GetRequest().(*AppStreamRequest_File); ok { + return x.File + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*AppStreamRequest) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*AppStreamRequest_Metadata)(nil), + (*AppStreamRequest_File)(nil), + } +} + +// ManifestRequestMetadata defines the metada related to the file being sent +// to the CMP server. 
+type ManifestRequestMetadata struct { + // appName refers to the ArgoCD Application name + AppName string `protobuf:"bytes,1,opt,name=appName,proto3" json:"appName,omitempty"` + // appRelPath points to the application relative path inside the tarball + AppRelPath string `protobuf:"bytes,2,opt,name=appRelPath,proto3" json:"appRelPath,omitempty"` + // checksum is used to verify the integrity of the file + Checksum string `protobuf:"bytes,3,opt,name=checksum,proto3" json:"checksum,omitempty"` + // size relates to the file size in bytes + Size_ int64 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` + // env is a list with the environment variables needed to generate manifests + Env []*EnvEntry `protobuf:"bytes,5,rep,name=env,proto3" json:"env,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ManifestRequestMetadata) Reset() { *m = ManifestRequestMetadata{} } +func (m *ManifestRequestMetadata) String() string { return proto.CompactTextString(m) } +func (*ManifestRequestMetadata) ProtoMessage() {} +func (*ManifestRequestMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_b21875a7079a06ed, []int{1} +} +func (m *ManifestRequestMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ManifestRequestMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ManifestRequestMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ManifestRequestMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_ManifestRequestMetadata.Merge(m, src) +} +func (m *ManifestRequestMetadata) XXX_Size() int { + return m.Size() +} +func (m *ManifestRequestMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_ManifestRequestMetadata.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ManifestRequestMetadata proto.InternalMessageInfo + +func (m *ManifestRequestMetadata) GetAppName() string { + if m != nil { + return m.AppName + } + return "" +} + +func (m *ManifestRequestMetadata) GetAppRelPath() string { + if m != nil { + return m.AppRelPath + } + return "" +} + +func (m *ManifestRequestMetadata) GetChecksum() string { + if m != nil { + return m.Checksum + } + return "" +} + +func (m *ManifestRequestMetadata) GetSize_() int64 { + if m != nil { + return m.Size_ + } + return 0 +} + +func (m *ManifestRequestMetadata) GetEnv() []*EnvEntry { + if m != nil { + return m.Env + } + return nil +} + +// EnvEntry represents an entry in the application's environment +type EnvEntry struct { + // Name is the name of the variable, usually expressed in uppercase + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Value is the value of the variable + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnvEntry) Reset() { *m = EnvEntry{} } +func (m *EnvEntry) String() string { return proto.CompactTextString(m) } +func (*EnvEntry) ProtoMessage() {} +func (*EnvEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_b21875a7079a06ed, []int{2} +} +func (m *EnvEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnvEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EnvEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvEntry.Merge(m, src) +} +func (m *EnvEntry) XXX_Size() int { + return m.Size() +} +func (m *EnvEntry) XXX_DiscardUnknown() { + xxx_messageInfo_EnvEntry.DiscardUnknown(m) +} + +var 
xxx_messageInfo_EnvEntry proto.InternalMessageInfo + +func (m *EnvEntry) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *EnvEntry) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +type ManifestResponse struct { + Manifests []string `protobuf:"bytes,1,rep,name=manifests,proto3" json:"manifests,omitempty"` + SourceType string `protobuf:"bytes,2,opt,name=sourceType,proto3" json:"sourceType,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ManifestResponse) Reset() { *m = ManifestResponse{} } +func (m *ManifestResponse) String() string { return proto.CompactTextString(m) } +func (*ManifestResponse) ProtoMessage() {} +func (*ManifestResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b21875a7079a06ed, []int{3} +} +func (m *ManifestResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ManifestResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ManifestResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ManifestResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ManifestResponse.Merge(m, src) +} +func (m *ManifestResponse) XXX_Size() int { + return m.Size() +} +func (m *ManifestResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ManifestResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ManifestResponse proto.InternalMessageInfo + +func (m *ManifestResponse) GetManifests() []string { + if m != nil { + return m.Manifests + } + return nil +} + +func (m *ManifestResponse) GetSourceType() string { + if m != nil { + return m.SourceType + } + return "" +} + +type RepositoryResponse struct { + IsSupported bool `protobuf:"varint,1,opt,name=isSupported,proto3" json:"isSupported,omitempty"` + 
IsDiscoveryEnabled bool `protobuf:"varint,2,opt,name=isDiscoveryEnabled,proto3" json:"isDiscoveryEnabled,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RepositoryResponse) Reset() { *m = RepositoryResponse{} } +func (m *RepositoryResponse) String() string { return proto.CompactTextString(m) } +func (*RepositoryResponse) ProtoMessage() {} +func (*RepositoryResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b21875a7079a06ed, []int{4} +} +func (m *RepositoryResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RepositoryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RepositoryResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RepositoryResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RepositoryResponse.Merge(m, src) +} +func (m *RepositoryResponse) XXX_Size() int { + return m.Size() +} +func (m *RepositoryResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RepositoryResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RepositoryResponse proto.InternalMessageInfo + +func (m *RepositoryResponse) GetIsSupported() bool { + if m != nil { + return m.IsSupported + } + return false +} + +func (m *RepositoryResponse) GetIsDiscoveryEnabled() bool { + if m != nil { + return m.IsDiscoveryEnabled + } + return false +} + +// ParametersAnnouncementResponse contains a list of announcements. This list represents all the parameters which a CMP +// is able to accept. 
+type ParametersAnnouncementResponse struct { + ParameterAnnouncements []*apiclient.ParameterAnnouncement `protobuf:"bytes,1,rep,name=parameterAnnouncements,proto3" json:"parameterAnnouncements,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ParametersAnnouncementResponse) Reset() { *m = ParametersAnnouncementResponse{} } +func (m *ParametersAnnouncementResponse) String() string { return proto.CompactTextString(m) } +func (*ParametersAnnouncementResponse) ProtoMessage() {} +func (*ParametersAnnouncementResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b21875a7079a06ed, []int{5} +} +func (m *ParametersAnnouncementResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParametersAnnouncementResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ParametersAnnouncementResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ParametersAnnouncementResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParametersAnnouncementResponse.Merge(m, src) +} +func (m *ParametersAnnouncementResponse) XXX_Size() int { + return m.Size() +} +func (m *ParametersAnnouncementResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ParametersAnnouncementResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ParametersAnnouncementResponse proto.InternalMessageInfo + +func (m *ParametersAnnouncementResponse) GetParameterAnnouncements() []*apiclient.ParameterAnnouncement { + if m != nil { + return m.ParameterAnnouncements + } + return nil +} + +type File struct { + Chunk []byte `protobuf:"bytes,1,opt,name=chunk,proto3" json:"chunk,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *File) Reset() { *m 
= File{} } +func (m *File) String() string { return proto.CompactTextString(m) } +func (*File) ProtoMessage() {} +func (*File) Descriptor() ([]byte, []int) { + return fileDescriptor_b21875a7079a06ed, []int{6} +} +func (m *File) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *File) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_File.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *File) XXX_Merge(src proto.Message) { + xxx_messageInfo_File.Merge(m, src) +} +func (m *File) XXX_Size() int { + return m.Size() +} +func (m *File) XXX_DiscardUnknown() { + xxx_messageInfo_File.DiscardUnknown(m) +} + +var xxx_messageInfo_File proto.InternalMessageInfo + +func (m *File) GetChunk() []byte { + if m != nil { + return m.Chunk + } + return nil +} + +func init() { + proto.RegisterType((*AppStreamRequest)(nil), "plugin.AppStreamRequest") + proto.RegisterType((*ManifestRequestMetadata)(nil), "plugin.ManifestRequestMetadata") + proto.RegisterType((*EnvEntry)(nil), "plugin.EnvEntry") + proto.RegisterType((*ManifestResponse)(nil), "plugin.ManifestResponse") + proto.RegisterType((*RepositoryResponse)(nil), "plugin.RepositoryResponse") + proto.RegisterType((*ParametersAnnouncementResponse)(nil), "plugin.ParametersAnnouncementResponse") + proto.RegisterType((*File)(nil), "plugin.File") +} + +func init() { proto.RegisterFile("cmpserver/plugin/plugin.proto", fileDescriptor_b21875a7079a06ed) } + +var fileDescriptor_b21875a7079a06ed = []byte{ + // 576 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x94, 0xdd, 0x6e, 0x12, 0x4f, + 0x14, 0xc0, 0xbb, 0x85, 0xb6, 0x70, 0x68, 0xf2, 0x27, 0x93, 0x7f, 0x74, 0x25, 0x2d, 0xe2, 0x5e, + 0x18, 0x6e, 0x84, 0x04, 0xbd, 0x35, 0xb1, 0x55, 0x6c, 0xa3, 0xc1, 0x90, 0xa9, 0x37, 0x7a, 0x37, + 0x1d, 0x0e, 
0x30, 0x76, 0x77, 0x66, 0x9c, 0x99, 0xdd, 0x04, 0xbd, 0xf1, 0x3d, 0x7c, 0x00, 0x5f, + 0xc5, 0x4b, 0x1f, 0xc1, 0xf4, 0x49, 0x0c, 0xb3, 0xbb, 0x40, 0x6c, 0x8b, 0x57, 0x7b, 0x3e, 0x7f, + 0x7b, 0xbe, 0x32, 0x70, 0xcc, 0x13, 0x6d, 0xd1, 0x64, 0x68, 0xfa, 0x3a, 0x4e, 0x67, 0x42, 0x16, + 0x9f, 0x9e, 0x36, 0xca, 0x29, 0xb2, 0x9f, 0x6b, 0xad, 0xe1, 0x4c, 0xb8, 0x79, 0x7a, 0xd9, 0xe3, + 0x2a, 0xe9, 0x33, 0x33, 0x53, 0xda, 0xa8, 0x4f, 0x5e, 0x78, 0xc2, 0x27, 0xfd, 0x6c, 0xd0, 0x37, + 0xa8, 0x55, 0x81, 0xf1, 0xa2, 0x70, 0xca, 0x2c, 0x36, 0xc4, 0x1c, 0x17, 0x7d, 0x0b, 0xa0, 0x79, + 0xa2, 0xf5, 0x85, 0x33, 0xc8, 0x12, 0x8a, 0x9f, 0x53, 0xb4, 0x8e, 0x3c, 0x87, 0x5a, 0x82, 0x8e, + 0x4d, 0x98, 0x63, 0x61, 0xd0, 0x09, 0xba, 0x8d, 0xc1, 0xc3, 0x5e, 0x51, 0xc4, 0x88, 0x49, 0x31, + 0x45, 0xeb, 0x8a, 0xd0, 0x51, 0x11, 0x76, 0xbe, 0x43, 0x57, 0x29, 0x24, 0x82, 0xea, 0x54, 0xc4, + 0x18, 0xee, 0xfa, 0xd4, 0xc3, 0x32, 0xf5, 0xb5, 0x88, 0xf1, 0x7c, 0x87, 0x7a, 0xdf, 0x69, 0x1d, + 0x0e, 0x4c, 0x8e, 0x88, 0x7e, 0x04, 0x70, 0xff, 0x0e, 0x2c, 0x09, 0xe1, 0x80, 0x69, 0xfd, 0x8e, + 0x25, 0xe8, 0x0b, 0xa9, 0xd3, 0x52, 0x25, 0x6d, 0x00, 0xa6, 0x35, 0xc5, 0x78, 0xcc, 0xdc, 0xdc, + 0xff, 0xaa, 0x4e, 0x37, 0x2c, 0xa4, 0x05, 0x35, 0x3e, 0x47, 0x7e, 0x65, 0xd3, 0x24, 0xac, 0x78, + 0xef, 0x4a, 0x27, 0x04, 0xaa, 0x56, 0x7c, 0xc1, 0xb0, 0xda, 0x09, 0xba, 0x15, 0xea, 0x65, 0x12, + 0x41, 0x05, 0x65, 0x16, 0xee, 0x75, 0x2a, 0xdd, 0xc6, 0xa0, 0x59, 0xd6, 0x3c, 0x94, 0xd9, 0x50, + 0x3a, 0xb3, 0xa0, 0x4b, 0x67, 0xf4, 0x0c, 0x6a, 0xa5, 0x61, 0xc9, 0x90, 0xeb, 0xb2, 0xbc, 0x4c, + 0xfe, 0x87, 0xbd, 0x8c, 0xc5, 0x29, 0x16, 0xe5, 0xe4, 0x4a, 0x34, 0x86, 0xe6, 0xba, 0x3d, 0xab, + 0x95, 0xb4, 0x48, 0x8e, 0xa0, 0x9e, 0x14, 0x36, 0x1b, 0x06, 0x9d, 0x4a, 0xb7, 0x4e, 0xd7, 0x86, + 0x65, 0x6f, 0x56, 0xa5, 0x86, 0xe3, 0xfb, 0x85, 0x2e, 0x61, 0x1b, 0x96, 0x68, 0x0a, 0x84, 0xae, + 0x16, 0xb9, 0x62, 0x76, 0xa0, 0x21, 0xec, 0x45, 0xaa, 0xb5, 0x32, 0x0e, 0x27, 0xbe, 0xb0, 0x1a, + 0xdd, 0x34, 0x91, 0x1e, 0x10, 0x61, 0x5f, 0x09, 
0xcb, 0x55, 0x86, 0x66, 0x31, 0x94, 0xec, 0x32, + 0xc6, 0x89, 0xe7, 0xd7, 0xe8, 0x2d, 0x9e, 0xe8, 0x2b, 0xb4, 0xc7, 0xcc, 0xb0, 0x04, 0x1d, 0x1a, + 0x7b, 0x22, 0xa5, 0x4a, 0x25, 0xc7, 0x04, 0xe5, 0xba, 0x8f, 0x0f, 0x70, 0x4f, 0x97, 0x11, 0x9b, + 0x01, 0x79, 0x53, 0x8d, 0xc1, 0xa3, 0xde, 0xc6, 0xc5, 0x8d, 0x6f, 0x8b, 0xa4, 0x77, 0x00, 0xa2, + 0x23, 0xa8, 0x2e, 0x2f, 0x66, 0x39, 0x54, 0x3e, 0x4f, 0xe5, 0x95, 0x6f, 0xe8, 0x90, 0xe6, 0xca, + 0xe0, 0xfb, 0x2e, 0x1c, 0xbf, 0x54, 0x72, 0x2a, 0x66, 0x23, 0x26, 0xd9, 0xcc, 0xe7, 0x8c, 0xfd, + 0xce, 0x2e, 0xd0, 0x64, 0x82, 0x23, 0x79, 0x03, 0xcd, 0x33, 0x94, 0x68, 0x98, 0xc3, 0x72, 0xfc, + 0x24, 0x2c, 0xf7, 0xfa, 0xf7, 0xc9, 0xb7, 0xc2, 0x9b, 0x07, 0x9e, 0xb7, 0x18, 0xed, 0x74, 0x03, + 0xf2, 0x16, 0xfe, 0x1b, 0x31, 0xc7, 0xe7, 0xeb, 0xa9, 0x6f, 0x41, 0xb5, 0x4a, 0xcf, 0xcd, 0x1d, + 0x79, 0x18, 0x83, 0x07, 0x67, 0xe8, 0x6e, 0x1f, 0xec, 0x16, 0xec, 0xe3, 0xd2, 0xb3, 0x7d, 0x25, + 0xcb, 0x5f, 0x9c, 0xbe, 0xf8, 0x79, 0xdd, 0x0e, 0x7e, 0x5d, 0xb7, 0x83, 0xdf, 0xd7, 0xed, 0xe0, + 0xe3, 0xe0, 0x1f, 0x4f, 0xc5, 0xfa, 0xc1, 0x61, 0x5a, 0xf0, 0x58, 0xa0, 0x74, 0x97, 0xfb, 0xfe, + 0x79, 0x78, 0xfa, 0x27, 0x00, 0x00, 0xff, 0xff, 0x23, 0x88, 0x8e, 0xd3, 0x8e, 0x04, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ConfigManagementPluginServiceClient is the client API for ConfigManagementPluginService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type ConfigManagementPluginServiceClient interface { + // GenerateManifests receive a stream containing a tgz archive with all required files necessary + // to generate manifests + GenerateManifest(ctx context.Context, opts ...grpc.CallOption) (ConfigManagementPluginService_GenerateManifestClient, error) + // MatchRepository returns whether or not the given application is supported by the plugin + MatchRepository(ctx context.Context, opts ...grpc.CallOption) (ConfigManagementPluginService_MatchRepositoryClient, error) + // GetParametersAnnouncement gets a list of parameter announcements for the given app + GetParametersAnnouncement(ctx context.Context, opts ...grpc.CallOption) (ConfigManagementPluginService_GetParametersAnnouncementClient, error) +} + +type configManagementPluginServiceClient struct { + cc *grpc.ClientConn +} + +func NewConfigManagementPluginServiceClient(cc *grpc.ClientConn) ConfigManagementPluginServiceClient { + return &configManagementPluginServiceClient{cc} +} + +func (c *configManagementPluginServiceClient) GenerateManifest(ctx context.Context, opts ...grpc.CallOption) (ConfigManagementPluginService_GenerateManifestClient, error) { + stream, err := c.cc.NewStream(ctx, &_ConfigManagementPluginService_serviceDesc.Streams[0], "/plugin.ConfigManagementPluginService/GenerateManifest", opts...) 
+ if err != nil { + return nil, err + } + x := &configManagementPluginServiceGenerateManifestClient{stream} + return x, nil +} + +type ConfigManagementPluginService_GenerateManifestClient interface { + Send(*AppStreamRequest) error + CloseAndRecv() (*ManifestResponse, error) + grpc.ClientStream +} + +type configManagementPluginServiceGenerateManifestClient struct { + grpc.ClientStream +} + +func (x *configManagementPluginServiceGenerateManifestClient) Send(m *AppStreamRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *configManagementPluginServiceGenerateManifestClient) CloseAndRecv() (*ManifestResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(ManifestResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *configManagementPluginServiceClient) MatchRepository(ctx context.Context, opts ...grpc.CallOption) (ConfigManagementPluginService_MatchRepositoryClient, error) { + stream, err := c.cc.NewStream(ctx, &_ConfigManagementPluginService_serviceDesc.Streams[1], "/plugin.ConfigManagementPluginService/MatchRepository", opts...) 
+ if err != nil { + return nil, err + } + x := &configManagementPluginServiceMatchRepositoryClient{stream} + return x, nil +} + +type ConfigManagementPluginService_MatchRepositoryClient interface { + Send(*AppStreamRequest) error + CloseAndRecv() (*RepositoryResponse, error) + grpc.ClientStream +} + +type configManagementPluginServiceMatchRepositoryClient struct { + grpc.ClientStream +} + +func (x *configManagementPluginServiceMatchRepositoryClient) Send(m *AppStreamRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *configManagementPluginServiceMatchRepositoryClient) CloseAndRecv() (*RepositoryResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(RepositoryResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *configManagementPluginServiceClient) GetParametersAnnouncement(ctx context.Context, opts ...grpc.CallOption) (ConfigManagementPluginService_GetParametersAnnouncementClient, error) { + stream, err := c.cc.NewStream(ctx, &_ConfigManagementPluginService_serviceDesc.Streams[2], "/plugin.ConfigManagementPluginService/GetParametersAnnouncement", opts...) 
+ if err != nil { + return nil, err + } + x := &configManagementPluginServiceGetParametersAnnouncementClient{stream} + return x, nil +} + +type ConfigManagementPluginService_GetParametersAnnouncementClient interface { + Send(*AppStreamRequest) error + CloseAndRecv() (*ParametersAnnouncementResponse, error) + grpc.ClientStream +} + +type configManagementPluginServiceGetParametersAnnouncementClient struct { + grpc.ClientStream +} + +func (x *configManagementPluginServiceGetParametersAnnouncementClient) Send(m *AppStreamRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *configManagementPluginServiceGetParametersAnnouncementClient) CloseAndRecv() (*ParametersAnnouncementResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(ParametersAnnouncementResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ConfigManagementPluginServiceServer is the server API for ConfigManagementPluginService service. +type ConfigManagementPluginServiceServer interface { + // GenerateManifests receive a stream containing a tgz archive with all required files necessary + // to generate manifests + GenerateManifest(ConfigManagementPluginService_GenerateManifestServer) error + // MatchRepository returns whether or not the given application is supported by the plugin + MatchRepository(ConfigManagementPluginService_MatchRepositoryServer) error + // GetParametersAnnouncement gets a list of parameter announcements for the given app + GetParametersAnnouncement(ConfigManagementPluginService_GetParametersAnnouncementServer) error +} + +// UnimplementedConfigManagementPluginServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedConfigManagementPluginServiceServer struct { +} + +func (*UnimplementedConfigManagementPluginServiceServer) GenerateManifest(srv ConfigManagementPluginService_GenerateManifestServer) error { + return status.Errorf(codes.Unimplemented, "method GenerateManifest not implemented") +} +func (*UnimplementedConfigManagementPluginServiceServer) MatchRepository(srv ConfigManagementPluginService_MatchRepositoryServer) error { + return status.Errorf(codes.Unimplemented, "method MatchRepository not implemented") +} +func (*UnimplementedConfigManagementPluginServiceServer) GetParametersAnnouncement(srv ConfigManagementPluginService_GetParametersAnnouncementServer) error { + return status.Errorf(codes.Unimplemented, "method GetParametersAnnouncement not implemented") +} + +func RegisterConfigManagementPluginServiceServer(s *grpc.Server, srv ConfigManagementPluginServiceServer) { + s.RegisterService(&_ConfigManagementPluginService_serviceDesc, srv) +} + +func _ConfigManagementPluginService_GenerateManifest_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ConfigManagementPluginServiceServer).GenerateManifest(&configManagementPluginServiceGenerateManifestServer{stream}) +} + +type ConfigManagementPluginService_GenerateManifestServer interface { + SendAndClose(*ManifestResponse) error + Recv() (*AppStreamRequest, error) + grpc.ServerStream +} + +type configManagementPluginServiceGenerateManifestServer struct { + grpc.ServerStream +} + +func (x *configManagementPluginServiceGenerateManifestServer) SendAndClose(m *ManifestResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *configManagementPluginServiceGenerateManifestServer) Recv() (*AppStreamRequest, error) { + m := new(AppStreamRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _ConfigManagementPluginService_MatchRepository_Handler(srv interface{}, stream grpc.ServerStream) error { + return 
srv.(ConfigManagementPluginServiceServer).MatchRepository(&configManagementPluginServiceMatchRepositoryServer{stream}) +} + +type ConfigManagementPluginService_MatchRepositoryServer interface { + SendAndClose(*RepositoryResponse) error + Recv() (*AppStreamRequest, error) + grpc.ServerStream +} + +type configManagementPluginServiceMatchRepositoryServer struct { + grpc.ServerStream +} + +func (x *configManagementPluginServiceMatchRepositoryServer) SendAndClose(m *RepositoryResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *configManagementPluginServiceMatchRepositoryServer) Recv() (*AppStreamRequest, error) { + m := new(AppStreamRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _ConfigManagementPluginService_GetParametersAnnouncement_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ConfigManagementPluginServiceServer).GetParametersAnnouncement(&configManagementPluginServiceGetParametersAnnouncementServer{stream}) +} + +type ConfigManagementPluginService_GetParametersAnnouncementServer interface { + SendAndClose(*ParametersAnnouncementResponse) error + Recv() (*AppStreamRequest, error) + grpc.ServerStream +} + +type configManagementPluginServiceGetParametersAnnouncementServer struct { + grpc.ServerStream +} + +func (x *configManagementPluginServiceGetParametersAnnouncementServer) SendAndClose(m *ParametersAnnouncementResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *configManagementPluginServiceGetParametersAnnouncementServer) Recv() (*AppStreamRequest, error) { + m := new(AppStreamRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _ConfigManagementPluginService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.ConfigManagementPluginService", + HandlerType: (*ConfigManagementPluginServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: 
"GenerateManifest", + Handler: _ConfigManagementPluginService_GenerateManifest_Handler, + ClientStreams: true, + }, + { + StreamName: "MatchRepository", + Handler: _ConfigManagementPluginService_MatchRepository_Handler, + ClientStreams: true, + }, + { + StreamName: "GetParametersAnnouncement", + Handler: _ConfigManagementPluginService_GetParametersAnnouncement_Handler, + ClientStreams: true, + }, + }, + Metadata: "cmpserver/plugin/plugin.proto", +} + +func (m *AppStreamRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AppStreamRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AppStreamRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Request != nil { + { + size := m.Request.Size() + i -= size + if _, err := m.Request.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *AppStreamRequest_Metadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AppStreamRequest_Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Metadata != nil { + { + size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPlugin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *AppStreamRequest_File) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AppStreamRequest_File) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.File != nil { + { + size, err 
:= m.File.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPlugin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *ManifestRequestMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ManifestRequestMetadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ManifestRequestMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPlugin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.Size_ != 0 { + i = encodeVarintPlugin(dAtA, i, uint64(m.Size_)) + i-- + dAtA[i] = 0x20 + } + if len(m.Checksum) > 0 { + i -= len(m.Checksum) + copy(dAtA[i:], m.Checksum) + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Checksum))) + i-- + dAtA[i] = 0x1a + } + if len(m.AppRelPath) > 0 { + i -= len(m.AppRelPath) + copy(dAtA[i:], m.AppRelPath) + i = encodeVarintPlugin(dAtA, i, uint64(len(m.AppRelPath))) + i-- + dAtA[i] = 0x12 + } + if len(m.AppName) > 0 { + i -= len(m.AppName) + copy(dAtA[i:], m.AppName) + i = encodeVarintPlugin(dAtA, i, uint64(len(m.AppName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EnvEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvEntry) MarshalTo(dAtA []byte) (int, error) { + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ManifestResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ManifestResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ManifestResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.SourceType) > 0 { + i -= len(m.SourceType) + copy(dAtA[i:], m.SourceType) + i = encodeVarintPlugin(dAtA, i, uint64(len(m.SourceType))) + i-- + dAtA[i] = 0x12 + } + if len(m.Manifests) > 0 { + for iNdEx := len(m.Manifests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Manifests[iNdEx]) + copy(dAtA[i:], m.Manifests[iNdEx]) + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Manifests[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RepositoryResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RepositoryResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RepositoryResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.IsDiscoveryEnabled { + i-- + if m.IsDiscoveryEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.IsSupported { + i-- + if m.IsSupported { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ParametersAnnouncementResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ParametersAnnouncementResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ParametersAnnouncementResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ParameterAnnouncements) > 0 { + for iNdEx := len(m.ParameterAnnouncements) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ParameterAnnouncements[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPlugin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *File) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *File) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *File) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if 
m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Chunk) > 0 { + i -= len(m.Chunk) + copy(dAtA[i:], m.Chunk) + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Chunk))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { + offset -= sovPlugin(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AppStreamRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Request != nil { + n += m.Request.Size() + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AppStreamRequest_Metadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Metadata != nil { + l = m.Metadata.Size() + n += 1 + l + sovPlugin(uint64(l)) + } + return n +} +func (m *AppStreamRequest_File) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.File != nil { + l = m.File.Size() + n += 1 + l + sovPlugin(uint64(l)) + } + return n +} +func (m *ManifestRequestMetadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AppName) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.AppRelPath) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Checksum) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if m.Size_ != 0 { + n += 1 + sovPlugin(uint64(m.Size_)) + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovPlugin(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EnvEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if m.XXX_unrecognized != nil 
{ + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ManifestResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Manifests) > 0 { + for _, s := range m.Manifests { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } + l = len(m.SourceType) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RepositoryResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IsSupported { + n += 2 + } + if m.IsDiscoveryEnabled { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ParametersAnnouncementResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ParameterAnnouncements) > 0 { + for _, e := range m.ParameterAnnouncements { + l = e.Size() + n += 1 + l + sovPlugin(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *File) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Chunk) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovPlugin(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozPlugin(x uint64) (n int) { + return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *AppStreamRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AppStreamRequest: wiretype end group for non-group") + } 
+ if fieldNum <= 0 { + return fmt.Errorf("proto: AppStreamRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ManifestRequestMetadata{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &AppStreamRequest_Metadata{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field File", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &File{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &AppStreamRequest_File{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ManifestRequestMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ManifestRequestMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ManifestRequestMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppRelPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + 
if postIndex < 0 { + return ErrInvalidLengthPlugin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppRelPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Checksum", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Checksum = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) + } + m.Size_ = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size_ |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, &EnvEntry{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnvEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnvEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ManifestResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ManifestResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ManifestResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Manifests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Manifests = append(m.Manifests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RepositoryResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RepositoryResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RepositoryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsSupported", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsSupported = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsDiscoveryEnabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsDiscoveryEnabled = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ParametersAnnouncementResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParametersAnnouncementResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParametersAnnouncementResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParameterAnnouncements", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ParameterAnnouncements = append(m.ParameterAnnouncements, &apiclient.ParameterAnnouncement{}) + if err := m.ParameterAnnouncements[len(m.ParameterAnnouncements)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *File) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: File: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: File: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunk", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Chunk = append(m.Chunk[:0], dAtA[iNdEx:postIndex]...) + if m.Chunk == nil { + m.Chunk = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPlugin(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthPlugin + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupPlugin + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthPlugin + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupPlugin = fmt.Errorf("proto: unexpected end of group") +) diff --git a/cmpserver/plugin/config.go b/cmpserver/plugin/config.go new file mode 100644 index 0000000000000..faa718ff9fd2e --- /dev/null +++ b/cmpserver/plugin/config.go @@ -0,0 +1,105 @@ +package plugin + +import ( + "fmt" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + 
"github.com/argoproj/argo-cd/v2/common" + "github.com/argoproj/argo-cd/v2/reposerver/apiclient" + configUtil "github.com/argoproj/argo-cd/v2/util/config" +) + +const ( + ConfigManagementPluginKind string = "ConfigManagementPlugin" +) + +type PluginConfig struct { + metav1.TypeMeta `json:",inline"` + Metadata metav1.ObjectMeta `json:"metadata"` + Spec PluginConfigSpec `json:"spec"` +} + +type PluginConfigSpec struct { + Version string `json:"version"` + Init Command `json:"init,omitempty"` + Generate Command `json:"generate"` + Discover Discover `json:"discover"` + Parameters Parameters `yaml:"parameters"` + PreserveFileMode bool `json:"preserveFileMode,omitempty"` +} + +// Discover holds find and fileName +type Discover struct { + Find Find `json:"find"` + FileName string `json:"fileName"` +} + +func (d Discover) IsDefined() bool { + return d.FileName != "" || d.Find.Glob != "" || len(d.Find.Command.Command) > 0 +} + +// Command holds binary path and arguments list +type Command struct { + Command []string `json:"command,omitempty"` + Args []string `json:"args,omitempty"` +} + +// Find holds find command or glob pattern +type Find struct { + Command + Glob string `json:"glob"` +} + +// Parameters holds static and dynamic configurations +type Parameters struct { + Static []*apiclient.ParameterAnnouncement `yaml:"static"` + Dynamic Command `yaml:"dynamic"` +} + +// Dynamic hold the dynamic announcements for CMP's +type Dynamic struct { + Command +} + +func ReadPluginConfig(filePath string) (*PluginConfig, error) { + path := fmt.Sprintf("%s/%s", strings.TrimRight(filePath, "/"), common.PluginConfigFileName) + + var config PluginConfig + err := configUtil.UnmarshalLocalFile(path, &config) + if err != nil { + return nil, err + } + + if err = ValidatePluginConfig(config); err != nil { + return nil, err + } + + return &config, nil +} + +func ValidatePluginConfig(config PluginConfig) error { + if config.Metadata.Name == "" { + return fmt.Errorf("invalid plugin 
configuration file. metadata.name should be non-empty.") + } + if config.TypeMeta.Kind != ConfigManagementPluginKind { + return fmt.Errorf("invalid plugin configuration file. kind should be %s, found %s", ConfigManagementPluginKind, config.TypeMeta.Kind) + } + if len(config.Spec.Generate.Command) == 0 { + return fmt.Errorf("invalid plugin configuration file. spec.generate command should be non-empty") + } + // discovery field is optional as apps can now specify plugin names directly + return nil +} + +func (cfg *PluginConfig) Address() string { + var address string + pluginSockFilePath := common.GetPluginSockFilePath() + if cfg.Spec.Version != "" { + address = fmt.Sprintf("%s/%s-%s.sock", pluginSockFilePath, cfg.Metadata.Name, cfg.Spec.Version) + } else { + address = fmt.Sprintf("%s/%s.sock", pluginSockFilePath, cfg.Metadata.Name) + } + return address +} diff --git a/cmpserver/plugin/config_test.go b/cmpserver/plugin/config_test.go new file mode 100644 index 0000000000000..9e22dab1d3741 --- /dev/null +++ b/cmpserver/plugin/config_test.go @@ -0,0 +1,215 @@ +package plugin + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/argoproj/argo-cd/v2/common" +) + +func Test_IsDefined(t *testing.T) { + testCases := []struct { + name string + discover Discover + expected bool + }{ + { + name: "empty discover", + discover: Discover{}, + expected: false, + }, + { + name: "discover with find", + discover: Discover{ + Find: Find{ + Glob: "glob", + }, + }, + expected: true, + }, + { + name: "discover with fileName", + discover: Discover{ + FileName: "fileName", + }, + expected: true, + }, + { + name: "discover with empty command", + discover: Discover{ + Find: Find{ + Command: Command{ + Command: []string{}, + }, + }, + }, + expected: false, + }, + { + name: "discover with command", + discover: Discover{ + Find: Find{ + Command: Command{ 
+ Command: []string{"command"}, + }, + }, + }, + expected: true, + }, + } + + for _, tc := range testCases { + tcc := tc + t.Run(tcc.name, func(t *testing.T) { + t.Parallel() + + actual := tcc.discover.IsDefined() + assert.Equal(t, tcc.expected, actual) + }) + } +} + +func Test_ReadPluginConfig(t *testing.T) { + testCases := []struct { + name string + fileContents string + expected *PluginConfig + expectedErr string + }{ + { + name: "empty metadata", + fileContents: ` +metadata: +`, + expected: nil, + expectedErr: "invalid plugin configuration file. metadata.name should be non-empty.", + }, + { + name: "empty metadata name", + fileContents: ` +metadata: + name: "" +`, + expected: nil, + expectedErr: "invalid plugin configuration file. metadata.name should be non-empty.", + }, + { + name: "invalid kind", + fileContents: ` +kind: invalid +metadata: + name: name +`, + expected: nil, + expectedErr: "invalid plugin configuration file. kind should be ConfigManagementPlugin, found invalid", + }, + { + name: "empty generate command", + fileContents: ` +kind: ConfigManagementPlugin +metadata: + name: name +`, + expected: nil, + expectedErr: "invalid plugin configuration file. 
spec.generate command should be non-empty", + }, + { + name: "valid config", + fileContents: ` +kind: ConfigManagementPlugin +metadata: + name: name +spec: + generate: + command: [command] +`, + expected: &PluginConfig{ + TypeMeta: v1.TypeMeta{ + Kind: ConfigManagementPluginKind, + }, + Metadata: v1.ObjectMeta{ + Name: "name", + }, + Spec: PluginConfigSpec{ + Generate: Command{ + Command: []string{"command"}, + }, + }, + }, + }, + } + + for _, tc := range testCases { + tcc := tc + t.Run(tcc.name, func(t *testing.T) { + t.Parallel() + // write test string to temporary file + tempDir := t.TempDir() + tempFile, err := os.Create(filepath.Join(tempDir, "plugin.yaml")) + require.NoError(t, err) + err = tempFile.Close() + require.NoError(t, err) + err = os.WriteFile(tempFile.Name(), []byte(tcc.fileContents), 0644) + require.NoError(t, err) + config, err := ReadPluginConfig(tempDir) + if tcc.expectedErr != "" { + assert.EqualError(t, err, tcc.expectedErr) + } else { + assert.NoError(t, err) + } + assert.Equal(t, tcc.expected, config) + }) + } +} + +func Test_PluginConfig_Address(t *testing.T) { + testCases := []struct { + name string + config *PluginConfig + expected string + }{ + { + name: "no version specified", + config: &PluginConfig{ + TypeMeta: v1.TypeMeta{ + Kind: ConfigManagementPluginKind, + }, + Metadata: v1.ObjectMeta{ + Name: "name", + }, + }, + expected: "name", + }, + { + name: "version specified", + config: &PluginConfig{ + TypeMeta: v1.TypeMeta{ + Kind: ConfigManagementPluginKind, + }, + Metadata: v1.ObjectMeta{ + Name: "name", + }, + Spec: PluginConfigSpec{ + Version: "version", + }, + }, + expected: "name-version", + }, + } + + for _, tc := range testCases { + tcc := tc + t.Run(tcc.name, func(t *testing.T) { + t.Parallel() + actual := tcc.config.Address() + expectedAddress := fmt.Sprintf("%s/%s.sock", common.GetPluginSockFilePath(), tcc.expected) + assert.Equal(t, expectedAddress, actual) + }) + } +} diff --git a/cmpserver/plugin/plugin.go 
b/cmpserver/plugin/plugin.go new file mode 100644 index 0000000000000..f03b73f24dcf6 --- /dev/null +++ b/cmpserver/plugin/plugin.go @@ -0,0 +1,440 @@ +package plugin + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "time" + "unicode" + + "github.com/argoproj/pkg/rand" + + "github.com/argoproj/argo-cd/v2/cmpserver/apiclient" + "github.com/argoproj/argo-cd/v2/common" + repoclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient" + "github.com/argoproj/argo-cd/v2/util/buffered_context" + "github.com/argoproj/argo-cd/v2/util/cmp" + "github.com/argoproj/argo-cd/v2/util/io/files" + + "github.com/argoproj/gitops-engine/pkg/utils/kube" + "github.com/cyphar/filepath-securejoin" + "github.com/mattn/go-zglob" + log "github.com/sirupsen/logrus" +) + +// cmpTimeoutBuffer is the amount of time before the request deadline to timeout server-side work. It makes sure there's +// enough time before the client times out to send a meaningful error message. 
+const cmpTimeoutBuffer = 100 * time.Millisecond + +// Service implements ConfigManagementPluginService interface +type Service struct { + initConstants CMPServerInitConstants +} + +type CMPServerInitConstants struct { + PluginConfig PluginConfig +} + +// NewService returns a new instance of the ConfigManagementPluginService +func NewService(initConstants CMPServerInitConstants) *Service { + return &Service{ + initConstants: initConstants, + } +} + +func (s *Service) Init(workDir string) error { + err := os.RemoveAll(workDir) + if err != nil { + return fmt.Errorf("error removing workdir %q: %w", workDir, err) + } + err = os.MkdirAll(workDir, 0700) + if err != nil { + return fmt.Errorf("error creating workdir %q: %w", workDir, err) + } + return nil +} + +func runCommand(ctx context.Context, command Command, path string, env []string) (string, error) { + if len(command.Command) == 0 { + return "", fmt.Errorf("Command is empty") + } + cmd := exec.CommandContext(ctx, command.Command[0], append(command.Command[1:], command.Args...)...) + + cmd.Env = env + cmd.Dir = path + + execId, err := rand.RandString(5) + if err != nil { + return "", err + } + logCtx := log.WithFields(log.Fields{"execID": execId}) + + argsToLog := getCommandArgsToLog(cmd) + logCtx.WithFields(log.Fields{"dir": cmd.Dir}).Info(argsToLog) + + var stdout bytes.Buffer + var stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + // Make sure the command is killed immediately on timeout. https://stackoverflow.com/a/38133948/684776 + cmd.SysProcAttr = newSysProcAttr(true) + + start := time.Now() + err = cmd.Start() + if err != nil { + return "", err + } + + go func() { + <-ctx.Done() + // Kill by group ID to make sure child processes are killed. The - tells `kill` that it's a group ID. + // Since we didn't set Pgid in SysProcAttr, the group ID is the same as the process ID. 
https://pkg.go.dev/syscall#SysProcAttr + + // Sending a TERM signal first to allow any potential cleanup if needed, and then sending a KILL signal + _ = sysCallTerm(-cmd.Process.Pid) + + // modify cleanup timeout to allow process to cleanup + cleanupTimeout := 5 * time.Second + time.Sleep(cleanupTimeout) + + _ = sysCallKill(-cmd.Process.Pid) + }() + + err = cmd.Wait() + + duration := time.Since(start) + output := stdout.String() + + logCtx.WithFields(log.Fields{"duration": duration}).Debug(output) + + if err != nil { + err := newCmdError(argsToLog, errors.New(err.Error()), strings.TrimSpace(stderr.String())) + logCtx.Error(err.Error()) + return strings.TrimSuffix(output, "\n"), err + } + if len(output) == 0 { + log.WithFields(log.Fields{ + "stderr": stderr.String(), + "command": command, + }).Warn("Plugin command returned zero output") + } + + return strings.TrimSuffix(output, "\n"), nil +} + +// getCommandArgsToLog represents the given command in a way that we can copy-and-paste into a terminal +func getCommandArgsToLog(cmd *exec.Cmd) string { + var argsToLog []string + for _, arg := range cmd.Args { + containsSpace := false + for _, r := range arg { + if unicode.IsSpace(r) { + containsSpace = true + break + } + } + if containsSpace { + // add quotes and escape any internal quotes + argsToLog = append(argsToLog, strconv.Quote(arg)) + } else { + argsToLog = append(argsToLog, arg) + } + } + args := strings.Join(argsToLog, " ") + return args +} + +type CmdError struct { + Args string + Stderr string + Cause error +} + +func (ce *CmdError) Error() string { + res := fmt.Sprintf("`%v` failed %v", ce.Args, ce.Cause) + if ce.Stderr != "" { + res = fmt.Sprintf("%s: %s", res, ce.Stderr) + } + return res +} + +func newCmdError(args string, cause error, stderr string) *CmdError { + return &CmdError{Args: args, Stderr: stderr, Cause: cause} +} + +// Environ returns a list of environment variables in name=value format from a list of variables +func environ(envVars 
[]*apiclient.EnvEntry) []string { + var environ []string + for _, item := range envVars { + if item != nil && item.Name != "" && item.Value != "" { + environ = append(environ, fmt.Sprintf("%s=%s", item.Name, item.Value)) + } + } + return environ +} + +// getTempDirMustCleanup creates a temporary directory and returns a cleanup function. +func getTempDirMustCleanup(baseDir string) (workDir string, cleanup func(), err error) { + workDir, err = files.CreateTempDir(baseDir) + if err != nil { + return "", nil, fmt.Errorf("error creating temp dir: %w", err) + } + cleanup = func() { + if err := os.RemoveAll(workDir); err != nil { + log.WithFields(map[string]interface{}{ + common.SecurityField: common.SecurityHigh, + common.SecurityCWEField: common.SecurityCWEIncompleteCleanup, + }).Errorf("Failed to clean up temp directory: %s", err) + } + } + return workDir, cleanup, nil +} + +type Stream interface { + Recv() (*apiclient.AppStreamRequest, error) + Context() context.Context +} + +type GenerateManifestStream interface { + Stream + SendAndClose(response *apiclient.ManifestResponse) error +} + +// GenerateManifest runs generate command from plugin config file and returns generated manifest files +func (s *Service) GenerateManifest(stream apiclient.ConfigManagementPluginService_GenerateManifestServer) error { + return s.generateManifestGeneric(stream) +} + +func (s *Service) generateManifestGeneric(stream GenerateManifestStream) error { + ctx, cancel := buffered_context.WithEarlierDeadline(stream.Context(), cmpTimeoutBuffer) + defer cancel() + workDir, cleanup, err := getTempDirMustCleanup(common.GetCMPWorkDir()) + if err != nil { + return fmt.Errorf("error creating workdir for manifest generation: %w", err) + } + defer cleanup() + + metadata, err := cmp.ReceiveRepoStream(ctx, stream, workDir, s.initConstants.PluginConfig.Spec.PreserveFileMode) + if err != nil { + return fmt.Errorf("generate manifest error receiving stream: %w", err) + } + + appPath := 
filepath.Clean(filepath.Join(workDir, metadata.AppRelPath)) + if !strings.HasPrefix(appPath, workDir) { + return fmt.Errorf("illegal appPath: out of workDir bound") + } + response, err := s.generateManifest(ctx, appPath, metadata.GetEnv()) + if err != nil { + return fmt.Errorf("error generating manifests: %w", err) + } + err = stream.SendAndClose(response) + if err != nil { + return fmt.Errorf("error sending manifest response: %w", err) + } + return nil +} + +// generateManifest runs generate command from plugin config file and returns generated manifest files +func (s *Service) generateManifest(ctx context.Context, appDir string, envEntries []*apiclient.EnvEntry) (*apiclient.ManifestResponse, error) { + if deadline, ok := ctx.Deadline(); ok { + log.Infof("Generating manifests with deadline %v from now", time.Until(deadline)) + } else { + log.Info("Generating manifests with no request-level timeout") + } + + config := s.initConstants.PluginConfig + + env := append(os.Environ(), environ(envEntries)...) + if len(config.Spec.Init.Command) > 0 { + _, err := runCommand(ctx, config.Spec.Init, appDir, env) + if err != nil { + return &apiclient.ManifestResponse{}, err + } + } + + out, err := runCommand(ctx, config.Spec.Generate, appDir, env) + if err != nil { + return &apiclient.ManifestResponse{}, err + } + + manifests, err := kube.SplitYAMLToString([]byte(out)) + if err != nil { + sanitizedManifests := manifests + if len(sanitizedManifests) > 1000 { + sanitizedManifests = manifests[:1000] + } + log.Debugf("Failed to split generated manifests. 
Beginning of generated manifests: %q", sanitizedManifests) + return &apiclient.ManifestResponse{}, err + } + + return &apiclient.ManifestResponse{ + Manifests: manifests, + }, err +} + +type MatchRepositoryStream interface { + Stream + SendAndClose(response *apiclient.RepositoryResponse) error +} + +// MatchRepository receives the application stream and checks whether +// its repository type is supported by the config management plugin +// server. +// The checks are implemented in the following order: +// 1. If spec.Discover.FileName is provided it finds for a name match in Applications files +// 2. If spec.Discover.Find.Glob is provided if finds for a glob match in Applications files +// 3. Otherwise it runs the spec.Discover.Find.Command +func (s *Service) MatchRepository(stream apiclient.ConfigManagementPluginService_MatchRepositoryServer) error { + return s.matchRepositoryGeneric(stream) +} + +func (s *Service) matchRepositoryGeneric(stream MatchRepositoryStream) error { + bufferedCtx, cancel := buffered_context.WithEarlierDeadline(stream.Context(), cmpTimeoutBuffer) + defer cancel() + + workDir, cleanup, err := getTempDirMustCleanup(common.GetCMPWorkDir()) + if err != nil { + return fmt.Errorf("error creating workdir for repository matching: %w", err) + } + defer cleanup() + + metadata, err := cmp.ReceiveRepoStream(bufferedCtx, stream, workDir, s.initConstants.PluginConfig.Spec.PreserveFileMode) + if err != nil { + return fmt.Errorf("match repository error receiving stream: %w", err) + } + + isSupported, isDiscoveryEnabled, err := s.matchRepository(bufferedCtx, workDir, metadata.GetEnv(), metadata.GetAppRelPath()) + if err != nil { + return fmt.Errorf("match repository error: %w", err) + } + repoResponse := &apiclient.RepositoryResponse{IsSupported: isSupported, IsDiscoveryEnabled: isDiscoveryEnabled} + + err = stream.SendAndClose(repoResponse) + if err != nil { + return fmt.Errorf("error sending match repository response: %w", err) + } + return nil +} + +func 
(s *Service) matchRepository(ctx context.Context, workdir string, envEntries []*apiclient.EnvEntry, appRelPath string) (isSupported bool, isDiscoveryEnabled bool, err error) { + config := s.initConstants.PluginConfig + + appPath, err := securejoin.SecureJoin(workdir, appRelPath) + if err != nil { + log.WithFields(map[string]interface{}{ + common.SecurityField: common.SecurityHigh, + common.SecurityCWEField: common.SecurityCWEIncompleteCleanup, + }).Errorf("error joining workdir %q and appRelPath %q: %v", workdir, appRelPath, err) + } + + if config.Spec.Discover.FileName != "" { + log.Debugf("config.Spec.Discover.FileName is provided") + pattern := filepath.Join(appPath, config.Spec.Discover.FileName) + matches, err := filepath.Glob(pattern) + if err != nil { + e := fmt.Errorf("error finding filename match for pattern %q: %w", pattern, err) + log.Debug(e) + return false, true, e + } + return len(matches) > 0, true, nil + } + + if config.Spec.Discover.Find.Glob != "" { + log.Debugf("config.Spec.Discover.Find.Glob is provided") + pattern := filepath.Join(appPath, config.Spec.Discover.Find.Glob) + // filepath.Glob doesn't have '**' support hence selecting third-party lib + // https://github.com/golang/go/issues/11862 + matches, err := zglob.Glob(pattern) + if err != nil { + e := fmt.Errorf("error finding glob match for pattern %q: %w", pattern, err) + log.Debug(e) + return false, true, e + } + + return len(matches) > 0, true, nil + } + + if len(config.Spec.Discover.Find.Command.Command) > 0 { + log.Debugf("Going to try runCommand.") + env := append(os.Environ(), environ(envEntries)...) + find, err := runCommand(ctx, config.Spec.Discover.Find.Command, appPath, env) + if err != nil { + return false, true, fmt.Errorf("error running find command: %w", err) + } + return find != "", true, nil + } + + return false, false, nil +} + +// ParametersAnnouncementStream defines an interface able to send/receive a stream of parameter announcements. 
+type ParametersAnnouncementStream interface { + Stream + SendAndClose(response *apiclient.ParametersAnnouncementResponse) error +} + +// GetParametersAnnouncement gets parameter announcements for a given Application and repo contents. +func (s *Service) GetParametersAnnouncement(stream apiclient.ConfigManagementPluginService_GetParametersAnnouncementServer) error { + bufferedCtx, cancel := buffered_context.WithEarlierDeadline(stream.Context(), cmpTimeoutBuffer) + defer cancel() + + workDir, cleanup, err := getTempDirMustCleanup(common.GetCMPWorkDir()) + if err != nil { + return fmt.Errorf("error creating workdir for generating parameter announcements: %w", err) + } + defer cleanup() + + metadata, err := cmp.ReceiveRepoStream(bufferedCtx, stream, workDir, s.initConstants.PluginConfig.Spec.PreserveFileMode) + if err != nil { + return fmt.Errorf("parameters announcement error receiving stream: %w", err) + } + appPath := filepath.Clean(filepath.Join(workDir, metadata.AppRelPath)) + if !strings.HasPrefix(appPath, workDir) { + return fmt.Errorf("illegal appPath: out of workDir bound") + } + + repoResponse, err := getParametersAnnouncement(bufferedCtx, appPath, s.initConstants.PluginConfig.Spec.Parameters.Static, s.initConstants.PluginConfig.Spec.Parameters.Dynamic, metadata.GetEnv()) + if err != nil { + return fmt.Errorf("get parameters announcement error: %w", err) + } + + err = stream.SendAndClose(repoResponse) + if err != nil { + return fmt.Errorf("error sending parameters announcement response: %w", err) + } + return nil +} + +func getParametersAnnouncement(ctx context.Context, appDir string, announcements []*repoclient.ParameterAnnouncement, command Command, envEntries []*apiclient.EnvEntry) (*apiclient.ParametersAnnouncementResponse, error) { + augmentedAnnouncements := announcements + + if len(command.Command) > 0 { + env := append(os.Environ(), environ(envEntries)...) 
+ stdout, err := runCommand(ctx, command, appDir, env) + if err != nil { + return nil, fmt.Errorf("error executing dynamic parameter output command: %w", err) + } + + var dynamicParamAnnouncements []*repoclient.ParameterAnnouncement + err = json.Unmarshal([]byte(stdout), &dynamicParamAnnouncements) + if err != nil { + return nil, fmt.Errorf("error unmarshaling dynamic parameter output into ParametersAnnouncementResponse: %w", err) + } + + // dynamic goes first, because static should take precedence by being later. + augmentedAnnouncements = append(dynamicParamAnnouncements, announcements...) + } + + repoResponse := &apiclient.ParametersAnnouncementResponse{ + ParameterAnnouncements: augmentedAnnouncements, + } + return repoResponse, nil +} diff --git a/cmpserver/plugin/plugin.proto b/cmpserver/plugin/plugin.proto new file mode 100644 index 0000000000000..16d4268d5939f --- /dev/null +++ b/cmpserver/plugin/plugin.proto @@ -0,0 +1,74 @@ +syntax = "proto3"; +option go_package = "github.com/argoproj/argo-cd/v2/cmpserver/apiclient"; + +package plugin; + +import "github.com/argoproj/argo-cd/v2/reposerver/repository/repository.proto"; + +// AppStreamRequest is the request object used to send the application's +// files over a stream. +message AppStreamRequest { + oneof request { + ManifestRequestMetadata metadata = 1; + File file = 2; + } +} + +// ManifestRequestMetadata defines the metadata related to the file being sent +// to the CMP server. 
+message ManifestRequestMetadata { + // appName refers to the ArgoCD Application name + string appName = 1; + // appRelPath points to the application relative path inside the tarball + string appRelPath = 2; + // checksum is used to verify the integrity of the file + string checksum = 3; + // size relates to the file size in bytes + int64 size = 4; + // env is a list with the environment variables needed to generate manifests + repeated EnvEntry env = 5; +} + +// EnvEntry represents an entry in the application's environment +message EnvEntry { + // Name is the name of the variable, usually expressed in uppercase + string name = 1; + // Value is the value of the variable + string value = 2; +} + +message ManifestResponse { + repeated string manifests = 1; + string sourceType = 2; +} + +message RepositoryResponse { + bool isSupported = 1; + bool isDiscoveryEnabled = 2; +} + +// ParametersAnnouncementResponse contains a list of announcements. This list represents all the parameters which a CMP +// is able to accept. 
+message ParametersAnnouncementResponse { + repeated repository.ParameterAnnouncement parameterAnnouncements = 1; +} + +message File { + bytes chunk = 1; +} + +// ConfigManagementPlugin Service +service ConfigManagementPluginService { + // GenerateManifests receive a stream containing a tgz archive with all required files necessary + // to generate manifests + rpc GenerateManifest(stream AppStreamRequest) returns (ManifestResponse) { + } + + // MatchRepository returns whether or not the given application is supported by the plugin + rpc MatchRepository(stream AppStreamRequest) returns (RepositoryResponse) { + } + + // GetParametersAnnouncement gets a list of parameter announcements for the given app + rpc GetParametersAnnouncement(stream AppStreamRequest) returns (ParametersAnnouncementResponse) { + } +} diff --git a/cmpserver/plugin/plugin_test.go b/cmpserver/plugin/plugin_test.go new file mode 100644 index 0000000000000..b253dc414cbdc --- /dev/null +++ b/cmpserver/plugin/plugin_test.go @@ -0,0 +1,807 @@ +package plugin + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "os/exec" + "path" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/metadata" + "gopkg.in/yaml.v2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/argoproj/argo-cd/v2/cmpserver/apiclient" + repoclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient" + "github.com/argoproj/argo-cd/v2/test" + "github.com/argoproj/argo-cd/v2/util/cmp" + "github.com/argoproj/argo-cd/v2/util/tgzstream" +) + +func newService(configFilePath string) (*Service, error) { + config, err := ReadPluginConfig(configFilePath) + if err != nil { + return nil, err + } + + initConstants := CMPServerInitConstants{ + PluginConfig: *config, + } + + service := &Service{ + initConstants: initConstants, + } + return service, nil +} + +func (s *Service) WithGenerateCommand(command Command) *Service { + 
s.initConstants.PluginConfig.Spec.Generate = command + return s +} + +type pluginOpt func(*CMPServerInitConstants) + +func withDiscover(d Discover) pluginOpt { + return func(cic *CMPServerInitConstants) { + cic.PluginConfig.Spec.Discover = d + } +} + +func buildPluginConfig(opts ...pluginOpt) *CMPServerInitConstants { + cic := &CMPServerInitConstants{ + PluginConfig: PluginConfig{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigManagementPlugin", + APIVersion: "argoproj.io/v1alpha1", + }, + Metadata: metav1.ObjectMeta{ + Name: "some-plugin", + }, + Spec: PluginConfigSpec{ + Version: "v1.0", + }, + }, + } + for _, opt := range opts { + opt(cic) + } + return cic +} + +func TestMatchRepository(t *testing.T) { + type fixture struct { + service *Service + path string + env []*apiclient.EnvEntry + } + setup := func(t *testing.T, opts ...pluginOpt) *fixture { + t.Helper() + cic := buildPluginConfig(opts...) + path := filepath.Join(test.GetTestDir(t), "testdata", "kustomize") + s := NewService(*cic) + return &fixture{ + service: s, + path: path, + env: []*apiclient.EnvEntry{{Name: "ENV_VAR", Value: "1"}}, + } + } + t.Run("will match plugin by filename", func(t *testing.T) { + // given + d := Discover{ + FileName: "kustomization.yaml", + } + f := setup(t, withDiscover(d)) + + // when + match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env, ".") + + // then + assert.NoError(t, err) + assert.True(t, match) + assert.True(t, discovery) + }) + t.Run("will not match plugin by filename if file not found", func(t *testing.T) { + // given + d := Discover{ + FileName: "not_found.yaml", + } + f := setup(t, withDiscover(d)) + + // when + match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env, ".") + + // then + assert.NoError(t, err) + assert.False(t, match) + assert.True(t, discovery) + }) + t.Run("will not match a pattern with a syntax error", func(t *testing.T) { + // given + d := Discover{ + FileName: "[", + } + f := 
setup(t, withDiscover(d)) + + // when + _, _, err := f.service.matchRepository(context.Background(), f.path, f.env, ".") + + // then + assert.ErrorContains(t, err, "syntax error") + }) + t.Run("will match plugin by glob", func(t *testing.T) { + // given + d := Discover{ + Find: Find{ + Glob: "**/*/plugin.yaml", + }, + } + f := setup(t, withDiscover(d)) + + // when + match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env, ".") + + // then + assert.NoError(t, err) + assert.True(t, match) + assert.True(t, discovery) + }) + t.Run("will not match plugin by glob if not found", func(t *testing.T) { + // given + d := Discover{ + Find: Find{ + Glob: "**/*/not_found.yaml", + }, + } + f := setup(t, withDiscover(d)) + + // when + match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env, ".") + + // then + assert.NoError(t, err) + assert.False(t, match) + assert.True(t, discovery) + }) + t.Run("will throw an error for a bad pattern", func(t *testing.T) { + // given + d := Discover{ + Find: Find{ + Glob: "does-not-exist", + }, + } + f := setup(t, withDiscover(d)) + + // when + _, _, err := f.service.matchRepository(context.Background(), f.path, f.env, ".") + + // then + assert.ErrorContains(t, err, "error finding glob match for pattern") + }) + t.Run("will match plugin by command when returns any output", func(t *testing.T) { + // given + d := Discover{ + Find: Find{ + Command: Command{ + Command: []string{"echo", "test"}, + }, + }, + } + f := setup(t, withDiscover(d)) + + // when + match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env, ".") + + // then + assert.NoError(t, err) + assert.True(t, match) + assert.True(t, discovery) + }) + t.Run("will not match plugin by command when returns no output", func(t *testing.T) { + // given + d := Discover{ + Find: Find{ + Command: Command{ + Command: []string{"echo"}, + }, + }, + } + f := setup(t, withDiscover(d)) + + // when + match, 
discovery, err := f.service.matchRepository(context.Background(), f.path, f.env, ".") + // then + assert.NoError(t, err) + assert.False(t, match) + assert.True(t, discovery) + }) + t.Run("will match plugin because env var defined", func(t *testing.T) { + // given + d := Discover{ + Find: Find{ + Command: Command{ + Command: []string{"sh", "-c", "echo -n $ENV_VAR"}, + }, + }, + } + f := setup(t, withDiscover(d)) + + // when + match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env, ".") + + // then + assert.NoError(t, err) + assert.True(t, match) + assert.True(t, discovery) + }) + t.Run("will not match plugin because no env var defined", func(t *testing.T) { + // given + d := Discover{ + Find: Find{ + Command: Command{ + // Use printf instead of echo since OSX prints the "-n" when there's no additional arg. + Command: []string{"sh", "-c", `printf "%s" "$ENV_NO_VAR"`}, + }, + }, + } + f := setup(t, withDiscover(d)) + + // when + match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env, ".") + + // then + assert.NoError(t, err) + assert.False(t, match) + assert.True(t, discovery) + }) + t.Run("will not match plugin by command when command fails", func(t *testing.T) { + // given + d := Discover{ + Find: Find{ + Command: Command{ + Command: []string{"cat", "nil"}, + }, + }, + } + f := setup(t, withDiscover(d)) + + // when + match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env, ".") + + // then + assert.Error(t, err) + assert.False(t, match) + assert.True(t, discovery) + }) + t.Run("will not match plugin as discovery is not set", func(t *testing.T) { + // given + d := Discover{} + f := setup(t, withDiscover(d)) + + // when + match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env, ".") + + // then + assert.NoError(t, err) + assert.False(t, match) + assert.False(t, discovery) + }) +} + +func Test_Negative_ConfigFile_DoesnotExist(t *testing.T) { + 
configFilePath := "./testdata/kustomize-neg/config" + service, err := newService(configFilePath) + require.Error(t, err) + require.Nil(t, service) +} + +func TestGenerateManifest(t *testing.T) { + configFilePath := "./testdata/kustomize/config" + + t.Run("successful generate", func(t *testing.T) { + service, err := newService(configFilePath) + require.NoError(t, err) + + res1, err := service.generateManifest(context.Background(), "testdata/kustomize", nil) + require.NoError(t, err) + require.NotNil(t, res1) + + expectedOutput := "{\"apiVersion\":\"v1\",\"data\":{\"foo\":\"bar\"},\"kind\":\"ConfigMap\",\"metadata\":{\"name\":\"my-map\"}}" + if res1 != nil { + require.Equal(t, expectedOutput, res1.Manifests[0]) + } + }) + t.Run("bad generate command", func(t *testing.T) { + service, err := newService(configFilePath) + require.NoError(t, err) + service.WithGenerateCommand(Command{Command: []string{"bad-command"}}) + + res, err := service.generateManifest(context.Background(), "testdata/kustomize", nil) + assert.ErrorContains(t, err, "executable file not found") + assert.Nil(t, res.Manifests) + }) + t.Run("bad yaml output", func(t *testing.T) { + service, err := newService(configFilePath) + require.NoError(t, err) + service.WithGenerateCommand(Command{Command: []string{"echo", "invalid yaml: }"}}) + + res, err := service.generateManifest(context.Background(), "testdata/kustomize", nil) + assert.ErrorContains(t, err, "failed to unmarshal manifest") + assert.Nil(t, res.Manifests) + }) +} + +func TestGenerateManifest_deadline_exceeded(t *testing.T) { + configFilePath := "./testdata/kustomize/config" + service, err := newService(configFilePath) + require.NoError(t, err) + + expiredCtx, cancel := context.WithTimeout(context.Background(), time.Second*0) + defer cancel() + _, err = service.generateManifest(expiredCtx, "", nil) + assert.ErrorContains(t, err, "context deadline exceeded") +} + +// TestRunCommandContextTimeout makes sure the command dies at timeout rather than 
sleeping past the timeout. +func TestRunCommandContextTimeout(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 990*time.Millisecond) + defer cancel() + // Use a subshell so there's a child command. + command := Command{ + Command: []string{"sh", "-c"}, + Args: []string{"sleep 5"}, + } + before := time.Now() + _, err := runCommand(ctx, command, "", []string{}) + after := time.Now() + assert.Error(t, err) // The command should time out, causing an error. + assert.Less(t, after.Sub(before), 1*time.Second) +} + +func TestRunCommandEmptyCommand(t *testing.T) { + _, err := runCommand(context.Background(), Command{}, "", nil) + assert.ErrorContains(t, err, "Command is empty") +} + +// TestRunCommandContextTimeoutWithGracefulTermination makes sure that the process is given enough time to cleanup before sending SIGKILL. +func TestRunCommandContextTimeoutWithCleanup(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 900*time.Millisecond) + defer cancel() + + // Use a subshell so there's a child command. + // This command sleeps for 4 seconds which is currently less than the 5 second delay between SIGTERM and SIGKILL signal and then exits successfully. + command := Command{ + Command: []string{"sh", "-c"}, + Args: []string{`(trap 'echo "cleanup completed"; exit' TERM; sleep 4)`}, + } + + before := time.Now() + output, err := runCommand(ctx, command, "", []string{}) + after := time.Now() + + assert.Error(t, err) // The command should time out, causing an error. + assert.Less(t, after.Sub(before), 1*time.Second) + // The command should still have completed the cleanup after termination. 
+ assert.Contains(t, output, "cleanup completed") +} + +func Test_getParametersAnnouncement_empty_command(t *testing.T) { + staticYAML := ` +- name: static-a +- name: static-b +` + static := &[]*repoclient.ParameterAnnouncement{} + err := yaml.Unmarshal([]byte(staticYAML), static) + require.NoError(t, err) + command := Command{ + Command: []string{"echo"}, + Args: []string{`[]`}, + } + res, err := getParametersAnnouncement(context.Background(), "", *static, command, []*apiclient.EnvEntry{}) + require.NoError(t, err) + assert.Equal(t, []*repoclient.ParameterAnnouncement{{Name: "static-a"}, {Name: "static-b"}}, res.ParameterAnnouncements) +} + +func Test_getParametersAnnouncement_no_command(t *testing.T) { + staticYAML := ` +- name: static-a +- name: static-b +` + static := &[]*repoclient.ParameterAnnouncement{} + err := yaml.Unmarshal([]byte(staticYAML), static) + require.NoError(t, err) + command := Command{} + res, err := getParametersAnnouncement(context.Background(), "", *static, command, []*apiclient.EnvEntry{}) + require.NoError(t, err) + assert.Equal(t, []*repoclient.ParameterAnnouncement{{Name: "static-a"}, {Name: "static-b"}}, res.ParameterAnnouncements) +} + +func Test_getParametersAnnouncement_static_and_dynamic(t *testing.T) { + staticYAML := ` +- name: static-a +- name: static-b +` + static := &[]*repoclient.ParameterAnnouncement{} + err := yaml.Unmarshal([]byte(staticYAML), static) + require.NoError(t, err) + command := Command{ + Command: []string{"echo"}, + Args: []string{`[{"name": "dynamic-a"}, {"name": "dynamic-b"}]`}, + } + res, err := getParametersAnnouncement(context.Background(), "", *static, command, []*apiclient.EnvEntry{}) + require.NoError(t, err) + expected := []*repoclient.ParameterAnnouncement{ + {Name: "dynamic-a"}, + {Name: "dynamic-b"}, + {Name: "static-a"}, + {Name: "static-b"}, + } + assert.Equal(t, expected, res.ParameterAnnouncements) +} + +func Test_getParametersAnnouncement_invalid_json(t *testing.T) { + command := Command{ + 
Command: []string{"echo"}, + Args: []string{`[`}, + } + _, err := getParametersAnnouncement(context.Background(), "", []*repoclient.ParameterAnnouncement{}, command, []*apiclient.EnvEntry{}) + assert.Error(t, err) + assert.Contains(t, err.Error(), "unexpected end of JSON input") +} + +func Test_getParametersAnnouncement_bad_command(t *testing.T) { + command := Command{ + Command: []string{"exit"}, + Args: []string{"1"}, + } + _, err := getParametersAnnouncement(context.Background(), "", []*repoclient.ParameterAnnouncement{}, command, []*apiclient.EnvEntry{}) + assert.Error(t, err) + assert.Contains(t, err.Error(), "error executing dynamic parameter output command") +} + +func Test_getTempDirMustCleanup(t *testing.T) { + tempDir := t.TempDir() + + // Induce a directory create error to verify error handling. + err := os.Chmod(tempDir, 0000) + require.NoError(t, err) + _, _, err = getTempDirMustCleanup(path.Join(tempDir, "test")) + assert.ErrorContains(t, err, "error creating temp dir") + + err = os.Chmod(tempDir, 0700) + require.NoError(t, err) + workDir, cleanup, err := getTempDirMustCleanup(tempDir) + require.NoError(t, err) + require.DirExists(t, workDir) + cleanup() + assert.NoDirExists(t, workDir) +} + +func TestService_Init(t *testing.T) { + // Set up a base directory containing a test directory and a test file. + tempDir := t.TempDir() + workDir := path.Join(tempDir, "workDir") + err := os.MkdirAll(workDir, 0700) + require.NoError(t, err) + testfile := path.Join(workDir, "testfile") + file, err := os.Create(testfile) + require.NoError(t, err) + err = file.Close() + require.NoError(t, err) + + // Make the base directory read-only so Init's cleanup fails. 
+ err = os.Chmod(tempDir, 0000) + require.NoError(t, err) + s := NewService(CMPServerInitConstants{PluginConfig: PluginConfig{}}) + err = s.Init(workDir) + assert.ErrorContains(t, err, "error removing workdir", "Init must throw an error if it can't remove the work directory") + + // Make the base directory writable so Init's cleanup succeeds. + err = os.Chmod(tempDir, 0700) + require.NoError(t, err) + err = s.Init(workDir) + assert.NoError(t, err) + assert.DirExists(t, workDir) + assert.NoFileExists(t, testfile) +} + +func TestEnviron(t *testing.T) { + t.Run("empty environ", func(t *testing.T) { + env := environ([]*apiclient.EnvEntry{}) + assert.Nil(t, env) + }) + t.Run("env vars with empty names or values", func(t *testing.T) { + env := environ([]*apiclient.EnvEntry{ + {Value: "test"}, + {Name: "test"}, + }) + assert.Nil(t, env) + }) + t.Run("proper env vars", func(t *testing.T) { + env := environ([]*apiclient.EnvEntry{ + {Name: "name1", Value: "value1"}, + {Name: "name2", Value: "value2"}, + }) + assert.Equal(t, []string{"name1=value1", "name2=value2"}, env) + }) +} + +type MockGenerateManifestStream struct { + metadataSent bool + fileSent bool + metadataRequest *apiclient.AppStreamRequest + fileRequest *apiclient.AppStreamRequest + response *apiclient.ManifestResponse +} + +func NewMockGenerateManifestStream(repoPath, appPath string, env []string) (*MockGenerateManifestStream, error) { + tgz, mr, err := cmp.GetCompressedRepoAndMetadata(repoPath, appPath, env, nil, nil) + if err != nil { + return nil, err + } + defer tgzstream.CloseAndDelete(tgz) + + tgzBuffer := bytes.NewBuffer(nil) + _, err = io.Copy(tgzBuffer, tgz) + if err != nil { + return nil, fmt.Errorf("failed to copy manifest targz to a byte buffer: %w", err) + } + + return &MockGenerateManifestStream{ + metadataRequest: mr, + fileRequest: cmp.AppFileRequest(tgzBuffer.Bytes()), + }, nil +} + +func (m *MockGenerateManifestStream) SendAndClose(response *apiclient.ManifestResponse) error { + m.response = 
response + return nil +} + +func (m *MockGenerateManifestStream) Recv() (*apiclient.AppStreamRequest, error) { + if !m.metadataSent { + m.metadataSent = true + return m.metadataRequest, nil + } + + if !m.fileSent { + m.fileSent = true + return m.fileRequest, nil + } + return nil, io.EOF +} + +func (m *MockGenerateManifestStream) Context() context.Context { + return context.Background() +} + +func TestService_GenerateManifest(t *testing.T) { + configFilePath := "./testdata/kustomize/config" + service, err := newService(configFilePath) + require.NoError(t, err) + + t.Run("successful generate", func(t *testing.T) { + s, err := NewMockGenerateManifestStream("./testdata/kustomize", "./testdata/kustomize", nil) + require.NoError(t, err) + err = service.generateManifestGeneric(s) + require.NoError(t, err) + require.NotNil(t, s.response) + assert.Equal(t, []string{"{\"apiVersion\":\"v1\",\"data\":{\"foo\":\"bar\"},\"kind\":\"ConfigMap\",\"metadata\":{\"name\":\"my-map\"}}"}, s.response.Manifests) + }) + + t.Run("out-of-bounds app path", func(t *testing.T) { + s, err := NewMockGenerateManifestStream("./testdata/kustomize", "./testdata/kustomize", nil) + require.NoError(t, err) + // set a malicious app path on the metadata + s.metadataRequest.Request.(*apiclient.AppStreamRequest_Metadata).Metadata.AppRelPath = "../out-of-bounds" + err = service.generateManifestGeneric(s) + require.ErrorContains(t, err, "illegal appPath") + assert.Nil(t, s.response) + }) +} + +type MockMatchRepositoryStream struct { + metadataSent bool + fileSent bool + metadataRequest *apiclient.AppStreamRequest + fileRequest *apiclient.AppStreamRequest + response *apiclient.RepositoryResponse +} + +func NewMockMatchRepositoryStream(repoPath, appPath string, env []string) (*MockMatchRepositoryStream, error) { + tgz, mr, err := cmp.GetCompressedRepoAndMetadata(repoPath, appPath, env, nil, nil) + if err != nil { + return nil, err + } + defer tgzstream.CloseAndDelete(tgz) + + tgzBuffer := bytes.NewBuffer(nil) + 
_, err = io.Copy(tgzBuffer, tgz) + if err != nil { + return nil, fmt.Errorf("failed to copy manifest targz to a byte buffer: %w", err) + } + + return &MockMatchRepositoryStream{ + metadataRequest: mr, + fileRequest: cmp.AppFileRequest(tgzBuffer.Bytes()), + }, nil +} + +func (m *MockMatchRepositoryStream) SendAndClose(response *apiclient.RepositoryResponse) error { + m.response = response + return nil +} + +func (m *MockMatchRepositoryStream) Recv() (*apiclient.AppStreamRequest, error) { + if !m.metadataSent { + m.metadataSent = true + return m.metadataRequest, nil + } + + if !m.fileSent { + m.fileSent = true + return m.fileRequest, nil + } + return nil, io.EOF +} + +func (m *MockMatchRepositoryStream) Context() context.Context { + return context.Background() +} + +func TestService_MatchRepository(t *testing.T) { + configFilePath := "./testdata/kustomize/config" + service, err := newService(configFilePath) + require.NoError(t, err) + + t.Run("supported app", func(t *testing.T) { + s, err := NewMockMatchRepositoryStream("./testdata/kustomize", "./testdata/kustomize", nil) + require.NoError(t, err) + err = service.matchRepositoryGeneric(s) + require.NoError(t, err) + require.NotNil(t, s.response) + assert.True(t, s.response.IsSupported) + }) + + t.Run("unsupported app", func(t *testing.T) { + s, err := NewMockMatchRepositoryStream("./testdata/ksonnet", "./testdata/ksonnet", nil) + require.NoError(t, err) + err = service.matchRepositoryGeneric(s) + require.NoError(t, err) + require.NotNil(t, s.response) + assert.False(t, s.response.IsSupported) + }) +} + +type MockParametersAnnouncementStream struct { + metadataSent bool + fileSent bool + metadataRequest *apiclient.AppStreamRequest + fileRequest *apiclient.AppStreamRequest + response *apiclient.ParametersAnnouncementResponse +} + +func NewMockParametersAnnouncementStream(repoPath, appPath string, env []string) (*MockParametersAnnouncementStream, error) { + tgz, mr, err := cmp.GetCompressedRepoAndMetadata(repoPath, 
appPath, env, nil, nil) + if err != nil { + return nil, err + } + defer tgzstream.CloseAndDelete(tgz) + + tgzBuffer := bytes.NewBuffer(nil) + _, err = io.Copy(tgzBuffer, tgz) + if err != nil { + return nil, fmt.Errorf("failed to copy manifest targz to a byte buffer: %w", err) + } + + return &MockParametersAnnouncementStream{ + metadataRequest: mr, + fileRequest: cmp.AppFileRequest(tgzBuffer.Bytes()), + }, nil +} + +func (m *MockParametersAnnouncementStream) SendAndClose(response *apiclient.ParametersAnnouncementResponse) error { + m.response = response + return nil +} + +func (m *MockParametersAnnouncementStream) Recv() (*apiclient.AppStreamRequest, error) { + if !m.metadataSent { + m.metadataSent = true + return m.metadataRequest, nil + } + + if !m.fileSent { + m.fileSent = true + return m.fileRequest, nil + } + return nil, io.EOF +} + +func (m *MockParametersAnnouncementStream) SetHeader(metadata.MD) error { + return nil +} + +func (m *MockParametersAnnouncementStream) SendHeader(metadata.MD) error { + return nil +} + +func (m *MockParametersAnnouncementStream) SetTrailer(metadata.MD) {} + +func (m *MockParametersAnnouncementStream) Context() context.Context { + return context.Background() +} + +func (m *MockParametersAnnouncementStream) SendMsg(interface{}) error { + return nil +} + +func (m *MockParametersAnnouncementStream) RecvMsg(interface{}) error { + return nil +} + +func TestService_GetParametersAnnouncement(t *testing.T) { + configFilePath := "./testdata/kustomize/config" + service, err := newService(configFilePath) + require.NoError(t, err) + + t.Run("successful response", func(t *testing.T) { + s, err := NewMockParametersAnnouncementStream("./testdata/kustomize", "./testdata/kustomize", []string{"MUST_BE_SET=yep"}) + require.NoError(t, err) + err = service.GetParametersAnnouncement(s) + require.NoError(t, err) + require.NotNil(t, s.response) + require.Len(t, s.response.ParameterAnnouncements, 2) + assert.Equal(t, repoclient.ParameterAnnouncement{Name: 
"dynamic-test-param", String_: "yep"}, *s.response.ParameterAnnouncements[0]) + assert.Equal(t, repoclient.ParameterAnnouncement{Name: "test-param", String_: "test-value"}, *s.response.ParameterAnnouncements[1]) + }) + t.Run("out of bounds app", func(t *testing.T) { + s, err := NewMockParametersAnnouncementStream("./testdata/kustomize", "./testdata/kustomize", []string{"MUST_BE_SET=yep"}) + require.NoError(t, err) + // set a malicious app path on the metadata + s.metadataRequest.Request.(*apiclient.AppStreamRequest_Metadata).Metadata.AppRelPath = "../out-of-bounds" + err = service.GetParametersAnnouncement(s) + require.ErrorContains(t, err, "illegal appPath") + require.Nil(t, s.response) + }) + t.Run("fails when script fails", func(t *testing.T) { + s, err := NewMockParametersAnnouncementStream("./testdata/kustomize", "./testdata/kustomize", []string{"WRONG_ENV_VAR=oops"}) + require.NoError(t, err) + err = service.GetParametersAnnouncement(s) + require.ErrorContains(t, err, "error executing dynamic parameter output command") + require.Nil(t, s.response) + }) +} + +func Test_getCommandArgsToLog(t *testing.T) { + testCases := []struct { + name string + args []string + expected string + }{ + { + name: "no spaces", + args: []string{"sh", "-c", "cat"}, + expected: "sh -c cat", + }, + { + name: "spaces", + args: []string{"sh", "-c", `echo "hello world"`}, + expected: `sh -c "echo \"hello world\""`, + }, + } + + for _, tc := range testCases { + tcc := tc + t.Run(tcc.name, func(t *testing.T) { + t.Parallel() + assert.Equal(t, tcc.expected, getCommandArgsToLog(exec.Command(tcc.args[0], tcc.args[1:]...))) + }) + } +} diff --git a/cmpserver/plugin/plugin_unix.go b/cmpserver/plugin/plugin_unix.go new file mode 100644 index 0000000000000..ea6b7b5493910 --- /dev/null +++ b/cmpserver/plugin/plugin_unix.go @@ -0,0 +1,20 @@ +//go:build !windows +// +build !windows + +package plugin + +import ( + "syscall" +) + +func newSysProcAttr(setpgid bool) *syscall.SysProcAttr { + return 
&syscall.SysProcAttr{Setpgid: setpgid} +} + +func sysCallKill(pid int) error { + return syscall.Kill(pid, syscall.SIGKILL) +} + +func sysCallTerm(pid int) error { + return syscall.Kill(pid, syscall.SIGTERM) +} diff --git a/cmpserver/plugin/plugin_windows.go b/cmpserver/plugin/plugin_windows.go new file mode 100644 index 0000000000000..b8873a9793601 --- /dev/null +++ b/cmpserver/plugin/plugin_windows.go @@ -0,0 +1,20 @@ +//go:build windows +// +build windows + +package plugin + +import ( + "syscall" +) + +func newSysProcAttr(setpgid bool) *syscall.SysProcAttr { + return &syscall.SysProcAttr{} +} + +func sysCallKill(pid int) error { + return nil +} + +func sysCallTerm(pid int) error { + return nil +} diff --git a/cmpserver/plugin/testdata/ksonnet/config/plugin.yaml b/cmpserver/plugin/testdata/ksonnet/config/plugin.yaml new file mode 100644 index 0000000000000..0a14afe5d3bf5 --- /dev/null +++ b/cmpserver/plugin/testdata/ksonnet/config/plugin.yaml @@ -0,0 +1,13 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ConfigManagementPlugin +metadata: + name: ksonnet +spec: + version: v1.0 + init: + command: [ks, version] + generate: + command: [sh, -c, "ks show $ARGOCD_APP_ENV"] + discover: + find: + glob: "**/*/main.jsonnet" diff --git a/test/e2e/testdata/helm2/values.yaml b/cmpserver/plugin/testdata/ksonnet/main.jsonnet similarity index 100% rename from test/e2e/testdata/helm2/values.yaml rename to cmpserver/plugin/testdata/ksonnet/main.jsonnet diff --git a/cmpserver/plugin/testdata/kustomize-neg/config/plugin-bad.yaml b/cmpserver/plugin/testdata/kustomize-neg/config/plugin-bad.yaml new file mode 100644 index 0000000000000..b6b7a154a7aee --- /dev/null +++ b/cmpserver/plugin/testdata/kustomize-neg/config/plugin-bad.yaml @@ -0,0 +1,14 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ConfigManagementPlugin +metadata: + name: kustomize +spec: + version: v1.0 + init: + command: [kustomize, version] + generate: + command: [sh, -c, "cd testdata/kustomize && kustomize build"] + discover: + 
find: + command: [sh, -c, find . -name kustomization.yaml] + glob: "**/*/kustomization.yaml" diff --git a/test/e2e/testdata/helm2/templates/config-map.yaml b/cmpserver/plugin/testdata/kustomize/cm.yaml similarity index 100% rename from test/e2e/testdata/helm2/templates/config-map.yaml rename to cmpserver/plugin/testdata/kustomize/cm.yaml diff --git a/cmpserver/plugin/testdata/kustomize/config/plugin.yaml b/cmpserver/plugin/testdata/kustomize/config/plugin.yaml new file mode 100644 index 0000000000000..bdca45e9ae45e --- /dev/null +++ b/cmpserver/plugin/testdata/kustomize/config/plugin.yaml @@ -0,0 +1,33 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ConfigManagementPlugin +metadata: + name: kustomize +spec: + version: v1.0 + init: + command: [sh, -c] + args: + - | + kustomize version + generate: + command: [sh, -c] + args: + - | + kustomize build + discover: + find: + command: [sh, -c, find . -name kustomization.yaml] + glob: "**/kustomization.yaml" + parameters: + static: + - name: test-param + string: test-value + dynamic: + command: [sh, -c] + args: + - | + # Make sure env vars are making it to the plugin. 
+ if [ -z "$MUST_BE_SET" ]; then + exit 1 + fi + echo "[{\"name\": \"dynamic-test-param\", \"string\": \"$MUST_BE_SET\"}]" diff --git a/cmpserver/plugin/testdata/kustomize/kustomization.yaml b/cmpserver/plugin/testdata/kustomize/kustomization.yaml new file mode 100644 index 0000000000000..146ae143e6332 --- /dev/null +++ b/cmpserver/plugin/testdata/kustomize/kustomization.yaml @@ -0,0 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: +- ./cm.yaml diff --git a/cmpserver/server.go b/cmpserver/server.go new file mode 100644 index 0000000000000..bbb493f6b1d66 --- /dev/null +++ b/cmpserver/server.go @@ -0,0 +1,131 @@ +package cmpserver + +import ( + "fmt" + "net" + "os" + "os/signal" + "syscall" + + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + + grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" + grpc_logrus "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus" + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + log "github.com/sirupsen/logrus" + "google.golang.org/grpc" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/reflection" + + "github.com/argoproj/argo-cd/v2/cmpserver/apiclient" + "github.com/argoproj/argo-cd/v2/cmpserver/plugin" + "github.com/argoproj/argo-cd/v2/common" + versionpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/version" + "github.com/argoproj/argo-cd/v2/server/version" + "github.com/argoproj/argo-cd/v2/util/errors" + grpc_util "github.com/argoproj/argo-cd/v2/util/grpc" + "google.golang.org/grpc/keepalive" +) + +// ArgoCDCMPServer is the config management plugin server implementation +type ArgoCDCMPServer struct { + log *log.Entry + opts []grpc.ServerOption + initConstants plugin.CMPServerInitConstants + stopCh chan os.Signal + doneCh chan interface{} + sig os.Signal +} + +// NewServer returns a new instance of the Argo CD config management plugin server +func 
NewServer(initConstants plugin.CMPServerInitConstants) (*ArgoCDCMPServer, error) { + if os.Getenv(common.EnvEnableGRPCTimeHistogramEnv) == "true" { + grpc_prometheus.EnableHandlingTimeHistogram() + } + + serverLog := log.NewEntry(log.StandardLogger()) + streamInterceptors := []grpc.StreamServerInterceptor{ + otelgrpc.StreamServerInterceptor(), + grpc_logrus.StreamServerInterceptor(serverLog), + grpc_prometheus.StreamServerInterceptor, + grpc_util.PanicLoggerStreamServerInterceptor(serverLog), + } + unaryInterceptors := []grpc.UnaryServerInterceptor{ + otelgrpc.UnaryServerInterceptor(), + grpc_logrus.UnaryServerInterceptor(serverLog), + grpc_prometheus.UnaryServerInterceptor, + grpc_util.PanicLoggerUnaryServerInterceptor(serverLog), + } + + serverOpts := []grpc.ServerOption{ + grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(unaryInterceptors...)), + grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(streamInterceptors...)), + grpc.MaxRecvMsgSize(apiclient.MaxGRPCMessageSize), + grpc.MaxSendMsgSize(apiclient.MaxGRPCMessageSize), + grpc.KeepaliveEnforcementPolicy( + keepalive.EnforcementPolicy{ + MinTime: common.GRPCKeepAliveEnforcementMinimum, + }, + ), + } + + return &ArgoCDCMPServer{ + log: serverLog, + opts: serverOpts, + stopCh: make(chan os.Signal), + doneCh: make(chan interface{}), + initConstants: initConstants, + }, nil +} + +func (a *ArgoCDCMPServer) Run() { + config := a.initConstants.PluginConfig + + // Listen on the socket address + _ = os.Remove(config.Address()) + listener, err := net.Listen("unix", config.Address()) + errors.CheckError(err) + log.Infof("argocd-cmp-server %s serving on %s", common.GetVersion(), listener.Addr()) + + signal.Notify(a.stopCh, syscall.SIGINT, syscall.SIGTERM) + go a.Shutdown(config.Address()) + + grpcServer, err := a.CreateGRPC() + errors.CheckError(err) + err = grpcServer.Serve(listener) + errors.CheckError(err) + + if a.sig != nil { + <-a.doneCh + } +} + +// CreateGRPC creates new configured grpc server +func 
(a *ArgoCDCMPServer) CreateGRPC() (*grpc.Server, error) { + server := grpc.NewServer(a.opts...) + versionpkg.RegisterVersionServiceServer(server, version.NewServer(nil, func() (bool, error) { + return true, nil + })) + pluginService := plugin.NewService(a.initConstants) + err := pluginService.Init(common.GetCMPWorkDir()) + if err != nil { + return nil, fmt.Errorf("error initializing plugin service: %s", err) + } + apiclient.RegisterConfigManagementPluginServiceServer(server, pluginService) + + healthService := health.NewServer() + grpc_health_v1.RegisterHealthServer(server, healthService) + + // Register reflection service on gRPC server. + reflection.Register(server) + + return server, nil +} + +func (a *ArgoCDCMPServer) Shutdown(address string) { + defer signal.Stop(a.stopCh) + a.sig = <-a.stopCh + _ = os.Remove(address) + close(a.doneCh) +} diff --git a/common/common.go b/common/common.go index 260a18f6cbe3d..d7c2d24738b58 100644 --- a/common/common.go +++ b/common/common.go @@ -1,9 +1,20 @@ package common import ( + "errors" "os" + "path/filepath" "strconv" "time" + + "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// Component names +const ( + ApplicationController = "argocd-application-controller" ) // Default service addresses and URLS of Argo CD internal services @@ -11,21 +22,25 @@ const ( // DefaultRepoServerAddr is the gRPC address of the Argo CD repo server DefaultRepoServerAddr = "argocd-repo-server:8081" // DefaultDexServerAddr is the HTTP address of the Dex OIDC server, which we run a reverse proxy against - DefaultDexServerAddr = "http://argocd-dex-server:5556" + DefaultDexServerAddr = "argocd-dex-server:5556" // DefaultRedisAddr is the default redis address DefaultRedisAddr = "argocd-redis:6379" ) // Kubernetes ConfigMap and Secret resource names which hold Argo CD settings const ( - ArgoCDConfigMapName = "argocd-cm" - ArgoCDSecretName = "argocd-secret" - ArgoCDRBACConfigMapName = 
"argocd-rbac-cm" - // Contains SSH known hosts data for connecting repositories. Will get mounted as volume to pods + ArgoCDConfigMapName = "argocd-cm" + ArgoCDSecretName = "argocd-secret" + ArgoCDNotificationsConfigMapName = "argocd-notifications-cm" + ArgoCDNotificationsSecretName = "argocd-notifications-secret" + ArgoCDRBACConfigMapName = "argocd-rbac-cm" + // ArgoCDKnownHostsConfigMapName contains SSH known hosts data for connecting repositories. Will get mounted as volume to pods ArgoCDKnownHostsConfigMapName = "argocd-ssh-known-hosts-cm" - // Contains TLS certificate data for connecting repositories. Will get mounted as volume to pods + // ArgoCDTLSCertsConfigMapName contains TLS certificate data for connecting repositories. Will get mounted as volume to pods ArgoCDTLSCertsConfigMapName = "argocd-tls-certs-cm" ArgoCDGPGKeysConfigMapName = "argocd-gpg-keys-cm" + // ArgoCDAppControllerShardConfigMapName contains the application controller to shard mapping + ArgoCDAppControllerShardConfigMapName = "argocd-app-controller-shard-cm" ) // Some default configurables @@ -43,44 +58,66 @@ const ( DefaultPortRepoServerMetrics = 8084 ) +// DefaultAddressAPIServer for ArgoCD components +const ( + DefaultAddressAdminDashboard = "localhost" + DefaultAddressAPIServer = "0.0.0.0" + DefaultAddressAPIServerMetrics = "0.0.0.0" + DefaultAddressRepoServer = "0.0.0.0" + DefaultAddressRepoServerMetrics = "0.0.0.0" +) + // Default paths on the pod's file system const ( - // The default path where TLS certificates for repositories are located + // DefaultPathTLSConfig is the default path where TLS certificates for repositories are located DefaultPathTLSConfig = "/app/config/tls" - // The default path where SSH known hosts are stored + // DefaultPathSSHConfig is the default path where SSH known hosts are stored DefaultPathSSHConfig = "/app/config/ssh" - // Default name for the SSH known hosts file + // DefaultSSHKnownHostsName is the Default name for the SSH known hosts file 
DefaultSSHKnownHostsName = "ssh_known_hosts" - // Default path to GnuPG home directory + // DefaultGnuPgHomePath is the Default path to GnuPG home directory DefaultGnuPgHomePath = "/app/config/gpg/keys" - // Default path to repo server TLS endpoint config + // DefaultAppConfigPath is the Default path to repo server TLS endpoint config DefaultAppConfigPath = "/app/config" -) - -const ( - DefaultSyncRetryDuration = 5 * time.Second - DefaultSyncRetryMaxDuration = 3 * time.Minute - DefaultSyncRetryFactor = int64(2) + // DefaultPluginSockFilePath is the Default path to cmp server plugin socket file + DefaultPluginSockFilePath = "/home/argocd/cmp-server/plugins" + // DefaultPluginConfigFilePath is the Default path to cmp server plugin configuration file + DefaultPluginConfigFilePath = "/home/argocd/cmp-server/config" + // PluginConfigFileName is the Plugin Config File is a ConfigManagementPlugin manifest located inside the plugin container + PluginConfigFileName = "plugin.yaml" ) // Argo CD application related constants const ( - // KubernetesInternalAPIServerAddr is address of the k8s API server when accessing internal to the cluster - KubernetesInternalAPIServerAddr = "https://kubernetes.default.svc" - // DefaultAppProjectName contains name of 'default' app project, which is available in every Argo CD installation - DefaultAppProjectName = "default" + // ArgoCDAdminUsername is the username of the 'admin' user ArgoCDAdminUsername = "admin" // ArgoCDUserAgentName is the default user-agent name used by the gRPC API client library and grpc-gateway ArgoCDUserAgentName = "argocd-client" + // ArgoCDSSAManager is the default argocd manager name used by server-side apply syncs + ArgoCDSSAManager = "argocd-controller" // AuthCookieName is the HTTP cookie name where we store our auth token AuthCookieName = "argocd.token" - // RevisionHistoryLimit is the max number of successful sync to keep in history - RevisionHistoryLimit = 10 + // StateCookieName is the HTTP cookie name that 
holds temporary nonce tokens for CSRF protection + StateCookieName = "argocd.oauthstate" + // StateCookieMaxAge is the maximum age of the oauth state cookie + StateCookieMaxAge = time.Minute * 5 + // ChangePasswordSSOTokenMaxAge is the max token age for password change operation ChangePasswordSSOTokenMaxAge = time.Minute * 5 // GithubAppCredsExpirationDuration is the default time used to cache the GitHub app credentials GithubAppCredsExpirationDuration = time.Minute * 60 + + // PasswordPatten is the default password pattern + PasswordPatten = `^.{8,32}$` + + // LegacyShardingAlgorithm is the default value for Sharding Algorithm it uses an `uid` based distribution (non-uniform) + LegacyShardingAlgorithm = "legacy" + // RoundRobinShardingAlgorithm is a flag value that can be opted for Sharding Algorithm it uses an equal distribution across all shards + RoundRobinShardingAlgorithm = "round-robin" + DefaultShardingAlgorithm = LegacyShardingAlgorithm + // AppControllerHeartbeatUpdateRetryCount is the retry count for updating the Shard Mapping to the Shard Mapping ConfigMap used by Application Controller + AppControllerHeartbeatUpdateRetryCount = 3 ) // Dex related constants @@ -110,37 +147,29 @@ const ( // LabelKeyAppInstance is the label key to use to uniquely identify the instance of an application // The Argo CD application name is used as the instance name LabelKeyAppInstance = "app.kubernetes.io/instance" - // LegacyLabelApplicationName is the legacy label (v0.10 and below) and is superceded by 'app.kubernetes.io/instance' + // LabelKeyAppName is the label key to use to uniquely identify the name of the Kubernetes application + LabelKeyAppName = "app.kubernetes.io/name" + // LabelKeyLegacyApplicationName is the legacy label (v0.10 and below) and is superseded by 'app.kubernetes.io/instance' LabelKeyLegacyApplicationName = "applications.argoproj.io/app-name" - // LabelKeySecretType contains the type of argocd secret (currently: 'cluster') + // LabelKeySecretType 
contains the type of argocd secret (currently: 'cluster', 'repository', 'repo-config' or 'repo-creds') LabelKeySecretType = "argocd.argoproj.io/secret-type" // LabelValueSecretTypeCluster indicates a secret type of cluster LabelValueSecretTypeCluster = "cluster" + // LabelValueSecretTypeRepository indicates a secret type of repository + LabelValueSecretTypeRepository = "repository" + // LabelValueSecretTypeRepoCreds indicates a secret type of repository credentials + LabelValueSecretTypeRepoCreds = "repo-creds" + + // AnnotationKeyAppInstance is the Argo CD application name is used as the instance name + AnnotationKeyAppInstance = "argocd.argoproj.io/tracking-id" // AnnotationCompareOptions is a comma-separated list of options for comparison AnnotationCompareOptions = "argocd.argoproj.io/compare-options" - // AnnotationKeyRefresh is the annotation key which indicates that app needs to be refreshed. Removed by application controller after app is refreshed. - // Might take values 'normal'/'hard'. Value 'hard' means manifest cache and target cluster state cache should be invalidated before refresh. - AnnotationKeyRefresh = "argocd.argoproj.io/refresh" // AnnotationKeyManagedBy is annotation name which indicates that k8s resource is managed by an application. 
AnnotationKeyManagedBy = "managed-by" // AnnotationValueManagedByArgoCD is a 'managed-by' annotation value for resources managed by Argo CD AnnotationValueManagedByArgoCD = "argocd.argoproj.io" - // ResourcesFinalizerName is the finalizer value which we inject to finalize deletion of an application - ResourcesFinalizerName = "resources-finalizer.argocd.argoproj.io" - - // ForegroundPropagationPolicyFinalizer is the finalizer we inject to delete application with foreground propagation policy - ForegroundPropagationPolicyFinalizer = "resources-finalizer.argocd.argoproj.io/foreground" - - // BackgroundPropagationPolicyFinalizer is the finalizer we inject to delete application with background propagation policy - BackgroundPropagationPolicyFinalizer = "resources-finalizer.argocd.argoproj.io/background" - - // AnnotationKeyManifestGeneratePaths is an annotation that contains a list of semicolon-separated paths in the - // manifests repository that affects the manifest generation. Paths might be either relative or absolute. The - // absolute path means an absolute path within the repository and the relative path is relative to the application - // source path within the repository. - AnnotationKeyManifestGeneratePaths = "argocd.argoproj.io/manifest-generate-paths" // AnnotationKeyLinkPrefix tells the UI to add an external link icon to the application node // that links to the value given in the annotation. @@ -151,6 +180,10 @@ const ( // Ex: "http://grafana.example.com/d/yu5UH4MMz/deployments" // Ex: "Go to Dashboard|http://grafana.example.com/d/yu5UH4MMz/deployments" AnnotationKeyLinkPrefix = "link.argocd.argoproj.io/" + + // AnnotationKeyAppSkipReconcile tells the Application to skip the Application controller reconcile. + // Skip reconcile when the value is "true" or any other string values that can be strconv.ParseBool() to be true. 
+ AnnotationKeyAppSkipReconcile = "argocd.argoproj.io/skip-reconcile" ) // Environment variables for tuning and debugging Argo CD @@ -159,25 +192,20 @@ const ( EnvVarSSODebug = "ARGOCD_SSO_DEBUG" // EnvVarRBACDebug is an environment variable to enable additional RBAC debugging in the API server EnvVarRBACDebug = "ARGOCD_RBAC_DEBUG" - // EnvVarFakeInClusterConfig is an environment variable to fake an in-cluster RESTConfig using - // the current kubectl context (for development purposes) - EnvVarFakeInClusterConfig = "ARGOCD_FAKE_IN_CLUSTER" - // Overrides the location where SSH known hosts for repo access data is stored + // EnvVarSSHDataPath overrides the location where SSH known hosts for repo access data is stored EnvVarSSHDataPath = "ARGOCD_SSH_DATA_PATH" - // Overrides the location where TLS certificate for repo access data is stored + // EnvVarTLSDataPath overrides the location where TLS certificate for repo access data is stored EnvVarTLSDataPath = "ARGOCD_TLS_DATA_PATH" - // Specifies number of git remote operations attempts count + // EnvGitAttemptsCount specifies number of git remote operations attempts count EnvGitAttemptsCount = "ARGOCD_GIT_ATTEMPTS_COUNT" - // Overrides git submodule support, true by default + // EnvGitRetryMaxDuration specifies max duration of git remote operation retry + EnvGitRetryMaxDuration = "ARGOCD_GIT_RETRY_MAX_DURATION" + // EnvGitRetryDuration specifies duration of git remote operation retry + EnvGitRetryDuration = "ARGOCD_GIT_RETRY_DURATION" + // EnvGitRetryFactor specifies factor of git remote operation retry + EnvGitRetryFactor = "ARGOCD_GIT_RETRY_FACTOR" + // EnvGitSubmoduleEnabled overrides git submodule support, true by default EnvGitSubmoduleEnabled = "ARGOCD_GIT_MODULES_ENABLED" - // EnvK8sClientQPS is the QPS value used for the kubernetes client (default: 50) - EnvK8sClientQPS = "ARGOCD_K8S_CLIENT_QPS" - // EnvK8sClientBurst is the burst value used for the kubernetes client (default: twice the client QPS) - 
EnvK8sClientBurst = "ARGOCD_K8S_CLIENT_BURST" - // EnvClusterCacheResyncDuration is the env variable that holds cluster cache re-sync duration - EnvClusterCacheResyncDuration = "ARGOCD_CLUSTER_CACHE_RESYNC_DURATION" - // EnvK8sClientMaxIdleConnections is the number of max idle connections in K8s REST client HTTP transport (default: 500) - EnvK8sClientMaxIdleConnections = "ARGOCD_K8S_CLIENT_MAX_IDLE_CONNECTIONS" // EnvGnuPGHome is the path to ArgoCD's GnuPG keyring for signature verification EnvGnuPGHome = "ARGOCD_GNUPGHOME" // EnvWatchAPIBufferSize is the buffer size used to transfer K8S watch events to watch API consumer @@ -190,16 +218,57 @@ const ( EnvPauseGenerationRequests = "ARGOCD_PAUSE_GEN_REQUESTS" // EnvControllerReplicas is the number of controller replicas EnvControllerReplicas = "ARGOCD_CONTROLLER_REPLICAS" + // EnvControllerHeartbeatTime will update the heartbeat for application controller to claim shard + EnvControllerHeartbeatTime = "ARGOCD_CONTROLLER_HEARTBEAT_TIME" // EnvControllerShard is the shard number that should be handled by controller EnvControllerShard = "ARGOCD_CONTROLLER_SHARD" + // EnvControllerShardingAlgorithm is the distribution sharding algorithm to be used: legacy or round-robin + EnvControllerShardingAlgorithm = "ARGOCD_CONTROLLER_SHARDING_ALGORITHM" + //EnvEnableDynamicClusterDistribution enables dynamic sharding (ALPHA) + EnvEnableDynamicClusterDistribution = "ARGOCD_ENABLE_DYNAMIC_CLUSTER_DISTRIBUTION" // EnvEnableGRPCTimeHistogramEnv enables gRPC metrics collection EnvEnableGRPCTimeHistogramEnv = "ARGOCD_ENABLE_GRPC_TIME_HISTOGRAM" // EnvGithubAppCredsExpirationDuration controls the caching of Github app credentials. 
This value is in minutes (default: 60) EnvGithubAppCredsExpirationDuration = "ARGOCD_GITHUB_APP_CREDS_EXPIRATION_DURATION" // EnvHelmIndexCacheDuration controls how the helm repository index file is cached for (default: 0) EnvHelmIndexCacheDuration = "ARGOCD_HELM_INDEX_CACHE_DURATION" - // EnvRepoServerConfigPath allows to override the configuration path for repo server + // EnvAppConfigPath allows to override the configuration path for repo server EnvAppConfigPath = "ARGOCD_APP_CONF_PATH" + // EnvLogFormat log format that is defined by `--logformat` option + EnvLogFormat = "ARGOCD_LOG_FORMAT" + // EnvLogLevel log level that is defined by `--loglevel` option + EnvLogLevel = "ARGOCD_LOG_LEVEL" + // EnvMaxCookieNumber max number of chunks a cookie can be broken into + EnvMaxCookieNumber = "ARGOCD_MAX_COOKIE_NUMBER" + // EnvPluginSockFilePath allows to override the pluginSockFilePath for repo server and cmp server + EnvPluginSockFilePath = "ARGOCD_PLUGINSOCKFILEPATH" + // EnvCMPChunkSize defines the chunk size in bytes used when sending files to the cmp server + EnvCMPChunkSize = "ARGOCD_CMP_CHUNK_SIZE" + // EnvCMPWorkDir defines the full path of the work directory used by the CMP server + EnvCMPWorkDir = "ARGOCD_CMP_WORKDIR" + // EnvGPGDataPath overrides the location where GPG keyring for signature verification is stored + EnvGPGDataPath = "ARGOCD_GPG_DATA_PATH" + // EnvServerName is the name of the Argo CD server component, as specified by the value under the LabelKeyAppName label key. + EnvServerName = "ARGOCD_SERVER_NAME" + // EnvRepoServerName is the name of the Argo CD repo server component, as specified by the value under the LabelKeyAppName label key. + EnvRepoServerName = "ARGOCD_REPO_SERVER_NAME" + // EnvAppControllerName is the name of the Argo CD application controller component, as specified by the value under the LabelKeyAppName label key. 
+ EnvAppControllerName = "ARGOCD_APPLICATION_CONTROLLER_NAME" + // EnvRedisName is the name of the Argo CD redis component, as specified by the value under the LabelKeyAppName label key. + EnvRedisName = "ARGOCD_REDIS_NAME" + // EnvRedisHaProxyName is the name of the Argo CD Redis HA proxy component, as specified by the value under the LabelKeyAppName label key. + EnvRedisHaProxyName = "ARGOCD_REDIS_HAPROXY_NAME" +) + +// Config Management Plugin related constants +const ( + // DefaultCMPChunkSize defines chunk size in bytes used when sending files to the cmp server + DefaultCMPChunkSize = 1024 + + // DefaultCMPWorkDirName defines the work directory name used by the cmp-server + DefaultCMPWorkDirName = "_cmp_server" + + ConfigMapPluginDeprecationWarning = "argocd-cm plugins are deprecated, and support will be removed in v2.7. Upgrade your plugin to be installed via sidecar. https://argo-cd.readthedocs.io/en/stable/user-guide/config-management-plugins/" ) const ( @@ -212,6 +281,28 @@ const ( CacheVersion = "1.8.3" ) +// Constants used by util/clusterauth package +const ( + ClusterAuthRequestTimeout = 10 * time.Second + BearerTokenTimeout = 30 * time.Second +) + +const ( + DefaultGitRetryMaxDuration time.Duration = time.Second * 5 // 5s + DefaultGitRetryDuration time.Duration = time.Millisecond * 250 // 0.25s + DefaultGitRetryFactor = int64(2) +) + +// Constants represent the pod selector labels of the Argo CD component names. These values are determined by the +// installation manifests. 
+const ( + DefaultServerName = "argocd-server" + DefaultRepoServerName = "argocd-repo-server" + DefaultApplicationControllerName = "argocd-application-controller" + DefaultRedisName = "argocd-redis" + DefaultRedisHaProxyName = "argocd-redis-ha-haproxy" +) + // GetGnuPGHomePath retrieves the path to use for GnuPG home directory, which is either taken from GNUPGHOME environment or a default value func GetGnuPGHomePath() string { if gnuPgHome := os.Getenv(EnvGnuPGHome); gnuPgHome == "" { @@ -221,39 +312,67 @@ func GetGnuPGHomePath() string { } } -var ( - // K8sClientConfigQPS controls the QPS to be used in K8s REST client configs - K8sClientConfigQPS float32 = 50 - // K8sClientConfigBurst controls the burst to be used in K8s REST client configs - K8sClientConfigBurst int = 100 - // K8sMaxIdleConnections controls the number of max idle connections in K8s REST client HTTP transport - K8sMaxIdleConnections = 500 - // K8sMaxIdleConnections controls the duration of cluster cache refresh - K8SClusterResyncDuration = 12 * time.Hour -) - -func init() { - if envQPS := os.Getenv(EnvK8sClientQPS); envQPS != "" { - if qps, err := strconv.ParseFloat(envQPS, 32); err != nil { - K8sClientConfigQPS = float32(qps) - } - } - if envBurst := os.Getenv(EnvK8sClientBurst); envBurst != "" { - if burst, err := strconv.Atoi(envBurst); err != nil { - K8sClientConfigBurst = burst - } +// GetPluginSockFilePath retrieves the path of plugin sock file, which is either taken from PluginSockFilePath environment or a default value +func GetPluginSockFilePath() string { + if pluginSockFilePath := os.Getenv(EnvPluginSockFilePath); pluginSockFilePath == "" { + return DefaultPluginSockFilePath } else { - K8sClientConfigBurst = 2 * int(K8sClientConfigQPS) + return pluginSockFilePath } +} - if envMaxConn := os.Getenv(EnvK8sClientMaxIdleConnections); envMaxConn != "" { - if maxConn, err := strconv.Atoi(envMaxConn); err != nil { - K8sMaxIdleConnections = maxConn +// GetCMPChunkSize will return the env var 
EnvCMPChunkSize value if defined or DefaultCMPChunkSize otherwise. +// If EnvCMPChunkSize is defined but not a valid int, DefaultCMPChunkSize will be returned +func GetCMPChunkSize() int { + if chunkSizeStr := os.Getenv(EnvCMPChunkSize); chunkSizeStr != "" { + chunkSize, err := strconv.Atoi(chunkSizeStr) + if err != nil { + logrus.Warnf("invalid env var value for %s: not a valid int: %s. Default value will be used.", EnvCMPChunkSize, err) + return DefaultCMPChunkSize } + return chunkSize } - if clusterResyncDurationStr := os.Getenv(EnvClusterCacheResyncDuration); clusterResyncDurationStr != "" { - if duration, err := time.ParseDuration(clusterResyncDurationStr); err == nil { - K8SClusterResyncDuration = duration - } + return DefaultCMPChunkSize +} + +// GetCMPWorkDir will return the full path of the work directory used by the CMP server. +// This directory and all its contents will be deleted during CMP bootstrap. +func GetCMPWorkDir() string { + if workDir := os.Getenv(EnvCMPWorkDir); workDir != "" { + return filepath.Join(workDir, DefaultCMPWorkDirName) } + return filepath.Join(os.TempDir(), DefaultCMPWorkDirName) } + +const ( + // AnnotationApplicationSetRefresh is an annotation that is added when an ApplicationSet is requested to be refreshed by a webhook. The ApplicationSet controller will remove this annotation at the end of reconciliation. + AnnotationApplicationSetRefresh = "argocd.argoproj.io/application-set-refresh" +) + +// gRPC settings +const ( + GRPCKeepAliveEnforcementMinimum = 10 * time.Second + // GRPCKeepAliveTime is 2x enforcement minimum to ensure network jitter does not introduce ENHANCE_YOUR_CALM errors + GRPCKeepAliveTime = 2 * GRPCKeepAliveEnforcementMinimum +) + +// Security severity logging +const ( + SecurityField = "security" + // SecurityCWEField is the logs field for the CWE associated with a log line. CWE stands for Common Weakness Enumeration. 
See https://cwe.mitre.org/ + SecurityCWEField = "CWE" + SecurityCWEIncompleteCleanup = 459 + SecurityCWEMissingReleaseOfFileDescriptor = 775 + SecurityEmergency = 5 // Indicates unmistakably malicious events that should NEVER occur accidentally and indicates an active attack (i.e. brute forcing, DoS) + SecurityCritical = 4 // Indicates any malicious or exploitable event that had a side effect (i.e. secrets being left behind on the filesystem) + SecurityHigh = 3 // Indicates likely malicious events but one that had no side effects or was blocked (i.e. out of bounds symlinks in repos) + SecurityMedium = 2 // Could indicate malicious events, but has a high likelihood of being user/system error (i.e. access denied) + SecurityLow = 1 // Unexceptional entries (i.e. successful access logs) +) + +// TokenVerificationError is a generic error message for a failure to verify a JWT +const TokenVerificationError = "failed to verify the token" + +var TokenVerificationErr = errors.New(TokenVerificationError) + +var PermissionDeniedAPIError = status.Error(codes.PermissionDenied, "permission denied") diff --git a/common/version.go b/common/version.go index b06976877f9b2..e8caf37a30601 100644 --- a/common/version.go +++ b/common/version.go @@ -3,6 +3,8 @@ package common import ( "fmt" "runtime" + + log "github.com/sirupsen/logrus" ) // Version information set by link flags during build. We fall back to these sane @@ -14,6 +16,7 @@ var ( gitTag = "" // output from `git describe --exact-match --tags HEAD` (if clean tree state) gitTreeState = "" // determined from `git status --porcelain`. 
either 'clean' or 'dirty' kubectlVersion = "" // determined from go.mod file + extraBuildInfo = "" // extra build information for vendors to populate during build ) // Version contains Argo version information @@ -27,12 +30,23 @@ type Version struct { Compiler string Platform string KubectlVersion string + ExtraBuildInfo string } func (v Version) String() string { return v.Version } +func (v Version) LogStartupInfo(componentName string, fields map[string]any) { + if fields == nil { + fields = map[string]any{} + } + fields["version"] = v.Version + fields["commit"] = v.GitCommit + fields["built"] = v.BuildDate + log.WithFields(log.Fields(fields)).Infof("%s is starting", componentName) +} + // GetVersion returns the version information func GetVersion() Version { var versionStr string @@ -54,6 +68,7 @@ func GetVersion() Version { versionStr += "+unknown" } } + return Version{ Version: versionStr, BuildDate: buildDate, @@ -64,5 +79,6 @@ func GetVersion() Version { Compiler: runtime.Compiler, Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), KubectlVersion: kubectlVersion, + ExtraBuildInfo: extraBuildInfo, } } diff --git a/controller/OWNERS b/controller/OWNERS new file mode 100644 index 0000000000000..03538861101ae --- /dev/null +++ b/controller/OWNERS @@ -0,0 +1,2 @@ +owners: +- alexmt diff --git a/controller/appcontroller.go b/controller/appcontroller.go index 91823a78dde55..afa2a2d7b8186 100644 --- a/controller/appcontroller.go +++ b/controller/appcontroller.go @@ -18,6 +18,7 @@ import ( "github.com/argoproj/gitops-engine/pkg/diff" "github.com/argoproj/gitops-engine/pkg/health" synccommon "github.com/argoproj/gitops-engine/pkg/sync/common" + resourceutil "github.com/argoproj/gitops-engine/pkg/sync/resource" "github.com/argoproj/gitops-engine/pkg/utils/kube" jsonpatch "github.com/evanphx/json-patch" log "github.com/sirupsen/logrus" @@ -28,37 +29,46 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" apiruntime 
"k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/informers" + informerv1 "k8s.io/client-go/informers/apps/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" - // make sure to register workqueue prometheus metrics - _ "k8s.io/component-base/metrics/prometheus/workqueue" - "github.com/argoproj/argo-cd/v2/common" statecache "github.com/argoproj/argo-cd/v2/controller/cache" "github.com/argoproj/argo-cd/v2/controller/metrics" + "github.com/argoproj/argo-cd/v2/controller/sharding" "github.com/argoproj/argo-cd/v2/pkg/apis/application" appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + argov1alpha "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned" "github.com/argoproj/argo-cd/v2/pkg/client/informers/externalversions/application/v1alpha1" applisters "github.com/argoproj/argo-cd/v2/pkg/client/listers/application/v1alpha1" "github.com/argoproj/argo-cd/v2/reposerver/apiclient" "github.com/argoproj/argo-cd/v2/util/argo" + argodiff "github.com/argoproj/argo-cd/v2/util/argo/diff" + "github.com/argoproj/argo-cd/v2/util/env" + + kubeerrors "k8s.io/apimachinery/pkg/api/errors" + appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate" "github.com/argoproj/argo-cd/v2/util/db" "github.com/argoproj/argo-cd/v2/util/errors" "github.com/argoproj/argo-cd/v2/util/glob" + "github.com/argoproj/argo-cd/v2/util/helm" logutils "github.com/argoproj/argo-cd/v2/util/log" settings_util "github.com/argoproj/argo-cd/v2/util/settings" ) const ( - updateOperationStateTimeout = 1 * time.Second + updateOperationStateTimeout = 1 * time.Second + defaultDeploymentInformerResyncDuration = 10 // orphanedIndex contains application which monitor orphaned resources 
by namespace orphanedIndex = "orphaned" ) @@ -66,6 +76,8 @@ const ( type CompareWith int const ( + // Compare live application state against state defined in latest git revision with no resolved revision caching. + CompareWithLatestForceResolve CompareWith = 3 // Compare live application state against state defined in latest git revision. CompareWithLatest CompareWith = 2 // Compare live application state against state defined using revision of most recent comparison. @@ -99,9 +111,11 @@ type ApplicationController struct { appInformer cache.SharedIndexInformer appLister applisters.ApplicationLister projInformer cache.SharedIndexInformer + deploymentInformer informerv1.DeploymentInformer appStateManager AppStateManager stateCache statecache.LiveStateCache statusRefreshTimeout time.Duration + statusHardRefreshTimeout time.Duration selfHealTimeout time.Duration repoClientset apiclient.Clientset db db.ArgoDB @@ -111,6 +125,8 @@ type ApplicationController struct { metricsServer *metrics.MetricsServer kubectlSemaphore *semaphore.Weighted clusterFilter func(cluster *appv1.Cluster) bool + projByNameCache sync.Map + applicationNamespaces []string } // NewApplicationController creates new instance of ApplicationController. 
@@ -123,13 +139,17 @@ func NewApplicationController( argoCache *appstatecache.Cache, kubectl kube.Kubectl, appResyncPeriod time.Duration, + appHardResyncPeriod time.Duration, selfHealTimeout time.Duration, metricsPort int, metricsCacheExpiration time.Duration, + metricsApplicationLabels []string, kubectlParallelismLimit int64, + persistResourceHealth bool, clusterFilter func(cluster *appv1.Cluster) bool, + applicationNamespaces []string, ) (*ApplicationController, error) { - log.Infof("appResyncPeriod=%v", appResyncPeriod) + log.Infof("appResyncPeriod=%v, appHardResyncPeriod=%v", appResyncPeriod, appHardResyncPeriod) db := db.NewDB(namespace, settingsMgr, kubeClientset) ctrl := ApplicationController{ cache: argoCache, @@ -144,12 +164,15 @@ func NewApplicationController( appComparisonTypeRefreshQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), db: db, statusRefreshTimeout: appResyncPeriod, + statusHardRefreshTimeout: appHardResyncPeriod, refreshRequestedApps: make(map[string]CompareWith), refreshRequestedAppsMutex: &sync.Mutex{}, - auditLogger: argo.NewAuditLogger(namespace, kubeClientset, "argocd-application-controller"), + auditLogger: argo.NewAuditLogger(namespace, kubeClientset, common.ApplicationController), settingsMgr: settingsMgr, selfHealTimeout: selfHealTimeout, clusterFilter: clusterFilter, + projByNameCache: sync.Map{}, + applicationNamespaces: applicationNamespaces, } if kubectlParallelismLimit > 0 { ctrl.kubectlSemaphore = semaphore.NewWeighted(kubectlParallelismLimit) @@ -162,24 +185,58 @@ func NewApplicationController( AddFunc: func(obj interface{}) { if key, err := cache.MetaNamespaceKeyFunc(obj); err == nil { ctrl.projectRefreshQueue.Add(key) + if projMeta, ok := obj.(metav1.Object); ok { + ctrl.InvalidateProjectsCache(projMeta.GetName()) + } + } }, UpdateFunc: func(old, new interface{}) { if key, err := cache.MetaNamespaceKeyFunc(new); err == nil { ctrl.projectRefreshQueue.Add(key) + if projMeta, ok := 
new.(metav1.Object); ok { + ctrl.InvalidateProjectsCache(projMeta.GetName()) + } } }, DeleteFunc: func(obj interface{}) { if key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj); err == nil { ctrl.projectRefreshQueue.Add(key) + if projMeta, ok := obj.(metav1.Object); ok { + ctrl.InvalidateProjectsCache(projMeta.GetName()) + } } }, }) + + factory := informers.NewSharedInformerFactoryWithOptions(ctrl.kubeClientset, defaultDeploymentInformerResyncDuration, informers.WithNamespace(settingsMgr.GetNamespace())) + deploymentInformer := factory.Apps().V1().Deployments() + + readinessHealthCheck := func(r *http.Request) error { + applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName) + appControllerDeployment, err := deploymentInformer.Lister().Deployments(settingsMgr.GetNamespace()).Get(applicationControllerName) + if err != nil { + if kubeerrors.IsNotFound(err) { + appControllerDeployment = nil + } else { + return fmt.Errorf("error retrieving Application Controller Deployment: %s", err) + } + } + if appControllerDeployment != nil { + if appControllerDeployment.Spec.Replicas != nil && int(*appControllerDeployment.Spec.Replicas) <= 0 { + return fmt.Errorf("application controller deployment replicas is not set or is less than 0, replicas: %d", appControllerDeployment.Spec.Replicas) + } + shard := env.ParseNumFromEnv(common.EnvControllerShard, -1, -math.MaxInt32, math.MaxInt32) + if _, err := sharding.GetOrUpdateShardFromConfigMap(kubeClientset.(*kubernetes.Clientset), settingsMgr, int(*appControllerDeployment.Spec.Replicas), shard); err != nil { + return fmt.Errorf("error while updating the heartbeat for to the Shard Mapping ConfigMap: %s", err) + } + } + return nil + } + metricsAddr := fmt.Sprintf("0.0.0.0:%d", metricsPort) var err error - ctrl.metricsServer, err = metrics.NewMetricsServer(metricsAddr, appLister, ctrl.canProcessApp, func(r *http.Request) error { - return nil - }) + ctrl.metricsServer, err 
= metrics.NewMetricsServer(metricsAddr, appLister, ctrl.canProcessApp, readinessHealthCheck, metricsApplicationLabels) if err != nil { return nil, err } @@ -189,17 +246,33 @@ func NewApplicationController( return nil, err } } - stateCache := statecache.NewLiveStateCache(db, appInformer, ctrl.settingsMgr, kubectl, ctrl.metricsServer, ctrl.handleObjectUpdated, clusterFilter) - appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectl, ctrl.settingsMgr, stateCache, projInformer, ctrl.metricsServer, argoCache, ctrl.statusRefreshTimeout) + stateCache := statecache.NewLiveStateCache(db, appInformer, ctrl.settingsMgr, kubectl, ctrl.metricsServer, ctrl.handleObjectUpdated, clusterFilter, argo.NewResourceTracking()) + appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectl, ctrl.settingsMgr, stateCache, projInformer, ctrl.metricsServer, argoCache, ctrl.statusRefreshTimeout, argo.NewResourceTracking(), persistResourceHealth) ctrl.appInformer = appInformer ctrl.appLister = appLister ctrl.projInformer = projInformer + ctrl.deploymentInformer = deploymentInformer ctrl.appStateManager = appStateManager ctrl.stateCache = stateCache return &ctrl, nil } +func (ctrl *ApplicationController) InvalidateProjectsCache(names ...string) { + if len(names) > 0 { + for _, name := range names { + ctrl.projByNameCache.Delete(name) + } + } else { + if ctrl != nil { + ctrl.projByNameCache.Range(func(key, _ interface{}) bool { + ctrl.projByNameCache.Delete(key) + return true + }) + } + } +} + func (ctrl *ApplicationController) GetMetricsServer() *metrics.MetricsServer { return ctrl.metricsServer } @@ -229,8 +302,50 @@ func isSelfReferencedApp(app *appv1.Application, ref v1.ObjectReference) bool { gvk.Kind == application.ApplicationKind } +func (ctrl *ApplicationController) newAppProjCache(name string) *appProjCache { + return &appProjCache{name: name, ctrl: ctrl} +} + +type appProjCache struct { + name string + ctrl 
*ApplicationController + + lock sync.Mutex + appProj *appv1.AppProject +} + +// GetAppProject gets an AppProject from the cache. If the AppProject is not +// yet cached, retrieves the AppProject from the K8s control plane and stores +// in the cache. +func (projCache *appProjCache) GetAppProject(ctx context.Context) (*appv1.AppProject, error) { + projCache.lock.Lock() + defer projCache.lock.Unlock() + if projCache.appProj != nil { + return projCache.appProj, nil + } + proj, err := argo.GetAppProjectByName(projCache.name, applisters.NewAppProjectLister(projCache.ctrl.projInformer.GetIndexer()), projCache.ctrl.namespace, projCache.ctrl.settingsMgr, projCache.ctrl.db, ctx) + if err != nil { + return nil, err + } + projCache.appProj = proj + return projCache.appProj, nil +} + +// getAppProj gets the AppProject for the given Application app. func (ctrl *ApplicationController) getAppProj(app *appv1.Application) (*appv1.AppProject, error) { - return argo.GetAppProject(&app.Spec, applisters.NewAppProjectLister(ctrl.projInformer.GetIndexer()), ctrl.namespace, ctrl.settingsMgr) + projCache, _ := ctrl.projByNameCache.LoadOrStore(app.Spec.GetProject(), ctrl.newAppProjCache(app.Spec.GetProject())) + proj, err := projCache.(*appProjCache).GetAppProject(context.TODO()) + if err != nil { + if apierr.IsNotFound(err) { + return nil, err + } else { + return nil, fmt.Errorf("could not retrieve AppProject '%s' from cache: %v", app.Spec.Project, err) + } + } + if !proj.IsAppNamespacePermitted(app, ctrl.namespace) { + return nil, argo.ErrProjectNotPermitted(app.GetName(), app.GetNamespace(), proj.GetName()) + } + return proj, nil } func (ctrl *ApplicationController) handleObjectUpdated(managedByApp map[string]bool, ref v1.ObjectReference) { @@ -243,24 +358,31 @@ func (ctrl *ApplicationController) handleObjectUpdated(managedByApp map[string]b if !ok { continue } - // exclude resource unless it is permitted in the app project. 
If project is not permitted then it is not controlled by the user and there is no point showing the warning. - if proj, err := ctrl.getAppProj(app); err == nil && proj.IsGroupKindPermitted(ref.GroupVersionKind().GroupKind(), true) && - !isKnownOrphanedResourceExclusion(kube.NewResourceKey(ref.GroupVersionKind().Group, ref.GroupVersionKind().Kind, ref.Namespace, ref.Name), proj) { - managedByApp[app.Name] = false - } + managedByApp[app.InstanceName(ctrl.namespace)] = true } } } for appName, isManagedResource := range managedByApp { - obj, exists, err := ctrl.appInformer.GetIndexer().GetByKey(ctrl.namespace + "/" + appName) - if app, ok := obj.(*appv1.Application); exists && err == nil && ok && isSelfReferencedApp(app, ref) { + // The appName is given as _, but the indexer needs it + // format / + appKey := ctrl.toAppKey(appName) + obj, exists, err := ctrl.appInformer.GetIndexer().GetByKey(appKey) + app, ok := obj.(*appv1.Application) + if exists && err == nil && ok && isSelfReferencedApp(app, ref) { // Don't force refresh app if related resource is application itself. This prevents infinite reconciliation loop. continue } if !ctrl.canProcessApp(obj) { - // Don't force refresh app if app belongs to a different controller shard + // Don't force refresh app if app belongs to a different controller shard or is outside the allowed namespaces. 
+ continue + } + + // Enforce application's permission for the source namespace + _, err = ctrl.getAppProj(app) + if err != nil { + log.Errorf("Unable to determine project for app '%s': %v", app.QualifiedName(), err) continue } @@ -268,24 +390,46 @@ func (ctrl *ApplicationController) handleObjectUpdated(managedByApp map[string]b if isManagedResource { level = CompareWithRecent } - ctrl.requestAppRefresh(appName, &level, nil) + + namespace := ref.Namespace + if ref.Namespace == "" { + namespace = "(cluster-scoped)" + } + log.WithFields(log.Fields{ + "application": appKey, + "level": level, + "namespace": namespace, + "name": ref.Name, + "api-version": ref.APIVersion, + "kind": ref.Kind, + "server": app.Spec.Destination.Server, + "cluster-name": app.Spec.Destination.Name, + }).Debug("Requesting app refresh caused by object update") + + ctrl.requestAppRefresh(app.QualifiedName(), &level, nil) } } +// setAppManagedResources will build a list of ResourceDiff based on the provided comparisonResult +// and persist app resources related data in the cache. Will return the persisted ApplicationTree. 
func (ctrl *ApplicationController) setAppManagedResources(a *appv1.Application, comparisonResult *comparisonResult) (*appv1.ApplicationTree, error) { - managedResources, err := ctrl.managedResources(comparisonResult) + managedResources, err := ctrl.hideSecretData(a, comparisonResult) if err != nil { - return nil, err + return nil, fmt.Errorf("error getting managed resources: %s", err) } tree, err := ctrl.getResourceTree(a, managedResources) if err != nil { - return nil, err + return nil, fmt.Errorf("error getting resource tree: %s", err) } - err = ctrl.cache.SetAppResourcesTree(a.Name, tree) + err = ctrl.cache.SetAppResourcesTree(a.InstanceName(ctrl.namespace), tree) if err != nil { - return nil, err + return nil, fmt.Errorf("error setting app resource tree: %s", err) } - return tree, ctrl.cache.SetAppManagedResources(a.Name, managedResources) + err = ctrl.cache.SetAppManagedResources(a.InstanceName(ctrl.namespace), managedResources) + if err != nil { + return nil, fmt.Errorf("error setting app managed resources: %s", err) + } + return tree, nil } // returns true of given resources exist in the namespace by default and not managed by the user @@ -314,33 +458,32 @@ func isKnownOrphanedResourceExclusion(key kube.ResourceKey, proj *appv1.AppProje func (ctrl *ApplicationController) getResourceTree(a *appv1.Application, managedResources []*appv1.ResourceDiff) (*appv1.ApplicationTree, error) { nodes := make([]appv1.ResourceNode, 0) - - proj, err := argo.GetAppProject(&a.Spec, applisters.NewAppProjectLister(ctrl.projInformer.GetIndexer()), ctrl.namespace, ctrl.settingsMgr) + proj, err := ctrl.getAppProj(a) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get project: %w", err) } + orphanedNodesMap := make(map[kube.ResourceKey]appv1.ResourceNode) warnOrphaned := true if proj.Spec.OrphanedResources != nil { orphanedNodesMap, err = ctrl.stateCache.GetNamespaceTopLevelResources(a.Spec.Destination.Server, a.Spec.Destination.Namespace) if err != nil { - 
return nil, err + return nil, fmt.Errorf("failed to get namespace top-level resources: %w", err) } warnOrphaned = proj.Spec.OrphanedResources.IsWarn() } - for i := range managedResources { managedResource := managedResources[i] delete(orphanedNodesMap, kube.NewResourceKey(managedResource.Group, managedResource.Kind, managedResource.Namespace, managedResource.Name)) var live = &unstructured.Unstructured{} err := json.Unmarshal([]byte(managedResource.LiveState), &live) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to unmarshal live state of managed resources: %w", err) } var target = &unstructured.Unstructured{} err = json.Unmarshal([]byte(managedResource.TargetState), &target) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to unmarshal target state of managed resources: %w", err) } if live == nil { @@ -354,27 +497,50 @@ func (ctrl *ApplicationController) getResourceTree(a *appv1.Application, managed }, }) } else { - err := ctrl.stateCache.IterateHierarchy(a.Spec.Destination.Server, kube.GetResourceKey(live), func(child appv1.ResourceNode, appName string) { + err := ctrl.stateCache.IterateHierarchy(a.Spec.Destination.Server, kube.GetResourceKey(live), func(child appv1.ResourceNode, appName string) bool { + permitted, _ := proj.IsResourcePermitted(schema.GroupKind{Group: child.ResourceRef.Group, Kind: child.ResourceRef.Kind}, child.Namespace, a.Spec.Destination, func(project string) ([]*appv1.Cluster, error) { + clusters, err := ctrl.db.GetProjectClusters(context.TODO(), project) + if err != nil { + return nil, fmt.Errorf("failed to get project clusters: %w", err) + } + return clusters, nil + }) + if !permitted { + return false + } nodes = append(nodes, child) + return true }) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to iterate resource hierarchy: %w", err) } } } orphanedNodes := make([]appv1.ResourceNode, 0) for k := range orphanedNodesMap { if k.Namespace != "" && 
proj.IsGroupKindPermitted(k.GroupKind(), true) && !isKnownOrphanedResourceExclusion(k, proj) { - err := ctrl.stateCache.IterateHierarchy(a.Spec.Destination.Server, k, func(child appv1.ResourceNode, appName string) { + err := ctrl.stateCache.IterateHierarchy(a.Spec.Destination.Server, k, func(child appv1.ResourceNode, appName string) bool { belongToAnotherApp := false if appName != "" { - if _, exists, err := ctrl.appInformer.GetIndexer().GetByKey(ctrl.namespace + "/" + appName); exists && err == nil { + appKey := ctrl.toAppKey(appName) + if _, exists, err := ctrl.appInformer.GetIndexer().GetByKey(appKey); exists && err == nil { belongToAnotherApp = true } } - if !belongToAnotherApp { - orphanedNodes = append(orphanedNodes, child) + + if belongToAnotherApp { + return false } + + permitted, _ := proj.IsResourcePermitted(schema.GroupKind{Group: child.ResourceRef.Group, Kind: child.ResourceRef.Kind}, child.Namespace, a.Spec.Destination, func(project string) ([]*appv1.Cluster, error) { + return ctrl.db.GetProjectClusters(context.TODO(), project) + }) + + if !permitted { + return false + } + orphanedNodes = append(orphanedNodes, child) + return true }) if err != nil { return nil, err @@ -395,9 +561,8 @@ func (ctrl *ApplicationController) getResourceTree(a *appv1.Application, managed hosts, err := ctrl.getAppHosts(a, nodes) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get app hosts: %w", err) } - return &appv1.ApplicationTree{Nodes: nodes, OrphanedNodes: orphanedNodes, Hosts: hosts}, nil } @@ -413,11 +578,13 @@ func (ctrl *ApplicationController) getAppHosts(a *appv1.Application, appNodes [] appPods[kube.NewResourceKey(node.Group, node.Kind, node.Namespace, node.Name)] = true } } + allNodesInfo := map[string]statecache.NodeInfo{} allPodsByNode := map[string][]statecache.PodInfo{} appPodsByNode := map[string][]statecache.PodInfo{} err := ctrl.stateCache.IterateResources(a.Spec.Destination.Server, func(res *clustercache.Resource, info 
*statecache.ResourceInfo) { key := res.ResourceKey() + switch { case info.NodeInfo != nil && key.Group == "" && key.Kind == "Node": allNodesInfo[key.Name] = *info.NodeInfo @@ -464,7 +631,7 @@ func (ctrl *ApplicationController) getAppHosts(a *appv1.Application, appNodes [] for _, pod := range neighbors { for name, resource := range pod.ResourceRequests { - if !supportedResourceNames[name] { + if !supportedResourceNames[name] || pod.Phase == v1.PodSucceeded || pod.Phase == v1.PodFailed { continue } info := resources[name] @@ -487,7 +654,7 @@ func (ctrl *ApplicationController) getAppHosts(a *appv1.Application, appNodes [] return hosts, nil } -func (ctrl *ApplicationController) managedResources(comparisonResult *comparisonResult) ([]*appv1.ResourceDiff, error) { +func (ctrl *ApplicationController) hideSecretData(app *appv1.Application, comparisonResult *comparisonResult) ([]*appv1.ResourceDiff, error) { items := make([]*appv1.ResourceDiff, len(comparisonResult.managedResources)) for i := range comparisonResult.managedResources { res := comparisonResult.managedResources[i] @@ -507,26 +674,51 @@ func (ctrl *ApplicationController) managedResources(comparisonResult *comparison var err error target, live, err = diff.HideSecretData(res.Target, res.Live) if err != nil { - return nil, err + return nil, fmt.Errorf("error hiding secret data: %s", err) } compareOptions, err := ctrl.settingsMgr.GetResourceCompareOptions() if err != nil { - return nil, err + return nil, fmt.Errorf("error getting resource compare options: %s", err) } - resDiffPtr, err := diff.Diff(target, live, - diff.WithNormalizer(comparisonResult.diffNormalizer), - diff.WithLogr(logutils.NewLogrusLogger(log.New())), - diff.IgnoreAggregatedRoles(compareOptions.IgnoreAggregatedRoles)) + resourceOverrides, err := ctrl.settingsMgr.GetResourceOverrides() if err != nil { - return nil, err + return nil, fmt.Errorf("error getting resource overrides: %s", err) + } + appLabelKey, err := 
ctrl.settingsMgr.GetAppInstanceLabelKey() + if err != nil { + return nil, fmt.Errorf("error getting app instance label key: %s", err) + } + trackingMethod, err := ctrl.settingsMgr.GetTrackingMethod() + if err != nil { + return nil, fmt.Errorf("error getting tracking method: %s", err) } - resDiff = *resDiffPtr + + clusterCache, err := ctrl.stateCache.GetClusterCache(app.Spec.Destination.Server) + if err != nil { + return nil, fmt.Errorf("error getting cluster cache: %s", err) + } + diffConfig, err := argodiff.NewDiffConfigBuilder(). + WithDiffSettings(app.Spec.IgnoreDifferences, resourceOverrides, compareOptions.IgnoreAggregatedRoles). + WithTracking(appLabelKey, trackingMethod). + WithNoCache(). + WithLogger(logutils.NewLogrusLogger(logutils.NewWithCurrentConfig())). + WithGVKParser(clusterCache.GetGVKParser()). + Build() + if err != nil { + return nil, fmt.Errorf("appcontroller error building diff config: %s", err) + } + + diffResult, err := argodiff.StateDiff(live, target, diffConfig) + if err != nil { + return nil, fmt.Errorf("error applying diff: %s", err) + } + resDiff = diffResult } if live != nil { data, err := json.Marshal(live) if err != nil { - return nil, err + return nil, fmt.Errorf("error marshaling live json: %s", err) } item.LiveState = string(data) } else { @@ -536,7 +728,7 @@ func (ctrl *ApplicationController) managedResources(comparisonResult *comparison if target != nil { data, err := json.Marshal(target) if err != nil { - return nil, err + return nil, fmt.Errorf("error marshaling target json: %s", err) } item.TargetState = string(data) } else { @@ -564,6 +756,7 @@ func (ctrl *ApplicationController) Run(ctx context.Context, statusProcessors int go ctrl.appInformer.Run(ctx.Done()) go ctrl.projInformer.Run(ctx.Done()) + go ctrl.deploymentInformer.Informer().Run(ctx.Done()) errors.CheckError(ctrl.stateCache.Init()) @@ -601,15 +794,17 @@ func (ctrl *ApplicationController) Run(ctx context.Context, statusProcessors int <-ctx.Done() } +// 
requestAppRefresh adds a request for given app to the refresh queue. appName +// needs to be the qualified name of the application, i.e. /. func (ctrl *ApplicationController) requestAppRefresh(appName string, compareWith *CompareWith, after *time.Duration) { - key := fmt.Sprintf("%s/%s", ctrl.namespace, appName) + key := ctrl.toAppKey(appName) if compareWith != nil && after != nil { ctrl.appComparisonTypeRefreshQueue.AddAfter(fmt.Sprintf("%s/%d", key, compareWith), *after) } else { if compareWith != nil { ctrl.refreshRequestedAppsMutex.Lock() - ctrl.refreshRequestedApps[appName] = compareWith.Max(ctrl.refreshRequestedApps[appName]) + ctrl.refreshRequestedApps[key] = compareWith.Max(ctrl.refreshRequestedApps[key]) ctrl.refreshRequestedAppsMutex.Unlock() } if after != nil { @@ -662,17 +857,31 @@ func (ctrl *ApplicationController) processAppOperationQueueItem() (processNext b } app := origApp.DeepCopy() + if app.Operation != nil { + // If we get here, we are about to process an operation, but we cannot rely on informer since it might have stale data. + // So always retrieve the latest version to ensure it is not stale to avoid unnecessary syncing. + // We cannot rely on informer since applications might be updated by both application controller and api server. 
+ freshApp, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.ObjectMeta.Namespace).Get(context.Background(), app.ObjectMeta.Name, metav1.GetOptions{}) + if err != nil { + log.Errorf("Failed to retrieve latest application state: %v", err) + return + } + app = freshApp + } + if app.Operation != nil { ctrl.processRequestedAppOperation(app) } else if app.DeletionTimestamp != nil && app.CascadedDeletion() { - _, err = ctrl.finalizeApplicationDeletion(app) + _, err = ctrl.finalizeApplicationDeletion(app, func(project string) ([]*appv1.Cluster, error) { + return ctrl.db.GetProjectClusters(context.Background(), project) + }) if err != nil { ctrl.setAppCondition(app, appv1.ApplicationCondition{ Type: appv1.ApplicationConditionDeletionError, Message: err.Error(), }) message := fmt.Sprintf("Unable to delete application resources: %v", err.Error()) - ctrl.auditLogger.LogAppEvent(app, argo.EventInfo{Reason: argo.EventReasonStatusRefreshed, Type: v1.EventTypeWarning}, message) + ctrl.auditLogger.LogAppEvent(app, argo.EventInfo{Reason: argo.EventReasonStatusRefreshed, Type: v1.EventTypeWarning}, message, "") } } return @@ -700,7 +909,7 @@ func (ctrl *ApplicationController) processAppComparisonTypeQueueItem() (processN log.Warnf("Unable to parse comparison type: %v", err) return } else { - ctrl.requestAppRefresh(parts[1], CompareWith(compareWith).Pointer(), nil) + ctrl.requestAppRefresh(ctrl.toAppQualifiedName(parts[1], parts[0]), CompareWith(compareWith).Pointer(), nil) } } return @@ -746,13 +955,12 @@ func (ctrl *ApplicationController) processProjectQueueItem() (processNext bool) func (ctrl *ApplicationController) finalizeProjectDeletion(proj *appv1.AppProject) error { apps, err := ctrl.appLister.Applications(ctrl.namespace).List(labels.Everything()) if err != nil { - return err + return fmt.Errorf("error listing applications: %w", err) } appsCount := 0 for i := range apps { if apps[i].Spec.GetProject() == proj.Name { appsCount++ - break } } if appsCount == 0 
{ @@ -777,25 +985,33 @@ func (ctrl *ApplicationController) removeProjectFinalizer(proj *appv1.AppProject // shouldBeDeleted returns whether a given resource obj should be deleted on cascade delete of application app func (ctrl *ApplicationController) shouldBeDeleted(app *appv1.Application, obj *unstructured.Unstructured) bool { - return !kube.IsCRD(obj) && !isSelfReferencedApp(app, kube.GetObjectRef(obj)) + return !kube.IsCRD(obj) && !isSelfReferencedApp(app, kube.GetObjectRef(obj)) && + !resourceutil.HasAnnotationOption(obj, synccommon.AnnotationSyncOptions, synccommon.SyncOptionDisableDeletion) && + !resourceutil.HasAnnotationOption(obj, helm.ResourcePolicyAnnotation, helm.ResourcePolicyKeep) } -func (ctrl *ApplicationController) getPermittedAppLiveObjects(app *appv1.Application, proj *appv1.AppProject) (map[kube.ResourceKey]*unstructured.Unstructured, error) { +func (ctrl *ApplicationController) getPermittedAppLiveObjects(app *appv1.Application, proj *appv1.AppProject, projectClusters func(project string) ([]*appv1.Cluster, error)) (map[kube.ResourceKey]*unstructured.Unstructured, error) { objsMap, err := ctrl.stateCache.GetManagedLiveObjs(app, []*unstructured.Unstructured{}) if err != nil { return nil, err } // Don't delete live resources which are not permitted in the app project for k, v := range objsMap { - if !proj.IsLiveResourcePermitted(v, app.Spec.Destination.Server) { + permitted, err := proj.IsLiveResourcePermitted(v, app.Spec.Destination.Server, app.Spec.Destination.Name, projectClusters) + + if err != nil { + return nil, err + } + + if !permitted { delete(objsMap, k) } } return objsMap, nil } -func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Application) ([]*unstructured.Unstructured, error) { - logCtx := log.WithField("application", app.Name) +func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Application, projectClusters func(project string) ([]*appv1.Cluster, error)) ([]*unstructured.Unstructured, 
error) { + logCtx := log.WithField("application", app.QualifiedName()) logCtx.Infof("Deleting resources") // Get refreshed application info, since informer app copy might be stale app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Get(context.Background(), app.Name, metav1.GetOptions{}) @@ -810,84 +1026,109 @@ func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Applic return nil, err } - err = argo.ValidateDestination(context.Background(), &app.Spec.Destination, ctrl.db) - if err != nil { - return nil, err - } + // validDestination is true if the Application destination points to a cluster that is managed by Argo CD + // (and thus either a cluster secret exists for it, or it's local); validDestination is false otherwise. + validDestination := true - objsMap, err := ctrl.getPermittedAppLiveObjects(app, proj) - if err != nil { - return nil, err + // Validate the cluster using the Application destination's `name` field, if applicable, + // and set the Server field, if needed. 
+ if err := argo.ValidateDestination(context.Background(), &app.Spec.Destination, ctrl.db); err != nil { + log.Warnf("Unable to validate destination of the Application being deleted: %v", err) + validDestination = false } objs := make([]*unstructured.Unstructured, 0) - for k := range objsMap { - // Wait for objects pending deletion to complete before proceeding with next sync wave - if objsMap[k].GetDeletionTimestamp() != nil { - logCtx.Infof("%d objects remaining for deletion", len(objsMap)) - return objs, nil - } - if ctrl.shouldBeDeleted(app, objsMap[k]) { - objs = append(objs, objsMap[k]) + var cluster *appv1.Cluster + + // Attempt to validate the destination via its URL + if validDestination { + if cluster, err = ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server); err != nil { + log.Warnf("Unable to locate cluster URL for Application being deleted: %v", err) + validDestination = false } } - cluster, err := ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server) - if err != nil { - return nil, err - } - config := metrics.AddMetricsTransportWrapper(ctrl.metricsServer, app, cluster.RESTConfig()) + if validDestination { + // ApplicationDestination points to a valid cluster, so we may clean up the live objects - filteredObjs := FilterObjectsForDeletion(objs) + objsMap, err := ctrl.getPermittedAppLiveObjects(app, proj, projectClusters) + if err != nil { + return nil, err + } - propagationPolicy := metav1.DeletePropagationForeground - if app.GetPropagationPolicy() == common.BackgroundPropagationPolicyFinalizer { - propagationPolicy = metav1.DeletePropagationBackground - } - logCtx.Infof("Deleting application's resources with %s propagation policy", propagationPolicy) + for k := range objsMap { + // Wait for objects pending deletion to complete before proceeding with next sync wave + if objsMap[k].GetDeletionTimestamp() != nil { + logCtx.Infof("%d objects remaining for deletion", len(objsMap)) + return objs, nil + } - err = 
kube.RunAllAsync(len(filteredObjs), func(i int) error { - obj := filteredObjs[i] - return ctrl.kubectl.DeleteResource(context.Background(), config, obj.GroupVersionKind(), obj.GetName(), obj.GetNamespace(), metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}) - }) - if err != nil { - return objs, err - } + if ctrl.shouldBeDeleted(app, objsMap[k]) { + objs = append(objs, objsMap[k]) + } + } - objsMap, err = ctrl.getPermittedAppLiveObjects(app, proj) - if err != nil { - return nil, err - } + config := metrics.AddMetricsTransportWrapper(ctrl.metricsServer, app, cluster.RESTConfig()) - for k, obj := range objsMap { - if !ctrl.shouldBeDeleted(app, obj) { - delete(objsMap, k) + filteredObjs := FilterObjectsForDeletion(objs) + + propagationPolicy := metav1.DeletePropagationForeground + if app.GetPropagationPolicy() == appv1.BackgroundPropagationPolicyFinalizer { + propagationPolicy = metav1.DeletePropagationBackground + } + logCtx.Infof("Deleting application's resources with %s propagation policy", propagationPolicy) + + err = kube.RunAllAsync(len(filteredObjs), func(i int) error { + obj := filteredObjs[i] + return ctrl.kubectl.DeleteResource(context.Background(), config, obj.GroupVersionKind(), obj.GetName(), obj.GetNamespace(), metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}) + }) + if err != nil { + return objs, err + } + + objsMap, err = ctrl.getPermittedAppLiveObjects(app, proj, projectClusters) + if err != nil { + return nil, err + } + + for k, obj := range objsMap { + if !ctrl.shouldBeDeleted(app, obj) { + delete(objsMap, k) + } + } + if len(objsMap) > 0 { + logCtx.Infof("%d objects remaining for deletion", len(objsMap)) + return objs, nil } } - if len(objsMap) > 0 { - logCtx.Infof("%d objects remaining for deletion", len(objsMap)) - return objs, nil - } - err = ctrl.cache.SetAppManagedResources(app.Name, nil) - if err != nil { + + if err := ctrl.cache.SetAppManagedResources(app.Name, nil); err != nil { return objs, err } - err = 
ctrl.cache.SetAppResourcesTree(app.Name, nil) - if err != nil { + + if err := ctrl.cache.SetAppResourcesTree(app.Name, nil); err != nil { return objs, err } - err = ctrl.removeCascadeFinalizer(app) - if err != nil { + if err := ctrl.removeCascadeFinalizer(app); err != nil { return objs, err } - logCtx.Infof("Successfully deleted %d resources", len(objs)) - ctrl.projectRefreshQueue.Add(fmt.Sprintf("%s/%s", app.Namespace, app.Spec.GetProject())) + if validDestination { + logCtx.Infof("Successfully deleted %d resources", len(objs)) + } else { + logCtx.Infof("Resource entries removed from undefined cluster") + } + + ctrl.projectRefreshQueue.Add(fmt.Sprintf("%s/%s", ctrl.namespace, app.Spec.GetProject())) return objs, nil } func (ctrl *ApplicationController) removeCascadeFinalizer(app *appv1.Application) error { + _, err := ctrl.getAppProj(app) + if err != nil { + return fmt.Errorf("error getting project: %w", err) + } app.UnSetCascadedDeletion() var patch []byte patch, _ = json.Marshal(map[string]interface{}{ @@ -896,7 +1137,7 @@ func (ctrl *ApplicationController) removeCascadeFinalizer(app *appv1.Application }, }) - _, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Patch(context.Background(), app.Name, types.MergePatchType, patch, metav1.PatchOptions{}) + _, err = ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Patch(context.Background(), app.Name, types.MergePatchType, patch, metav1.PatchOptions{}) return err } @@ -925,7 +1166,7 @@ func (ctrl *ApplicationController) setAppCondition(app *appv1.Application, condi } func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Application) { - logCtx := log.WithField("application", app.Name) + logCtx := log.WithField("application", app.QualifiedName()) var state *appv1.OperationState // Recover from any unexpected panics and automatically set the status to be failed defer func() { @@ -942,20 +1183,6 @@ func (ctrl *ApplicationController) 
processRequestedAppOperation(app *appv1.Appli }() terminating := false if isOperationInProgress(app) { - // If we get here, we are about process an operation but we notice it is already in progress. - // We need to detect if the app object we pulled off the informer is stale and doesn't - // reflect the fact that the operation is completed. We don't want to perform the operation - // again. To detect this, always retrieve the latest version to ensure it is not stale. - freshApp, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(ctrl.namespace).Get(context.Background(), app.ObjectMeta.Name, metav1.GetOptions{}) - if err != nil { - logCtx.Errorf("Failed to retrieve latest application state: %v", err) - return - } - if !isOperationInProgress(freshApp) { - logCtx.Infof("Skipping operation on stale application state") - return - } - app = freshApp state = app.Status.OperationState.DeepCopy() terminating = state.Phase == synccommon.OperationTerminating // Failed operation with retry strategy might have be in-progress and has completion time @@ -970,7 +1197,7 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli retryAfter := time.Until(retryAt) if retryAfter > 0 { logCtx.Infof("Skipping retrying in-progress operation. Attempting again at: %s", retryAt.Format(time.RFC3339)) - ctrl.requestAppRefresh(app.Name, CompareWithLatest.Pointer(), &retryAfter) + ctrl.requestAppRefresh(app.QualifiedName(), CompareWithLatest.Pointer(), &retryAfter) return } else { // retrying operation. 
remove previous failure time in app since it is used as a trigger @@ -996,11 +1223,24 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli ctrl.appStateManager.SyncAppState(app, state) } + // Check whether application is allowed to use project + _, err := ctrl.getAppProj(app) + if err != nil { + state.Phase = synccommon.OperationError + state.Message = err.Error() + } + if state.Phase == synccommon.OperationRunning { // It's possible for an app to be terminated while we were operating on it. We do not want // to clobber the Terminated state with Running. Get the latest app state to check for this. - freshApp, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(ctrl.namespace).Get(context.Background(), app.ObjectMeta.Name, metav1.GetOptions{}) + freshApp, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Get(context.Background(), app.ObjectMeta.Name, metav1.GetOptions{}) if err == nil { + // App may have lost permissions to use the project meanwhile. 
+ _, err = ctrl.getAppProj(freshApp) + if err != nil { + state.Phase = synccommon.OperationFailed + state.Message = fmt.Sprintf("operation not allowed: %v", err) + } if freshApp.Status.OperationState != nil && freshApp.Status.OperationState.Phase == synccommon.OperationTerminating { state.Phase = synccommon.OperationTerminating state.Message = "operation is terminating" @@ -1028,12 +1268,12 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli } ctrl.setOperationState(app, state) - if state.Phase.Completed() && !app.Operation.Sync.DryRun { + if state.Phase.Completed() && (app.Operation.Sync != nil && !app.Operation.Sync.DryRun) { // if we just completed an operation, force a refresh so that UI will report up-to-date // sync/health information if _, err := cache.MetaNamespaceKeyFunc(app); err == nil { // force app refresh with using CompareWithLatest comparison type and trigger app reconciliation loop - ctrl.requestAppRefresh(app.Name, CompareWithLatest.Pointer(), nil) + ctrl.requestAppRefresh(app.QualifiedName(), CompareWithLatest.Pointer(), nil) } else { logCtx.Warnf("Fails to requeue application: %v", err) } @@ -1041,76 +1281,86 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli } func (ctrl *ApplicationController) setOperationState(app *appv1.Application, state *appv1.OperationState) { - kube.RetryUntilSucceed(context.Background(), updateOperationStateTimeout, "Update application operation state", logutils.NewLogrusLogger(log.New()), func() error { - if state.Phase == "" { - // expose any bugs where we neglect to set phase - panic("no phase was set") - } - if state.Phase.Completed() { - now := metav1.Now() - state.FinishedAt = &now - } - patch := map[string]interface{}{ - "status": map[string]interface{}{ - "operationState": state, - }, - } - if state.Phase.Completed() { - // If operation is completed, clear the operation field to indicate no operation is - // in progress. 
- patch["operation"] = nil - } - if reflect.DeepEqual(app.Status.OperationState, state) { - log.Infof("No operation updates necessary to '%s'. Skipping patch", app.Name) - return nil - } - patchJSON, err := json.Marshal(patch) + logCtx := log.WithFields(log.Fields{"application": app.Name, "appNamespace": app.Namespace, "project": app.Spec.Project}) + + if state.Phase == "" { + // expose any bugs where we neglect to set phase + panic("no phase was set") + } + if state.Phase.Completed() { + now := metav1.Now() + state.FinishedAt = &now + } + patch := map[string]interface{}{ + "status": map[string]interface{}{ + "operationState": state, + }, + } + if state.Phase.Completed() { + // If operation is completed, clear the operation field to indicate no operation is + // in progress. + patch["operation"] = nil + } + if reflect.DeepEqual(app.Status.OperationState, state) { + logCtx.Infof("No operation updates necessary to '%s'. Skipping patch", app.QualifiedName()) + return + } + patchJSON, err := json.Marshal(patch) + if err != nil { + logCtx.Errorf("error marshaling json: %v", err) + return + } + if app.Status.OperationState != nil && app.Status.OperationState.FinishedAt != nil && state.FinishedAt == nil { + patchJSON, err = jsonpatch.MergeMergePatches(patchJSON, []byte(`{"status": {"operationState": {"finishedAt": null}}}`)) if err != nil { - return err - } - if app.Status.OperationState != nil && app.Status.OperationState.FinishedAt != nil && state.FinishedAt == nil { - patchJSON, err = jsonpatch.MergeMergePatches(patchJSON, []byte(`{"status": {"operationState": {"finishedAt": null}}}`)) - if err != nil { - return err - } + logCtx.Errorf("error merging operation state patch: %v", err) + return } + } - appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(ctrl.namespace) + kube.RetryUntilSucceed(context.Background(), updateOperationStateTimeout, "Update application operation state", logutils.NewLogrusLogger(logutils.NewWithCurrentConfig()), func() error { 
+ appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace) _, err = appClient.Patch(context.Background(), app.Name, types.MergePatchType, patchJSON, metav1.PatchOptions{}) if err != nil { // Stop retrying updating deleted application if apierr.IsNotFound(err) { return nil } - return err - } - log.Infof("updated '%s' operation (phase: %s)", app.Name, state.Phase) - if state.Phase.Completed() { - eventInfo := argo.EventInfo{Reason: argo.EventReasonOperationCompleted} - var messages []string - if state.Operation.Sync != nil && len(state.Operation.Sync.Resources) > 0 { - messages = []string{"Partial sync operation"} - } else { - messages = []string{"Sync operation"} - } - if state.SyncResult != nil { - messages = append(messages, "to", state.SyncResult.Revision) - } - if state.Phase.Successful() { - eventInfo.Type = v1.EventTypeNormal - messages = append(messages, "succeeded") - } else { - eventInfo.Type = v1.EventTypeWarning - messages = append(messages, "failed:", state.Message) - } - ctrl.auditLogger.LogAppEvent(app, eventInfo, strings.Join(messages, " ")) - ctrl.metricsServer.IncSync(app, state) + // kube.RetryUntilSucceed logs failed attempts at "debug" level, but we want to know if this fails. Log a + // warning. 
+ logCtx.Warnf("error patching application with operation state: %v", err) + return fmt.Errorf("error patching application with operation state: %w", err) } return nil }) + + logCtx.Infof("updated '%s' operation (phase: %s)", app.QualifiedName(), state.Phase) + if state.Phase.Completed() { + eventInfo := argo.EventInfo{Reason: argo.EventReasonOperationCompleted} + var messages []string + if state.Operation.Sync != nil && len(state.Operation.Sync.Resources) > 0 { + messages = []string{"Partial sync operation"} + } else { + messages = []string{"Sync operation"} + } + if state.SyncResult != nil { + messages = append(messages, "to", state.SyncResult.Revision) + } + if state.Phase.Successful() { + eventInfo.Type = v1.EventTypeNormal + messages = append(messages, "succeeded") + } else { + eventInfo.Type = v1.EventTypeWarning + messages = append(messages, "failed:", state.Message) + } + ctrl.auditLogger.LogAppEvent(app, eventInfo, strings.Join(messages, " "), "") + ctrl.metricsServer.IncSync(app, state) + } } func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext bool) { + patchMs := time.Duration(0) // time spent in doing patch/update calls + setOpMs := time.Duration(0) // time spent in doing Operation patch calls in autosync appKey, shutdown := ctrl.appRefreshQueue.Get() if shutdown { processNext = false @@ -1123,7 +1373,6 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo } ctrl.appRefreshQueue.Done(appKey) }() - obj, exists, err := ctrl.appInformer.GetIndexer().GetByKey(appKey.(string)) if err != nil { log.Errorf("Failed to get application '%s' from informer index: %+v", appKey, err) @@ -1139,42 +1388,46 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo return } origApp = origApp.DeepCopy() - needRefresh, refreshType, comparisonLevel := ctrl.needRefreshAppStatus(origApp, ctrl.statusRefreshTimeout) + needRefresh, refreshType, comparisonLevel := ctrl.needRefreshAppStatus(origApp, 
ctrl.statusRefreshTimeout, ctrl.statusHardRefreshTimeout) if !needRefresh { return } - app := origApp.DeepCopy() - logCtx := log.WithFields(log.Fields{"application": app.Name}) + logCtx := log.WithFields(log.Fields{ + "application": app.QualifiedName(), + "level": comparisonLevel, + "dest-server": origApp.Spec.Destination.Server, + "dest-name": origApp.Spec.Destination.Name, + "dest-namespace": origApp.Spec.Destination.Namespace, + }) + startTime := time.Now() defer func() { reconcileDuration := time.Since(startTime) ctrl.metricsServer.IncReconcile(origApp, reconcileDuration) logCtx.WithFields(log.Fields{ - "time_ms": reconcileDuration.Milliseconds(), - "level": comparisonLevel, - "dest-server": origApp.Spec.Destination.Server, - "dest-name": origApp.Spec.Destination.Name, - "dest-namespace": origApp.Spec.Destination.Namespace, + "time_ms": reconcileDuration.Milliseconds(), + "patch_ms": patchMs.Milliseconds(), + "setop_ms": setOpMs.Milliseconds(), }).Info("Reconciliation completed") }() if comparisonLevel == ComparisonWithNothing { managedResources := make([]*appv1.ResourceDiff, 0) - if err := ctrl.cache.GetAppManagedResources(app.Name, &managedResources); err != nil { - logCtx.Warnf("Failed to get cached managed resources for tree reconciliation, fallback to full reconciliation") + if err := ctrl.cache.GetAppManagedResources(app.InstanceName(ctrl.namespace), &managedResources); err != nil { + logCtx.Warnf("Failed to get cached managed resources for tree reconciliation, fall back to full reconciliation") } else { var tree *appv1.ApplicationTree if tree, err = ctrl.getResourceTree(app, managedResources); err == nil { - app.Status.Summary = tree.GetSummary() - if err := ctrl.cache.SetAppResourcesTree(app.Name, tree); err != nil { + app.Status.Summary = tree.GetSummary(app) + if err := ctrl.cache.SetAppResourcesTree(app.InstanceName(ctrl.namespace), tree); err != nil { logCtx.Errorf("Failed to cache resources tree: %v", err) return } } - 
ctrl.persistAppStatus(origApp, &app.Status) + patchMs = ctrl.persistAppStatus(origApp, &app.Status) return } } @@ -1183,7 +1436,14 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo if hasErrors { app.Status.Sync.Status = appv1.SyncStatusCodeUnknown app.Status.Health.Status = health.HealthStatusUnknown - ctrl.persistAppStatus(origApp, &app.Status) + patchMs = ctrl.persistAppStatus(origApp, &app.Status) + + if err := ctrl.cache.SetAppResourcesTree(app.InstanceName(ctrl.namespace), &appv1.ApplicationTree{}); err != nil { + log.Warnf("failed to set app resource tree: %v", err) + } + if err := ctrl.cache.SetAppManagedResources(app.InstanceName(ctrl.namespace), nil); err != nil { + log.Warnf("failed to set app managed resources tree: %v", err) + } return } @@ -1192,13 +1452,38 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo localManifests = opState.Operation.Sync.Manifests } - revision := app.Spec.Source.TargetRevision - if comparisonLevel == CompareWithRecent { - revision = app.Status.Sync.Revision - } + revisions := make([]string, 0) + sources := make([]appv1.ApplicationSource, 0) + hasMultipleSources := app.Spec.HasMultipleSources() + + // If we have multiple sources, we use all the sources under `sources` field and ignore source under `source` field. + // else we use the source under the source field. + if hasMultipleSources { + for _, source := range app.Spec.Sources { + // We do not perform any filtering of duplicate sources. 
+ // Argo CD will apply and update the resources generated from the sources automatically + // based on the order in which manifests were generated + sources = append(sources, source) + revisions = append(revisions, source.TargetRevision) + } + if comparisonLevel == CompareWithRecent { + revisions = app.Status.Sync.Revisions + } + } else { + revision := app.Spec.GetSource().TargetRevision + if comparisonLevel == CompareWithRecent { + revision = app.Status.Sync.Revision + } + revisions = append(revisions, revision) + sources = append(sources, app.Spec.GetSource()) + } now := metav1.Now() - compareResult := ctrl.appStateManager.CompareAppState(app, project, revision, app.Spec.Source, refreshType == appv1.RefreshTypeHard, localManifests) + + compareResult := ctrl.appStateManager.CompareAppState(app, project, revisions, sources, + refreshType == appv1.RefreshTypeHard, + comparisonLevel == CompareWithLatestForceResolve, localManifests, hasMultipleSources) + for k, v := range compareResult.timings { logCtx = logCtx.WithField(k, v.Milliseconds()) } @@ -1209,11 +1494,12 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo if err != nil { logCtx.Errorf("Failed to cache app resources: %v", err) } else { - app.Status.Summary = tree.GetSummary() + app.Status.Summary = tree.GetSummary(app) } if project.Spec.SyncWindows.Matches(app).CanSync(false) { - syncErrCond := ctrl.autoSync(app, compareResult.syncStatus, compareResult.resources) + syncErrCond, opMS := ctrl.autoSync(app, compareResult.syncStatus, compareResult.resources) + setOpMs = opMS if syncErrCond != nil { app.Status.SetConditions( []appv1.ApplicationCondition{*syncErrCond}, @@ -1229,7 +1515,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo logCtx.Info("Sync prevented by sync window") } - if app.Status.ReconciledAt == nil || comparisonLevel == CompareWithLatest { + if app.Status.ReconciledAt == nil || comparisonLevel >= CompareWithLatest { 
app.Status.ReconciledAt = &now } app.Status.Sync = *compareResult.syncStatus @@ -1239,7 +1525,9 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo return resourceStatusKey(app.Status.Resources[i]) < resourceStatusKey(app.Status.Resources[j]) }) app.Status.SourceType = compareResult.appSourceType - ctrl.persistAppStatus(origApp, &app.Status) + app.Status.SourceTypes = compareResult.appSourceTypes + app.Status.ControllerNamespace = ctrl.namespace + patchMs = ctrl.persistAppStatus(origApp, &app.Status) return } @@ -1247,37 +1535,60 @@ func resourceStatusKey(res appv1.ResourceStatus) string { return strings.Join([]string{res.Group, res.Kind, res.Namespace, res.Name}, "/") } +func currentSourceEqualsSyncedSource(app *appv1.Application) bool { + if app.Spec.HasMultipleSources() { + return app.Spec.Sources.Equals(app.Status.Sync.ComparedTo.Sources) + } + return app.Spec.Source.Equals(&app.Status.Sync.ComparedTo.Source) +} + // needRefreshAppStatus answers if application status needs to be refreshed. // Returns true if application never been compared, has changed or comparison result has expired. -// Additionally returns whether full refresh was requested or not. +// Additionally, it returns whether full refresh was requested or not. // If full refresh is requested then target and live state should be reconciled, else only live state tree should be updated. 
-func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application, statusRefreshTimeout time.Duration) (bool, appv1.RefreshType, CompareWith) { - logCtx := log.WithFields(log.Fields{"application": app.Name}) +func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application, statusRefreshTimeout, statusHardRefreshTimeout time.Duration) (bool, appv1.RefreshType, CompareWith) { + logCtx := log.WithFields(log.Fields{"application": app.QualifiedName()}) var reason string compareWith := CompareWithLatest refreshType := appv1.RefreshTypeNormal - expired := app.Status.ReconciledAt == nil || app.Status.ReconciledAt.Add(statusRefreshTimeout).Before(time.Now().UTC()) + softExpired := app.Status.ReconciledAt == nil || app.Status.ReconciledAt.Add(statusRefreshTimeout).Before(time.Now().UTC()) + hardExpired := (app.Status.ReconciledAt == nil || app.Status.ReconciledAt.Add(statusHardRefreshTimeout).Before(time.Now().UTC())) && statusHardRefreshTimeout.Seconds() != 0 if requestedType, ok := app.IsRefreshRequested(); ok { + compareWith = CompareWithLatestForceResolve // user requested app refresh. refreshType = requestedType reason = fmt.Sprintf("%s refresh requested", refreshType) - } else if expired { - // The commented line below mysteriously crashes if app.Status.ReconciledAt is nil - // reason = fmt.Sprintf("comparison expired. reconciledAt: %v, expiry: %v", app.Status.ReconciledAt, statusRefreshTimeout) - //TODO: find existing Golang bug or create a new one - reconciledAtStr := "never" - if app.Status.ReconciledAt != nil { - reconciledAtStr = app.Status.ReconciledAt.String() - } - reason = fmt.Sprintf("comparison expired. 
reconciledAt: %v, expiry: %v", reconciledAtStr, statusRefreshTimeout) - } else if !app.Spec.Source.Equals(app.Status.Sync.ComparedTo.Source) { - reason = "spec.source differs" - } else if !app.Spec.Destination.Equals(app.Status.Sync.ComparedTo.Destination) { - reason = "spec.destination differs" - } else if requested, level := ctrl.isRefreshRequested(app.Name); requested { - compareWith = level - reason = "controller refresh requested" + } else { + if !currentSourceEqualsSyncedSource(app) { + reason = "spec.source differs" + compareWith = CompareWithLatestForceResolve + if app.Spec.HasMultipleSources() { + reason = "at least one of the spec.sources differs" + } + } else if hardExpired || softExpired { + // The commented line below mysteriously crashes if app.Status.ReconciledAt is nil + // reason = fmt.Sprintf("comparison expired. reconciledAt: %v, expiry: %v", app.Status.ReconciledAt, statusRefreshTimeout) + //TODO: find existing Golang bug or create a new one + reconciledAtStr := "never" + if app.Status.ReconciledAt != nil { + reconciledAtStr = app.Status.ReconciledAt.String() + } + reason = fmt.Sprintf("comparison expired, requesting refresh. reconciledAt: %v, expiry: %v", reconciledAtStr, statusRefreshTimeout) + if hardExpired { + reason = fmt.Sprintf("comparison expired, requesting hard refresh. 
reconciledAt: %v, expiry: %v", reconciledAtStr, statusHardRefreshTimeout) + refreshType = appv1.RefreshTypeHard + } + } else if !app.Spec.Destination.Equals(app.Status.Sync.ComparedTo.Destination) { + reason = "spec.destination differs" + } else if app.HasChangedManagedNamespaceMetadata() { + reason = "spec.syncPolicy.managedNamespaceMetadata differs" + } else if !app.Spec.IgnoreDifferences.Equals(app.Status.Sync.ComparedTo.IgnoreDifferences) { + reason = "spec.ignoreDifferences differs" + } else if requested, level := ctrl.isRefreshRequested(app.QualifiedName()); requested { + compareWith = level + reason = "controller refresh requested" + } } if reason != "" { @@ -1291,17 +1602,7 @@ func (ctrl *ApplicationController) refreshAppConditions(app *appv1.Application) errorConditions := make([]appv1.ApplicationCondition, 0) proj, err := ctrl.getAppProj(app) if err != nil { - if apierr.IsNotFound(err) { - errorConditions = append(errorConditions, appv1.ApplicationCondition{ - Type: appv1.ApplicationConditionInvalidSpecError, - Message: fmt.Sprintf("Application referencing project %s which does not exist", app.Spec.Project), - }) - } else { - errorConditions = append(errorConditions, appv1.ApplicationCondition{ - Type: appv1.ApplicationConditionUnknownError, - Message: err.Error(), - }) - } + errorConditions = append(errorConditions, ctrl.projectErrorToCondition(err, app)) } else { specConditions, err := argo.ValidatePermissions(context.Background(), &app.Spec, proj, ctrl.db) if err != nil { @@ -1322,9 +1623,11 @@ func (ctrl *ApplicationController) refreshAppConditions(app *appv1.Application) // normalizeApplication normalizes an application.spec and additionally persists updates if it changed func (ctrl *ApplicationController) normalizeApplication(orig, app *appv1.Application) { - logCtx := log.WithFields(log.Fields{"application": app.Name}) + logCtx := log.WithFields(log.Fields{"application": app.QualifiedName()}) app.Spec = *argo.NormalizeApplicationSpec(&app.Spec) + 
patch, modified, err := diff.CreateTwoWayMergePatch(orig, app, appv1.Application{}) + if err != nil { logCtx.Errorf("error constructing app spec patch: %v", err) } else if modified { @@ -1339,15 +1642,15 @@ func (ctrl *ApplicationController) normalizeApplication(orig, app *appv1.Applica } // persistAppStatus persists updates to application status. If no changes were made, it is a no-op -func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, newStatus *appv1.ApplicationStatus) { - logCtx := log.WithFields(log.Fields{"application": orig.Name}) +func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, newStatus *appv1.ApplicationStatus) (patchMs time.Duration) { + logCtx := log.WithFields(log.Fields{"application": orig.QualifiedName()}) if orig.Status.Sync.Status != newStatus.Sync.Status { message := fmt.Sprintf("Updated sync status: %s -> %s", orig.Status.Sync.Status, newStatus.Sync.Status) - ctrl.auditLogger.LogAppEvent(orig, argo.EventInfo{Reason: argo.EventReasonResourceUpdated, Type: v1.EventTypeNormal}, message) + ctrl.auditLogger.LogAppEvent(orig, argo.EventInfo{Reason: argo.EventReasonResourceUpdated, Type: v1.EventTypeNormal}, message, "") } if orig.Status.Health.Status != newStatus.Health.Status { message := fmt.Sprintf("Updated health status: %s -> %s", orig.Status.Health.Status, newStatus.Health.Status) - ctrl.auditLogger.LogAppEvent(orig, argo.EventInfo{Reason: argo.EventReasonResourceUpdated, Type: v1.EventTypeNormal}, message) + ctrl.auditLogger.LogAppEvent(orig, argo.EventInfo{Reason: argo.EventReasonResourceUpdated, Type: v1.EventTypeNormal}, message, "") } var newAnnotations map[string]string if orig.GetAnnotations() != nil { @@ -1355,7 +1658,7 @@ func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, new for k, v := range orig.GetAnnotations() { newAnnotations[k] = v } - delete(newAnnotations, common.AnnotationKeyRefresh) + delete(newAnnotations, appv1.AnnotationKeyRefresh) } patch, 
modified, err := diff.CreateTwoWayMergePatch( &appv1.Application{ObjectMeta: metav1.ObjectMeta{Annotations: orig.GetAnnotations()}, Status: orig.Status}, @@ -1368,7 +1671,11 @@ func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, new logCtx.Infof("No status changes. Skipping patch") return } - logCtx.Debugf("patch: %s", string(patch)) + // calculate time for path call + start := time.Now() + defer func() { + patchMs = time.Since(start) + }() appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(orig.Namespace) _, err = appClient.Patch(context.Background(), orig.Name, types.MergePatchType, patch, metav1.PatchOptions{}) if err != nil { @@ -1376,28 +1683,30 @@ func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, new } else { logCtx.Infof("Update successful") } + return patchMs } // autoSync will initiate a sync operation for an application configured with automated sync -func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *appv1.SyncStatus, resources []appv1.ResourceStatus) *appv1.ApplicationCondition { +func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *appv1.SyncStatus, resources []appv1.ResourceStatus) (*appv1.ApplicationCondition, time.Duration) { if app.Spec.SyncPolicy == nil || app.Spec.SyncPolicy.Automated == nil { - return nil + return nil, 0 } - logCtx := log.WithFields(log.Fields{"application": app.Name}) + logCtx := log.WithFields(log.Fields{"application": app.QualifiedName()}) + if app.Operation != nil { logCtx.Infof("Skipping auto-sync: another operation is in progress") - return nil + return nil, 0 } if app.DeletionTimestamp != nil && !app.DeletionTimestamp.IsZero() { logCtx.Infof("Skipping auto-sync: deletion in progress") - return nil + return nil, 0 } // Only perform auto-sync if we detect OutOfSync status. 
This is to prevent us from attempting // a sync when application is already in a Synced or Unknown state if syncStatus.Status != appv1.SyncStatusCodeOutOfSync { logCtx.Infof("Skipping auto-sync: application status is %s", syncStatus.Status) - return nil + return nil, 0 } if !app.Spec.SyncPolicy.Automated.Prune { @@ -1410,18 +1719,20 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus * } if requirePruneOnly { logCtx.Infof("Skipping auto-sync: need to prune extra resources only but automated prune is disabled") - return nil + return nil, 0 } } desiredCommitSHA := syncStatus.Revision - alreadyAttempted, attemptPhase := alreadyAttemptedSync(app, desiredCommitSHA) + desiredCommitSHAsMS := syncStatus.Revisions + alreadyAttempted, attemptPhase := alreadyAttemptedSync(app, desiredCommitSHA, desiredCommitSHAsMS, app.Spec.HasMultipleSources()) selfHeal := app.Spec.SyncPolicy.Automated.SelfHeal op := appv1.Operation{ Sync: &appv1.SyncOperation{ Revision: desiredCommitSHA, Prune: app.Spec.SyncPolicy.Automated.Prune, SyncOptions: app.Spec.SyncPolicy.SyncOptions, + Revisions: desiredCommitSHAsMS, }, InitiatedBy: appv1.OperationInitiator{Automated: true}, Retry: appv1.RetryStrategy{Limit: 5}, @@ -1437,10 +1748,10 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus * if !attemptPhase.Successful() { logCtx.Warnf("Skipping auto-sync: failed previous sync attempt to %s", desiredCommitSHA) message := fmt.Sprintf("Failed sync attempt to %s: %s", desiredCommitSHA, app.Status.OperationState.Message) - return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message} + return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message}, 0 } logCtx.Infof("Skipping auto-sync: most recent sync already to %s", desiredCommitSHA) - return nil + return nil, 0 } else if alreadyAttempted && selfHeal { if shouldSelfHeal, retryAfter := ctrl.shouldSelfHeal(app); shouldSelfHeal { for 
_, resource := range resources { @@ -1454,8 +1765,8 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus * } } else { logCtx.Infof("Skipping auto-sync: already attempted sync to %s with timeout %v (retrying in %v)", desiredCommitSHA, ctrl.selfHealTimeout, retryAfter) - ctrl.requestAppRefresh(app.Name, CompareWithLatest.Pointer(), &retryAfter) - return nil + ctrl.requestAppRefresh(app.QualifiedName(), CompareWithLatest.Pointer(), &retryAfter) + return nil, 0 } } @@ -1470,38 +1781,61 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus * if bAllNeedPrune { message := fmt.Sprintf("Skipping sync attempt to %s: auto-sync will wipe out all resources", desiredCommitSHA) logCtx.Warnf(message) - return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message} + return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message}, 0 } } appIf := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace) + start := time.Now() _, err := argo.SetAppOperation(appIf, app.Name, &op) + setOpTime := time.Since(start) if err != nil { logCtx.Errorf("Failed to initiate auto-sync to %s: %v", desiredCommitSHA, err) - return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: err.Error()} + return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: err.Error()}, setOpTime } message := fmt.Sprintf("Initiated automated sync to '%s'", desiredCommitSHA) - ctrl.auditLogger.LogAppEvent(app, argo.EventInfo{Reason: argo.EventReasonOperationStarted, Type: v1.EventTypeNormal}, message) + ctrl.auditLogger.LogAppEvent(app, argo.EventInfo{Reason: argo.EventReasonOperationStarted, Type: v1.EventTypeNormal}, message, "") logCtx.Info(message) - return nil + return nil, setOpTime } -// alreadyAttemptedSync returns whether or not the most recent sync was performed against the +// alreadyAttemptedSync returns whether the 
most recent sync was performed against the // commitSHA and with the same app source config which are currently set in the app -func alreadyAttemptedSync(app *appv1.Application, commitSHA string) (bool, synccommon.OperationPhase) { +func alreadyAttemptedSync(app *appv1.Application, commitSHA string, commitSHAsMS []string, hasMultipleSources bool) (bool, synccommon.OperationPhase) { if app.Status.OperationState == nil || app.Status.OperationState.Operation.Sync == nil || app.Status.OperationState.SyncResult == nil { return false, "" } - if app.Status.OperationState.SyncResult.Revision != commitSHA { - return false, "" + if hasMultipleSources { + if !reflect.DeepEqual(app.Status.OperationState.SyncResult.Revisions, commitSHAsMS) { + return false, "" + } + } else { + if app.Status.OperationState.SyncResult.Revision != commitSHA { + return false, "" + } + } + + if hasMultipleSources { + // Ignore differences in target revision, since we already just verified commitSHAs are equal, + // and we do not want to trigger auto-sync due to things like HEAD != master + specSources := app.Spec.Sources.DeepCopy() + syncSources := app.Status.OperationState.SyncResult.Sources.DeepCopy() + for _, source := range specSources { + source.TargetRevision = "" + } + for _, source := range syncSources { + source.TargetRevision = "" + } + return reflect.DeepEqual(app.Spec.Sources, app.Status.OperationState.SyncResult.Sources), app.Status.OperationState.Phase + } else { + // Ignore differences in target revision, since we already just verified commitSHAs are equal, + // and we do not want to trigger auto-sync due to things like HEAD != master + specSource := app.Spec.Source.DeepCopy() + specSource.TargetRevision = "" + syncResSource := app.Status.OperationState.SyncResult.Source.DeepCopy() + syncResSource.TargetRevision = "" + return reflect.DeepEqual(app.Spec.GetSource(), app.Status.OperationState.SyncResult.Source), app.Status.OperationState.Phase } - // Ignore differences in target 
revision, since we already just verified commitSHAs are equal, - // and we do not want to trigger auto-sync due to things like HEAD != master - specSource := app.Spec.Source.DeepCopy() - specSource.TargetRevision = "" - syncResSource := app.Status.OperationState.SyncResult.Source.DeepCopy() - syncResSource.TargetRevision = "" - return reflect.DeepEqual(app.Spec.Source, app.Status.OperationState.SyncResult.Source), app.Status.OperationState.Phase } func (ctrl *ApplicationController) shouldSelfHeal(app *appv1.Application) (bool, time.Duration) { @@ -1518,11 +1852,38 @@ func (ctrl *ApplicationController) shouldSelfHeal(app *appv1.Application) (bool, return retryAfter <= 0, retryAfter } +// isAppNamespaceAllowed returns whether the application is allowed in the +// namespace it's residing in. +func (ctrl *ApplicationController) isAppNamespaceAllowed(app *appv1.Application) bool { + return app.Namespace == ctrl.namespace || glob.MatchStringInList(ctrl.applicationNamespaces, app.Namespace, false) +} + func (ctrl *ApplicationController) canProcessApp(obj interface{}) bool { app, ok := obj.(*appv1.Application) if !ok { return false } + + // Only process given app if it exists in a watched namespace, or in the + // control plane's namespace. 
+ if !ctrl.isAppNamespaceAllowed(app) { + return false + } + + if annotations := app.GetAnnotations(); annotations != nil { + if skipVal, ok := annotations[common.AnnotationKeyAppSkipReconcile]; ok { + logCtx := log.WithFields(log.Fields{"application": app.QualifiedName()}) + if skipReconcile, err := strconv.ParseBool(skipVal); err == nil { + if skipReconcile { + logCtx.Debugf("Skipping Application reconcile based on annotation %s", common.AnnotationKeyAppSkipReconcile) + return false + } + } else { + logCtx.Debugf("Unable to determine if Application should skip reconcile based on annotation %s: %v", common.AnnotationKeyAppSkipReconcile, err) + } + } + } + if ctrl.clusterFilter != nil { cluster, err := ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server) if err != nil { @@ -1535,23 +1896,62 @@ func (ctrl *ApplicationController) canProcessApp(obj interface{}) bool { } func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.SharedIndexInformer, applisters.ApplicationLister) { + watchNamespace := ctrl.namespace + // If we have at least one additional namespace configured, we need to + // watch on them all. + if len(ctrl.applicationNamespaces) > 0 { + watchNamespace = "" + } + refreshTimeout := ctrl.statusRefreshTimeout + if ctrl.statusHardRefreshTimeout.Seconds() != 0 && (ctrl.statusHardRefreshTimeout < ctrl.statusRefreshTimeout) { + refreshTimeout = ctrl.statusHardRefreshTimeout + } informer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (apiruntime.Object, error) { - return ctrl.applicationClientset.ArgoprojV1alpha1().Applications(ctrl.namespace).List(context.TODO(), options) + // We are only interested in apps that exist in namespaces the + // user wants to be enabled. 
+ appList, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(watchNamespace).List(context.TODO(), options) + if err != nil { + return nil, err + } + newItems := []appv1.Application{} + for _, app := range appList.Items { + if ctrl.isAppNamespaceAllowed(&app) { + newItems = append(newItems, app) + } + } + appList.Items = newItems + return appList, nil }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return ctrl.applicationClientset.ArgoprojV1alpha1().Applications(ctrl.namespace).Watch(context.TODO(), options) + return ctrl.applicationClientset.ArgoprojV1alpha1().Applications(watchNamespace).Watch(context.TODO(), options) }, }, &appv1.Application{}, - ctrl.statusRefreshTimeout, + refreshTimeout, cache.Indexers{ cache.NamespaceIndex: func(obj interface{}) ([]string, error) { app, ok := obj.(*appv1.Application) if ok { - if err := argo.ValidateDestination(context.Background(), &app.Spec.Destination, ctrl.db); err != nil { - ctrl.setAppCondition(app, appv1.ApplicationCondition{Type: appv1.ApplicationConditionInvalidSpecError, Message: err.Error()}) + // We only generally work with applications that are in one + // the allowed namespaces. + if ctrl.isAppNamespaceAllowed(app) { + // If the application is not allowed to use the project, + // log an error. + if _, err := ctrl.getAppProj(app); err != nil { + ctrl.setAppCondition(app, ctrl.projectErrorToCondition(err, app)) + } else { + // This call to 'ValidateDestination' ensures that the .spec.destination field of all Applications + // returned by the informer/lister will have server field set (if not already set) based on the name. + // (or, if not found, an error app condition) + + // If the server field is not set, set it based on the cluster name; if the cluster name can't be found, + // log an error as an App Condition. 
+ if err := argo.ValidateDestination(context.Background(), &app.Spec.Destination, ctrl.db); err != nil { + ctrl.setAppCondition(app, appv1.ApplicationCondition{Type: appv1.ApplicationConditionInvalidSpecError, Message: err.Error()}) + } + } } } @@ -1563,6 +1963,10 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar return nil, nil } + if !ctrl.isAppNamespaceAllowed(app) { + return nil, nil + } + proj, err := ctrl.getAppProj(app) if err != nil { return nil, nil @@ -1600,10 +2004,10 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar oldApp, oldOK := old.(*appv1.Application) newApp, newOK := new.(*appv1.Application) if oldOK && newOK && automatedSyncEnabled(oldApp, newApp) { - log.WithField("application", newApp.Name).Info("Enabled automated sync") + log.WithField("application", newApp.QualifiedName()).Info("Enabled automated sync") compareWith = CompareWithLatest.Pointer() } - ctrl.requestAppRefresh(newApp.Name, compareWith, nil) + ctrl.requestAppRefresh(newApp.QualifiedName(), compareWith, nil) ctrl.appOperationQueue.Add(key) }, DeleteFunc: func(obj interface{}) { @@ -1622,8 +2026,21 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar return informer, lister } +func (ctrl *ApplicationController) projectErrorToCondition(err error, app *appv1.Application) appv1.ApplicationCondition { + var condition appv1.ApplicationCondition + if apierr.IsNotFound(err) { + condition = appv1.ApplicationCondition{ + Type: appv1.ApplicationConditionInvalidSpecError, + Message: fmt.Sprintf("Application referencing project %s which does not exist", app.Spec.Project), + } + } else { + condition = appv1.ApplicationCondition{Type: appv1.ApplicationConditionUnknownError, Message: err.Error()} + } + return condition +} + func (ctrl *ApplicationController) RegisterClusterSecretUpdater(ctx context.Context) { - updater := NewClusterInfoUpdater(ctrl.stateCache, ctrl.db, 
ctrl.appLister.Applications(ctrl.namespace), ctrl.cache, ctrl.clusterFilter) + updater := NewClusterInfoUpdater(ctrl.stateCache, ctrl.db, ctrl.appLister.Applications(""), ctrl.cache, ctrl.clusterFilter, ctrl.getAppProj, ctrl.namespace) go updater.Run(ctx) } @@ -1656,3 +2073,23 @@ func automatedSyncEnabled(oldApp *appv1.Application, newApp *appv1.Application) // nothing changed return false } + +// toAppKey returns the application key from a given appName, that is, it will +// replace underscores with forward-slashes to become a / +// format. If the appName is an unqualified name (such as, "app"), it will use +// the controller's namespace in the key. +func (ctrl *ApplicationController) toAppKey(appName string) string { + if !strings.Contains(appName, "_") && !strings.Contains(appName, "/") { + return ctrl.namespace + "/" + appName + } else if strings.Contains(appName, "/") { + return appName + } else { + return strings.ReplaceAll(appName, "_", "/") + } +} + +func (ctrl *ApplicationController) toAppQualifiedName(appName, appNamespace string) string { + return fmt.Sprintf("%s/%s", appNamespace, appName) +} + +type ClusterFilterFunction func(c *argov1alpha.Cluster, distributionFunction sharding.DistributionFunction) bool diff --git a/controller/appcontroller_test.go b/controller/appcontroller_test.go index d7787879201a3..cfb2141664348 100644 --- a/controller/appcontroller_test.go +++ b/controller/appcontroller_test.go @@ -3,20 +3,22 @@ package controller import ( "context" "encoding/json" + "errors" "testing" "time" + "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/api/resource" clustercache "github.com/argoproj/gitops-engine/pkg/cache" + "github.com/argoproj/argo-cd/v2/common" statecache "github.com/argoproj/argo-cd/v2/controller/cache" "github.com/argoproj/gitops-engine/pkg/cache/mocks" synccommon "github.com/argoproj/gitops-engine/pkg/sync/common" "github.com/argoproj/gitops-engine/pkg/utils/kube" "github.com/argoproj/gitops-engine/pkg/utils/kube/kubetest" - 
"github.com/ghodss/yaml" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" corev1 "k8s.io/api/core/v1" @@ -28,10 +30,10 @@ import ( "k8s.io/client-go/kubernetes/fake" kubetesting "k8s.io/client-go/testing" "k8s.io/client-go/tools/cache" + "sigs.k8s.io/yaml" - "github.com/argoproj/argo-cd/v2/common" mockstatecache "github.com/argoproj/argo-cd/v2/controller/cache/mocks" - argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/fake" "github.com/argoproj/argo-cd/v2/reposerver/apiclient" mockrepoclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient/mocks" @@ -42,17 +44,19 @@ import ( ) type namespacedResource struct { - argoappv1.ResourceNode + v1alpha1.ResourceNode AppName string } type fakeData struct { apps []runtime.Object manifestResponse *apiclient.ManifestResponse + manifestResponses []*apiclient.ManifestResponse managedLiveObjs map[kube.ResourceKey]*unstructured.Unstructured namespacedResources map[kube.ResourceKey]namespacedResource configMapData map[string]string metricsCacheExpiration time.Duration + applicationNamespaces []string } func newFakeController(data *fakeData) *ApplicationController { @@ -64,7 +68,15 @@ func newFakeController(data *fakeData) *ApplicationController { // Mock out call to GenerateManifest mockRepoClient := mockrepoclient.RepoServerServiceClient{} - mockRepoClient.On("GenerateManifest", mock.Anything, mock.Anything).Return(data.manifestResponse, nil) + + if len(data.manifestResponses) > 0 { + for _, response := range data.manifestResponses { + mockRepoClient.On("GenerateManifest", mock.Anything, mock.Anything).Return(response, nil).Once() + } + } else { + mockRepoClient.On("GenerateManifest", mock.Anything, mock.Anything).Return(data.manifestResponse, nil) + } + mockRepoClientset := mockrepoclient.Clientset{RepoServerServiceClient: &mockRepoClient} 
secret := corev1.Secret{ @@ -102,11 +114,15 @@ func newFakeController(data *fakeData) *ApplicationController { ), kubectl, time.Minute, + time.Hour, time.Minute, common.DefaultPortArgoCDMetrics, data.metricsCacheExpiration, + []string{}, 0, + true, nil, + data.applicationNamespaces, ) if err != nil { panic(err) @@ -117,6 +133,8 @@ func newFakeController(data *fakeData) *ApplicationController { defer cancelApp() clusterCacheMock := mocks.ClusterCache{} clusterCacheMock.On("IsNamespaced", mock.Anything).Return(true, nil) + clusterCacheMock.On("GetOpenAPISchema").Return(nil, nil) + clusterCacheMock.On("GetGVKParser").Return(nil) mockStateCache := mockstatecache.LiveStateCache{} ctrl.appStateManager.(*appStateManager).liveStateCache = &mockStateCache @@ -124,7 +142,7 @@ func newFakeController(data *fakeData) *ApplicationController { mockStateCache.On("IsNamespaced", mock.Anything, mock.Anything).Return(true, nil) mockStateCache.On("GetManagedLiveObjs", mock.Anything, mock.Anything).Return(data.managedLiveObjs, nil) mockStateCache.On("GetVersionsInfo", mock.Anything).Return("v1.2.3", nil, nil) - response := make(map[kube.ResourceKey]argoappv1.ResourceNode) + response := make(map[kube.ResourceKey]v1alpha1.ResourceNode) for k, v := range data.namespacedResources { response[k] = v.ResourceNode } @@ -133,12 +151,12 @@ func newFakeController(data *fakeData) *ApplicationController { mockStateCache.On("GetClusterCache", mock.Anything).Return(&clusterCacheMock, nil) mockStateCache.On("IterateHierarchy", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { key := args[1].(kube.ResourceKey) - action := args[2].(func(child argoappv1.ResourceNode, appName string)) + action := args[2].(func(child v1alpha1.ResourceNode, appName string) bool) appName := "" if res, ok := data.namespacedResources[key]; ok { appName = res.AppName } - action(argoappv1.ResourceNode{ResourceRef: argoappv1.ResourceRef{Kind: key.Kind, Group: key.Group, Namespace: key.Namespace, Name: 
key.Name}}, appName) + _ = action(v1alpha1.ResourceNode{ResourceRef: v1alpha1.ResourceRef{Kind: key.Kind, Group: key.Group, Namespace: key.Namespace, Name: key.Name}}, appName) }).Return(nil) return ctrl } @@ -160,7 +178,6 @@ metadata: namespace: ` + test.FakeArgoCDNamespace + ` type: Opaque ` - var fakeApp = ` apiVersion: argoproj.io/v1alpha1 kind: Application @@ -202,6 +219,64 @@ status: repoURL: https://github.com/argoproj/argocd-example-apps.git ` +var fakeMultiSourceApp = ` +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + uid: "123" + name: my-app + namespace: ` + test.FakeArgoCDNamespace + ` +spec: + destination: + namespace: ` + test.FakeDestNamespace + ` + server: https://localhost:6443 + project: default + sources: + - path: some/path + helm: + valueFiles: + - $values_test/values.yaml + repoURL: https://github.com/argoproj/argocd-example-apps.git + - path: some/other/path + repoURL: https://github.com/argoproj/argocd-example-apps-fake.git + - ref: values_test + repoURL: https://github.com/argoproj/argocd-example-apps-fake-ref.git + syncPolicy: + automated: {} +status: + operationState: + finishedAt: 2018-09-21T23:50:29Z + message: successfully synced + operation: + sync: + revisions: + - HEAD + - HEAD + - HEAD + phase: Succeeded + startedAt: 2018-09-21T23:50:25Z + syncResult: + resources: + - kind: RoleBinding + message: |- + rolebinding.rbac.authorization.k8s.io/always-outofsync reconciled + rolebinding.rbac.authorization.k8s.io/always-outofsync configured + name: always-outofsync + namespace: default + status: Synced + revisions: + - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + - bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb + - cccccccccccccccccccccccccccccccccccccccc + sources: + - path: some/path + repoURL: https://github.com/argoproj/argocd-example-apps.git + - path: some/other/path + repoURL: https://github.com/argoproj/argocd-example-apps-fake.git + - path: some/other/path + repoURL: 
https://github.com/argoproj/argocd-example-apps-fake-ref.git +` + var fakeAppWithDestName = ` apiVersion: argoproj.io/v1alpha1 kind: Application @@ -252,20 +327,24 @@ metadata: data: ` -func newFakeApp() *argoappv1.Application { +func newFakeApp() *v1alpha1.Application { return createFakeApp(fakeApp) } -func newFakeAppWithDestMismatch() *argoappv1.Application { +func newFakeMultiSourceApp() *v1alpha1.Application { + return createFakeApp(fakeMultiSourceApp) +} + +func newFakeAppWithDestMismatch() *v1alpha1.Application { return createFakeApp(fakeAppWithDestMismatch) } -func newFakeAppWithDestName() *argoappv1.Application { +func newFakeAppWithDestName() *v1alpha1.Application { return createFakeApp(fakeAppWithDestName) } -func createFakeApp(testApp string) *argoappv1.Application { - var app argoappv1.Application +func createFakeApp(testApp string) *v1alpha1.Application { + var app v1alpha1.Application err := yaml.Unmarshal([]byte(testApp), &app) if err != nil { panic(err) @@ -285,11 +364,11 @@ func newFakeCM() map[string]interface{} { func TestAutoSync(t *testing.T) { app := newFakeApp() ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}) - syncStatus := argoappv1.SyncStatus{ - Status: argoappv1.SyncStatusCodeOutOfSync, + syncStatus := v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeOutOfSync, Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", } - cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: argoappv1.SyncStatusCodeOutOfSync}}) + cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}) assert.Nil(t, cond) app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{}) assert.NoError(t, err) @@ -302,11 +381,11 @@ func TestAutoSyncNotAllowEmpty(t *testing.T) { app := newFakeApp() 
app.Spec.SyncPolicy.Automated.Prune = true ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}) - syncStatus := argoappv1.SyncStatus{ - Status: argoappv1.SyncStatusCodeOutOfSync, + syncStatus := v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeOutOfSync, Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", } - cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{}) + cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{}) assert.NotNil(t, cond) } @@ -315,11 +394,11 @@ func TestAutoSyncAllowEmpty(t *testing.T) { app.Spec.SyncPolicy.Automated.Prune = true app.Spec.SyncPolicy.Automated.AllowEmpty = true ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}) - syncStatus := argoappv1.SyncStatus{ - Status: argoappv1.SyncStatusCodeOutOfSync, + syncStatus := v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeOutOfSync, Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", } - cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{}) + cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{}) assert.Nil(t, cond) } @@ -329,11 +408,11 @@ func TestSkipAutoSync(t *testing.T) { t.Run("PreviouslySyncedToRevision", func(t *testing.T) { app := newFakeApp() ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}) - syncStatus := argoappv1.SyncStatus{ - Status: argoappv1.SyncStatusCodeOutOfSync, + syncStatus := v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeOutOfSync, Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", } - cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{}) + cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{}) assert.Nil(t, cond) app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{}) assert.NoError(t, err) @@ -344,11 +423,11 @@ func TestSkipAutoSync(t *testing.T) { t.Run("AlreadyInSyncedState", func(t *testing.T) { app := 
newFakeApp() ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}) - syncStatus := argoappv1.SyncStatus{ - Status: argoappv1.SyncStatusCodeSynced, + syncStatus := v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeSynced, Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", } - cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{}) + cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{}) assert.Nil(t, cond) app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{}) assert.NoError(t, err) @@ -360,11 +439,11 @@ func TestSkipAutoSync(t *testing.T) { app := newFakeApp() app.Spec.SyncPolicy = nil ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}) - syncStatus := argoappv1.SyncStatus{ - Status: argoappv1.SyncStatusCodeOutOfSync, + syncStatus := v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeOutOfSync, Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", } - cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{}) + cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{}) assert.Nil(t, cond) app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{}) assert.NoError(t, err) @@ -377,11 +456,11 @@ func TestSkipAutoSync(t *testing.T) { now := metav1.Now() app.DeletionTimestamp = &now ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}) - syncStatus := argoappv1.SyncStatus{ - Status: argoappv1.SyncStatusCodeOutOfSync, + syncStatus := v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeOutOfSync, Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", } - cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{}) + cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{}) assert.Nil(t, cond) app, err := 
ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{}) assert.NoError(t, err) @@ -392,22 +471,22 @@ func TestSkipAutoSync(t *testing.T) { // Set current to 'aaaaa', desired to 'bbbbb' and add 'bbbbb' to failure history t.Run("PreviousSyncAttemptFailed", func(t *testing.T) { app := newFakeApp() - app.Status.OperationState = &argoappv1.OperationState{ - Operation: argoappv1.Operation{ - Sync: &argoappv1.SyncOperation{}, + app.Status.OperationState = &v1alpha1.OperationState{ + Operation: v1alpha1.Operation{ + Sync: &v1alpha1.SyncOperation{}, }, Phase: synccommon.OperationFailed, - SyncResult: &argoappv1.SyncOperationResult{ + SyncResult: &v1alpha1.SyncOperationResult{ Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", Source: *app.Spec.Source.DeepCopy(), }, } ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}) - syncStatus := argoappv1.SyncStatus{ - Status: argoappv1.SyncStatusCodeOutOfSync, + syncStatus := v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeOutOfSync, Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", } - cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: argoappv1.SyncStatusCodeOutOfSync}}) + cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}) assert.NotNil(t, cond) app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{}) assert.NoError(t, err) @@ -417,12 +496,12 @@ func TestSkipAutoSync(t *testing.T) { t.Run("NeedsToPruneResourcesOnlyButAutomatedPruneDisabled", func(t *testing.T) { app := newFakeApp() ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}) - syncStatus := argoappv1.SyncStatus{ - Status: argoappv1.SyncStatusCodeOutOfSync, + syncStatus := 
v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeOutOfSync, Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", } - cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{ - {Name: "guestbook", Kind: kube.DeploymentKind, Status: argoappv1.SyncStatusCodeOutOfSync, RequiresPruning: true}, + cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{ + {Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync, RequiresPruning: true}, }) assert.Nil(t, cond) app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{}) @@ -434,8 +513,8 @@ func TestSkipAutoSync(t *testing.T) { // TestAutoSyncIndicateError verifies we skip auto-sync and return error condition if previous sync failed func TestAutoSyncIndicateError(t *testing.T) { app := newFakeApp() - app.Spec.Source.Helm = &argoappv1.ApplicationSourceHelm{ - Parameters: []argoappv1.HelmParameter{ + app.Spec.Source.Helm = &v1alpha1.ApplicationSourceHelm{ + Parameters: []v1alpha1.HelmParameter{ { Name: "a", Value: "1", @@ -443,23 +522,23 @@ func TestAutoSyncIndicateError(t *testing.T) { }, } ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}) - syncStatus := argoappv1.SyncStatus{ - Status: argoappv1.SyncStatusCodeOutOfSync, + syncStatus := v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeOutOfSync, Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", } - app.Status.OperationState = &argoappv1.OperationState{ - Operation: argoappv1.Operation{ - Sync: &argoappv1.SyncOperation{ + app.Status.OperationState = &v1alpha1.OperationState{ + Operation: v1alpha1.Operation{ + Sync: &v1alpha1.SyncOperation{ Source: app.Spec.Source.DeepCopy(), }, }, Phase: synccommon.OperationFailed, - SyncResult: &argoappv1.SyncOperationResult{ + SyncResult: &v1alpha1.SyncOperationResult{ Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", Source: *app.Spec.Source.DeepCopy(), }, } - 
cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: argoappv1.SyncStatusCodeOutOfSync}}) + cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}) assert.NotNil(t, cond) app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{}) assert.NoError(t, err) @@ -469,8 +548,8 @@ func TestAutoSyncIndicateError(t *testing.T) { // TestAutoSyncParameterOverrides verifies we auto-sync if revision is same but parameter overrides are different func TestAutoSyncParameterOverrides(t *testing.T) { app := newFakeApp() - app.Spec.Source.Helm = &argoappv1.ApplicationSourceHelm{ - Parameters: []argoappv1.HelmParameter{ + app.Spec.Source.Helm = &v1alpha1.ApplicationSourceHelm{ + Parameters: []v1alpha1.HelmParameter{ { Name: "a", Value: "1", @@ -478,16 +557,16 @@ func TestAutoSyncParameterOverrides(t *testing.T) { }, } ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}) - syncStatus := argoappv1.SyncStatus{ - Status: argoappv1.SyncStatusCodeOutOfSync, + syncStatus := v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeOutOfSync, Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", } - app.Status.OperationState = &argoappv1.OperationState{ - Operation: argoappv1.Operation{ - Sync: &argoappv1.SyncOperation{ - Source: &argoappv1.ApplicationSource{ - Helm: &argoappv1.ApplicationSourceHelm{ - Parameters: []argoappv1.HelmParameter{ + app.Status.OperationState = &v1alpha1.OperationState{ + Operation: v1alpha1.Operation{ + Sync: &v1alpha1.SyncOperation{ + Source: &v1alpha1.ApplicationSource{ + Helm: &v1alpha1.ApplicationSourceHelm{ + Parameters: []v1alpha1.HelmParameter{ { Name: "a", Value: "2", // this value changed @@ -498,11 +577,11 @@ func TestAutoSyncParameterOverrides(t *testing.T) { }, }, Phase: 
synccommon.OperationFailed, - SyncResult: &argoappv1.SyncOperationResult{ + SyncResult: &v1alpha1.SyncOperationResult{ Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", }, } - cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: argoappv1.SyncStatusCodeOutOfSync}}) + cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}) assert.Nil(t, cond) app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{}) assert.NoError(t, err) @@ -511,14 +590,14 @@ func TestAutoSyncParameterOverrides(t *testing.T) { // TestFinalizeAppDeletion verifies application deletion func TestFinalizeAppDeletion(t *testing.T) { - defaultProj := argoappv1.AppProject{ + defaultProj := v1alpha1.AppProject{ ObjectMeta: metav1.ObjectMeta{ Name: "default", Namespace: test.FakeArgoCDNamespace, }, - Spec: argoappv1.AppProjectSpec{ + Spec: v1alpha1.AppProjectSpec{ SourceRepos: []string{"*"}, - Destinations: []argoappv1.ApplicationDestination{ + Destinations: []v1alpha1.ApplicationDestination{ { Server: "*", Namespace: "*", @@ -547,7 +626,9 @@ func TestFinalizeAppDeletion(t *testing.T) { patched = true return true, nil, nil }) - _, err := ctrl.finalizeApplicationDeletion(app) + _, err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) { + return []*v1alpha1.Cluster{}, nil + }) assert.NoError(t, err) assert.True(t, patched) }) @@ -555,14 +636,14 @@ func TestFinalizeAppDeletion(t *testing.T) { // Ensure any stray resources irregularly labeled with instance label of app are not deleted upon deleting, // when app project restriction is in place t.Run("ProjectRestrictionEnforced", func(*testing.T) { - restrictedProj := argoappv1.AppProject{ + restrictedProj := v1alpha1.AppProject{ ObjectMeta: 
metav1.ObjectMeta{ Name: "restricted", Namespace: test.FakeArgoCDNamespace, }, - Spec: argoappv1.AppProjectSpec{ + Spec: v1alpha1.AppProjectSpec{ SourceRepos: []string{"*"}, - Destinations: []argoappv1.ApplicationDestination{ + Destinations: []v1alpha1.ApplicationDestination{ { Server: "*", Namespace: "my-app", @@ -595,7 +676,9 @@ func TestFinalizeAppDeletion(t *testing.T) { patched = true return true, nil, nil }) - objs, err := ctrl.finalizeApplicationDeletion(app) + objs, err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) { + return []*v1alpha1.Cluster{}, nil + }) assert.NoError(t, err) assert.True(t, patched) objsMap, err := ctrl.stateCache.GetManagedLiveObjs(app, []*unstructured.Unstructured{}) @@ -627,43 +710,64 @@ func TestFinalizeAppDeletion(t *testing.T) { patched = true return true, nil, nil }) - _, err := ctrl.finalizeApplicationDeletion(app) + _, err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) { + return []*v1alpha1.Cluster{}, nil + }) assert.NoError(t, err) assert.True(t, patched) }) - t.Run("ErrorOnBothDestNameAndServer", func(t *testing.T) { - app := newFakeAppWithDestMismatch() - appObj := kube.MustToUnstructured(&app) - ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}, managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{ - kube.GetResourceKey(appObj): appObj, - }}) - fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset) - func() { - fakeAppCs.Lock() - defer fakeAppCs.Unlock() + // Create an Application with a cluster that doesn't exist + // Ensure it can be deleted. 
+ t.Run("DeleteWithInvalidClusterName", func(t *testing.T) { + + appTemplate := newFakeAppWithDestName() + testShouldDelete := func(app *v1alpha1.Application) { + appObj := kube.MustToUnstructured(&app) + ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}, managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{ + kube.GetResourceKey(appObj): appObj, + }}) + + fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset) defaultReactor := fakeAppCs.ReactionChain[0] fakeAppCs.ReactionChain = nil fakeAppCs.AddReactor("get", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) { return defaultReactor.React(action) }) - }() - _, err := ctrl.finalizeApplicationDeletion(app) - assert.EqualError(t, err, "application destination can't have both name and server defined: another-cluster https://localhost:6443") + _, err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) { + return []*v1alpha1.Cluster{}, nil + }) + assert.NoError(t, err) + } + + app1 := appTemplate.DeepCopy() + app1.Spec.Destination.Server = "https://invalid" + testShouldDelete(app1) + + app2 := appTemplate.DeepCopy() + app2.Spec.Destination.Name = "invalid" + testShouldDelete(app2) + + app3 := appTemplate.DeepCopy() + app3.Spec.Destination.Name = "invalid" + app3.Spec.Destination.Server = "https://invalid" + testShouldDelete(app3) + }) + } // TestNormalizeApplication verifies we normalize an application during reconciliation func TestNormalizeApplication(t *testing.T) { - defaultProj := argoappv1.AppProject{ + defaultProj := v1alpha1.AppProject{ ObjectMeta: metav1.ObjectMeta{ Name: "default", Namespace: test.FakeArgoCDNamespace, }, - Spec: argoappv1.AppProjectSpec{ + Spec: v1alpha1.AppProjectSpec{ SourceRepos: []string{"*"}, - Destinations: []argoappv1.ApplicationDestination{ + Destinations: []v1alpha1.ApplicationDestination{ { Server: "*", Namespace: "*", @@ -673,7 +777,7 @@ func 
TestNormalizeApplication(t *testing.T) { } app := newFakeApp() app.Spec.Project = "" - app.Spec.Source.Kustomize = &argoappv1.ApplicationSourceKustomize{NamePrefix: "foo-"} + app.Spec.Source.Kustomize = &v1alpha1.ApplicationSourceKustomize{NamePrefix: "foo-"} data := fakeData{ apps: []runtime.Object{app, &defaultProj}, manifestResponse: &apiclient.ManifestResponse{ @@ -731,16 +835,18 @@ func TestNormalizeApplication(t *testing.T) { func TestHandleAppUpdated(t *testing.T) { app := newFakeApp() app.Spec.Destination.Namespace = test.FakeArgoCDNamespace - app.Spec.Destination.Server = common.KubernetesInternalAPIServerAddr - ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}) + app.Spec.Destination.Server = v1alpha1.KubernetesInternalAPIServerAddr + proj := defaultProj.DeepCopy() + proj.Spec.SourceNamespaces = []string{test.FakeArgoCDNamespace} + ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, proj}}) - ctrl.handleObjectUpdated(map[string]bool{app.Name: true}, kube.GetObjectRef(kube.MustToUnstructured(app))) - isRequested, level := ctrl.isRefreshRequested(app.Name) + ctrl.handleObjectUpdated(map[string]bool{app.InstanceName(ctrl.namespace): true}, kube.GetObjectRef(kube.MustToUnstructured(app))) + isRequested, level := ctrl.isRefreshRequested(app.QualifiedName()) assert.False(t, isRequested) assert.Equal(t, ComparisonWithNothing, level) - ctrl.handleObjectUpdated(map[string]bool{app.Name: true}, corev1.ObjectReference{UID: "test", Kind: kube.DeploymentKind, Name: "test", Namespace: "default"}) - isRequested, level = ctrl.isRefreshRequested(app.Name) + ctrl.handleObjectUpdated(map[string]bool{app.InstanceName(ctrl.namespace): true}, corev1.ObjectReference{UID: "test", Kind: kube.DeploymentKind, Name: "test", Namespace: "default"}) + isRequested, level = ctrl.isRefreshRequested(app.QualifiedName()) assert.True(t, isRequested) assert.Equal(t, CompareWithRecent, level) } @@ -749,42 +855,42 @@ func TestHandleOrphanedResourceUpdated(t 
*testing.T) { app1 := newFakeApp() app1.Name = "app1" app1.Spec.Destination.Namespace = test.FakeArgoCDNamespace - app1.Spec.Destination.Server = common.KubernetesInternalAPIServerAddr + app1.Spec.Destination.Server = v1alpha1.KubernetesInternalAPIServerAddr app2 := newFakeApp() app2.Name = "app2" app2.Spec.Destination.Namespace = test.FakeArgoCDNamespace - app2.Spec.Destination.Server = common.KubernetesInternalAPIServerAddr + app2.Spec.Destination.Server = v1alpha1.KubernetesInternalAPIServerAddr proj := defaultProj.DeepCopy() - proj.Spec.OrphanedResources = &argoappv1.OrphanedResourcesMonitorSettings{} + proj.Spec.OrphanedResources = &v1alpha1.OrphanedResourcesMonitorSettings{} ctrl := newFakeController(&fakeData{apps: []runtime.Object{app1, app2, proj}}) ctrl.handleObjectUpdated(map[string]bool{}, corev1.ObjectReference{UID: "test", Kind: kube.DeploymentKind, Name: "test", Namespace: test.FakeArgoCDNamespace}) - isRequested, level := ctrl.isRefreshRequested(app1.Name) + isRequested, level := ctrl.isRefreshRequested(app1.QualifiedName()) assert.True(t, isRequested) - assert.Equal(t, ComparisonWithNothing, level) + assert.Equal(t, CompareWithRecent, level) - isRequested, level = ctrl.isRefreshRequested(app2.Name) + isRequested, level = ctrl.isRefreshRequested(app2.QualifiedName()) assert.True(t, isRequested) - assert.Equal(t, ComparisonWithNothing, level) + assert.Equal(t, CompareWithRecent, level) } func TestGetResourceTree_HasOrphanedResources(t *testing.T) { app := newFakeApp() proj := defaultProj.DeepCopy() - proj.Spec.OrphanedResources = &argoappv1.OrphanedResourcesMonitorSettings{} + proj.Spec.OrphanedResources = &v1alpha1.OrphanedResourcesMonitorSettings{} - managedDeploy := argoappv1.ResourceNode{ - ResourceRef: argoappv1.ResourceRef{Group: "apps", Kind: "Deployment", Namespace: "default", Name: "nginx-deployment", Version: "v1"}, + managedDeploy := v1alpha1.ResourceNode{ + ResourceRef: v1alpha1.ResourceRef{Group: "apps", Kind: "Deployment", Namespace: 
"default", Name: "nginx-deployment", Version: "v1"}, } - orphanedDeploy1 := argoappv1.ResourceNode{ - ResourceRef: argoappv1.ResourceRef{Group: "apps", Kind: "Deployment", Namespace: "default", Name: "deploy1"}, + orphanedDeploy1 := v1alpha1.ResourceNode{ + ResourceRef: v1alpha1.ResourceRef{Group: "apps", Kind: "Deployment", Namespace: "default", Name: "deploy1"}, } - orphanedDeploy2 := argoappv1.ResourceNode{ - ResourceRef: argoappv1.ResourceRef{Group: "apps", Kind: "Deployment", Namespace: "default", Name: "deploy2"}, + orphanedDeploy2 := v1alpha1.ResourceNode{ + ResourceRef: v1alpha1.ResourceRef{Group: "apps", Kind: "Deployment", Namespace: "default", Name: "deploy2"}, } ctrl := newFakeController(&fakeData{ @@ -795,7 +901,7 @@ func TestGetResourceTree_HasOrphanedResources(t *testing.T) { kube.NewResourceKey("apps", "Deployment", "default", "deploy2"): {ResourceNode: orphanedDeploy2}, }, }) - tree, err := ctrl.getResourceTree(app, []*argoappv1.ResourceDiff{{ + tree, err := ctrl.getResourceTree(app, []*v1alpha1.ResourceDiff{{ Namespace: "default", Name: "nginx-deployment", Kind: "Deployment", @@ -805,8 +911,8 @@ func TestGetResourceTree_HasOrphanedResources(t *testing.T) { }}) assert.NoError(t, err) - assert.Equal(t, tree.Nodes, []argoappv1.ResourceNode{managedDeploy}) - assert.Equal(t, tree.OrphanedNodes, []argoappv1.ResourceNode{orphanedDeploy1, orphanedDeploy2}) + assert.Equal(t, tree.Nodes, []v1alpha1.ResourceNode{managedDeploy}) + assert.Equal(t, tree.OrphanedNodes, []v1alpha1.ResourceNode{orphanedDeploy1, orphanedDeploy2}) } func TestSetOperationStateOnDeletedApp(t *testing.T) { @@ -818,99 +924,287 @@ func TestSetOperationStateOnDeletedApp(t *testing.T) { patched = true return true, nil, apierr.NewNotFound(schema.GroupResource{}, "my-app") }) - ctrl.setOperationState(newFakeApp(), &argoappv1.OperationState{Phase: synccommon.OperationSucceeded}) + ctrl.setOperationState(newFakeApp(), &v1alpha1.OperationState{Phase: synccommon.OperationSucceeded}) 
assert.True(t, patched) } -func TestNeedRefreshAppStatus(t *testing.T) { +type logHook struct { + entries []logrus.Entry +} + +func (h *logHook) Levels() []logrus.Level { + return []logrus.Level{logrus.WarnLevel} +} + +func (h *logHook) Fire(entry *logrus.Entry) error { + h.entries = append(h.entries, *entry) + return nil +} + +func TestSetOperationStateLogRetries(t *testing.T) { + hook := logHook{} + logrus.AddHook(&hook) + t.Cleanup(func() { + logrus.StandardLogger().ReplaceHooks(logrus.LevelHooks{}) + }) ctrl := newFakeController(&fakeData{apps: []runtime.Object{}}) + fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset) + fakeAppCs.ReactionChain = nil + patched := false + fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) { + if !patched { + patched = true + return true, nil, errors.New("fake error") + } + return true, nil, nil + }) + ctrl.setOperationState(newFakeApp(), &v1alpha1.OperationState{Phase: synccommon.OperationSucceeded}) + assert.True(t, patched) + assert.Contains(t, hook.entries[0].Message, "fake error") +} - app := newFakeApp() - now := metav1.Now() - app.Status.ReconciledAt = &now - app.Status.Sync = argoappv1.SyncStatus{ - Status: argoappv1.SyncStatusCodeSynced, - ComparedTo: argoappv1.ComparedTo{ - Source: app.Spec.Source, - Destination: app.Spec.Destination, +func TestNeedRefreshAppStatus(t *testing.T) { + testCases := []struct { + name string + app *v1alpha1.Application + }{ + { + name: "single-source app", + app: newFakeApp(), + }, + { + name: "multi-source app", + app: newFakeMultiSourceApp(), }, } - // no need to refresh just reconciled application - needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour) - assert.False(t, needRefresh) + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + app := tc.app + now := metav1.Now() + app.Status.ReconciledAt = &now + + app.Status.Sync = v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeSynced, 
+ ComparedTo: v1alpha1.ComparedTo{ + Destination: app.Spec.Destination, + IgnoreDifferences: app.Spec.IgnoreDifferences, + }, + } - // refresh app using the 'deepest' requested comparison level - ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil) - ctrl.requestAppRefresh(app.Name, ComparisonWithNothing.Pointer(), nil) + if app.Spec.HasMultipleSources() { + app.Status.Sync.ComparedTo.Sources = app.Spec.Sources + } else { + app.Status.Sync.ComparedTo.Source = app.Spec.GetSource() + } - needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 1*time.Hour) - assert.True(t, needRefresh) - assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType) - assert.Equal(t, CompareWithRecent, compareWith) + ctrl := newFakeController(&fakeData{apps: []runtime.Object{}}) - // refresh application which status is not reconciled using latest commit - app.Status.Sync = argoappv1.SyncStatus{Status: argoappv1.SyncStatusCodeUnknown} + t.Run("no need to refresh just reconciled application", func(t *testing.T) { + needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour) + assert.False(t, needRefresh) + }) - needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Hour) - assert.True(t, needRefresh) - assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType) - assert.Equal(t, CompareWithLatest, compareWith) + t.Run("requested refresh is respected", func(t *testing.T) { + needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour) + assert.False(t, needRefresh) - { - // refresh app using the 'latest' level if comparison expired - app := app.DeepCopy() - ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil) - reconciledAt := metav1.NewTime(time.Now().UTC().Add(-1 * time.Hour)) - app.Status.ReconciledAt = &reconciledAt - needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Minute) - assert.True(t, needRefresh) - assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType) 
- assert.Equal(t, CompareWithLatest, compareWith) + // use a one-off controller so other tests don't have a manual refresh request + ctrl := newFakeController(&fakeData{apps: []runtime.Object{}}) + + // refresh app using the 'deepest' requested comparison level + ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil) + ctrl.requestAppRefresh(app.Name, ComparisonWithNothing.Pointer(), nil) + + needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour) + assert.True(t, needRefresh) + assert.Equal(t, v1alpha1.RefreshTypeNormal, refreshType) + assert.Equal(t, CompareWithRecent, compareWith) + }) + + t.Run("refresh application which status is not reconciled using latest commit", func(t *testing.T) { + app := app.DeepCopy() + needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour) + assert.False(t, needRefresh) + app.Status.Sync = v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeUnknown} + + needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour) + assert.True(t, needRefresh) + assert.Equal(t, v1alpha1.RefreshTypeNormal, refreshType) + assert.Equal(t, CompareWithLatestForceResolve, compareWith) + }) + + t.Run("refresh app using the 'latest' level if comparison expired", func(t *testing.T) { + app := app.DeepCopy() + + // use a one-off controller so other tests don't have a manual refresh request + ctrl := newFakeController(&fakeData{apps: []runtime.Object{}}) + + needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour) + assert.False(t, needRefresh) + + ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil) + reconciledAt := metav1.NewTime(time.Now().UTC().Add(-1 * time.Hour)) + app.Status.ReconciledAt = &reconciledAt + needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 1*time.Minute, 2*time.Hour) + assert.True(t, needRefresh) + assert.Equal(t, v1alpha1.RefreshTypeNormal, refreshType) + assert.Equal(t, 
CompareWithLatest, compareWith) + }) + + t.Run("refresh app using the 'latest' level if comparison expired for hard refresh", func(t *testing.T) { + app := app.DeepCopy() + app.Status.Sync = v1alpha1.SyncStatus{ + Status: v1alpha1.SyncStatusCodeSynced, + ComparedTo: v1alpha1.ComparedTo{ + Destination: app.Spec.Destination, + IgnoreDifferences: app.Spec.IgnoreDifferences, + }, + } + if app.Spec.HasMultipleSources() { + app.Status.Sync.ComparedTo.Sources = app.Spec.Sources + } else { + app.Status.Sync.ComparedTo.Source = app.Spec.GetSource() + } + + // use a one-off controller so other tests don't have a manual refresh request + ctrl := newFakeController(&fakeData{apps: []runtime.Object{}}) + + needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour) + assert.False(t, needRefresh) + ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil) + reconciledAt := metav1.NewTime(time.Now().UTC().Add(-1 * time.Hour)) + app.Status.ReconciledAt = &reconciledAt + needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 2*time.Hour, 1*time.Minute) + assert.True(t, needRefresh) + assert.Equal(t, v1alpha1.RefreshTypeHard, refreshType) + assert.Equal(t, CompareWithLatest, compareWith) + }) + + t.Run("execute hard refresh if app has refresh annotation", func(t *testing.T) { + app := app.DeepCopy() + needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour) + assert.False(t, needRefresh) + reconciledAt := metav1.NewTime(time.Now().UTC().Add(-1 * time.Hour)) + app.Status.ReconciledAt = &reconciledAt + app.Annotations = map[string]string{ + v1alpha1.AnnotationKeyRefresh: string(v1alpha1.RefreshTypeHard), + } + needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour) + assert.True(t, needRefresh) + assert.Equal(t, v1alpha1.RefreshTypeHard, refreshType) + assert.Equal(t, CompareWithLatestForceResolve, compareWith) + }) + + t.Run("ensure that CompareWithLatest level is used if 
application source has changed", func(t *testing.T) { + app := app.DeepCopy() + needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour) + assert.False(t, needRefresh) + // sample app source change + if app.Spec.HasMultipleSources() { + app.Spec.Sources[0].Helm = &v1alpha1.ApplicationSourceHelm{ + Parameters: []v1alpha1.HelmParameter{{ + Name: "foo", + Value: "bar", + }}, + } + } else { + app.Spec.Source.Helm = &v1alpha1.ApplicationSourceHelm{ + Parameters: []v1alpha1.HelmParameter{{ + Name: "foo", + Value: "bar", + }}, + } + } + + needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour) + assert.True(t, needRefresh) + assert.Equal(t, v1alpha1.RefreshTypeNormal, refreshType) + assert.Equal(t, CompareWithLatestForceResolve, compareWith) + }) + + t.Run("ensure that CompareWithLatest level is used if ignored differences change", func(t *testing.T) { + app := app.DeepCopy() + needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour) + assert.False(t, needRefresh) + + app.Spec.IgnoreDifferences = []v1alpha1.ResourceIgnoreDifferences{ + { + Group: "apps", + Kind: "Deployment", + JSONPointers: []string{ + "/spec/template/spec/containers/0/image", + }, + }, + } + + needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour) + assert.True(t, needRefresh) + assert.Equal(t, v1alpha1.RefreshTypeNormal, refreshType) + assert.Equal(t, CompareWithLatest, compareWith) + }) + }) } +} - { - app := app.DeepCopy() - // execute hard refresh if app has refresh annotation - reconciledAt := metav1.NewTime(time.Now().UTC().Add(-1 * time.Hour)) - app.Status.ReconciledAt = &reconciledAt - app.Annotations = map[string]string{ - common.AnnotationKeyRefresh: string(argoappv1.RefreshTypeHard), - } - needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Hour) - assert.True(t, needRefresh) - assert.Equal(t, argoappv1.RefreshTypeHard, refreshType) - 
assert.Equal(t, CompareWithLatest, compareWith) +func TestUpdatedManagedNamespaceMetadata(t *testing.T) { + ctrl := newFakeController(&fakeData{apps: []runtime.Object{}}) + app := newFakeApp() + app.Spec.SyncPolicy.ManagedNamespaceMetadata = &v1alpha1.ManagedNamespaceMetadata{ + Labels: map[string]string{ + "foo": "bar", + }, + Annotations: map[string]string{ + "foo": "bar", + }, } + app.Status.Sync.ComparedTo.Source = app.Spec.GetSource() + app.Status.Sync.ComparedTo.Destination = app.Spec.Destination - { - app := app.DeepCopy() - // ensure that CompareWithLatest level is used if application source has changed - ctrl.requestAppRefresh(app.Name, ComparisonWithNothing.Pointer(), nil) - // sample app source change - app.Spec.Source.Helm = &argoappv1.ApplicationSourceHelm{ - Parameters: []argoappv1.HelmParameter{{ - Name: "foo", - Value: "bar", - }}, - } + // Ensure that hard/soft refresh isn't triggered due to reconciledAt being expired + reconciledAt := metav1.NewTime(time.Now().UTC().Add(15 * time.Minute)) + app.Status.ReconciledAt = &reconciledAt + needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 30*time.Minute, 2*time.Hour) + + assert.True(t, needRefresh) + assert.Equal(t, v1alpha1.RefreshTypeNormal, refreshType) + assert.Equal(t, CompareWithLatest, compareWith) +} - needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Hour) - assert.True(t, needRefresh) - assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType) - assert.Equal(t, CompareWithLatest, compareWith) +func TestUnchangedManagedNamespaceMetadata(t *testing.T) { + ctrl := newFakeController(&fakeData{apps: []runtime.Object{}}) + app := newFakeApp() + app.Spec.SyncPolicy.ManagedNamespaceMetadata = &v1alpha1.ManagedNamespaceMetadata{ + Labels: map[string]string{ + "foo": "bar", + }, + Annotations: map[string]string{ + "foo": "bar", + }, } + app.Status.Sync.ComparedTo.Source = app.Spec.GetSource() + app.Status.Sync.ComparedTo.Destination = app.Spec.Destination 
+ app.Status.OperationState.SyncResult.ManagedNamespaceMetadata = app.Spec.SyncPolicy.ManagedNamespaceMetadata + + // Ensure that hard/soft refresh isn't triggered due to reconciledAt being expired + reconciledAt := metav1.NewTime(time.Now().UTC().Add(15 * time.Minute)) + app.Status.ReconciledAt = &reconciledAt + needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 30*time.Minute, 2*time.Hour) + + assert.False(t, needRefresh) + assert.Equal(t, v1alpha1.RefreshTypeNormal, refreshType) + assert.Equal(t, CompareWithLatest, compareWith) } func TestRefreshAppConditions(t *testing.T) { - defaultProj := argoappv1.AppProject{ + defaultProj := v1alpha1.AppProject{ ObjectMeta: metav1.ObjectMeta{ Name: "default", Namespace: test.FakeArgoCDNamespace, }, - Spec: argoappv1.AppProjectSpec{ + Spec: v1alpha1.AppProjectSpec{ SourceRepos: []string{"*"}, - Destinations: []argoappv1.ApplicationDestination{ + Destinations: []v1alpha1.ApplicationDestination{ { Server: "*", Namespace: "*", @@ -930,27 +1224,27 @@ func TestRefreshAppConditions(t *testing.T) { t.Run("PreserveExistingWarningCondition", func(t *testing.T) { app := newFakeApp() - app.Status.SetConditions([]argoappv1.ApplicationCondition{{Type: argoappv1.ApplicationConditionExcludedResourceWarning}}, nil) + app.Status.SetConditions([]v1alpha1.ApplicationCondition{{Type: v1alpha1.ApplicationConditionExcludedResourceWarning}}, nil) ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}}) _, hasErrors := ctrl.refreshAppConditions(app) assert.False(t, hasErrors) assert.Len(t, app.Status.Conditions, 1) - assert.Equal(t, argoappv1.ApplicationConditionExcludedResourceWarning, app.Status.Conditions[0].Type) + assert.Equal(t, v1alpha1.ApplicationConditionExcludedResourceWarning, app.Status.Conditions[0].Type) }) t.Run("ReplacesSpecErrorCondition", func(t *testing.T) { app := newFakeApp() app.Spec.Project = "wrong project" - app.Status.SetConditions([]argoappv1.ApplicationCondition{{Type: 
argoappv1.ApplicationConditionInvalidSpecError, Message: "old message"}}, nil) + app.Status.SetConditions([]v1alpha1.ApplicationCondition{{Type: v1alpha1.ApplicationConditionInvalidSpecError, Message: "old message"}}, nil) ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}}) _, hasErrors := ctrl.refreshAppConditions(app) assert.True(t, hasErrors) assert.Len(t, app.Status.Conditions, 1) - assert.Equal(t, argoappv1.ApplicationConditionInvalidSpecError, app.Status.Conditions[0].Type) + assert.Equal(t, v1alpha1.ApplicationConditionInvalidSpecError, app.Status.Conditions[0].Type) assert.Equal(t, "Application referencing project wrong project which does not exist", app.Status.Conditions[0].Message) }) } @@ -958,8 +1252,8 @@ func TestRefreshAppConditions(t *testing.T) { func TestUpdateReconciledAt(t *testing.T) { app := newFakeApp() reconciledAt := metav1.NewTime(time.Now().Add(-1 * time.Second)) - app.Status = argoappv1.ApplicationStatus{ReconciledAt: &reconciledAt} - app.Status.Sync = argoappv1.SyncStatus{ComparedTo: argoappv1.ComparedTo{Source: app.Spec.Source, Destination: app.Spec.Destination}} + app.Status = v1alpha1.ApplicationStatus{ReconciledAt: &reconciledAt} + app.Status.Sync = v1alpha1.SyncStatus{ComparedTo: v1alpha1.ComparedTo{Source: app.Spec.GetSource(), Destination: app.Spec.Destination, IgnoreDifferences: app.Spec.IgnoreDifferences}} ctrl := newFakeController(&fakeData{ apps: []runtime.Object{app, &defaultProj}, manifestResponse: &apiclient.ManifestResponse{ @@ -1015,9 +1309,37 @@ func TestUpdateReconciledAt(t *testing.T) { } +func TestProjectErrorToCondition(t *testing.T) { + app := newFakeApp() + app.Spec.Project = "wrong project" + ctrl := newFakeController(&fakeData{ + apps: []runtime.Object{app, &defaultProj}, + manifestResponse: &apiclient.ManifestResponse{ + Manifests: []string{}, + Namespace: test.FakeDestNamespace, + Server: test.FakeClusterURL, + Revision: "abc123", + }, + managedLiveObjs: 
make(map[kube.ResourceKey]*unstructured.Unstructured), + }) + key, _ := cache.MetaNamespaceKeyFunc(app) + ctrl.appRefreshQueue.Add(key) + ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil) + + ctrl.processAppRefreshQueueItem() + + obj, ok, err := ctrl.appInformer.GetIndexer().GetByKey(key) + assert.True(t, ok) + assert.NoError(t, err) + updatedApp := obj.(*v1alpha1.Application) + assert.Equal(t, v1alpha1.ApplicationConditionInvalidSpecError, updatedApp.Status.Conditions[0].Type) + assert.Equal(t, "Application referencing project wrong project which does not exist", updatedApp.Status.Conditions[0].Message) + assert.Equal(t, v1alpha1.ApplicationConditionInvalidSpecError, updatedApp.Status.Conditions[0].Type) +} + func TestFinalizeProjectDeletion_HasApplications(t *testing.T) { app := newFakeApp() - proj := &argoappv1.AppProject{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: test.FakeArgoCDNamespace}} + proj := &v1alpha1.AppProject{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: test.FakeArgoCDNamespace}} ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, proj}}) fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset) @@ -1033,7 +1355,7 @@ func TestFinalizeProjectDeletion_HasApplications(t *testing.T) { } func TestFinalizeProjectDeletion_DoesNotHaveApplications(t *testing.T) { - proj := &argoappv1.AppProject{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: test.FakeArgoCDNamespace}} + proj := &v1alpha1.AppProject{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: test.FakeArgoCDNamespace}} ctrl := newFakeController(&fakeData{apps: []runtime.Object{&defaultProj}}) fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset) @@ -1056,9 +1378,9 @@ func TestFinalizeProjectDeletion_DoesNotHaveApplications(t *testing.T) { func TestProcessRequestedAppOperation_FailedNoRetries(t *testing.T) { app := newFakeApp() - app.Spec.Project = "invalid-project" - app.Operation = &argoappv1.Operation{ - Sync: 
&argoappv1.SyncOperation{}, + app.Spec.Project = "default" + app.Operation = &v1alpha1.Operation{ + Sync: &v1alpha1.SyncOperation{}, } ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}) fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset) @@ -1079,10 +1401,13 @@ func TestProcessRequestedAppOperation_FailedNoRetries(t *testing.T) { func TestProcessRequestedAppOperation_InvalidDestination(t *testing.T) { app := newFakeAppWithDestMismatch() app.Spec.Project = "test-project" - app.Operation = &argoappv1.Operation{ - Sync: &argoappv1.SyncOperation{}, + app.Operation = &v1alpha1.Operation{ + Sync: &v1alpha1.SyncOperation{}, } - ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}) + proj := defaultProj + proj.Name = "test-project" + proj.Spec.SourceNamespaces = []string{test.FakeArgoCDNamespace} + ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &proj}}) fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset) receivedPatch := map[string]interface{}{} func() { @@ -1107,9 +1432,9 @@ func TestProcessRequestedAppOperation_InvalidDestination(t *testing.T) { func TestProcessRequestedAppOperation_FailedHasRetries(t *testing.T) { app := newFakeApp() app.Spec.Project = "invalid-project" - app.Operation = &argoappv1.Operation{ - Sync: &argoappv1.SyncOperation{}, - Retry: argoappv1.RetryStrategy{Limit: 1}, + app.Operation = &v1alpha1.Operation{ + Sync: &v1alpha1.SyncOperation{}, + Retry: v1alpha1.RetryStrategy{Limit: 1}, } ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}) fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset) @@ -1133,12 +1458,12 @@ func TestProcessRequestedAppOperation_FailedHasRetries(t *testing.T) { func TestProcessRequestedAppOperation_RunningPreviouslyFailed(t *testing.T) { app := newFakeApp() - app.Operation = &argoappv1.Operation{ - Sync: &argoappv1.SyncOperation{}, - Retry: argoappv1.RetryStrategy{Limit: 1}, + app.Operation = &v1alpha1.Operation{ + Sync: 
&v1alpha1.SyncOperation{}, + Retry: v1alpha1.RetryStrategy{Limit: 1}, } app.Status.OperationState.Phase = synccommon.OperationRunning - app.Status.OperationState.SyncResult.Resources = []*argoappv1.ResourceResult{{ + app.Status.OperationState.SyncResult.Resources = []*v1alpha1.ResourceResult{{ Name: "guestbook", Kind: "Deployment", Group: "apps", @@ -1172,9 +1497,9 @@ func TestProcessRequestedAppOperation_RunningPreviouslyFailed(t *testing.T) { func TestProcessRequestedAppOperation_HasRetriesTerminated(t *testing.T) { app := newFakeApp() - app.Operation = &argoappv1.Operation{ - Sync: &argoappv1.SyncOperation{}, - Retry: argoappv1.RetryStrategy{Limit: 10}, + app.Operation = &v1alpha1.Operation{ + Sync: &v1alpha1.SyncOperation{}, + Retry: v1alpha1.RetryStrategy{Limit: 10}, } app.Status.OperationState.Phase = synccommon.OperationTerminating @@ -1244,19 +1569,19 @@ func TestGetAppHosts(t *testing.T) { })).Return(nil) ctrl.stateCache = mockStateCache - hosts, err := ctrl.getAppHosts(app, []argoappv1.ResourceNode{{ - ResourceRef: argoappv1.ResourceRef{Name: "pod1", Namespace: "default", Kind: kube.PodKind}, - Info: []argoappv1.InfoItem{{ + hosts, err := ctrl.getAppHosts(app, []v1alpha1.ResourceNode{{ + ResourceRef: v1alpha1.ResourceRef{Name: "pod1", Namespace: "default", Kind: kube.PodKind}, + Info: []v1alpha1.InfoItem{{ Name: "Host", Value: "Minikube", }}, }}) assert.NoError(t, err) - assert.Equal(t, []argoappv1.HostInfo{{ + assert.Equal(t, []v1alpha1.HostInfo{{ Name: "minikube", SystemInfo: corev1.NodeSystemInfo{OSImage: "debian"}, - ResourcesInfo: []argoappv1.HostResourceInfo{{ + ResourcesInfo: []v1alpha1.HostResourceInfo{{ ResourceName: corev1.ResourceCPU, Capacity: 5000, RequestedByApp: 1000, RequestedByNeighbors: 2000}, }}}, hosts) } @@ -1270,3 +1595,134 @@ func TestMetricsExpiration(t *testing.T) { ctrl = newFakeController(&fakeData{apps: []runtime.Object{app}, metricsCacheExpiration: 10 * time.Second}) assert.True(t, ctrl.metricsServer.HasExpiration()) } + +func 
TestToAppKey(t *testing.T) { + ctrl := newFakeController(&fakeData{}) + tests := []struct { + name string + input string + expected string + }{ + {"From instance name", "foo_bar", "foo/bar"}, + {"From qualified name", "foo/bar", "foo/bar"}, + {"From unqualified name", "bar", ctrl.namespace + "/bar"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.expected, ctrl.toAppKey(tt.input)) + }) + } +} + +func Test_canProcessApp(t *testing.T) { + app := newFakeApp() + ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}) + ctrl.applicationNamespaces = []string{"good"} + t.Run("without cluster filter, good namespace", func(t *testing.T) { + app.Namespace = "good" + canProcess := ctrl.canProcessApp(app) + assert.True(t, canProcess) + }) + t.Run("without cluster filter, bad namespace", func(t *testing.T) { + app.Namespace = "bad" + canProcess := ctrl.canProcessApp(app) + assert.False(t, canProcess) + }) + t.Run("with cluster filter, good namespace", func(t *testing.T) { + app.Namespace = "good" + ctrl.clusterFilter = func(_ *v1alpha1.Cluster) bool { return true } + canProcess := ctrl.canProcessApp(app) + assert.True(t, canProcess) + }) + t.Run("with cluster filter, bad namespace", func(t *testing.T) { + app.Namespace = "bad" + ctrl.clusterFilter = func(_ *v1alpha1.Cluster) bool { return true } + canProcess := ctrl.canProcessApp(app) + assert.False(t, canProcess) + }) +} + +func Test_canProcessAppSkipReconcileAnnotation(t *testing.T) { + appSkipReconcileInvalid := newFakeApp() + appSkipReconcileInvalid.Annotations = map[string]string{common.AnnotationKeyAppSkipReconcile: "invalid-value"} + appSkipReconcileFalse := newFakeApp() + appSkipReconcileFalse.Annotations = map[string]string{common.AnnotationKeyAppSkipReconcile: "false"} + appSkipReconcileTrue := newFakeApp() + appSkipReconcileTrue.Annotations = map[string]string{common.AnnotationKeyAppSkipReconcile: "true"} + ctrl := newFakeController(&fakeData{}) + tests := 
[]struct { + name string + input interface{} + expected bool + }{ + {"No skip reconcile annotation", newFakeApp(), true}, + {"Contains skip reconcile annotation ", appSkipReconcileInvalid, true}, + {"Contains skip reconcile annotation value false", appSkipReconcileFalse, true}, + {"Contains skip reconcile annotation value true", appSkipReconcileTrue, false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.expected, ctrl.canProcessApp(tt.input)) + }) + } +} + +func Test_syncDeleteOption(t *testing.T) { + app := newFakeApp() + ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}) + cm := newFakeCM() + t.Run("without delete option object is deleted", func(t *testing.T) { + cmObj := kube.MustToUnstructured(&cm) + delete := ctrl.shouldBeDeleted(app, cmObj) + assert.True(t, delete) + }) + t.Run("with delete set to false object is retained", func(t *testing.T) { + cmObj := kube.MustToUnstructured(&cm) + cmObj.SetAnnotations(map[string]string{"argocd.argoproj.io/sync-options": "Delete=false"}) + delete := ctrl.shouldBeDeleted(app, cmObj) + assert.False(t, delete) + }) + t.Run("with delete set to false object is retained", func(t *testing.T) { + cmObj := kube.MustToUnstructured(&cm) + cmObj.SetAnnotations(map[string]string{"helm.sh/resource-policy": "keep"}) + delete := ctrl.shouldBeDeleted(app, cmObj) + assert.False(t, delete) + }) +} + +func TestAddControllerNamespace(t *testing.T) { + t.Run("set controllerNamespace when the app is in the controller namespace", func(t *testing.T) { + app := newFakeApp() + ctrl := newFakeController(&fakeData{ + apps: []runtime.Object{app, &defaultProj}, + manifestResponse: &apiclient.ManifestResponse{}, + }) + + ctrl.processAppRefreshQueueItem() + + updatedApp, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(ctrl.namespace).Get(context.Background(), app.Name, metav1.GetOptions{}) + assert.NoError(t, err) + assert.Equal(t, test.FakeArgoCDNamespace, 
updatedApp.Status.ControllerNamespace) + }) + t.Run("set controllerNamespace when the app is in another namespace than the controller", func(t *testing.T) { + appNamespace := "app-namespace" + + app := newFakeApp() + app.ObjectMeta.Namespace = appNamespace + proj := defaultProj + proj.Spec.SourceNamespaces = []string{appNamespace} + ctrl := newFakeController(&fakeData{ + apps: []runtime.Object{app, &proj}, + manifestResponse: &apiclient.ManifestResponse{}, + applicationNamespaces: []string{appNamespace}, + }) + + ctrl.processAppRefreshQueueItem() + + updatedApp, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(appNamespace).Get(context.Background(), app.Name, metav1.GetOptions{}) + assert.NoError(t, err) + assert.Equal(t, test.FakeArgoCDNamespace, updatedApp.Status.ControllerNamespace) + }) +} diff --git a/controller/cache/cache.go b/controller/cache/cache.go index ef90974387d37..9eac161714089 100644 --- a/controller/cache/cache.go +++ b/controller/cache/cache.go @@ -2,9 +2,17 @@ package cache import ( "context" + "errors" "fmt" + "math" + "net" + "net/url" + "os/exec" "reflect" + "strings" "sync" + "syscall" + "time" clustercache "github.com/argoproj/gitops-engine/pkg/cache" "github.com/argoproj/gitops-engine/pkg/health" @@ -12,31 +20,104 @@ import ( log "github.com/sirupsen/logrus" "golang.org/x/sync/semaphore" v1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" - "github.com/argoproj/argo-cd/v2/common" "github.com/argoproj/argo-cd/v2/controller/metrics" + "github.com/argoproj/argo-cd/v2/pkg/apis/application" appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" "github.com/argoproj/argo-cd/v2/util/argo" "github.com/argoproj/argo-cd/v2/util/db" + "github.com/argoproj/argo-cd/v2/util/env" 
logutils "github.com/argoproj/argo-cd/v2/util/log" "github.com/argoproj/argo-cd/v2/util/lua" "github.com/argoproj/argo-cd/v2/util/settings" ) +const ( + // EnvClusterCacheResyncDuration is the env variable that holds cluster cache re-sync duration + EnvClusterCacheResyncDuration = "ARGOCD_CLUSTER_CACHE_RESYNC_DURATION" + + // EnvClusterCacheWatchResyncDuration is the env variable that holds cluster cache watch re-sync duration + EnvClusterCacheWatchResyncDuration = "ARGOCD_CLUSTER_CACHE_WATCH_RESYNC_DURATION" + + // EnvClusterSyncRetryTimeoutDuration is the env variable that holds cluster retry duration when sync error happens + EnvClusterSyncRetryTimeoutDuration = "ARGOCD_CLUSTER_SYNC_RETRY_TIMEOUT_DURATION" + + // EnvClusterCacheListPageSize is the env variable to control size of the list page size when making K8s queries + EnvClusterCacheListPageSize = "ARGOCD_CLUSTER_CACHE_LIST_PAGE_SIZE" + + // EnvClusterCacheListPageBufferSize is the env variable to control the number of pages to buffer when making a K8s query to list resources + EnvClusterCacheListPageBufferSize = "ARGOCD_CLUSTER_CACHE_LIST_PAGE_BUFFER_SIZE" + + // EnvClusterCacheListSemaphore is the env variable to control size of the list semaphore + // This is used to limit the number of concurrent memory consuming operations on the + // k8s list queries results across all clusters to avoid memory spikes during cache initialization. 
+ EnvClusterCacheListSemaphore = "ARGOCD_CLUSTER_CACHE_LIST_SEMAPHORE" + + // EnvClusterCacheAttemptLimit is the env variable to control the retry limit for listing resources during cluster cache sync + EnvClusterCacheAttemptLimit = "ARGOCD_CLUSTER_CACHE_ATTEMPT_LIMIT" + + // EnvClusterCacheRetryUseBackoff is the env variable to control whether to use a backoff strategy with the retry during cluster cache sync + EnvClusterCacheRetryUseBackoff = "ARGOCD_CLUSTER_CACHE_RETRY_USE_BACKOFF" +) + +// GitOps engine cluster cache tuning options +var ( + // clusterCacheResyncDuration controls the duration of cluster cache refresh. + // NOTE: this differs from gitops-engine default of 24h + clusterCacheResyncDuration = 12 * time.Hour + + // clusterCacheWatchResyncDuration controls the maximum duration that group/kind watches are allowed to run + // for before relisting & restarting the watch + clusterCacheWatchResyncDuration = 10 * time.Minute + + // clusterSyncRetryTimeoutDuration controls the sync retry duration when cluster sync error happens + clusterSyncRetryTimeoutDuration = 10 * time.Second + + // The default limit of 50 is chosen based on experiments. + clusterCacheListSemaphoreSize int64 = 50 + + // clusterCacheListPageSize is the page size when performing K8s list requests. + // 500 is equal to kubectl's size + clusterCacheListPageSize int64 = 500 + + // clusterCacheListPageBufferSize is the number of pages to buffer when performing K8s list requests + clusterCacheListPageBufferSize int32 = 1 + + // clusterCacheAttemptLimit sets a retry limit for failed requests during cluster cache sync + // If set to 1, retries are disabled.
+ clusterCacheAttemptLimit int32 = 1 + + // clusterCacheRetryUseBackoff specifies whether to use a backoff strategy on cluster cache sync, if retry is enabled + clusterCacheRetryUseBackoff bool = false +) + +func init() { + clusterCacheResyncDuration = env.ParseDurationFromEnv(EnvClusterCacheResyncDuration, clusterCacheResyncDuration, 0, math.MaxInt64) + clusterCacheWatchResyncDuration = env.ParseDurationFromEnv(EnvClusterCacheWatchResyncDuration, clusterCacheWatchResyncDuration, 0, math.MaxInt64) + clusterSyncRetryTimeoutDuration = env.ParseDurationFromEnv(EnvClusterSyncRetryTimeoutDuration, clusterSyncRetryTimeoutDuration, 0, math.MaxInt64) + clusterCacheListPageSize = env.ParseInt64FromEnv(EnvClusterCacheListPageSize, clusterCacheListPageSize, 0, math.MaxInt64) + clusterCacheListPageBufferSize = int32(env.ParseNumFromEnv(EnvClusterCacheListPageBufferSize, int(clusterCacheListPageBufferSize), 1, math.MaxInt32)) + clusterCacheListSemaphoreSize = env.ParseInt64FromEnv(EnvClusterCacheListSemaphore, clusterCacheListSemaphoreSize, 0, math.MaxInt64) + clusterCacheAttemptLimit = int32(env.ParseNumFromEnv(EnvClusterCacheAttemptLimit, int(clusterCacheAttemptLimit), 1, math.MaxInt32)) + clusterCacheRetryUseBackoff = env.ParseBoolFromEnv(EnvClusterCacheRetryUseBackoff, false) +} + type LiveStateCache interface { // Returns k8s server version - GetVersionsInfo(serverURL string) (string, []metav1.APIGroup, error) + GetVersionsInfo(serverURL string) (string, []kube.APIResourceInfo, error) // Returns true of given group kind is a namespaced resource IsNamespaced(server string, gk schema.GroupKind) (bool, error) // Returns synced cluster cache GetClusterCache(server string) (clustercache.ClusterCache, error) // Executes give callback against resource specified by the key and all its children - IterateHierarchy(server string, key kube.ResourceKey, action func(child appv1.ResourceNode, appName string)) error + IterateHierarchy(server string, key kube.ResourceKey, action func(child 
appv1.ResourceNode, appName string) bool) error // Returns state of live nodes which correspond for target nodes of specified application. GetManagedLiveObjs(a *appv1.Application, targetObjs []*unstructured.Unstructured) (map[kube.ResourceKey]*unstructured.Unstructured, error) // IterateResources iterates all resource stored in cache @@ -56,6 +137,7 @@ type ObjectUpdatedHandler = func(managedByApp map[string]bool, ref v1.ObjectRefe type PodInfo struct { NodeName string ResourceRequests v1.ResourceList + Phase v1.PodPhase } type NodeInfo struct { @@ -75,6 +157,8 @@ type ResourceInfo struct { PodInfo *PodInfo // NodeInfo is available for nodes only NodeInfo *NodeInfo + + manifestHash string } func NewLiveStateCache( @@ -84,39 +168,42 @@ func NewLiveStateCache( kubectl kube.Kubectl, metricsServer *metrics.MetricsServer, onObjectUpdated ObjectUpdatedHandler, - clusterFilter func(cluster *appv1.Cluster) bool) LiveStateCache { + clusterFilter func(cluster *appv1.Cluster) bool, + resourceTracking argo.ResourceTracking) LiveStateCache { return &liveStateCache{ - appInformer: appInformer, - db: db, - clusters: make(map[string]clustercache.ClusterCache), - onObjectUpdated: onObjectUpdated, - kubectl: kubectl, - settingsMgr: settingsMgr, - metricsServer: metricsServer, - // The default limit of 50 is chosen based on experiments. 
- listSemaphore: semaphore.NewWeighted(50), - clusterFilter: clusterFilter, + appInformer: appInformer, + db: db, + clusters: make(map[string]clustercache.ClusterCache), + onObjectUpdated: onObjectUpdated, + kubectl: kubectl, + settingsMgr: settingsMgr, + metricsServer: metricsServer, + clusterFilter: clusterFilter, + resourceTracking: resourceTracking, } } type cacheSettings struct { clusterSettings clustercache.Settings appInstanceLabelKey string + trackingMethod appv1.TrackingMethod + // resourceOverrides provides a list of ignored differences to ignore watched resource updates + resourceOverrides map[string]appv1.ResourceOverride + + // ignoreResourceUpdates is a flag to enable resource-ignore rules. + ignoreResourceUpdatesEnabled bool } type liveStateCache struct { - db db.ArgoDB - appInformer cache.SharedIndexInformer - onObjectUpdated ObjectUpdatedHandler - kubectl kube.Kubectl - settingsMgr *settings.SettingsManager - metricsServer *metrics.MetricsServer - clusterFilter func(cluster *appv1.Cluster) bool - - // listSemaphore is used to limit the number of concurrent memory consuming operations on the - // k8s list queries results across all clusters to avoid memory spikes during cache initialization. 
- listSemaphore *semaphore.Weighted + db db.ArgoDB + appInformer cache.SharedIndexInformer + onObjectUpdated ObjectUpdatedHandler + kubectl kube.Kubectl + settingsMgr *settings.SettingsManager + metricsServer *metrics.MetricsServer + clusterFilter func(cluster *appv1.Cluster) bool + resourceTracking argo.ResourceTracking clusters map[string]clustercache.ClusterCache cacheSettings cacheSettings @@ -128,6 +215,14 @@ func (c *liveStateCache) loadCacheSettings() (*cacheSettings, error) { if err != nil { return nil, err } + resourceUpdatesOverrides, err := c.settingsMgr.GetIgnoreResourceUpdatesOverrides() + if err != nil { + return nil, err + } + ignoreResourceUpdatesEnabled, err := c.settingsMgr.GetIsIgnoreResourceUpdatesEnabled() + if err != nil { + return nil, err + } resourcesFilter, err := c.settingsMgr.GetResourcesFilter() if err != nil { return nil, err @@ -140,7 +235,8 @@ func (c *liveStateCache) loadCacheSettings() (*cacheSettings, error) { ResourceHealthOverride: lua.ResourceHealthOverrides(resourceOverrides), ResourcesFilter: resourcesFilter, } - return &cacheSettings{clusterSettings, appInstanceLabelKey}, nil + + return &cacheSettings{clusterSettings, appInstanceLabelKey, argo.GetTrackingMethod(c.settingsMgr), resourceUpdatesOverrides, ignoreResourceUpdatesEnabled}, nil } func asResourceNode(r *clustercache.Resource) appv1.ResourceNode { @@ -149,10 +245,10 @@ func asResourceNode(r *clustercache.Resource) appv1.ResourceNode { gv = schema.GroupVersion{} } parentRefs := make([]appv1.ResourceRef, len(r.OwnerRefs)) - for _, ownerRef := range r.OwnerRefs { + for i, ownerRef := range r.OwnerRefs { ownerGvk := schema.FromAPIVersionAndKind(ownerRef.APIVersion, ownerRef.Kind) ownerKey := kube.NewResourceKey(ownerGvk.Group, ownerRef.Kind, r.Ref.Namespace, ownerRef.Name) - parentRefs[0] = appv1.ResourceRef{Name: ownerRef.Name, Kind: ownerKey.Kind, Namespace: r.Ref.Namespace, Group: ownerKey.Group, UID: string(ownerRef.UID)} + parentRefs[i] = appv1.ResourceRef{Name: 
ownerRef.Name, Kind: ownerKey.Kind, Namespace: r.Ref.Namespace, Group: ownerKey.Group, UID: string(ownerRef.UID)} } var resHealth *appv1.HealthStatus resourceInfo := resInfo(r) @@ -237,6 +333,81 @@ func skipAppRequeuing(key kube.ResourceKey) bool { return ignoredRefreshResources[key.Group+"/"+key.Kind] } +func skipResourceUpdate(oldInfo, newInfo *ResourceInfo) bool { + if oldInfo == nil || newInfo == nil { + return false + } + isSameHealthStatus := (oldInfo.Health == nil && newInfo.Health == nil) || oldInfo.Health != nil && newInfo.Health != nil && oldInfo.Health.Status == newInfo.Health.Status + isSameManifest := oldInfo.manifestHash != "" && newInfo.manifestHash != "" && oldInfo.manifestHash == newInfo.manifestHash + return isSameHealthStatus && isSameManifest +} + +// shouldHashManifest validates if the API resource needs to be hashed. +// If there's an app name from resource tracking, or if this is itself an app, we should generate a hash. +// Otherwise, the hashing should be skipped to save CPU time. +func shouldHashManifest(appName string, gvk schema.GroupVersionKind) bool { + // Only hash if the resource belongs to an app. + // Best - Only hash for resources that are part of an app or their dependencies + // (current) - Only hash for resources that are part of an app + all apps that might be from an ApplicationSet + // Orphan - If orphan is enabled, hash should be made on all resource of that namespace and a config to disable it + // Worst - Hash all resources watched by Argo + return appName != "" || (gvk.Group == application.Group && gvk.Kind == application.ApplicationKind) +} + +// isRetryableError is a helper method to see whether an error +// returned from the dynamic client is potentially retryable. 
+func isRetryableError(err error) bool { + if err == nil { + return false + } + return kerrors.IsInternalError(err) || + kerrors.IsInvalid(err) || + kerrors.IsTooManyRequests(err) || + kerrors.IsServerTimeout(err) || + kerrors.IsServiceUnavailable(err) || + kerrors.IsTimeout(err) || + kerrors.IsUnexpectedObjectError(err) || + kerrors.IsUnexpectedServerError(err) || + isResourceQuotaConflictErr(err) || + isTransientNetworkErr(err) || + isExceededQuotaErr(err) || + errors.Is(err, syscall.ECONNRESET) +} + +func isExceededQuotaErr(err error) bool { + return kerrors.IsForbidden(err) && strings.Contains(err.Error(), "exceeded quota") +} + +func isResourceQuotaConflictErr(err error) bool { + return kerrors.IsConflict(err) && strings.Contains(err.Error(), "Operation cannot be fulfilled on resourcequota") +} + +func isTransientNetworkErr(err error) bool { + switch err.(type) { + case net.Error: + switch err.(type) { + case *net.DNSError, *net.OpError, net.UnknownNetworkError: + return true + case *url.Error: + // For a URL error, where it replies "connection closed" + // retry again. 
+ return strings.Contains(err.Error(), "Connection closed by foreign host") + } + } + + errorString := err.Error() + if exitErr, ok := err.(*exec.ExitError); ok { + errorString = fmt.Sprintf("%s %s", errorString, exitErr.Stderr) + } + if strings.Contains(errorString, "net/http: TLS handshake timeout") || + strings.Contains(errorString, "i/o timeout") || + strings.Contains(errorString, "connection timed out") || + strings.Contains(errorString, "connection reset by peer") { + return true + } + return false +} + func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, error) { c.lock.RLock() clusterCache, ok := c.clusters[server] @@ -257,34 +428,82 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e cluster, err := c.db.GetCluster(context.Background(), server) if err != nil { - return nil, err + return nil, fmt.Errorf("error getting cluster: %w", err) } if !c.canHandleCluster(cluster) { return nil, fmt.Errorf("controller is configured to ignore cluster %s", cluster.Server) } - clusterCache = clustercache.NewClusterCache(cluster.RESTConfig(), - clustercache.SetListSemaphore(c.listSemaphore), - clustercache.SetResyncTimeout(common.K8SClusterResyncDuration), + resourceCustomLabels, err := c.settingsMgr.GetResourceCustomLabels() + if err != nil { + return nil, fmt.Errorf("error getting custom label: %w", err) + } + + respectRBAC, err := c.settingsMgr.RespectRBAC() + if err != nil { + return nil, fmt.Errorf("error getting value for %v: %w", settings.RespectRBAC, err) + } + + clusterCacheConfig := cluster.RESTConfig() + // Controller dynamically fetches all resource types available on the cluster + // using a discovery API that may contain deprecated APIs. + // This causes log flooding when managing a large number of clusters. + // https://github.com/argoproj/argo-cd/issues/11973 + // However, we can safely suppress deprecation warnings + // because we do not rely on resources with a particular API group or version. 
+ // https://kubernetes.io/blog/2020/09/03/warnings/#customize-client-handling + // + // Completely suppress warning logs only for log levels that are less than Debug. + if log.GetLevel() < log.DebugLevel { + clusterCacheConfig.WarningHandler = rest.NoWarnings{} + } + + clusterCacheOpts := []clustercache.UpdateSettingsFunc{ + clustercache.SetListSemaphore(semaphore.NewWeighted(clusterCacheListSemaphoreSize)), + clustercache.SetListPageSize(clusterCacheListPageSize), + clustercache.SetListPageBufferSize(clusterCacheListPageBufferSize), + clustercache.SetWatchResyncTimeout(clusterCacheWatchResyncDuration), + clustercache.SetClusterSyncRetryTimeout(clusterSyncRetryTimeoutDuration), + clustercache.SetResyncTimeout(clusterCacheResyncDuration), clustercache.SetSettings(cacheSettings.clusterSettings), clustercache.SetNamespaces(cluster.Namespaces), + clustercache.SetClusterResources(cluster.ClusterResources), clustercache.SetPopulateResourceInfoHandler(func(un *unstructured.Unstructured, isRoot bool) (interface{}, bool) { res := &ResourceInfo{} - populateNodeInfo(un, res) + populateNodeInfo(un, res, resourceCustomLabels) + c.lock.RLock() + cacheSettings := c.cacheSettings + c.lock.RUnlock() + res.Health, _ = health.GetResourceHealth(un, cacheSettings.clusterSettings.ResourceHealthOverride) - appName := kube.GetAppInstanceLabel(un, cacheSettings.appInstanceLabelKey) + + appName := c.resourceTracking.GetAppName(un, cacheSettings.appInstanceLabelKey, cacheSettings.trackingMethod) if isRoot && appName != "" { res.AppName = appName } + gvk := un.GroupVersionKind() + if cacheSettings.ignoreResourceUpdatesEnabled && shouldHashManifest(appName, gvk) { + hash, err := generateManifestHash(un, nil, cacheSettings.resourceOverrides) + if err != nil { + log.Errorf("Failed to generate manifest hash: %v", err) + } else { + res.manifestHash = hash + } + } + // edge case. we do not label CRDs, so they miss the tracking label we inject. 
But we still // want the full resource to be available in our cache (to diff), so we store all CRDs return res, res.AppName != "" || gvk.Kind == kube.CustomResourceDefinitionKind }), clustercache.SetLogr(logutils.NewLogrusLogger(log.WithField("server", cluster.Server))), - ) + clustercache.SetRetryOptions(clusterCacheAttemptLimit, clusterCacheRetryUseBackoff, isRetryableError), + clustercache.SetRespectRBAC(respectRBAC), + } + + clusterCache = clustercache.NewClusterCache(clusterCacheConfig, clusterCacheOpts...) _ = clusterCache.OnResourceUpdated(func(newRes *clustercache.Resource, oldRes *clustercache.Resource, namespaceResources map[kube.ResourceKey]*clustercache.Resource) { toNotify := make(map[string]bool) @@ -294,6 +513,30 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e } else { ref = oldRes.Ref } + + c.lock.RLock() + cacheSettings := c.cacheSettings + c.lock.RUnlock() + + if cacheSettings.ignoreResourceUpdatesEnabled && oldRes != nil && newRes != nil && skipResourceUpdate(resInfo(oldRes), resInfo(newRes)) { + // Additional check for debug level so we don't need to evaluate the + // format string in case of non-debug scenarios + if log.GetLevel() >= log.DebugLevel { + namespace := ref.Namespace + if ref.Namespace == "" { + namespace = "(cluster-scoped)" + } + log.WithFields(log.Fields{ + "server": cluster.Server, + "namespace": namespace, + "name": ref.Name, + "api-version": ref.APIVersion, + "kind": ref.Kind, + }).Debug("Ignoring change of object because none of the watched resource fields have changed") + } + return + } + for _, r := range []*clustercache.Resource{newRes, oldRes} { if r == nil { continue @@ -320,11 +563,11 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e func (c *liveStateCache) getSyncedCluster(server string) (clustercache.ClusterCache, error) { clusterCache, err := c.getCluster(server) if err != nil { - return nil, err + return nil, fmt.Errorf("error getting cluster: 
%w", err) } err = clusterCache.EnsureSynced() if err != nil { - return nil, err + return nil, fmt.Errorf("error synchronizing cache state : %w", err) } return clusterCache, nil } @@ -332,10 +575,11 @@ func (c *liveStateCache) getSyncedCluster(server string) (clustercache.ClusterCa func (c *liveStateCache) invalidate(cacheSettings cacheSettings) { log.Info("invalidating live state cache") c.lock.Lock() - defer c.lock.Unlock() - c.cacheSettings = cacheSettings - for _, clust := range c.clusters { + clusters := c.clusters + c.lock.Unlock() + + for _, clust := range clusters { clust.Invalidate(clustercache.SetSettings(cacheSettings.clusterSettings)) } log.Info("live state cache invalidated") @@ -349,13 +593,13 @@ func (c *liveStateCache) IsNamespaced(server string, gk schema.GroupKind) (bool, return clusterInfo.IsNamespaced(gk) } -func (c *liveStateCache) IterateHierarchy(server string, key kube.ResourceKey, action func(child appv1.ResourceNode, appName string)) error { +func (c *liveStateCache) IterateHierarchy(server string, key kube.ResourceKey, action func(child appv1.ResourceNode, appName string) bool) error { clusterInfo, err := c.getSyncedCluster(server) if err != nil { return err } - clusterInfo.IterateHierarchy(key, func(resource *clustercache.Resource, namespaceResources map[kube.ResourceKey]*clustercache.Resource) { - action(asResourceNode(resource), getApp(resource, namespaceResources)) + clusterInfo.IterateHierarchy(key, func(resource *clustercache.Resource, namespaceResources map[kube.ResourceKey]*clustercache.Resource) bool { + return action(asResourceNode(resource), getApp(resource, namespaceResources)) }) return nil } @@ -390,19 +634,19 @@ func (c *liveStateCache) GetNamespaceTopLevelResources(server string, namespace func (c *liveStateCache) GetManagedLiveObjs(a *appv1.Application, targetObjs []*unstructured.Unstructured) (map[kube.ResourceKey]*unstructured.Unstructured, error) { clusterInfo, err := c.getSyncedCluster(a.Spec.Destination.Server) if err 
!= nil { - return nil, err + return nil, fmt.Errorf("failed to get cluster info for %q: %w", a.Spec.Destination.Server, err) } return clusterInfo.GetManagedLiveObjs(targetObjs, func(r *clustercache.Resource) bool { - return resInfo(r).AppName == a.Name + return resInfo(r).AppName == a.InstanceName(c.settingsMgr.GetNamespace()) }) } -func (c *liveStateCache) GetVersionsInfo(serverURL string) (string, []metav1.APIGroup, error) { +func (c *liveStateCache) GetVersionsInfo(serverURL string) (string, []kube.APIResourceInfo, error) { clusterInfo, err := c.getSyncedCluster(serverURL) if err != nil { - return "", nil, err + return "", nil, fmt.Errorf("failed to get cluster info for %q: %w", serverURL, err) } - return clusterInfo.GetServerVersion(), clusterInfo.GetAPIGroups(), nil + return clusterInfo.GetServerVersion(), clusterInfo.GetAPIResources(), nil } func (c *liveStateCache) isClusterHasApps(apps []interface{}, cluster *appv1.Cluster) bool { @@ -458,7 +702,7 @@ func (c *liveStateCache) watchSettings(ctx context.Context) { func (c *liveStateCache) Init() error { cacheSettings, err := c.loadCacheSettings() if err != nil { - return err + return fmt.Errorf("error loading cache settings: %w", err) } c.cacheSettings = *cacheSettings return nil @@ -468,7 +712,7 @@ func (c *liveStateCache) Init() error { func (c *liveStateCache) Run(ctx context.Context) error { go c.watchSettings(ctx) - kube.RetryUntilSucceed(ctx, clustercache.ClusterRetryTimeout, "watch clusters", logutils.NewLogrusLogger(log.New()), func() error { + kube.RetryUntilSucceed(ctx, clustercache.ClusterRetryTimeout, "watch clusters", logutils.NewLogrusLogger(logutils.NewWithCurrentConfig()), func() error { return c.db.WatchClusters(ctx, c.handleAddEvent, c.handleModEvent, c.handleDeleteEvent) }) @@ -523,6 +767,9 @@ func (c *liveStateCache) handleModEvent(oldCluster *appv1.Cluster, newCluster *a if !reflect.DeepEqual(oldCluster.Namespaces, newCluster.Namespaces) { updateSettings = append(updateSettings, 
clustercache.SetNamespaces(newCluster.Namespaces)) } + if !reflect.DeepEqual(oldCluster.ClusterResources, newCluster.ClusterResources) { + updateSettings = append(updateSettings, clustercache.SetClusterResources(newCluster.ClusterResources)) + } forceInvalidate := false if newCluster.RefreshRequestedAt != nil && cluster.GetClusterInfo().LastCacheSyncTime != nil && @@ -542,12 +789,14 @@ func (c *liveStateCache) handleModEvent(oldCluster *appv1.Cluster, newCluster *a } func (c *liveStateCache) handleDeleteEvent(clusterServer string) { - c.lock.Lock() - defer c.lock.Unlock() + c.lock.RLock() cluster, ok := c.clusters[clusterServer] + c.lock.RUnlock() if ok { cluster.Invalidate() + c.lock.Lock() delete(c.clusters, clusterServer) + c.lock.Unlock() } } diff --git a/controller/cache/cache_test.go b/controller/cache/cache_test.go index 94fc7154bf160..de2d96eb7aa28 100644 --- a/controller/cache/cache_test.go +++ b/controller/cache/cache_test.go @@ -1,17 +1,36 @@ package cache import ( + "context" + "errors" + "net" + "net/url" + "sync" "testing" + "time" "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + apierr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" "github.com/argoproj/gitops-engine/pkg/cache" "github.com/argoproj/gitops-engine/pkg/cache/mocks" + "github.com/argoproj/gitops-engine/pkg/health" "github.com/stretchr/testify/mock" + "k8s.io/client-go/kubernetes/fake" appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + argosettings "github.com/argoproj/argo-cd/v2/util/settings" ) +type netError string + +func (n netError) Error() string { return string(n) } +func (n netError) Timeout() bool { return false } +func (n netError) Temporary() bool { return false } + func TestHandleModEvent_HasChanges(t *testing.T) { clusterCache := &mocks.ClusterCache{} clusterCache.On("Invalidate", mock.Anything, mock.Anything).Return(nil).Once() @@ -93,3 +112,314 @@ func 
TestHandleAddEvent_ClusterExcluded(t *testing.T) { assert.Len(t, clustersCache.clusters, 0) } + +func TestHandleDeleteEvent_CacheDeadlock(t *testing.T) { + testCluster := &appv1.Cluster{ + Server: "https://mycluster", + Config: appv1.ClusterConfig{Username: "bar"}, + } + fakeClient := fake.NewSimpleClientset() + settingsMgr := argosettings.NewSettingsManager(context.TODO(), fakeClient, "argocd") + externalLockRef := sync.RWMutex{} + gitopsEngineClusterCache := &mocks.ClusterCache{} + clustersCache := liveStateCache{ + clusters: map[string]cache.ClusterCache{ + testCluster.Server: gitopsEngineClusterCache, + }, + clusterFilter: func(cluster *appv1.Cluster) bool { + return true + }, + settingsMgr: settingsMgr, + // Set the lock here so we can reference it later + // nolint We need to overwrite here to have access to the lock + lock: externalLockRef, + } + channel := make(chan string) + // Mocked lock held by the gitops-engine cluster cache + mockMutex := sync.RWMutex{} + // Locks to force trigger condition during test + // Condition order: + // EnsuredSynced -> Locks gitops-engine + // handleDeleteEvent -> Locks liveStateCache + // EnsureSynced via sync, newResource, populateResourceInfoHandler -> attempts to Lock liveStateCache + // handleDeleteEvent via cluster.Invalidate -> attempts to Lock gitops-engine + handleDeleteWasCalled := sync.Mutex{} + engineHoldsLock := sync.Mutex{} + handleDeleteWasCalled.Lock() + engineHoldsLock.Lock() + gitopsEngineClusterCache.On("EnsureSynced").Run(func(args mock.Arguments) { + // Held by EnsureSync calling into sync and watchEvents + mockMutex.Lock() + defer mockMutex.Unlock() + // Continue Execution of timer func + engineHoldsLock.Unlock() + // Wait for handleDeleteEvent to be called triggering the lock + // on the liveStateCache + handleDeleteWasCalled.Lock() + t.Logf("handleDelete was called, EnsureSynced continuing...") + handleDeleteWasCalled.Unlock() + // Try and obtain the lock on the liveStateCache + alreadyFailed := 
!externalLockRef.TryLock() + if alreadyFailed { + channel <- "DEADLOCKED -- EnsureSynced could not obtain lock on liveStateCache" + return + } + externalLockRef.Lock() + t.Logf("EnsureSynce was able to lock liveStateCache") + externalLockRef.Unlock() + }).Return(nil).Once() + gitopsEngineClusterCache.On("Invalidate").Run(func(args mock.Arguments) { + // If deadlock is fixed should be able to acquire lock here + alreadyFailed := !mockMutex.TryLock() + if alreadyFailed { + channel <- "DEADLOCKED -- Invalidate could not obtain lock on gitops-engine" + return + } + mockMutex.Lock() + t.Logf("Invalidate was able to lock gitops-engine cache") + mockMutex.Unlock() + }).Return() + go func() { + // Start the gitops-engine lock holds + go func() { + err := gitopsEngineClusterCache.EnsureSynced() + if err != nil { + assert.Fail(t, err.Error()) + } + }() + // Wait for EnsureSynced to grab the lock for gitops-engine + engineHoldsLock.Lock() + t.Log("EnsureSynced has obtained lock on gitops-engine") + engineHoldsLock.Unlock() + // Run in background + go clustersCache.handleDeleteEvent(testCluster.Server) + // Allow execution to continue on clusters cache call to trigger lock + handleDeleteWasCalled.Unlock() + channel <- "PASSED" + }() + select { + case str := <-channel: + assert.Equal(t, "PASSED", str, str) + case <-time.After(5 * time.Second): + assert.Fail(t, "Ended up in deadlock") + } +} + +func TestIsRetryableError(t *testing.T) { + var ( + tlsHandshakeTimeoutErr net.Error = netError("net/http: TLS handshake timeout") + ioTimeoutErr net.Error = netError("i/o timeout") + connectionTimedout net.Error = netError("connection timed out") + connectionReset net.Error = netError("connection reset by peer") + ) + t.Run("Nil", func(t *testing.T) { + assert.False(t, isRetryableError(nil)) + }) + t.Run("ResourceQuotaConflictErr", func(t *testing.T) { + assert.False(t, isRetryableError(apierr.NewConflict(schema.GroupResource{}, "", nil))) + assert.True(t, 
isRetryableError(apierr.NewConflict(schema.GroupResource{Group: "v1", Resource: "resourcequotas"}, "", nil))) + }) + t.Run("ExceededQuotaErr", func(t *testing.T) { + assert.False(t, isRetryableError(apierr.NewForbidden(schema.GroupResource{}, "", nil))) + assert.True(t, isRetryableError(apierr.NewForbidden(schema.GroupResource{Group: "v1", Resource: "pods"}, "", errors.New("exceeded quota")))) + }) + t.Run("TooManyRequestsDNS", func(t *testing.T) { + assert.True(t, isRetryableError(apierr.NewTooManyRequests("", 0))) + }) + t.Run("DNSError", func(t *testing.T) { + assert.True(t, isRetryableError(&net.DNSError{})) + }) + t.Run("OpError", func(t *testing.T) { + assert.True(t, isRetryableError(&net.OpError{})) + }) + t.Run("UnknownNetworkError", func(t *testing.T) { + assert.True(t, isRetryableError(net.UnknownNetworkError(""))) + }) + t.Run("ConnectionClosedErr", func(t *testing.T) { + assert.False(t, isRetryableError(&url.Error{Err: errors.New("")})) + assert.True(t, isRetryableError(&url.Error{Err: errors.New("Connection closed by foreign host")})) + }) + t.Run("TLSHandshakeTimeout", func(t *testing.T) { + assert.True(t, isRetryableError(tlsHandshakeTimeoutErr)) + }) + t.Run("IOHandshakeTimeout", func(t *testing.T) { + assert.True(t, isRetryableError(ioTimeoutErr)) + }) + t.Run("ConnectionTimeout", func(t *testing.T) { + assert.True(t, isRetryableError(connectionTimedout)) + }) + t.Run("ConnectionReset", func(t *testing.T) { + assert.True(t, isRetryableError(connectionReset)) + }) +} + +func Test_asResourceNode_owner_refs(t *testing.T) { + resNode := asResourceNode(&cache.Resource{ + ResourceVersion: "", + Ref: v1.ObjectReference{ + APIVersion: "v1", + }, + OwnerRefs: []metav1.OwnerReference{ + { + APIVersion: "v1", + Kind: "ConfigMap", + Name: "cm-1", + }, + { + APIVersion: "v1", + Kind: "ConfigMap", + Name: "cm-2", + }, + }, + CreationTimestamp: nil, + Info: nil, + Resource: nil, + }) + expected := appv1.ResourceNode{ + ResourceRef: appv1.ResourceRef{ + Version: 
"v1", + }, + ParentRefs: []appv1.ResourceRef{ + { + Group: "", + Kind: "ConfigMap", + Name: "cm-1", + }, + { + Group: "", + Kind: "ConfigMap", + Name: "cm-2", + }, + }, + Info: nil, + NetworkingInfo: nil, + ResourceVersion: "", + Images: nil, + Health: nil, + CreatedAt: nil, + } + assert.Equal(t, expected, resNode) +} + +func TestSkipResourceUpdate(t *testing.T) { + var ( + hash1_x string = "x" + hash2_y string = "y" + hash3_x string = "x" + ) + info := &ResourceInfo{ + manifestHash: hash1_x, + Health: &health.HealthStatus{ + Status: health.HealthStatusHealthy, + Message: "default", + }, + } + t.Run("Nil", func(t *testing.T) { + assert.False(t, skipResourceUpdate(nil, nil)) + }) + t.Run("From Nil", func(t *testing.T) { + assert.False(t, skipResourceUpdate(nil, info)) + }) + t.Run("To Nil", func(t *testing.T) { + assert.False(t, skipResourceUpdate(info, nil)) + }) + t.Run("No hash", func(t *testing.T) { + assert.False(t, skipResourceUpdate(&ResourceInfo{}, &ResourceInfo{})) + }) + t.Run("Same hash", func(t *testing.T) { + assert.True(t, skipResourceUpdate(&ResourceInfo{ + manifestHash: hash1_x, + }, &ResourceInfo{ + manifestHash: hash1_x, + })) + }) + t.Run("Same hash value", func(t *testing.T) { + assert.True(t, skipResourceUpdate(&ResourceInfo{ + manifestHash: hash1_x, + }, &ResourceInfo{ + manifestHash: hash3_x, + })) + }) + t.Run("Different hash value", func(t *testing.T) { + assert.False(t, skipResourceUpdate(&ResourceInfo{ + manifestHash: hash1_x, + }, &ResourceInfo{ + manifestHash: hash2_y, + })) + }) + t.Run("Same hash, empty health", func(t *testing.T) { + assert.True(t, skipResourceUpdate(&ResourceInfo{ + manifestHash: hash1_x, + Health: &health.HealthStatus{}, + }, &ResourceInfo{ + manifestHash: hash3_x, + Health: &health.HealthStatus{}, + })) + }) + t.Run("Same hash, old health", func(t *testing.T) { + assert.False(t, skipResourceUpdate(&ResourceInfo{ + manifestHash: hash1_x, + Health: &health.HealthStatus{ + Status: health.HealthStatusHealthy}, + }, 
&ResourceInfo{ + manifestHash: hash3_x, + Health: nil, + })) + }) + t.Run("Same hash, new health", func(t *testing.T) { + assert.False(t, skipResourceUpdate(&ResourceInfo{ + manifestHash: hash1_x, + Health: &health.HealthStatus{}, + }, &ResourceInfo{ + manifestHash: hash3_x, + Health: &health.HealthStatus{ + Status: health.HealthStatusHealthy, + }, + })) + }) + t.Run("Same hash, same health", func(t *testing.T) { + assert.True(t, skipResourceUpdate(&ResourceInfo{ + manifestHash: hash1_x, + Health: &health.HealthStatus{ + Status: health.HealthStatusHealthy, + Message: "same", + }, + }, &ResourceInfo{ + manifestHash: hash3_x, + Health: &health.HealthStatus{ + Status: health.HealthStatusHealthy, + Message: "same", + }, + })) + }) + t.Run("Same hash, different health status", func(t *testing.T) { + assert.False(t, skipResourceUpdate(&ResourceInfo{ + manifestHash: hash1_x, + Health: &health.HealthStatus{ + Status: health.HealthStatusHealthy, + Message: "same", + }, + }, &ResourceInfo{ + manifestHash: hash3_x, + Health: &health.HealthStatus{ + Status: health.HealthStatusDegraded, + Message: "same", + }, + })) + }) + t.Run("Same hash, different health message", func(t *testing.T) { + assert.True(t, skipResourceUpdate(&ResourceInfo{ + manifestHash: hash1_x, + Health: &health.HealthStatus{ + Status: health.HealthStatusHealthy, + Message: "same", + }, + }, &ResourceInfo{ + manifestHash: hash3_x, + Health: &health.HealthStatus{ + Status: health.HealthStatusHealthy, + Message: "different", + }, + })) + }) +} diff --git a/controller/cache/info.go b/controller/cache/info.go index d015170e7b91b..cf0d12318a447 100644 --- a/controller/cache/info.go +++ b/controller/cache/info.go @@ -1,11 +1,16 @@ package cache import ( + "errors" "fmt" + "strconv" "strings" + "k8s.io/apimachinery/pkg/runtime/schema" + "github.com/argoproj/gitops-engine/pkg/utils/kube" "github.com/argoproj/gitops-engine/pkg/utils/text" + "github.com/cespare/xxhash/v2" v1 "k8s.io/api/core/v1" 
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -13,39 +18,44 @@ import ( "github.com/argoproj/argo-cd/v2/common" "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/argoproj/argo-cd/v2/util/argo/normalizers" "github.com/argoproj/argo-cd/v2/util/resource" ) -func populateNodeInfo(un *unstructured.Unstructured, res *ResourceInfo) { +func populateNodeInfo(un *unstructured.Unstructured, res *ResourceInfo, customLabels []string) { gvk := un.GroupVersionKind() revision := resource.GetRevision(un) if revision > 0 { res.Info = append(res.Info, v1alpha1.InfoItem{Name: "Revision", Value: fmt.Sprintf("Rev:%v", revision)}) } + if len(customLabels) > 0 { + if labels := un.GetLabels(); labels != nil { + for _, customLabel := range customLabels { + if value, ok := labels[customLabel]; ok { + res.Info = append(res.Info, v1alpha1.InfoItem{Name: customLabel, Value: value}) + } + } + } + } switch gvk.Group { case "": switch gvk.Kind { case kube.PodKind: populatePodInfo(un, res) - return case kube.ServiceKind: populateServiceInfo(un, res) - return case "Node": populateHostNodeInfo(un, res) - return } case "extensions", "networking.k8s.io": switch gvk.Kind { case kube.IngressKind: populateIngressInfo(un, res) - return } case "networking.istio.io": switch gvk.Kind { case "VirtualService": populateIstioVirtualServiceInfo(un, res) - return } } @@ -86,16 +96,36 @@ func populateServiceInfo(un *unstructured.Unstructured, res *ResourceInfo) { res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{TargetLabels: targetLabels, Ingress: ingress} } +func getServiceName(backend map[string]interface{}, gvk schema.GroupVersionKind) (string, error) { + switch gvk.Group { + case "extensions": + return fmt.Sprintf("%s", backend["serviceName"]), nil + case "networking.k8s.io": + switch gvk.Version { + case "v1beta1": + return fmt.Sprintf("%s", backend["serviceName"]), nil + case "v1": + if service, ok, err := 
unstructured.NestedMap(backend, "service"); ok && err == nil { + return fmt.Sprintf("%s", service["name"]), nil + } + } + } + return "", errors.New("unable to resolve string") +} + func populateIngressInfo(un *unstructured.Unstructured, res *ResourceInfo) { ingress := getIngress(un) targetsMap := make(map[v1alpha1.ResourceRef]bool) + gvk := un.GroupVersionKind() if backend, ok, err := unstructured.NestedMap(un.Object, "spec", "backend"); ok && err == nil { - targetsMap[v1alpha1.ResourceRef{ - Group: "", - Kind: kube.ServiceKind, - Namespace: un.GetNamespace(), - Name: fmt.Sprintf("%s", backend["serviceName"]), - }] = true + if serviceName, err := getServiceName(backend, gvk); err == nil { + targetsMap[v1alpha1.ResourceRef{ + Group: "", + Kind: kube.ServiceKind, + Namespace: un.GetNamespace(), + Name: serviceName, + }] = true + } } urlsSet := make(map[string]bool) if rules, ok, err := unstructured.NestedSlice(un.Object, "spec", "rules"); ok && err == nil { @@ -123,13 +153,15 @@ func populateIngressInfo(un *unstructured.Unstructured, res *ResourceInfo) { continue } - if serviceName, ok, err := unstructured.NestedString(path, "backend", "serviceName"); ok && err == nil { - targetsMap[v1alpha1.ResourceRef{ - Group: "", - Kind: kube.ServiceKind, - Namespace: un.GetNamespace(), - Name: serviceName, - }] = true + if backend, ok, err := unstructured.NestedMap(path, "backend"); ok && err == nil { + if serviceName, err := getServiceName(backend, gvk); err == nil { + targetsMap[v1alpha1.ResourceRef{ + Group: "", + Kind: kube.ServiceKind, + Namespace: un.GetNamespace(), + Name: serviceName, + }] = true + } } if host == nil || host == "" { @@ -146,6 +178,17 @@ func populateIngressInfo(un *unstructured.Unstructured, res *ResourceInfo) { tlshost := tlsline["host"] if tlshost == host { stringPort = "https" + continue + } + if hosts := tlsline["hosts"]; hosts != nil { + tlshosts, ok := tlsline["hosts"].(map[string]interface{}) + if ok { + for j := range tlshosts { + if tlshosts[j] 
== host { + stringPort = "https" + } + } + } } } } @@ -324,7 +367,7 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) { } req, _ := resourcehelper.PodRequestsAndLimits(&pod) - res.PodInfo = &PodInfo{NodeName: pod.Spec.NodeName, ResourceRequests: req} + res.PodInfo = &PodInfo{NodeName: pod.Spec.NodeName, ResourceRequests: req, Phase: pod.Status.Phase} res.Info = append(res.Info, v1alpha1.InfoItem{Name: "Node", Value: pod.Spec.NodeName}) res.Info = append(res.Info, v1alpha1.InfoItem{Name: "Containers", Value: fmt.Sprintf("%d/%d", readyContainers, totalContainers)}) @@ -346,3 +389,27 @@ func populateHostNodeInfo(un *unstructured.Unstructured, res *ResourceInfo) { SystemInfo: node.Status.NodeInfo, } } + +func generateManifestHash(un *unstructured.Unstructured, ignores []v1alpha1.ResourceIgnoreDifferences, overrides map[string]v1alpha1.ResourceOverride) (string, error) { + normalizer, err := normalizers.NewIgnoreNormalizer(ignores, overrides) + if err != nil { + return "", fmt.Errorf("error creating normalizer: %w", err) + } + + resource := un.DeepCopy() + err = normalizer.Normalize(resource) + if err != nil { + return "", fmt.Errorf("error normalizing resource: %w", err) + } + + data, err := resource.MarshalJSON() + if err != nil { + return "", fmt.Errorf("error marshaling resource: %w", err) + } + hash := hash(data) + return hash, nil +} + +func hash(data []byte) string { + return strconv.FormatUint(xxhash.Sum64(data), 16) +} diff --git a/controller/cache/info_test.go b/controller/cache/info_test.go index 9dcb2b08f04e6..8a06d3745e13b 100644 --- a/controller/cache/info_test.go +++ b/controller/cache/info_test.go @@ -9,11 +9,11 @@ import ( "github.com/argoproj/gitops-engine/pkg/utils/kube" "github.com/argoproj/pkg/errors" - "github.com/ghodss/yaml" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/yaml" 
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" ) @@ -43,6 +43,25 @@ var ( ingress: - hostname: localhost`) + testLinkAnnotatedService = strToUnstructured(` + apiVersion: v1 + kind: Service + metadata: + name: helm-guestbook + namespace: default + resourceVersion: "123" + uid: "4" + annotations: + link.argocd.argoproj.io/external-link: http://my-grafana.com/pre-generated-link + spec: + selector: + app: guestbook + type: LoadBalancer + status: + loadBalancer: + ingress: + - hostname: localhost`) + testIngress = strToUnstructured(` apiVersion: extensions/v1beta1 kind: Ingress @@ -74,6 +93,39 @@ var ( ingress: - ip: 107.178.210.11`) + testLinkAnnotatedIngress = strToUnstructured(` + apiVersion: extensions/v1beta1 + kind: Ingress + metadata: + name: helm-guestbook + namespace: default + uid: "4" + annotations: + link.argocd.argoproj.io/external-link: http://my-grafana.com/ingress-link + spec: + backend: + serviceName: not-found-service + servicePort: 443 + rules: + - host: helm-guestbook.com + http: + paths: + - backend: + serviceName: helm-guestbook + servicePort: 443 + path: / + - backend: + serviceName: helm-guestbook + servicePort: https + path: / + tls: + - host: helm-guestbook.com + secretName: my-tls-secret + status: + loadBalancer: + ingress: + - ip: 107.178.210.11`) + testIngressWildCardPath = strToUnstructured(` apiVersion: extensions/v1beta1 kind: Ingress @@ -133,6 +185,43 @@ var ( ingress: - ip: 107.178.210.11`) + testIngressNetworkingV1 = strToUnstructured(` + apiVersion: networking.k8s.io/v1 + kind: Ingress + metadata: + name: helm-guestbook + namespace: default + uid: "4" + spec: + backend: + service: + name: not-found-service + port: + number: 443 + rules: + - host: helm-guestbook.com + http: + paths: + - backend: + service: + name: helm-guestbook + port: + number: 443 + path: / + - backend: + service: + name: helm-guestbook + port: + name: https + path: / + tls: + - host: helm-guestbook.com + secretName: my-tls-secret + status: + 
loadBalancer: + ingress: + - ip: 107.178.210.11`) + testIstioVirtualService = strToUnstructured(` apiVersion: networking.istio.io/v1alpha3 kind: VirtualService @@ -182,7 +271,7 @@ func TestGetPodInfo(t *testing.T) { `) info := &ResourceInfo{} - populateNodeInfo(pod, info) + populateNodeInfo(pod, info, []string{}) assert.Equal(t, []v1alpha1.InfoItem{ {Name: "Node", Value: "minikube"}, {Name: "Containers", Value: "0/1"}, @@ -213,7 +302,7 @@ status: `) info := &ResourceInfo{} - populateNodeInfo(node, info) + populateNodeInfo(node, info, []string{}) assert.Equal(t, &NodeInfo{ Name: "minikube", Capacity: v1.ResourceList{v1.ResourceMemory: resource.MustParse("6091320Ki"), v1.ResourceCPU: resource.MustParse("6")}, @@ -223,7 +312,7 @@ status: func TestGetServiceInfo(t *testing.T) { info := &ResourceInfo{} - populateNodeInfo(testService, info) + populateNodeInfo(testService, info, []string{}) assert.Equal(t, 0, len(info.Info)) assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{ TargetLabels: map[string]string{"app": "guestbook"}, @@ -231,9 +320,20 @@ func TestGetServiceInfo(t *testing.T) { }, info.NetworkingInfo) } +func TestGetLinkAnnotatedServiceInfo(t *testing.T) { + info := &ResourceInfo{} + populateNodeInfo(testLinkAnnotatedService, info, []string{}) + assert.Equal(t, 0, len(info.Info)) + assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{ + TargetLabels: map[string]string{"app": "guestbook"}, + Ingress: []v1.LoadBalancerIngress{{Hostname: "localhost"}}, + ExternalURLs: []string{"http://my-grafana.com/pre-generated-link"}, + }, info.NetworkingInfo) +} + func TestGetIstioVirtualServiceInfo(t *testing.T) { info := &ResourceInfo{} - populateNodeInfo(testIstioVirtualService, info) + populateNodeInfo(testIstioVirtualService, info, []string{}) assert.Equal(t, 0, len(info.Info)) require.NotNil(t, info.NetworkingInfo) require.NotNil(t, info.NetworkingInfo.TargetRefs) @@ -255,8 +355,40 @@ func TestGetIstioVirtualServiceInfo(t *testing.T) { } func TestGetIngressInfo(t *testing.T) { 
+ var tests = []struct { + Ingress *unstructured.Unstructured + }{ + {testIngress}, + {testIngressNetworkingV1}, + } + for _, tc := range tests { + info := &ResourceInfo{} + populateNodeInfo(tc.Ingress, info, []string{}) + assert.Equal(t, 0, len(info.Info)) + sort.Slice(info.NetworkingInfo.TargetRefs, func(i, j int) bool { + return strings.Compare(info.NetworkingInfo.TargetRefs[j].Name, info.NetworkingInfo.TargetRefs[i].Name) < 0 + }) + assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{ + Ingress: []v1.LoadBalancerIngress{{IP: "107.178.210.11"}}, + TargetRefs: []v1alpha1.ResourceRef{{ + Namespace: "default", + Group: "", + Kind: kube.ServiceKind, + Name: "not-found-service", + }, { + Namespace: "default", + Group: "", + Kind: kube.ServiceKind, + Name: "helm-guestbook", + }}, + ExternalURLs: []string{"https://helm-guestbook.com/"}, + }, info.NetworkingInfo) + } +} + +func TestGetLinkAnnotatedIngressInfo(t *testing.T) { info := &ResourceInfo{} - populateNodeInfo(testIngress, info) + populateNodeInfo(testLinkAnnotatedIngress, info, []string{}) assert.Equal(t, 0, len(info.Info)) sort.Slice(info.NetworkingInfo.TargetRefs, func(i, j int) bool { return strings.Compare(info.NetworkingInfo.TargetRefs[j].Name, info.NetworkingInfo.TargetRefs[i].Name) < 0 @@ -274,13 +406,13 @@ func TestGetIngressInfo(t *testing.T) { Kind: kube.ServiceKind, Name: "helm-guestbook", }}, - ExternalURLs: []string{"https://helm-guestbook.com/"}, + ExternalURLs: []string{"https://helm-guestbook.com/", "http://my-grafana.com/ingress-link"}, }, info.NetworkingInfo) } func TestGetIngressInfoWildCardPath(t *testing.T) { info := &ResourceInfo{} - populateNodeInfo(testIngressWildCardPath, info) + populateNodeInfo(testIngressWildCardPath, info, []string{}) assert.Equal(t, 0, len(info.Info)) sort.Slice(info.NetworkingInfo.TargetRefs, func(i, j int) bool { return strings.Compare(info.NetworkingInfo.TargetRefs[j].Name, info.NetworkingInfo.TargetRefs[i].Name) < 0 @@ -304,7 +436,7 @@ func 
TestGetIngressInfoWildCardPath(t *testing.T) { func TestGetIngressInfoWithoutTls(t *testing.T) { info := &ResourceInfo{} - populateNodeInfo(testIngressWithoutTls, info) + populateNodeInfo(testIngressWithoutTls, info, []string{}) assert.Equal(t, 0, len(info.Info)) sort.Slice(info.NetworkingInfo.TargetRefs, func(i, j int) bool { return strings.Compare(info.NetworkingInfo.TargetRefs[j].Name, info.NetworkingInfo.TargetRefs[i].Name) < 0 @@ -349,7 +481,7 @@ func TestGetIngressInfoWithHost(t *testing.T) { - ip: 107.178.210.11`) info := &ResourceInfo{} - populateNodeInfo(ingress, info) + populateNodeInfo(ingress, info, []string{}) assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{ Ingress: []v1.LoadBalancerIngress{{IP: "107.178.210.11"}}, @@ -382,7 +514,7 @@ func TestGetIngressInfoNoHost(t *testing.T) { `) info := &ResourceInfo{} - populateNodeInfo(ingress, info) + populateNodeInfo(ingress, info, []string{}) assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{ TargetRefs: []v1alpha1.ResourceRef{{ @@ -396,7 +528,7 @@ func TestGetIngressInfoNoHost(t *testing.T) { } func TestExternalUrlWithSubPath(t *testing.T) { ingress := strToUnstructured(` - apiVersion: extensions/v1beta1 + apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: helm-guestbook @@ -417,14 +549,14 @@ func TestExternalUrlWithSubPath(t *testing.T) { - ip: 107.178.210.11`) info := &ResourceInfo{} - populateNodeInfo(ingress, info) + populateNodeInfo(ingress, info, []string{}) expectedExternalUrls := []string{"https://107.178.210.11/my/sub/path/"} assert.Equal(t, expectedExternalUrls, info.NetworkingInfo.ExternalURLs) } func TestExternalUrlWithMultipleSubPaths(t *testing.T) { ingress := strToUnstructured(` - apiVersion: extensions/v1beta1 + apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: helm-guestbook @@ -453,7 +585,7 @@ func TestExternalUrlWithMultipleSubPaths(t *testing.T) { - ip: 107.178.210.11`) info := &ResourceInfo{} - populateNodeInfo(ingress, info) + populateNodeInfo(ingress, info, 
[]string{}) expectedExternalUrls := []string{"https://helm-guestbook.com/my/sub/path/", "https://helm-guestbook.com/my/sub/path/2", "https://helm-guestbook.com"} actualURLs := info.NetworkingInfo.ExternalURLs @@ -463,7 +595,7 @@ func TestExternalUrlWithMultipleSubPaths(t *testing.T) { } func TestExternalUrlWithNoSubPath(t *testing.T) { ingress := strToUnstructured(` - apiVersion: extensions/v1beta1 + apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: helm-guestbook @@ -483,7 +615,7 @@ func TestExternalUrlWithNoSubPath(t *testing.T) { - ip: 107.178.210.11`) info := &ResourceInfo{} - populateNodeInfo(ingress, info) + populateNodeInfo(ingress, info, []string{}) expectedExternalUrls := []string{"https://107.178.210.11"} assert.Equal(t, expectedExternalUrls, info.NetworkingInfo.ExternalURLs) @@ -511,8 +643,113 @@ func TestExternalUrlWithNetworkingApi(t *testing.T) { - ip: 107.178.210.11`) info := &ResourceInfo{} - populateNodeInfo(ingress, info) + populateNodeInfo(ingress, info, []string{}) expectedExternalUrls := []string{"https://107.178.210.11"} assert.Equal(t, expectedExternalUrls, info.NetworkingInfo.ExternalURLs) } + +func TestCustomLabel(t *testing.T) { + configmap := strToUnstructured(` + apiVersion: v1 + kind: ConfigMap + metadata: + name: cm`) + + info := &ResourceInfo{} + populateNodeInfo(configmap, info, []string{"my-label"}) + + assert.Equal(t, 0, len(info.Info)) + + configmap = strToUnstructured(` + apiVersion: v1 + kind: ConfigMap + metadata: + name: cm + labels: + my-label: value`) + + info = &ResourceInfo{} + populateNodeInfo(configmap, info, []string{"my-label", "other-label"}) + + assert.Equal(t, 1, len(info.Info)) + assert.Equal(t, "my-label", info.Info[0].Name) + assert.Equal(t, "value", info.Info[0].Value) + + configmap = strToUnstructured(` + apiVersion: v1 + kind: ConfigMap + metadata: + name: cm + labels: + my-label: value + other-label: value2`) + + info = &ResourceInfo{} + populateNodeInfo(configmap, info, []string{"my-label", 
"other-label"}) + + assert.Equal(t, 2, len(info.Info)) + assert.Equal(t, "my-label", info.Info[0].Name) + assert.Equal(t, "value", info.Info[0].Value) + assert.Equal(t, "other-label", info.Info[1].Name) + assert.Equal(t, "value2", info.Info[1].Value) +} + +func TestManifestHash(t *testing.T) { + manifest := strToUnstructured(` + apiVersion: v1 + kind: Pod + metadata: + name: helm-guestbook-pod + namespace: default + ownerReferences: + - apiVersion: extensions/v1beta1 + kind: ReplicaSet + name: helm-guestbook-rs + resourceVersion: "123" + labels: + app: guestbook + spec: + nodeName: minikube + containers: + - image: bar + resources: + requests: + memory: 128Mi +`) + + ignores := []v1alpha1.ResourceIgnoreDifferences{ + { + Group: "*", + Kind: "*", + JSONPointers: []string{"/metadata/resourceVersion"}, + }, + } + + data, _ := strToUnstructured(` + apiVersion: v1 + kind: Pod + metadata: + name: helm-guestbook-pod + namespace: default + ownerReferences: + - apiVersion: extensions/v1beta1 + kind: ReplicaSet + name: helm-guestbook-rs + labels: + app: guestbook + spec: + nodeName: minikube + containers: + - image: bar + resources: + requests: + memory: 128Mi +`).MarshalJSON() + + expected := hash(data) + + hash, err := generateManifestHash(manifest, ignores, nil) + assert.Equal(t, expected, hash) + assert.Nil(t, err) +} diff --git a/controller/cache/mocks/LiveStateCache.go b/controller/cache/mocks/LiveStateCache.go index 9e4f9fd33b6c7..7dc4d6b7710e2 100644 --- a/controller/cache/mocks/LiveStateCache.go +++ b/controller/cache/mocks/LiveStateCache.go @@ -17,8 +17,6 @@ import ( unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" ) @@ -113,7 +111,7 @@ func (_m *LiveStateCache) GetNamespaceTopLevelResources(server string, namespace } // GetVersionsInfo provides a mock function with given fields: serverURL -func (_m *LiveStateCache) 
GetVersionsInfo(serverURL string) (string, []v1.APIGroup, error) { +func (_m *LiveStateCache) GetVersionsInfo(serverURL string) (string, []kube.APIResourceInfo, error) { ret := _m.Called(serverURL) var r0 string @@ -123,12 +121,12 @@ func (_m *LiveStateCache) GetVersionsInfo(serverURL string) (string, []v1.APIGro r0 = ret.Get(0).(string) } - var r1 []v1.APIGroup - if rf, ok := ret.Get(1).(func(string) []v1.APIGroup); ok { + var r1 []kube.APIResourceInfo + if rf, ok := ret.Get(1).(func(string) []kube.APIResourceInfo); ok { r1 = rf(serverURL) } else { if ret.Get(1) != nil { - r1 = ret.Get(1).([]v1.APIGroup) + r1 = ret.Get(1).([]kube.APIResourceInfo) } } @@ -178,11 +176,11 @@ func (_m *LiveStateCache) IsNamespaced(server string, gk schema.GroupKind) (bool } // IterateHierarchy provides a mock function with given fields: server, key, action -func (_m *LiveStateCache) IterateHierarchy(server string, key kube.ResourceKey, action func(v1alpha1.ResourceNode, string)) error { +func (_m *LiveStateCache) IterateHierarchy(server string, key kube.ResourceKey, action func(v1alpha1.ResourceNode, string) bool) error { ret := _m.Called(server, key, action) var r0 error - if rf, ok := ret.Get(0).(func(string, kube.ResourceKey, func(v1alpha1.ResourceNode, string)) error); ok { + if rf, ok := ret.Get(0).(func(string, kube.ResourceKey, func(v1alpha1.ResourceNode, string) bool) error); ok { r0 = rf(server, key, action) } else { r0 = ret.Error(0) diff --git a/controller/clusterinfoupdater.go b/controller/clusterinfoupdater.go index 7582807891233..a2f488534aeb0 100644 --- a/controller/clusterinfoupdater.go +++ b/controller/clusterinfoupdater.go @@ -2,8 +2,10 @@ package controller import ( "context" + "fmt" "time" + "github.com/argoproj/argo-cd/v2/util/env" "github.com/argoproj/gitops-engine/pkg/cache" "github.com/argoproj/gitops-engine/pkg/utils/kube" log "github.com/sirupsen/logrus" @@ -19,7 +21,13 @@ import ( ) const ( - secretUpdateInterval = 10 * time.Second + 
defaultSecretUpdateInterval = 10 * time.Second + + EnvClusterInfoTimeout = "ARGO_CD_UPDATE_CLUSTER_INFO_TIMEOUT" +) + +var ( + clusterInfoTimeout = env.ParseDurationFromEnv(EnvClusterInfoTimeout, defaultSecretUpdateInterval, defaultSecretUpdateInterval, 1*time.Minute) ) type clusterInfoUpdater struct { @@ -28,6 +36,9 @@ type clusterInfoUpdater struct { appLister v1alpha1.ApplicationNamespaceLister cache *appstatecache.Cache clusterFilter func(cluster *appv1.Cluster) bool + projGetter func(app *appv1.Application) (*appv1.AppProject, error) + namespace string + lastUpdated time.Time } func NewClusterInfoUpdater( @@ -35,19 +46,21 @@ func NewClusterInfoUpdater( db db.ArgoDB, appLister v1alpha1.ApplicationNamespaceLister, cache *appstatecache.Cache, - clusterFilter func(cluster *appv1.Cluster) bool) *clusterInfoUpdater { + clusterFilter func(cluster *appv1.Cluster) bool, + projGetter func(app *appv1.Application) (*appv1.AppProject, error), + namespace string) *clusterInfoUpdater { - return &clusterInfoUpdater{infoSource, db, appLister, cache, clusterFilter} + return &clusterInfoUpdater{infoSource, db, appLister, cache, clusterFilter, projGetter, namespace, time.Time{}} } func (c *clusterInfoUpdater) Run(ctx context.Context) { c.updateClusters() - ticker := time.NewTicker(secretUpdateInterval) + ticker := time.NewTicker(clusterInfoTimeout) for { select { case <-ctx.Done(): ticker.Stop() - break + return case <-ticker.C: c.updateClusters() } @@ -55,15 +68,26 @@ func (c *clusterInfoUpdater) Run(ctx context.Context) { } func (c *clusterInfoUpdater) updateClusters() { + if time.Since(c.lastUpdated) < clusterInfoTimeout { + return + } + + ctx, cancel := context.WithTimeout(context.Background(), clusterInfoTimeout) + defer func() { + cancel() + c.lastUpdated = time.Now() + }() + infoByServer := make(map[string]*cache.ClusterInfo) clustersInfo := c.infoSource.GetClustersInfo() for i := range clustersInfo { info := clustersInfo[i] infoByServer[info.Server] = &info } - clusters, 
err := c.db.ListClusters(context.Background()) + clusters, err := c.db.ListClusters(ctx) if err != nil { log.Warnf("Failed to save clusters info: %v", err) + return } var clustersFiltered []appv1.Cluster if c.clusterFilter == nil { @@ -77,7 +101,7 @@ func (c *clusterInfoUpdater) updateClusters() { } _ = kube.RunAllAsync(len(clustersFiltered), func(i int) error { cluster := clustersFiltered[i] - if err := c.updateClusterInfo(cluster, infoByServer[cluster.Server]); err != nil { + if err := c.updateClusterInfo(ctx, cluster, infoByServer[cluster.Server]); err != nil { log.Warnf("Failed to save clusters info: %v", err) } return nil @@ -85,14 +109,20 @@ func (c *clusterInfoUpdater) updateClusters() { log.Debugf("Successfully saved info of %d clusters", len(clustersFiltered)) } -func (c *clusterInfoUpdater) updateClusterInfo(cluster appv1.Cluster, info *cache.ClusterInfo) error { +func (c *clusterInfoUpdater) updateClusterInfo(ctx context.Context, cluster appv1.Cluster, info *cache.ClusterInfo) error { apps, err := c.appLister.List(labels.Everything()) if err != nil { - return err + return fmt.Errorf("error while fetching the apps list: %w", err) } var appCount int64 for _, a := range apps { - if err := argo.ValidateDestination(context.Background(), &a.Spec.Destination, c.db); err != nil { + if c.projGetter != nil { + proj, err := c.projGetter(a) + if err != nil || !proj.IsAppNamespacePermitted(a, c.namespace) { + continue + } + } + if err := argo.ValidateDestination(ctx, &a.Spec.Destination, c.db); err != nil { continue } if a.Spec.Destination.Server == cluster.Server { @@ -106,6 +136,7 @@ func (c *clusterInfoUpdater) updateClusterInfo(cluster appv1.Cluster, info *cach } if info != nil { clusterInfo.ServerVersion = info.K8SVersion + clusterInfo.APIVersions = argo.APIResourcesToStrings(info.APIResources, true) if info.LastCacheSyncTime == nil { clusterInfo.ConnectionState.Status = appv1.ConnectionStatusUnknown } else if info.SyncError == nil { @@ -121,7 +152,7 @@ func (c 
*clusterInfoUpdater) updateClusterInfo(cluster appv1.Cluster, info *cach } else { clusterInfo.ConnectionState.Status = appv1.ConnectionStatusUnknown if appCount == 0 { - clusterInfo.ConnectionState.Message = "Cluster has no application and not being monitored." + clusterInfo.ConnectionState.Message = "Cluster has no applications and is not being monitored." } } diff --git a/controller/clusterinfoupdater_test.go b/controller/clusterinfoupdater_test.go index 993e63b8d6e2a..bac0bb56cbe08 100644 --- a/controller/clusterinfoupdater_test.go +++ b/controller/clusterinfoupdater_test.go @@ -6,6 +6,11 @@ import ( "testing" "time" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/argoproj/argo-cd/v2/common" + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" appsfake "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/fake" appinformers "github.com/argoproj/argo-cd/v2/pkg/client/informers/externalversions/application/v1alpha1" @@ -37,7 +42,30 @@ func TestClusterSecretUpdater(t *testing.T) { {&now, fmt.Errorf("sync failed"), v1alpha1.ConnectionStatusFailed}, } - kubeclientset := fake.NewSimpleClientset() + emptyArgoCDConfigMap := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: common.ArgoCDConfigMapName, + Namespace: fakeNamespace, + Labels: map[string]string{ + "app.kubernetes.io/part-of": "argocd", + }, + }, + Data: map[string]string{}, + } + argoCDSecret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: common.ArgoCDSecretName, + Namespace: fakeNamespace, + Labels: map[string]string{ + "app.kubernetes.io/part-of": "argocd", + }, + }, + Data: map[string][]byte{ + "admin.password": nil, + "server.secretkey": nil, + }, + } + kubeclientset := fake.NewSimpleClientset(emptyArgoCDConfigMap, argoCDSecret) appclientset := appsfake.NewSimpleClientset() appInformer := appinformers.NewApplicationInformer(appclientset, "", time.Minute, cache.Indexers{}) settingsManager := 
settings.NewSettingsManager(context.Background(), kubeclientset, fakeNamespace) @@ -58,9 +86,9 @@ func TestClusterSecretUpdater(t *testing.T) { } lister := applisters.NewApplicationLister(appInformer.GetIndexer()).Applications(fakeNamespace) - updater := NewClusterInfoUpdater(nil, argoDB, lister, appCache, nil) + updater := NewClusterInfoUpdater(nil, argoDB, lister, appCache, nil, nil, fakeNamespace) - err = updater.updateClusterInfo(*cluster, info) + err = updater.updateClusterInfo(context.Background(), *cluster, info) assert.NoError(t, err, "Invoking updateClusterInfo failed.") var clusterInfo v1alpha1.ClusterInfo diff --git a/controller/health.go b/controller/health.go new file mode 100644 index 0000000000000..b1acac8ac5b9b --- /dev/null +++ b/controller/health.go @@ -0,0 +1,86 @@ +package controller + +import ( + "fmt" + + "github.com/argoproj/gitops-engine/pkg/health" + hookutil "github.com/argoproj/gitops-engine/pkg/sync/hook" + "github.com/argoproj/gitops-engine/pkg/sync/ignore" + kubeutil "github.com/argoproj/gitops-engine/pkg/utils/kube" + log "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/argoproj/argo-cd/v2/pkg/apis/application" + appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/argoproj/argo-cd/v2/util/lua" +) + +// setApplicationHealth updates the health statuses of all resources performed in the comparison +func setApplicationHealth(resources []managedResource, statuses []appv1.ResourceStatus, resourceOverrides map[string]appv1.ResourceOverride, app *appv1.Application, persistResourceHealth bool) (*appv1.HealthStatus, error) { + var savedErr error + var errCount uint + appHealth := appv1.HealthStatus{Status: health.HealthStatusHealthy} + for i, res := range resources { + if res.Target != nil && hookutil.Skip(res.Target) { + continue + } + + if res.Live != nil && (hookutil.IsHook(res.Live) || ignore.Ignore(res.Live)) { + continue + } + + var healthStatus *health.HealthStatus + 
var err error + healthOverrides := lua.ResourceHealthOverrides(resourceOverrides) + gvk := schema.GroupVersionKind{Group: res.Group, Version: res.Version, Kind: res.Kind} + if res.Live == nil { + healthStatus = &health.HealthStatus{Status: health.HealthStatusMissing} + } else { + // App that manages itself should not affect its own health + if isSelfReferencedApp(app, kubeutil.GetObjectRef(res.Live)) { + continue + } + healthStatus, err = health.GetResourceHealth(res.Live, healthOverrides) + if err != nil && savedErr == nil { + errCount++ + savedErr = fmt.Errorf("failed to get resource health for %q with name %q in namespace %q: %w", res.Live.GetKind(), res.Live.GetName(), res.Live.GetNamespace(), err) + // also log so we don't lose the message + log.WithField("application", app.QualifiedName()).Warn(savedErr) + } + } + + if healthStatus == nil { + continue + } + + if persistResourceHealth { + resHealth := appv1.HealthStatus{Status: healthStatus.Status, Message: healthStatus.Message} + statuses[i].Health = &resHealth + } else { + statuses[i].Health = nil + } + + // If health status is missing but resource has no built-in/custom health check then it should not affect parent app health + if _, hasOverride := healthOverrides[lua.GetConfigMapKey(gvk)]; healthStatus.Status == health.HealthStatusMissing && !hasOverride && health.GetHealthCheckFunc(gvk) == nil { + continue + } + + // Missing or Unknown health status of child Argo CD app should not affect parent + if res.Kind == application.ApplicationKind && res.Group == application.Group && (healthStatus.Status == health.HealthStatusMissing || healthStatus.Status == health.HealthStatusUnknown) { + continue + } + + if health.IsWorse(appHealth.Status, healthStatus.Status) { + appHealth.Status = healthStatus.Status + } + } + if persistResourceHealth { + app.Status.ResourceHealthSource = appv1.ResourceHealthLocationInline + } else { + app.Status.ResourceHealthSource = appv1.ResourceHealthLocationAppTree + } + if savedErr != nil 
&& errCount > 1 { + savedErr = fmt.Errorf("see application-controller logs for %d other errors; most recent error was: %w", errCount-1, savedErr) + } + return &appHealth, savedErr +} diff --git a/controller/health_test.go b/controller/health_test.go new file mode 100644 index 0000000000000..caa53b446f733 --- /dev/null +++ b/controller/health_test.go @@ -0,0 +1,179 @@ +package controller + +import ( + "os" + "testing" + + "github.com/argoproj/gitops-engine/pkg/health" + synccommon "github.com/argoproj/gitops-engine/pkg/sync/common" + "github.com/argoproj/gitops-engine/pkg/utils/kube" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/yaml" + + "github.com/argoproj/argo-cd/v2/pkg/apis/application" + appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/argoproj/argo-cd/v2/util/lua" +) + +var app = &appv1.Application{} + +func initStatuses(resources []managedResource) []appv1.ResourceStatus { + statuses := make([]appv1.ResourceStatus, len(resources)) + for i := range resources { + statuses[i] = appv1.ResourceStatus{Group: resources[i].Group, Kind: resources[i].Kind, Version: resources[i].Version} + } + return statuses +} + +func resourceFromFile(filePath string) unstructured.Unstructured { + yamlBytes, err := os.ReadFile(filePath) + if err != nil { + panic(err) + } + var res unstructured.Unstructured + err = yaml.Unmarshal(yamlBytes, &res) + if err != nil { + panic(err) + } + return res +} + +func TestSetApplicationHealth(t *testing.T) { + failedJob := resourceFromFile("./testdata/job-failed.yaml") + runningPod := resourceFromFile("./testdata/pod-running-restart-always.yaml") + + resources := []managedResource{{ + Group: "", Version: "v1", Kind: "Pod", Live: &runningPod}, { + Group: "batch", Version: "v1", Kind: "Job", Live: &failedJob, + }} + resourceStatuses := initStatuses(resources) + + 
healthStatus, err := setApplicationHealth(resources, resourceStatuses, lua.ResourceHealthOverrides{}, app, true) + assert.NoError(t, err) + assert.Equal(t, health.HealthStatusDegraded, healthStatus.Status) + + assert.Equal(t, resourceStatuses[0].Health.Status, health.HealthStatusHealthy) + assert.Equal(t, resourceStatuses[1].Health.Status, health.HealthStatusDegraded) + + // now mark the job as a hook and retry. it should ignore the hook and consider the app healthy + failedJob.SetAnnotations(map[string]string{synccommon.AnnotationKeyHook: "PreSync"}) + healthStatus, err = setApplicationHealth(resources, resourceStatuses, nil, app, true) + assert.NoError(t, err) + assert.Equal(t, health.HealthStatusHealthy, healthStatus.Status) +} + +func TestSetApplicationHealth_ResourceHealthNotPersisted(t *testing.T) { + failedJob := resourceFromFile("./testdata/job-failed.yaml") + + resources := []managedResource{{ + Group: "batch", Version: "v1", Kind: "Job", Live: &failedJob, + }} + resourceStatuses := initStatuses(resources) + + healthStatus, err := setApplicationHealth(resources, resourceStatuses, lua.ResourceHealthOverrides{}, app, false) + assert.NoError(t, err) + assert.Equal(t, health.HealthStatusDegraded, healthStatus.Status) + + assert.Nil(t, resourceStatuses[0].Health) +} + +func TestSetApplicationHealth_MissingResource(t *testing.T) { + pod := resourceFromFile("./testdata/pod-running-restart-always.yaml") + + resources := []managedResource{{ + Group: "", Version: "v1", Kind: "Pod", Target: &pod}, {}} + resourceStatuses := initStatuses(resources) + + healthStatus, err := setApplicationHealth(resources, resourceStatuses, lua.ResourceHealthOverrides{}, app, true) + assert.NoError(t, err) + assert.Equal(t, health.HealthStatusMissing, healthStatus.Status) +} + +func TestSetApplicationHealth_MissingResourceNoBuiltHealthCheck(t *testing.T) { + cm := resourceFromFile("./testdata/configmap.yaml") + + resources := []managedResource{{ + Group: "", Version: "v1", Kind: 
"ConfigMap", Target: &cm}} + resourceStatuses := initStatuses(resources) + + t.Run("NoOverride", func(t *testing.T) { + healthStatus, err := setApplicationHealth(resources, resourceStatuses, lua.ResourceHealthOverrides{}, app, true) + assert.NoError(t, err) + assert.Equal(t, health.HealthStatusHealthy, healthStatus.Status) + assert.Equal(t, resourceStatuses[0].Health.Status, health.HealthStatusMissing) + }) + + t.Run("HasOverride", func(t *testing.T) { + healthStatus, err := setApplicationHealth(resources, resourceStatuses, lua.ResourceHealthOverrides{ + lua.GetConfigMapKey(schema.GroupVersionKind{Version: "v1", Kind: "ConfigMap"}): appv1.ResourceOverride{ + HealthLua: "some health check", + }, + }, app, true) + assert.NoError(t, err) + assert.Equal(t, health.HealthStatusMissing, healthStatus.Status) + }) +} + +func newAppLiveObj(status health.HealthStatusCode) *unstructured.Unstructured { + app := appv1.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + TypeMeta: metav1.TypeMeta{ + APIVersion: "argoproj.io/v1alpha1", + Kind: application.ApplicationKind, + }, + Status: appv1.ApplicationStatus{ + Health: appv1.HealthStatus{ + Status: status, + }, + }, + } + + return kube.MustToUnstructured(&app) +} + +func TestChildAppHealth(t *testing.T) { + overrides := lua.ResourceHealthOverrides{ + lua.GetConfigMapKey(appv1.ApplicationSchemaGroupVersionKind): appv1.ResourceOverride{ + HealthLua: ` +hs = {} +hs.status = "Progressing" +hs.message = "" +if obj.status ~= nil then + if obj.status.health ~= nil then + hs.status = obj.status.health.status + if obj.status.health.message ~= nil then + hs.message = obj.status.health.message + end + end +end +return hs`, + }, + } + + t.Run("ChildAppDegraded", func(t *testing.T) { + degradedApp := newAppLiveObj(health.HealthStatusDegraded) + resources := []managedResource{{ + Group: application.Group, Version: "v1alpha1", Kind: application.ApplicationKind, Live: degradedApp}, {}} + resourceStatuses := 
initStatuses(resources) + + healthStatus, err := setApplicationHealth(resources, resourceStatuses, overrides, app, true) + assert.NoError(t, err) + assert.Equal(t, health.HealthStatusDegraded, healthStatus.Status) + }) + + t.Run("ChildAppMissing", func(t *testing.T) { + degradedApp := newAppLiveObj(health.HealthStatusMissing) + resources := []managedResource{{ + Group: application.Group, Version: "v1alpha1", Kind: application.ApplicationKind, Live: degradedApp}, {}} + resourceStatuses := initStatuses(resources) + + healthStatus, err := setApplicationHealth(resources, resourceStatuses, overrides, app, true) + assert.NoError(t, err) + assert.Equal(t, health.HealthStatusHealthy, healthStatus.Status) + }) +} diff --git a/controller/metrics/clustercollector.go b/controller/metrics/clustercollector.go index f5b5b45b69a22..bebbfef62d807 100644 --- a/controller/metrics/clustercollector.go +++ b/controller/metrics/clustercollector.go @@ -41,6 +41,12 @@ var ( descClusterDefaultLabels, nil, ) + descClusterConnectionStatus = prometheus.NewDesc( + "argocd_cluster_connection_status", + "The k8s cluster current connection status.", + append(descClusterDefaultLabels, "k8s_version"), + nil, + ) ) type HasClustersInfo interface { @@ -77,9 +83,11 @@ func (c *clusterCollector) Describe(ch chan<- *prometheus.Desc) { ch <- descClusterCacheResources ch <- descClusterAPIs ch <- descClusterCacheAgeSeconds + ch <- descClusterConnectionStatus } func (c *clusterCollector) Collect(ch chan<- prometheus.Metric) { + now := time.Now() for _, c := range c.info { defaultValues := []string{c.Server} @@ -91,5 +99,6 @@ func (c *clusterCollector) Collect(ch chan<- prometheus.Metric) { cacheAgeSeconds = int(now.Sub(*c.LastCacheSyncTime).Seconds()) } ch <- prometheus.MustNewConstMetric(descClusterCacheAgeSeconds, prometheus.GaugeValue, float64(cacheAgeSeconds), defaultValues...) 
+ ch <- prometheus.MustNewConstMetric(descClusterConnectionStatus, prometheus.GaugeValue, boolFloat64(c.SyncError == nil), append(defaultValues, c.K8SVersion)...) } } diff --git a/controller/metrics/clustercollector_test.go b/controller/metrics/clustercollector_test.go new file mode 100644 index 0000000000000..cbe124ca29f9f --- /dev/null +++ b/controller/metrics/clustercollector_test.go @@ -0,0 +1,104 @@ +package metrics + +import ( + "errors" + "testing" + + gitopsCache "github.com/argoproj/gitops-engine/pkg/cache" +) + +func TestMetricClusterConnectivity(t *testing.T) { + type testCases struct { + testCombination + skip bool + description string + metricLabels []string + clustersInfo []gitopsCache.ClusterInfo + } + cases := []testCases{ + { + description: "metric will have value 1 if connected with the cluster", + skip: false, + metricLabels: []string{"non-existing"}, + testCombination: testCombination{ + applications: []string{fakeApp}, + responseContains: ` +# TYPE argocd_cluster_connection_status gauge +argocd_cluster_connection_status{k8s_version="1.21",server="server1"} 1 +`, + }, + clustersInfo: []gitopsCache.ClusterInfo{ + { + Server: "server1", + K8SVersion: "1.21", + SyncError: nil, + }, + }, + }, + { + description: "metric will have value 0 if not connected with the cluster", + skip: false, + metricLabels: []string{"non-existing"}, + testCombination: testCombination{ + applications: []string{fakeApp}, + responseContains: ` +# TYPE argocd_cluster_connection_status gauge +argocd_cluster_connection_status{k8s_version="1.21",server="server1"} 0 +`, + }, + clustersInfo: []gitopsCache.ClusterInfo{ + { + Server: "server1", + K8SVersion: "1.21", + SyncError: errors.New("error connecting with cluster"), + }, + }, + }, + { + description: "will have one metric per cluster", + skip: false, + metricLabels: []string{"non-existing"}, + testCombination: testCombination{ + applications: []string{fakeApp}, + responseContains: ` +# TYPE argocd_cluster_connection_status 
gauge +argocd_cluster_connection_status{k8s_version="1.21",server="server1"} 1 +argocd_cluster_connection_status{k8s_version="1.21",server="server2"} 1 +argocd_cluster_connection_status{k8s_version="1.21",server="server3"} 1 +`, + }, + clustersInfo: []gitopsCache.ClusterInfo{ + { + Server: "server1", + K8SVersion: "1.21", + SyncError: nil, + }, + { + Server: "server2", + K8SVersion: "1.21", + SyncError: nil, + }, + { + Server: "server3", + K8SVersion: "1.21", + SyncError: nil, + }, + }, + }, + } + + for _, c := range cases { + c := c + t.Run(c.description, func(t *testing.T) { + if !c.skip { + cfg := TestMetricServerConfig{ + FakeAppYAMLs: c.applications, + ExpectedResponse: c.responseContains, + AppLabels: c.metricLabels, + ClustersInfo: c.clustersInfo, + } + runTest(t, cfg) + } + }) + } +} diff --git a/controller/metrics/metrics.go b/controller/metrics/metrics.go index d6d95de5bed0d..e4ef09552c09d 100644 --- a/controller/metrics/metrics.go +++ b/controller/metrics/metrics.go @@ -6,20 +6,23 @@ import ( "fmt" "net/http" "os" + "regexp" "strconv" "time" "github.com/argoproj/gitops-engine/pkg/health" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/robfig/cron" + "github.com/robfig/cron/v3" log "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/labels" + "github.com/argoproj/argo-cd/v2/common" argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" applister "github.com/argoproj/argo-cd/v2/pkg/client/listers/application/v1alpha1" "github.com/argoproj/argo-cd/v2/util/git" "github.com/argoproj/argo-cd/v2/util/healthz" + "github.com/argoproj/argo-cd/v2/util/profile" ) type MetricsServer struct { @@ -49,10 +52,12 @@ const ( var ( descAppDefaultLabels = []string{"namespace", "name", "project"} + descAppLabels *prometheus.Desc + descAppInfo = prometheus.NewDesc( "argocd_app_info", "Information about application.", - append(descAppDefaultLabels, "repo", "dest_server", 
"dest_namespace", "sync_status", "health_status", "operation"), + append(descAppDefaultLabels, "autosync_enabled", "repo", "dest_server", "dest_namespace", "sync_status", "health_status", "operation"), nil, ) // DEPRECATED @@ -62,14 +67,14 @@ var ( descAppDefaultLabels, nil, ) - // DEPRECATED: superceded by sync_status label in argocd_app_info + // DEPRECATED: superseded by sync_status label in argocd_app_info descAppSyncStatusCode = prometheus.NewDesc( "argocd_app_sync_status", "The application current sync status.", append(descAppDefaultLabels, "sync_status"), nil, ) - // DEPRECATED: superceded by health_status label in argocd_app_info + // DEPRECATED: superseded by health_status label in argocd_app_info descAppHealthStatus = prometheus.NewDesc( "argocd_app_health_status", "The application current health status.", @@ -121,7 +126,7 @@ var ( redisRequestCounter = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "argocd_redis_request_total", - Help: "Number of kubernetes requests executed during application reconciliation.", + Help: "Number of redis requests executed during application reconciliation.", }, []string{"hostname", "initiator", "failed"}, ) @@ -137,19 +142,32 @@ var ( ) // NewMetricsServer returns a new prometheus server which collects application metrics -func NewMetricsServer(addr string, appLister applister.ApplicationLister, appFilter func(obj interface{}) bool, healthCheck func(r *http.Request) error) (*MetricsServer, error) { +func NewMetricsServer(addr string, appLister applister.ApplicationLister, appFilter func(obj interface{}) bool, healthCheck func(r *http.Request) error, appLabels []string) (*MetricsServer, error) { hostname, err := os.Hostname() if err != nil { return nil, err } + + if len(appLabels) > 0 { + normalizedLabels := normalizeLabels("label", appLabels) + descAppLabels = prometheus.NewDesc( + "argocd_app_labels", + "Argo Application labels converted to Prometheus labels", + append(descAppDefaultLabels, normalizedLabels...), 
+ nil, + ) + } + mux := http.NewServeMux() - registry := NewAppRegistry(appLister, appFilter) + registry := NewAppRegistry(appLister, appFilter, appLabels) + registry.MustRegister(depth, adds, latency, workDuration, unfinished, longestRunningProcessor, retries) mux.Handle(MetricsPath, promhttp.HandlerFor(prometheus.Gatherers{ // contains app controller specific metrics registry, // contains process, golang and controller workqueues metrics prometheus.DefaultGatherer, }, promhttp.HandlerOpts{})) + profile.RegisterProfiler(mux) healthz.ServeHealthCheck(mux, healthCheck) registry.MustRegister(syncCounter) @@ -176,10 +194,27 @@ func NewMetricsServer(addr string, appLister applister.ApplicationLister, appFil redisRequestCounter: redisRequestCounter, redisRequestHistogram: redisRequestHistogram, hostname: hostname, - cron: cron.New(), + // This cron is used to expire the metrics cache. + // Currently clearing the metrics cache is logging and deleting from the map + // so there is no possibility of panic, but we will add a chain to keep robfig/cron v1 behavior. + cron: cron.New(cron.WithChain(cron.Recover(cron.PrintfLogger(log.StandardLogger())))), }, nil } +// Prometheus invalid labels, more info: https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels. 
+var invalidPromLabelChars = regexp.MustCompile(`[^a-zA-Z0-9_]`) + +func normalizeLabels(prefix string, appLabels []string) []string { + results := []string{} + for _, label := range appLabels { + //prometheus labels don't accept dash in their name + curr := invalidPromLabelChars.ReplaceAllString(label, "_") + result := fmt.Sprintf("%s_%s", prefix, curr) + results = append(results, result) + } + return results +} + func (m *MetricsServer) RegisterClustersInfoSource(ctx context.Context, source HasClustersInfo) { collector := &clusterCollector{infoSource: source} go collector.Run(ctx) @@ -226,12 +261,12 @@ func (m *MetricsServer) IncKubernetesRequest(app *argoappv1.Application, server, } func (m *MetricsServer) IncRedisRequest(failed bool) { - m.redisRequestCounter.WithLabelValues(m.hostname, "argocd-application-controller", strconv.FormatBool(failed)).Inc() + m.redisRequestCounter.WithLabelValues(m.hostname, common.ApplicationController, strconv.FormatBool(failed)).Inc() } // ObserveRedisRequestDuration observes redis request duration func (m *MetricsServer) ObserveRedisRequestDuration(duration time.Duration) { - m.redisRequestHistogram.WithLabelValues(m.hostname, "argocd-application-controller").Observe(duration.Seconds()) + m.redisRequestHistogram.WithLabelValues(m.hostname, common.ApplicationController).Observe(duration.Seconds()) } // IncReconcile increments the reconcile counter for an application @@ -250,7 +285,7 @@ func (m *MetricsServer) SetExpiration(cacheExpiration time.Duration) error { return errors.New("Expiration is already set") } - err := m.cron.AddFunc(fmt.Sprintf("@every %s", cacheExpiration), func() { + _, err := m.cron.AddFunc(fmt.Sprintf("@every %s", cacheExpiration), func() { log.Infof("Reset Prometheus metrics based on existing expiration '%v'", cacheExpiration) m.syncCounter.Reset() m.kubectlExecCounter.Reset() @@ -272,25 +307,30 @@ func (m *MetricsServer) SetExpiration(cacheExpiration time.Duration) error { type appCollector struct { store 
applister.ApplicationLister appFilter func(obj interface{}) bool + appLabels []string } // NewAppCollector returns a prometheus collector for application metrics -func NewAppCollector(appLister applister.ApplicationLister, appFilter func(obj interface{}) bool) prometheus.Collector { +func NewAppCollector(appLister applister.ApplicationLister, appFilter func(obj interface{}) bool, appLabels []string) prometheus.Collector { return &appCollector{ store: appLister, appFilter: appFilter, + appLabels: appLabels, } } // NewAppRegistry creates a new prometheus registry that collects applications -func NewAppRegistry(appLister applister.ApplicationLister, appFilter func(obj interface{}) bool) *prometheus.Registry { +func NewAppRegistry(appLister applister.ApplicationLister, appFilter func(obj interface{}) bool, appLabels []string) *prometheus.Registry { registry := prometheus.NewRegistry() - registry.MustRegister(NewAppCollector(appLister, appFilter)) + registry.MustRegister(NewAppCollector(appLister, appFilter, appLabels)) return registry } // Describe implements the prometheus.Collector interface func (c *appCollector) Describe(ch chan<- *prometheus.Desc) { + if len(c.appLabels) > 0 { + ch <- descAppLabels + } ch <- descAppInfo ch <- descAppSyncStatusCode ch <- descAppHealthStatus @@ -305,7 +345,7 @@ func (c *appCollector) Collect(ch chan<- prometheus.Metric) { } for _, app := range apps { if c.appFilter(app) { - collectApps(ch, app) + c.collectApps(ch, app) } } } @@ -317,7 +357,7 @@ func boolFloat64(b bool) float64 { return 0 } -func collectApps(ch chan<- prometheus.Metric, app *argoappv1.Application) { +func (c *appCollector) collectApps(ch chan<- prometheus.Metric, app *argoappv1.Application) { addConstMetric := func(desc *prometheus.Desc, t prometheus.ValueType, v float64, lv ...string) { project := app.Spec.GetProject() lv = append([]string{app.Namespace, app.Name, project}, lv...) 
@@ -342,7 +382,18 @@ func collectApps(ch chan<- prometheus.Metric, app *argoappv1.Application) { healthStatus = health.HealthStatusUnknown } - addGauge(descAppInfo, 1, git.NormalizeGitURL(app.Spec.Source.RepoURL), app.Spec.Destination.Server, app.Spec.Destination.Namespace, string(syncStatus), string(healthStatus), operation) + autoSyncEnabled := app.Spec.SyncPolicy != nil && app.Spec.SyncPolicy.Automated != nil + + addGauge(descAppInfo, 1, strconv.FormatBool(autoSyncEnabled), git.NormalizeGitURL(app.Spec.GetSource().RepoURL), app.Spec.Destination.Server, app.Spec.Destination.Namespace, string(syncStatus), string(healthStatus), operation) + + if len(c.appLabels) > 0 { + labelValues := []string{} + for _, desiredLabel := range c.appLabels { + value := app.GetLabels()[desiredLabel] + labelValues = append(labelValues, value) + } + addGauge(descAppLabels, 1, labelValues...) + } // Deprecated controller metrics if os.Getenv(EnvVarLegacyControllerMetrics) == "true" { diff --git a/controller/metrics/metrics_test.go b/controller/metrics/metrics_test.go index f526f8098fa2f..61a99a46492a2 100644 --- a/controller/metrics/metrics_test.go +++ b/controller/metrics/metrics_test.go @@ -5,17 +5,17 @@ import ( "log" "net/http" "net/http/httptest" - "os" "strings" "testing" "time" + gitopsCache "github.com/argoproj/gitops-engine/pkg/cache" "github.com/argoproj/gitops-engine/pkg/sync/common" - "github.com/ghodss/yaml" "github.com/stretchr/testify/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/cache" + "sigs.k8s.io/yaml" argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/fake" @@ -29,6 +29,10 @@ kind: Application metadata: name: my-app namespace: argocd + labels: + team-name: my-team + team-bu: bu-id + argoproj.io/cluster: test-cluster spec: destination: namespace: dummy-namespace @@ -50,6 +54,10 @@ kind: Application 
metadata: name: my-app-2 namespace: argocd + labels: + team-name: my-team + team-bu: bu-id + argoproj.io/cluster: test-cluster spec: destination: namespace: dummy-namespace @@ -58,6 +66,10 @@ spec: source: path: some/path repoURL: https://github.com/argoproj/argocd-example-apps.git + syncPolicy: + automated: + selfHeal: false + prune: true status: sync: status: Synced @@ -77,6 +89,10 @@ metadata: name: my-app-3 namespace: argocd deletionTimestamp: "2020-03-16T09:17:45Z" + labels: + team-name: my-team + team-bu: bu-id + argoproj.io/cluster: test-cluster spec: destination: namespace: dummy-namespace @@ -85,6 +101,10 @@ spec: source: path: some/path repoURL: https://github.com/argoproj/argocd-example-apps.git + syncPolicy: + automated: + selfHeal: true + prune: false status: sync: status: OutOfSync @@ -138,7 +158,7 @@ func newFakeLister(fakeAppYAMLs ...string) (context.CancelFunc, applister.Applic fakeApps = append(fakeApps, a) } appClientset := appclientset.NewSimpleClientset(fakeApps...) - factory := appinformer.NewFilteredSharedInformerFactory(appClientset, 0, "argocd", func(options *metav1.ListOptions) {}) + factory := appinformer.NewSharedInformerFactoryWithOptions(appClientset, 0, appinformer.WithNamespace("argocd"), appinformer.WithTweakListOptions(func(options *metav1.ListOptions) {})) appInformer := factory.Argoproj().V1alpha1().Applications().Informer() go appInformer.Run(ctx.Done()) if !cache.WaitForCacheSync(ctx.Done(), appInformer.HasSynced) { @@ -148,55 +168,138 @@ func newFakeLister(fakeAppYAMLs ...string) (context.CancelFunc, applister.Applic } func testApp(t *testing.T, fakeAppYAMLs []string, expectedResponse string) { - cancel, appLister := newFakeLister(fakeAppYAMLs...) 
+ t.Helper() + testMetricServer(t, fakeAppYAMLs, expectedResponse, []string{}) +} + +type fakeClusterInfo struct { + clustersInfo []gitopsCache.ClusterInfo +} + +func (f *fakeClusterInfo) GetClustersInfo() []gitopsCache.ClusterInfo { + return f.clustersInfo +} + +type TestMetricServerConfig struct { + FakeAppYAMLs []string + ExpectedResponse string + AppLabels []string + ClustersInfo []gitopsCache.ClusterInfo +} + +func testMetricServer(t *testing.T, fakeAppYAMLs []string, expectedResponse string, appLabels []string) { + t.Helper() + cfg := TestMetricServerConfig{ + FakeAppYAMLs: fakeAppYAMLs, + ExpectedResponse: expectedResponse, + AppLabels: appLabels, + ClustersInfo: []gitopsCache.ClusterInfo{}, + } + runTest(t, cfg) +} + +func runTest(t *testing.T, cfg TestMetricServerConfig) { + t.Helper() + cancel, appLister := newFakeLister(cfg.FakeAppYAMLs...) defer cancel() - metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck) + metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, cfg.AppLabels) assert.NoError(t, err) - req, err := http.NewRequest("GET", "/metrics", nil) + + if len(cfg.ClustersInfo) > 0 { + ci := &fakeClusterInfo{clustersInfo: cfg.ClustersInfo} + collector := &clusterCollector{ + infoSource: ci, + info: ci.GetClustersInfo(), + } + metricsServ.registry.MustRegister(collector) + } + + req, err := http.NewRequest(http.MethodGet, "/metrics", nil) assert.NoError(t, err) rr := httptest.NewRecorder() metricsServ.Handler.ServeHTTP(rr, req) assert.Equal(t, rr.Code, http.StatusOK) body := rr.Body.String() - log.Println(body) - assertMetricsPrinted(t, expectedResponse, body) + assertMetricsPrinted(t, cfg.ExpectedResponse, body) } type testCombination struct { applications []string - expectedResponse string + responseContains string } func TestMetrics(t *testing.T) { combinations := []testCombination{ { applications: []string{fakeApp, fakeApp2, fakeApp3}, - expectedResponse: ` + 
responseContains: ` # HELP argocd_app_info Information about application. # TYPE argocd_app_info gauge -argocd_app_info{dest_namespace="dummy-namespace",dest_server="https://localhost:6443",health_status="Degraded",name="my-app-3",namespace="argocd",operation="delete",project="important-project",repo="https://github.com/argoproj/argocd-example-apps",sync_status="OutOfSync"} 1 -argocd_app_info{dest_namespace="dummy-namespace",dest_server="https://localhost:6443",health_status="Healthy",name="my-app",namespace="argocd",operation="",project="important-project",repo="https://github.com/argoproj/argocd-example-apps",sync_status="Synced"} 1 -argocd_app_info{dest_namespace="dummy-namespace",dest_server="https://localhost:6443",health_status="Healthy",name="my-app-2",namespace="argocd",operation="sync",project="important-project",repo="https://github.com/argoproj/argocd-example-apps",sync_status="Synced"} 1 +argocd_app_info{autosync_enabled="true",dest_namespace="dummy-namespace",dest_server="https://localhost:6443",health_status="Degraded",name="my-app-3",namespace="argocd",operation="delete",project="important-project",repo="https://github.com/argoproj/argocd-example-apps",sync_status="OutOfSync"} 1 +argocd_app_info{autosync_enabled="false",dest_namespace="dummy-namespace",dest_server="https://localhost:6443",health_status="Healthy",name="my-app",namespace="argocd",operation="",project="important-project",repo="https://github.com/argoproj/argocd-example-apps",sync_status="Synced"} 1 +argocd_app_info{autosync_enabled="true",dest_namespace="dummy-namespace",dest_server="https://localhost:6443",health_status="Healthy",name="my-app-2",namespace="argocd",operation="sync",project="important-project",repo="https://github.com/argoproj/argocd-example-apps",sync_status="Synced"} 1 `, }, { applications: []string{fakeDefaultApp}, - expectedResponse: ` + responseContains: ` # HELP argocd_app_info Information about application. 
# TYPE argocd_app_info gauge -argocd_app_info{dest_namespace="dummy-namespace",dest_server="https://localhost:6443",health_status="Healthy",name="my-app",namespace="argocd",operation="",project="default",repo="https://github.com/argoproj/argocd-example-apps",sync_status="Synced"} 1 +argocd_app_info{autosync_enabled="false",dest_namespace="dummy-namespace",dest_server="https://localhost:6443",health_status="Healthy",name="my-app",namespace="argocd",operation="",project="default",repo="https://github.com/argoproj/argocd-example-apps",sync_status="Synced"} 1 `, }, } for _, combination := range combinations { - testApp(t, combination.applications, combination.expectedResponse) + testApp(t, combination.applications, combination.responseContains) + } +} + +func TestMetricLabels(t *testing.T) { + type testCases struct { + testCombination + description string + metricLabels []string + } + cases := []testCases{ + { + description: "will return the labels metrics successfully", + metricLabels: []string{"team-name", "team-bu", "argoproj.io/cluster"}, + testCombination: testCombination{ + applications: []string{fakeApp, fakeApp2, fakeApp3}, + responseContains: ` +# TYPE argocd_app_labels gauge +argocd_app_labels{label_argoproj_io_cluster="test-cluster",label_team_bu="bu-id",label_team_name="my-team",name="my-app",namespace="argocd",project="important-project"} 1 +argocd_app_labels{label_argoproj_io_cluster="test-cluster",label_team_bu="bu-id",label_team_name="my-team",name="my-app-2",namespace="argocd",project="important-project"} 1 +argocd_app_labels{label_argoproj_io_cluster="test-cluster",label_team_bu="bu-id",label_team_name="my-team",name="my-app-3",namespace="argocd",project="important-project"} 1 +`, + }, + }, + { + description: "metric will have empty label value if not present in the application", + metricLabels: []string{"non-existing"}, + testCombination: testCombination{ + applications: []string{fakeApp, fakeApp2, fakeApp3}, + responseContains: ` +# TYPE 
argocd_app_labels gauge +argocd_app_labels{label_non_existing="",name="my-app",namespace="argocd",project="important-project"} 1 +argocd_app_labels{label_non_existing="",name="my-app-2",namespace="argocd",project="important-project"} 1 +argocd_app_labels{label_non_existing="",name="my-app-3",namespace="argocd",project="important-project"} 1 +`, + }, + }, + } + + for _, c := range cases { + c := c + t.Run(c.description, func(t *testing.T) { + testMetricServer(t, c.applications, c.responseContains, c.metricLabels) + }) } } func TestLegacyMetrics(t *testing.T) { - os.Setenv(EnvVarLegacyControllerMetrics, "true") - defer os.Unsetenv(EnvVarLegacyControllerMetrics) + t.Setenv(EnvVarLegacyControllerMetrics, "true") expectedResponse := ` # HELP argocd_app_created_time Creation time in unix timestamp for an application. @@ -222,7 +325,7 @@ argocd_app_sync_status{name="my-app",namespace="argocd",project="important-proje func TestMetricsSyncCounter(t *testing.T) { cancel, appLister := newFakeLister() defer cancel() - metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck) + metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{}) assert.NoError(t, err) appSyncTotal := ` @@ -240,7 +343,7 @@ argocd_app_sync_total{dest_server="https://localhost:6443",name="my-app",namespa metricsServ.IncSync(fakeApp, &argoappv1.OperationState{Phase: common.OperationSucceeded}) metricsServ.IncSync(fakeApp, &argoappv1.OperationState{Phase: common.OperationSucceeded}) - req, err := http.NewRequest("GET", "/metrics", nil) + req, err := http.NewRequest(http.MethodGet, "/metrics", nil) assert.NoError(t, err) rr := httptest.NewRecorder() metricsServ.Handler.ServeHTTP(rr, req) @@ -252,11 +355,12 @@ argocd_app_sync_total{dest_server="https://localhost:6443",name="my-app",namespa // assertMetricsPrinted asserts every line in the expected lines appears in the body func assertMetricsPrinted(t *testing.T, expectedLines, 
body string) { + t.Helper() for _, line := range strings.Split(expectedLines, "\n") { if line == "" { continue } - assert.Contains(t, body, line) + assert.Contains(t, body, line, "expected metrics mismatch") } } @@ -273,7 +377,7 @@ func assertMetricsNotPrinted(t *testing.T, expectedLines, body string) { func TestReconcileMetrics(t *testing.T) { cancel, appLister := newFakeLister() defer cancel() - metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck) + metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{}) assert.NoError(t, err) appReconcileMetrics := ` @@ -293,7 +397,7 @@ argocd_app_reconcile_count{dest_server="https://localhost:6443",namespace="argoc fakeApp := newFakeApp(fakeApp) metricsServ.IncReconcile(fakeApp, 5*time.Second) - req, err := http.NewRequest("GET", "/metrics", nil) + req, err := http.NewRequest(http.MethodGet, "/metrics", nil) assert.NoError(t, err) rr := httptest.NewRecorder() metricsServ.Handler.ServeHTTP(rr, req) @@ -306,7 +410,7 @@ argocd_app_reconcile_count{dest_server="https://localhost:6443",namespace="argoc func TestMetricsReset(t *testing.T) { cancel, appLister := newFakeLister() defer cancel() - metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck) + metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{}) assert.NoError(t, err) appSyncTotal := ` @@ -317,7 +421,7 @@ argocd_app_sync_total{dest_server="https://localhost:6443",name="my-app",namespa argocd_app_sync_total{dest_server="https://localhost:6443",name="my-app",namespace="argocd",phase="Succeeded",project="important-project"} 2 ` - req, err := http.NewRequest("GET", "/metrics", nil) + req, err := http.NewRequest(http.MethodGet, "/metrics", nil) assert.NoError(t, err) rr := httptest.NewRecorder() metricsServ.Handler.ServeHTTP(rr, req) @@ -328,7 +432,7 @@ 
argocd_app_sync_total{dest_server="https://localhost:6443",name="my-app",namespa err = metricsServ.SetExpiration(time.Second) assert.NoError(t, err) time.Sleep(2 * time.Second) - req, err = http.NewRequest("GET", "/metrics", nil) + req, err = http.NewRequest(http.MethodGet, "/metrics", nil) assert.NoError(t, err) rr = httptest.NewRecorder() metricsServ.Handler.ServeHTTP(rr, req) diff --git a/controller/metrics/workqueue.go b/controller/metrics/workqueue.go new file mode 100644 index 0000000000000..2ef10685ee47d --- /dev/null +++ b/controller/metrics/workqueue.go @@ -0,0 +1,101 @@ +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "k8s.io/client-go/util/workqueue" +) + +const ( + WorkQueueSubsystem = "workqueue" + DepthKey = "depth" + AddsKey = "adds_total" + QueueLatencyKey = "queue_duration_seconds" + WorkDurationKey = "work_duration_seconds" + UnfinishedWorkKey = "unfinished_work_seconds" + LongestRunningProcessorKey = "longest_running_processor_seconds" + RetriesKey = "retries_total" +) + +var ( + depth = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: DepthKey, + Help: "Current depth of workqueue", + }, []string{"name"}) + + adds = prometheus.NewCounterVec(prometheus.CounterOpts{ + Subsystem: WorkQueueSubsystem, + Name: AddsKey, + Help: "Total number of adds handled by workqueue", + }, []string{"name"}) + + latency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Subsystem: WorkQueueSubsystem, + Name: QueueLatencyKey, + Help: "How long in seconds an item stays in workqueue before being requested", + Buckets: []float64{1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 30, 60, 120, 180}, + }, []string{"name"}) + + workDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Subsystem: WorkQueueSubsystem, + Name: WorkDurationKey, + Help: "How long in seconds processing an item from workqueue takes.", + Buckets: []float64{1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 30, 60, 
120, 180}, + }, []string{"name"}) + + unfinished = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: UnfinishedWorkKey, + Help: "How many seconds of work has been done that " + + "is in progress and hasn't been observed by work_duration. Large " + + "values indicate stuck threads. One can deduce the number of stuck " + + "threads by observing the rate at which this increases.", + }, []string{"name"}) + + longestRunningProcessor = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: LongestRunningProcessorKey, + Help: "How many seconds has the longest running " + + "processor for workqueue been running.", + }, []string{"name"}) + + retries = prometheus.NewCounterVec(prometheus.CounterOpts{ + Subsystem: WorkQueueSubsystem, + Name: RetriesKey, + Help: "Total number of retries handled by workqueue", + }, []string{"name"}) +) + +func init() { + workqueue.SetProvider(workqueueMetricsProvider{}) +} + +type workqueueMetricsProvider struct{} + +func (workqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric { + return depth.WithLabelValues(name) +} + +func (workqueueMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric { + return adds.WithLabelValues(name) +} + +func (workqueueMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric { + return latency.WithLabelValues(name) +} + +func (workqueueMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric { + return workDuration.WithLabelValues(name) +} + +func (workqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { + return unfinished.WithLabelValues(name) +} + +func (workqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric { + return longestRunningProcessor.WithLabelValues(name) +} + +func (workqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric { + return 
retries.WithLabelValues(name) +} diff --git a/controller/sharding/sharding.go b/controller/sharding/sharding.go index 1c0615196bd06..526896531dbca 100644 --- a/controller/sharding/sharding.go +++ b/controller/sharding/sharding.go @@ -1,19 +1,154 @@ package sharding import ( + "context" "fmt" "hash/fnv" "os" + "sort" "strconv" "strings" + "time" + "encoding/json" + + "github.com/argoproj/argo-cd/v2/common" "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + + "github.com/argoproj/argo-cd/v2/util/db" + "github.com/argoproj/argo-cd/v2/util/env" + "github.com/argoproj/argo-cd/v2/util/settings" + log "github.com/sirupsen/logrus" + kubeerrors "k8s.io/apimachinery/pkg/api/errors" +) + +// Make it overridable for testing +var osHostnameFunction = os.Hostname + +// Make it overridable for testing +var heartbeatCurrentTime = metav1.Now + +var ( + HeartbeatDuration = env.ParseNumFromEnv(common.EnvControllerHeartbeatTime, 10, 10, 60) + HeartbeatTimeout = 3 * HeartbeatDuration ) +const ShardControllerMappingKey = "shardControllerMapping" + +type DistributionFunction func(c *v1alpha1.Cluster) int +type ClusterFilterFunction func(c *v1alpha1.Cluster) bool + +// shardApplicationControllerMapping stores the mapping of Shard Number to Application Controller in ConfigMap. +// It also stores the heartbeat of last synced time of the application controller. +type shardApplicationControllerMapping struct { + ShardNumber int + ControllerName string + HeartbeatTime metav1.Time +} + +// GetClusterFilter returns a ClusterFilterFunction which is a function taking a cluster as a parameter +// and returns wheter or not the cluster should be processed by a given shard. It calls the distributionFunction +// to determine which shard will process the cluster, and if the given shard is equal to the calculated shard +// the function will return true. 
+func GetClusterFilter(db db.ArgoDB, distributionFunction DistributionFunction, shard int) ClusterFilterFunction { + replicas := db.GetApplicationControllerReplicas() + return func(c *v1alpha1.Cluster) bool { + clusterShard := 0 + if c != nil && c.Shard != nil { + requestedShard := int(*c.Shard) + if requestedShard < replicas { + clusterShard = requestedShard + } else { + log.Warnf("Specified cluster shard (%d) for cluster: %s is greater than the number of available shard. Assigning automatically.", requestedShard, c.Name) + } + } else { + clusterShard = distributionFunction(c) + } + return clusterShard == shard + } +} + +// GetDistributionFunction returns which DistributionFunction should be used based on the passed algorithm and +// the current datas. +func GetDistributionFunction(db db.ArgoDB, shardingAlgorithm string) DistributionFunction { + log.Infof("Using filter function: %s", shardingAlgorithm) + distributionFunction := LegacyDistributionFunction(db) + switch shardingAlgorithm { + case common.RoundRobinShardingAlgorithm: + distributionFunction = RoundRobinDistributionFunction(db) + case common.LegacyShardingAlgorithm: + distributionFunction = LegacyDistributionFunction(db) + default: + log.Warnf("distribution type %s is not supported, defaulting to %s", shardingAlgorithm, common.DefaultShardingAlgorithm) + } + return distributionFunction +} + +// LegacyDistributionFunction returns a DistributionFunction using a stable distribution algorithm: +// for a given cluster the function will return the shard number based on the cluster id. This function +// is lightweight and can be distributed easily, however, it does not ensure an homogenous distribution as +// some shards may get assigned more clusters than others. 
It is the legacy function distribution that is +// kept for compatibility reasons +func LegacyDistributionFunction(db db.ArgoDB) DistributionFunction { + replicas := db.GetApplicationControllerReplicas() + return func(c *v1alpha1.Cluster) int { + if replicas == 0 { + return -1 + } + if c == nil { + return 0 + } + id := c.ID + log.Debugf("Calculating cluster shard for cluster id: %s", id) + if id == "" { + return 0 + } else { + h := fnv.New32a() + _, _ = h.Write([]byte(id)) + shard := int32(h.Sum32() % uint32(replicas)) + log.Debugf("Cluster with id=%s will be processed by shard %d", id, shard) + return int(shard) + } + } +} + +// RoundRobinDistributionFunction returns a DistributionFunction using an homogeneous distribution algorithm: +// for a given cluster the function will return the shard number based on the modulo of the cluster rank in +// the cluster's list sorted by uid on the shard number. +// This function ensures an homogenous distribution: each shards got assigned the same number of +// clusters +/-1 , but with the drawback of a reshuffling of clusters accross shards in case of some changes +// in the cluster list +func RoundRobinDistributionFunction(db db.ArgoDB) DistributionFunction { + replicas := db.GetApplicationControllerReplicas() + return func(c *v1alpha1.Cluster) int { + if replicas > 0 { + if c == nil { // in-cluster does not necessarly have a secret assigned. So we are receiving a nil cluster here. + return 0 + } else { + clusterIndexdByClusterIdMap := createClusterIndexByClusterIdMap(db) + clusterIndex, ok := clusterIndexdByClusterIdMap[c.ID] + if !ok { + log.Warnf("Cluster with id=%s not found in cluster map.", c.ID) + return -1 + } + shard := int(clusterIndex % replicas) + log.Debugf("Cluster with id=%s will be processed by shard %d", c.ID, shard) + return shard + } + } + log.Warnf("The number of replicas (%d) is lower than 1", replicas) + return -1 + } +} + +// InferShard extracts the shard index based on its hostname. 
func InferShard() (int, error) { - hostname, err := os.Hostname() + hostname, err := osHostnameFunction() if err != nil { - return 0, err + return -1, err } parts := strings.Split(hostname, "-") if len(parts) == 0 { @@ -23,31 +158,196 @@ func InferShard() (int, error) { if err != nil { return 0, fmt.Errorf("hostname should ends with shard number separated by '-' but got: %s", hostname) } - return shard, nil + return int(shard), nil } -// GetShardByID calculates cluster shard as `clusterSecret.UID % replicas count` -func GetShardByID(id string, replicas int) int { - if id == "" { - return 0 +func getSortedClustersList(db db.ArgoDB) []v1alpha1.Cluster { + ctx := context.Background() + clustersList, dbErr := db.ListClusters(ctx) + if dbErr != nil { + log.Warnf("Error while querying clusters list from database: %v", dbErr) + return []v1alpha1.Cluster{} + } + clusters := clustersList.Items + sort.Slice(clusters, func(i, j int) bool { + return clusters[i].ID < clusters[j].ID + }) + return clusters +} + +func createClusterIndexByClusterIdMap(db db.ArgoDB) map[string]int { + clusters := getSortedClustersList(db) + log.Debugf("ClustersList has %d items", len(clusters)) + clusterById := make(map[string]v1alpha1.Cluster) + clusterIndexedByClusterId := make(map[string]int) + for i, cluster := range clusters { + log.Debugf("Adding cluster with id=%s and name=%s to cluster's map", cluster.ID, cluster.Name) + clusterById[cluster.ID] = cluster + clusterIndexedByClusterId[cluster.ID] = i + } + return clusterIndexedByClusterId +} + +// GetOrUpdateShardFromConfigMap finds the shard number from the shard mapping configmap. If the shard mapping configmap does not exist, +// the function creates the shard mapping configmap. +// The function takes the shard number from the environment variable (default value -1, if not set) and passes it to this function. 
+// If the shard value passed to this function is -1, that is, the shard was not set as an environment variable, +// we default the shard number to 0 for computing the default config map. +func GetOrUpdateShardFromConfigMap(kubeClient *kubernetes.Clientset, settingsMgr *settings.SettingsManager, replicas, shard int) (int, error) { + + hostname, err := osHostnameFunction() + if err != nil { + return -1, err + } + + // fetch the shard mapping configMap + shardMappingCM, err := kubeClient.CoreV1().ConfigMaps(settingsMgr.GetNamespace()).Get(context.Background(), common.ArgoCDAppControllerShardConfigMapName, metav1.GetOptions{}) + + if err != nil { + if !kubeerrors.IsNotFound(err) { + return -1, fmt.Errorf("error getting sharding config map: %s", err) + } + log.Infof("shard mapping configmap %s not found. Creating default shard mapping configmap.", common.ArgoCDAppControllerShardConfigMapName) + + // if the shard is not set as an environment variable, set the default value of shard to 0 for generating default CM + if shard == -1 { + shard = 0 + } + shardMappingCM, err = generateDefaultShardMappingCM(settingsMgr.GetNamespace(), hostname, replicas, shard) + if err != nil { + return -1, fmt.Errorf("error generating default shard mapping configmap %s", err) + } + if _, err = kubeClient.CoreV1().ConfigMaps(settingsMgr.GetNamespace()).Create(context.Background(), shardMappingCM, metav1.CreateOptions{}); err != nil { + return -1, fmt.Errorf("error creating shard mapping configmap %s", err) + } + // return 0 as the controller is assigned to shard 0 while generating default shard mapping ConfigMap + return shard, nil } else { - h := fnv.New32a() - _, _ = h.Write([]byte(id)) - return int(h.Sum32() % uint32(replicas)) + // Identify the available shard and update the ConfigMap + data := shardMappingCM.Data[ShardControllerMappingKey] + var shardMappingData []shardApplicationControllerMapping + err := json.Unmarshal([]byte(data), &shardMappingData) + if err != nil { + return -1, 
fmt.Errorf("error unmarshalling shard config map data: %s", err) + } + + shard, shardMappingData := getOrUpdateShardNumberForController(shardMappingData, hostname, replicas, shard) + updatedShardMappingData, err := json.Marshal(shardMappingData) + if err != nil { + return -1, fmt.Errorf("error marshalling data of shard mapping ConfigMap: %s", err) + } + shardMappingCM.Data[ShardControllerMappingKey] = string(updatedShardMappingData) + + _, err = kubeClient.CoreV1().ConfigMaps(settingsMgr.GetNamespace()).Update(context.Background(), shardMappingCM, metav1.UpdateOptions{}) + if err != nil { + return -1, err + } + return shard, nil } } -func GetClusterFilter(replicas int, shard int) func(c *v1alpha1.Cluster) bool { - return func(c *v1alpha1.Cluster) bool { - clusterShard := 0 - // cluster might be nil if app is using invalid cluster URL, assume shard 0 in this case. - if c != nil { - if c.Shard != nil { - clusterShard = int(*c.Shard) - } else { - clusterShard = GetShardByID(c.ID, replicas) +// getOrUpdateShardNumberForController takes list of shardApplicationControllerMapping and performs computation to find the matching or empty shard number +func getOrUpdateShardNumberForController(shardMappingData []shardApplicationControllerMapping, hostname string, replicas, shard int) (int, []shardApplicationControllerMapping) { + + // if current length of shardMappingData in shard mapping configMap is less than the number of replicas, + // create additional empty entries for missing shard numbers in shardMappingDataconfigMap + if len(shardMappingData) < replicas { + // generate extra default mappings + for currentShard := len(shardMappingData); currentShard < replicas; currentShard++ { + shardMappingData = append(shardMappingData, shardApplicationControllerMapping{ + ShardNumber: currentShard, + }) + } + } + + // if current length of shardMappingData in shard mapping configMap is more than the number of replicas, + // we replace the config map with default config map and let 
controllers self assign the new shard to itself + if len(shardMappingData) > replicas { + shardMappingData = getDefaultShardMappingData(replicas) + } + + if shard != -1 && shard < replicas { + log.Debugf("update heartbeat for shard %d", shard) + for i := range shardMappingData { + shardMapping := shardMappingData[i] + if shardMapping.ShardNumber == shard { + log.Debugf("Shard found. Updating heartbeat!!") + shardMapping.ControllerName = hostname + shardMapping.HeartbeatTime = heartbeatCurrentTime() + shardMappingData[i] = shardMapping + break } } - return clusterShard == shard + } else { + // find the matching shard with assigned controllerName + for i := range shardMappingData { + shardMapping := shardMappingData[i] + if shardMapping.ControllerName == hostname { + log.Debugf("Shard matched. Updating heartbeat!!") + shard = int(shardMapping.ShardNumber) + shardMapping.HeartbeatTime = heartbeatCurrentTime() + shardMappingData[i] = shardMapping + break + } + } + } + + // at this point, we have still not found a shard with matching hostname. + // So, find a shard with either no controller assigned or assigned controller + // with heartbeat past threshold + if shard == -1 { + for i := range shardMappingData { + shardMapping := shardMappingData[i] + if (shardMapping.ControllerName == "") || (metav1.Now().After(shardMapping.HeartbeatTime.Add(time.Duration(HeartbeatTimeout) * time.Second))) { + shard = int(shardMapping.ShardNumber) + log.Debugf("Empty shard found %d", shard) + shardMapping.ControllerName = hostname + shardMapping.HeartbeatTime = heartbeatCurrentTime() + shardMappingData[i] = shardMapping + break + } + } + } + return shard, shardMappingData +} + +// generateDefaultShardMappingCM creates a default shard mapping configMap. Assigns current controller to shard 0. 
+func generateDefaultShardMappingCM(namespace, hostname string, replicas, shard int) (*v1.ConfigMap, error) { + + shardingCM := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: common.ArgoCDAppControllerShardConfigMapName, + Namespace: namespace, + }, + Data: map[string]string{}, + } + + shardMappingData := getDefaultShardMappingData(replicas) + + // if shard is not assigned to a controller, we use shard 0 + if shard == -1 || shard > replicas { + shard = 0 + } + shardMappingData[shard].ControllerName = hostname + shardMappingData[shard].HeartbeatTime = heartbeatCurrentTime() + + data, err := json.Marshal(shardMappingData) + if err != nil { + return nil, fmt.Errorf("error generating default ConfigMap: %s", err) + } + shardingCM.Data[ShardControllerMappingKey] = string(data) + + return shardingCM, nil +} + +func getDefaultShardMappingData(replicas int) []shardApplicationControllerMapping { + shardMappingData := make([]shardApplicationControllerMapping, 0) + + for i := 0; i < replicas; i++ { + mapping := shardApplicationControllerMapping{ + ShardNumber: i, + } + shardMappingData = append(shardMappingData, mapping) } + return shardMappingData } diff --git a/controller/sharding/sharding_test.go b/controller/sharding/sharding_test.go index dc27726f8a6fa..a8a25e11c4978 100644 --- a/controller/sharding/sharding_test.go +++ b/controller/sharding/sharding_test.go @@ -1,29 +1,678 @@ package sharding import ( + "encoding/json" + "errors" + "fmt" + "os" "testing" + "time" + "github.com/argoproj/argo-cd/v2/common" "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" - + dbmocks "github.com/argoproj/argo-cd/v2/util/db/mocks" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func TestGetShardByID_NotEmptyID(t *testing.T) { - assert.Equal(t, 0, GetShardByID("1", 2)) - assert.Equal(t, 1, GetShardByID("2", 2)) - assert.Equal(t, 0, GetShardByID("3", 2)) - 
assert.Equal(t, 1, GetShardByID("4", 2)) + db := &dbmocks.ArgoDB{} + db.On("GetApplicationControllerReplicas").Return(1) + assert.Equal(t, 0, LegacyDistributionFunction(db)(&v1alpha1.Cluster{ID: "1"})) + assert.Equal(t, 0, LegacyDistributionFunction(db)(&v1alpha1.Cluster{ID: "2"})) + assert.Equal(t, 0, LegacyDistributionFunction(db)(&v1alpha1.Cluster{ID: "3"})) + assert.Equal(t, 0, LegacyDistributionFunction(db)(&v1alpha1.Cluster{ID: "4"})) } func TestGetShardByID_EmptyID(t *testing.T) { - shard := GetShardByID("", 10) + db := &dbmocks.ArgoDB{} + db.On("GetApplicationControllerReplicas").Return(1) + distributionFunction := LegacyDistributionFunction + shard := distributionFunction(db)(&v1alpha1.Cluster{}) assert.Equal(t, 0, shard) } -func TestGetClusterFilter(t *testing.T) { - filter := GetClusterFilter(2, 1) +func TestGetShardByID_NoReplicas(t *testing.T) { + db := &dbmocks.ArgoDB{} + db.On("GetApplicationControllerReplicas").Return(0) + distributionFunction := LegacyDistributionFunction + shard := distributionFunction(db)(&v1alpha1.Cluster{}) + assert.Equal(t, -1, shard) +} + +func TestGetShardByID_NoReplicasUsingHashDistributionFunction(t *testing.T) { + db := &dbmocks.ArgoDB{} + db.On("GetApplicationControllerReplicas").Return(0) + distributionFunction := LegacyDistributionFunction + shard := distributionFunction(db)(&v1alpha1.Cluster{}) + assert.Equal(t, -1, shard) +} + +func TestGetShardByID_NoReplicasUsingHashDistributionFunctionWithClusters(t *testing.T) { + db, cluster1, cluster2, cluster3, cluster4, cluster5 := createTestClusters() + // Test with replicas set to 0 + db.On("GetApplicationControllerReplicas").Return(0) + t.Setenv(common.EnvControllerShardingAlgorithm, common.RoundRobinShardingAlgorithm) + distributionFunction := RoundRobinDistributionFunction(db) + assert.Equal(t, -1, distributionFunction(nil)) + assert.Equal(t, -1, distributionFunction(&cluster1)) + assert.Equal(t, -1, distributionFunction(&cluster2)) + assert.Equal(t, -1, 
distributionFunction(&cluster3)) + assert.Equal(t, -1, distributionFunction(&cluster4)) + assert.Equal(t, -1, distributionFunction(&cluster5)) +} + +func TestGetClusterFilterDefault(t *testing.T) { + shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...) + os.Unsetenv(common.EnvControllerShardingAlgorithm) + db := &dbmocks.ArgoDB{} + db.On("GetApplicationControllerReplicas").Return(2) + filter := GetClusterFilter(db, GetDistributionFunction(db, common.DefaultShardingAlgorithm), shardIndex) + assert.False(t, filter(&v1alpha1.Cluster{ID: "1"})) + assert.True(t, filter(&v1alpha1.Cluster{ID: "2"})) + assert.False(t, filter(&v1alpha1.Cluster{ID: "3"})) + assert.True(t, filter(&v1alpha1.Cluster{ID: "4"})) +} + +func TestGetClusterFilterLegacy(t *testing.T) { + shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...) + db := &dbmocks.ArgoDB{} + db.On("GetApplicationControllerReplicas").Return(2) + t.Setenv(common.EnvControllerShardingAlgorithm, common.LegacyShardingAlgorithm) + filter := GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex) assert.False(t, filter(&v1alpha1.Cluster{ID: "1"})) assert.True(t, filter(&v1alpha1.Cluster{ID: "2"})) assert.False(t, filter(&v1alpha1.Cluster{ID: "3"})) assert.True(t, filter(&v1alpha1.Cluster{ID: "4"})) } + +func TestGetClusterFilterUnknown(t *testing.T) { + shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...) 
+ db := &dbmocks.ArgoDB{} + db.On("GetApplicationControllerReplicas").Return(2) + t.Setenv(common.EnvControllerShardingAlgorithm, "unknown") + filter := GetClusterFilter(db, GetDistributionFunction(db, "unknown"), shardIndex) + assert.False(t, filter(&v1alpha1.Cluster{ID: "1"})) + assert.True(t, filter(&v1alpha1.Cluster{ID: "2"})) + assert.False(t, filter(&v1alpha1.Cluster{ID: "3"})) + assert.True(t, filter(&v1alpha1.Cluster{ID: "4"})) +} + +func TestLegacyGetClusterFilterWithFixedShard(t *testing.T) { + shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...) + db := &dbmocks.ArgoDB{} + db.On("GetApplicationControllerReplicas").Return(2) + filter := GetClusterFilter(db, GetDistributionFunction(db, common.DefaultShardingAlgorithm), shardIndex) + assert.False(t, filter(nil)) + assert.False(t, filter(&v1alpha1.Cluster{ID: "1"})) + assert.True(t, filter(&v1alpha1.Cluster{ID: "2"})) + assert.False(t, filter(&v1alpha1.Cluster{ID: "3"})) + assert.True(t, filter(&v1alpha1.Cluster{ID: "4"})) + + var fixedShard int64 = 4 + filter = GetClusterFilter(db, GetDistributionFunction(db, common.DefaultShardingAlgorithm), int(fixedShard)) + assert.False(t, filter(&v1alpha1.Cluster{ID: "4", Shard: &fixedShard})) + + fixedShard = 1 + filter = GetClusterFilter(db, GetDistributionFunction(db, common.DefaultShardingAlgorithm), int(fixedShard)) + assert.True(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard})) +} + +func TestRoundRobinGetClusterFilterWithFixedShard(t *testing.T) { + shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...) 
+ db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters() + db.On("GetApplicationControllerReplicas").Return(2) + filter := GetClusterFilter(db, GetDistributionFunction(db, common.RoundRobinShardingAlgorithm), shardIndex) + assert.False(t, filter(nil)) + assert.False(t, filter(&cluster1)) + assert.True(t, filter(&cluster2)) + assert.False(t, filter(&cluster3)) + assert.True(t, filter(&cluster4)) + + // a cluster with a fixed shard should be processed by the specified exact + // same shard unless the specified shard index is greater than the number of replicas. + var fixedShard int64 = 4 + filter = GetClusterFilter(db, GetDistributionFunction(db, common.RoundRobinShardingAlgorithm), int(fixedShard)) + assert.False(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard})) + + fixedShard = 1 + filter = GetClusterFilter(db, GetDistributionFunction(db, common.RoundRobinShardingAlgorithm), int(fixedShard)) + assert.True(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard})) +} + +func TestGetClusterFilterLegacyHash(t *testing.T) { + shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...) + t.Setenv(common.EnvControllerShardingAlgorithm, "hash") + db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters() + db.On("GetApplicationControllerReplicas").Return(2) + filter := GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex) + assert.False(t, filter(&cluster1)) + assert.True(t, filter(&cluster2)) + assert.False(t, filter(&cluster3)) + assert.True(t, filter(&cluster4)) + + // a cluster with a fixed shard should be processed by the specified exact + // same shard unless the specified shard index is greater than the number of replicas. 
+ var fixedShard int64 = 4 + filter = GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), int(fixedShard)) + assert.False(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard})) + + fixedShard = 1 + filter = GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), int(fixedShard)) + assert.True(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard})) +} + +func TestGetClusterFilterWithEnvControllerShardingAlgorithms(t *testing.T) { + db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters() + shardIndex := 1 + db.On("GetApplicationControllerReplicas").Return(2) + + t.Run("legacy", func(t *testing.T) { + t.Setenv(common.EnvControllerShardingAlgorithm, common.LegacyShardingAlgorithm) + shardShouldProcessCluster := GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex) + assert.False(t, shardShouldProcessCluster(&cluster1)) + assert.True(t, shardShouldProcessCluster(&cluster2)) + assert.False(t, shardShouldProcessCluster(&cluster3)) + assert.True(t, shardShouldProcessCluster(&cluster4)) + assert.False(t, shardShouldProcessCluster(nil)) + }) + + t.Run("roundrobin", func(t *testing.T) { + t.Setenv(common.EnvControllerShardingAlgorithm, common.RoundRobinShardingAlgorithm) + shardShouldProcessCluster := GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex) + assert.False(t, shardShouldProcessCluster(&cluster1)) + assert.True(t, shardShouldProcessCluster(&cluster2)) + assert.False(t, shardShouldProcessCluster(&cluster3)) + assert.True(t, shardShouldProcessCluster(&cluster4)) + assert.False(t, shardShouldProcessCluster(nil)) + }) +} + +func TestGetShardByIndexModuloReplicasCountDistributionFunction2(t *testing.T) { + db, cluster1, cluster2, cluster3, cluster4, cluster5 := createTestClusters() + + t.Run("replicas set to 1", func(t *testing.T) { + 
db.On("GetApplicationControllerReplicas").Return(1).Once() + distributionFunction := RoundRobinDistributionFunction(db) + assert.Equal(t, 0, distributionFunction(nil)) + assert.Equal(t, 0, distributionFunction(&cluster1)) + assert.Equal(t, 0, distributionFunction(&cluster2)) + assert.Equal(t, 0, distributionFunction(&cluster3)) + assert.Equal(t, 0, distributionFunction(&cluster4)) + assert.Equal(t, 0, distributionFunction(&cluster5)) + }) + + t.Run("replicas set to 2", func(t *testing.T) { + db.On("GetApplicationControllerReplicas").Return(2).Once() + distributionFunction := RoundRobinDistributionFunction(db) + assert.Equal(t, 0, distributionFunction(nil)) + assert.Equal(t, 0, distributionFunction(&cluster1)) + assert.Equal(t, 1, distributionFunction(&cluster2)) + assert.Equal(t, 0, distributionFunction(&cluster3)) + assert.Equal(t, 1, distributionFunction(&cluster4)) + assert.Equal(t, 0, distributionFunction(&cluster5)) + }) + + t.Run("replicas set to 3", func(t *testing.T) { + db.On("GetApplicationControllerReplicas").Return(3).Once() + distributionFunction := RoundRobinDistributionFunction(db) + assert.Equal(t, 0, distributionFunction(nil)) + assert.Equal(t, 0, distributionFunction(&cluster1)) + assert.Equal(t, 1, distributionFunction(&cluster2)) + assert.Equal(t, 2, distributionFunction(&cluster3)) + assert.Equal(t, 0, distributionFunction(&cluster4)) + assert.Equal(t, 1, distributionFunction(&cluster5)) + }) +} + +func TestGetShardByIndexModuloReplicasCountDistributionFunctionWhenClusterNumberIsHigh(t *testing.T) { + // Unit test written to evaluate the cost of calling db.ListCluster on every call of distributionFunction + // Doing that allows to accept added and removed clusters on the fly. 
+ // Initial tests were showing that under 1024 clusters, execution time was around 400ms + // and for 4096 clusters, execution time was under 9s + // The other implementation was giving almost linear time of 400ms up to 10'000 clusters + db := dbmocks.ArgoDB{} + clusterList := &v1alpha1.ClusterList{Items: []v1alpha1.Cluster{}} + for i := 0; i < 2048; i++ { + cluster := createCluster(fmt.Sprintf("cluster-%d", i), fmt.Sprintf("%d", i)) + clusterList.Items = append(clusterList.Items, cluster) + } + db.On("ListClusters", mock.Anything).Return(clusterList, nil) + db.On("GetApplicationControllerReplicas").Return(2) + distributionFunction := RoundRobinDistributionFunction(&db) + for i, c := range clusterList.Items { + assert.Equal(t, i%2, distributionFunction(&c)) + } +} + +func TestGetShardByIndexModuloReplicasCountDistributionFunctionWhenClusterIsAddedAndRemoved(t *testing.T) { + db := dbmocks.ArgoDB{} + cluster1 := createCluster("cluster1", "1") + cluster2 := createCluster("cluster2", "2") + cluster3 := createCluster("cluster3", "3") + cluster4 := createCluster("cluster4", "4") + cluster5 := createCluster("cluster5", "5") + cluster6 := createCluster("cluster6", "6") + + clusterList := &v1alpha1.ClusterList{Items: []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5}} + db.On("ListClusters", mock.Anything).Return(clusterList, nil) + + // Test with replicas set to 2 + db.On("GetApplicationControllerReplicas").Return(2) + distributionFunction := RoundRobinDistributionFunction(&db) + assert.Equal(t, 0, distributionFunction(nil)) + assert.Equal(t, 0, distributionFunction(&cluster1)) + assert.Equal(t, 1, distributionFunction(&cluster2)) + assert.Equal(t, 0, distributionFunction(&cluster3)) + assert.Equal(t, 1, distributionFunction(&cluster4)) + assert.Equal(t, 0, distributionFunction(&cluster5)) + assert.Equal(t, -1, distributionFunction(&cluster6)) // as cluster6 is not in the DB, this one should not have a shard assigned + + // Now, the database knows 
cluster6. Shard should be assigned a proper shard + clusterList.Items = append(clusterList.Items, cluster6) + assert.Equal(t, 1, distributionFunction(&cluster6)) + + // Now, we remove the last added cluster, it should be unassigned as well + clusterList.Items = clusterList.Items[:len(clusterList.Items)-1] + assert.Equal(t, -1, distributionFunction(&cluster6)) +} + +func TestGetShardByIndexModuloReplicasCountDistributionFunction(t *testing.T) { + db, cluster1, cluster2, _, _, _ := createTestClusters() + db.On("GetApplicationControllerReplicas").Return(2) + distributionFunction := RoundRobinDistributionFunction(db) + + // Test that the function returns the correct shard for cluster1 and cluster2 + expectedShardForCluster1 := 0 + expectedShardForCluster2 := 1 + shardForCluster1 := distributionFunction(&cluster1) + shardForCluster2 := distributionFunction(&cluster2) + + if shardForCluster1 != expectedShardForCluster1 { + t.Errorf("Expected shard for cluster1 to be %d but got %d", expectedShardForCluster1, shardForCluster1) + } + if shardForCluster2 != expectedShardForCluster2 { + t.Errorf("Expected shard for cluster2 to be %d but got %d", expectedShardForCluster2, shardForCluster2) + } +} + +func TestInferShard(t *testing.T) { + // Override the os.Hostname function to return a specific hostname for testing + defer func() { osHostnameFunction = os.Hostname }() + + expectedShard := 3 + osHostnameFunction = func() (string, error) { return "example-shard-3", nil } + actualShard, _ := InferShard() + assert.Equal(t, expectedShard, actualShard) + + osHostnameError := errors.New("cannot resolve hostname") + osHostnameFunction = func() (string, error) { return "exampleshard", osHostnameError } + _, err := InferShard() + assert.NotNil(t, err) + assert.Equal(t, err, osHostnameError) + + osHostnameFunction = func() (string, error) { return "exampleshard", nil } + _, err = InferShard() + assert.NotNil(t, err) + + osHostnameFunction = func() (string, error) { return "example-shard", 
nil } + _, err = InferShard() + assert.NotNil(t, err) +} + +func createTestClusters() (*dbmocks.ArgoDB, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster) { + db := dbmocks.ArgoDB{} + cluster1 := createCluster("cluster1", "1") + cluster2 := createCluster("cluster2", "2") + cluster3 := createCluster("cluster3", "3") + cluster4 := createCluster("cluster4", "4") + cluster5 := createCluster("cluster5", "5") + + db.On("ListClusters", mock.Anything).Return(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{ + cluster1, cluster2, cluster3, cluster4, cluster5, + }}, nil) + return &db, cluster1, cluster2, cluster3, cluster4, cluster5 +} + +func createCluster(name string, id string) v1alpha1.Cluster { + cluster := v1alpha1.Cluster{ + Name: name, + ID: id, + Server: "https://kubernetes.default.svc?" + id, + } + return cluster +} + +func Test_getDefaultShardMappingData(t *testing.T) { + expectedData := []shardApplicationControllerMapping{ + { + ShardNumber: 0, + ControllerName: "", + }, { + ShardNumber: 1, + ControllerName: "", + }, + } + + shardMappingData := getDefaultShardMappingData(2) + assert.Equal(t, expectedData, shardMappingData) +} + +func Test_generateDefaultShardMappingCM_NoPredefinedShard(t *testing.T) { + replicas := 2 + expectedTime := metav1.Now() + defer func() { osHostnameFunction = os.Hostname }() + defer func() { heartbeatCurrentTime = metav1.Now }() + + expectedMapping := []shardApplicationControllerMapping{ + { + ShardNumber: 0, + ControllerName: "test-example", + HeartbeatTime: expectedTime, + }, { + ShardNumber: 1, + }, + } + + expectedMappingCM, err := json.Marshal(expectedMapping) + assert.NoError(t, err) + + expectedShadingCM := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: common.ArgoCDAppControllerShardConfigMapName, + Namespace: "test", + }, + Data: map[string]string{ + "shardControllerMapping": string(expectedMappingCM), + }, + } + heartbeatCurrentTime = func() metav1.Time { return expectedTime } + 
osHostnameFunction = func() (string, error) { return "test-example", nil } + shardingCM, err := generateDefaultShardMappingCM("test", "test-example", replicas, -1) + assert.NoError(t, err) + assert.Equal(t, expectedShadingCM, shardingCM) + +} + +func Test_generateDefaultShardMappingCM_PredefinedShard(t *testing.T) { + replicas := 2 + expectedTime := metav1.Now() + defer func() { osHostnameFunction = os.Hostname }() + defer func() { heartbeatCurrentTime = metav1.Now }() + + expectedMapping := []shardApplicationControllerMapping{ + { + ShardNumber: 0, + }, { + ShardNumber: 1, + ControllerName: "test-example", + HeartbeatTime: expectedTime, + }, + } + + expectedMappingCM, err := json.Marshal(expectedMapping) + assert.NoError(t, err) + + expectedShadingCM := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: common.ArgoCDAppControllerShardConfigMapName, + Namespace: "test", + }, + Data: map[string]string{ + "shardControllerMapping": string(expectedMappingCM), + }, + } + heartbeatCurrentTime = func() metav1.Time { return expectedTime } + osHostnameFunction = func() (string, error) { return "test-example", nil } + shardingCM, err := generateDefaultShardMappingCM("test", "test-example", replicas, 1) + assert.NoError(t, err) + assert.Equal(t, expectedShadingCM, shardingCM) + +} + +func Test_getOrUpdateShardNumberForController(t *testing.T) { + expectedTime := metav1.Now() + + testCases := []struct { + name string + shardApplicationControllerMapping []shardApplicationControllerMapping + hostname string + replicas int + shard int + expectedShard int + expectedShardMappingData []shardApplicationControllerMapping + }{ + { + name: "length of shard mapping less than number of replicas - Existing controller", + shardApplicationControllerMapping: []shardApplicationControllerMapping{ + { + ControllerName: "test-example", + ShardNumber: 0, + HeartbeatTime: metav1.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), + }, + }, + hostname: "test-example", + replicas: 2, + shard: -1, + 
expectedShard: 0, + expectedShardMappingData: []shardApplicationControllerMapping{ + { + ControllerName: "test-example", + ShardNumber: 0, + HeartbeatTime: expectedTime, + }, { + ControllerName: "", + ShardNumber: 1, + HeartbeatTime: metav1.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), + }, + }, + }, + { + name: "length of shard mapping less than number of replicas - New controller", + shardApplicationControllerMapping: []shardApplicationControllerMapping{ + { + ControllerName: "test-example", + ShardNumber: 0, + HeartbeatTime: expectedTime, + }, + }, + hostname: "test-example-1", + replicas: 2, + shard: -1, + expectedShard: 1, + expectedShardMappingData: []shardApplicationControllerMapping{ + { + ControllerName: "test-example", + ShardNumber: 0, + HeartbeatTime: expectedTime, + }, { + ControllerName: "test-example-1", + ShardNumber: 1, + HeartbeatTime: expectedTime, + }, + }, + }, + { + name: "length of shard mapping more than number of replicas", + shardApplicationControllerMapping: []shardApplicationControllerMapping{ + { + ControllerName: "test-example", + ShardNumber: 0, + HeartbeatTime: expectedTime, + }, { + ControllerName: "test-example-1", + ShardNumber: 1, + HeartbeatTime: expectedTime, + }, + }, + hostname: "test-example", + replicas: 1, + shard: -1, + expectedShard: 0, + expectedShardMappingData: []shardApplicationControllerMapping{ + { + ControllerName: "test-example", + ShardNumber: 0, + HeartbeatTime: expectedTime, + }, + }, + }, + { + name: "shard number is pre-specified and length of shard mapping less than number of replicas - Existing controller", + shardApplicationControllerMapping: []shardApplicationControllerMapping{ + { + ControllerName: "test-example-1", + ShardNumber: 1, + HeartbeatTime: metav1.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), + }, { + ControllerName: "test-example", + ShardNumber: 0, + HeartbeatTime: expectedTime, + }, + }, + hostname: "test-example-1", + replicas: 2, + shard: 1, + expectedShard: 1, + 
expectedShardMappingData: []shardApplicationControllerMapping{ + { + ControllerName: "test-example-1", + ShardNumber: 1, + HeartbeatTime: expectedTime, + }, { + ControllerName: "test-example", + ShardNumber: 0, + HeartbeatTime: expectedTime, + }, + }, + }, + { + name: "shard number is pre-specified and length of shard mapping less than number of replicas - New controller", + shardApplicationControllerMapping: []shardApplicationControllerMapping{ + { + ControllerName: "test-example", + ShardNumber: 0, + HeartbeatTime: expectedTime, + }, + }, + hostname: "test-example-1", + replicas: 2, + shard: 1, + expectedShard: 1, + expectedShardMappingData: []shardApplicationControllerMapping{ + { + ControllerName: "test-example", + ShardNumber: 0, + HeartbeatTime: expectedTime, + }, { + ControllerName: "test-example-1", + ShardNumber: 1, + HeartbeatTime: expectedTime, + }, + }, + }, + { + name: "shard number is pre-specified and length of shard mapping more than number of replicas", + shardApplicationControllerMapping: []shardApplicationControllerMapping{ + { + ControllerName: "test-example", + ShardNumber: 0, + HeartbeatTime: expectedTime, + }, { + ControllerName: "test-example-1", + ShardNumber: 1, + HeartbeatTime: expectedTime, + }, { + ControllerName: "test-example-2", + ShardNumber: 2, + HeartbeatTime: expectedTime, + }, + }, + hostname: "test-example", + replicas: 2, + shard: 1, + expectedShard: 1, + expectedShardMappingData: []shardApplicationControllerMapping{ + { + ControllerName: "", + ShardNumber: 0, + HeartbeatTime: metav1.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), + }, { + ControllerName: "test-example", + ShardNumber: 1, + HeartbeatTime: expectedTime, + }, + }, + }, + { + name: "updating heartbeat", + shardApplicationControllerMapping: []shardApplicationControllerMapping{ + { + ControllerName: "test-example", + ShardNumber: 0, + HeartbeatTime: expectedTime, + }, { + ControllerName: "test-example-1", + ShardNumber: 1, + HeartbeatTime: metav1.Date(1, 
time.January, 1, 0, 0, 0, 0, time.UTC), + }, + }, + hostname: "test-example-1", + replicas: 2, + shard: -1, + expectedShard: 1, + expectedShardMappingData: []shardApplicationControllerMapping{ + { + ControllerName: "test-example", + ShardNumber: 0, + HeartbeatTime: expectedTime, + }, { + ControllerName: "test-example-1", + ShardNumber: 1, + HeartbeatTime: expectedTime, + }, + }, + }, + { + name: "updating heartbeat - shard pre-defined", + shardApplicationControllerMapping: []shardApplicationControllerMapping{ + { + ControllerName: "test-example", + ShardNumber: 0, + HeartbeatTime: expectedTime, + }, { + ControllerName: "test-example-1", + ShardNumber: 1, + HeartbeatTime: metav1.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), + }, + }, + hostname: "test-example-1", + replicas: 2, + shard: 1, + expectedShard: 1, + expectedShardMappingData: []shardApplicationControllerMapping{ + { + ControllerName: "test-example", + ShardNumber: 0, + HeartbeatTime: expectedTime, + }, { + ControllerName: "test-example-1", + ShardNumber: 1, + HeartbeatTime: expectedTime, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + defer func() { osHostnameFunction = os.Hostname }() + heartbeatCurrentTime = func() metav1.Time { return expectedTime } + shard, shardMappingData := getOrUpdateShardNumberForController(tc.shardApplicationControllerMapping, tc.hostname, tc.replicas, tc.shard) + assert.Equal(t, tc.expectedShard, shard) + assert.Equal(t, tc.expectedShardMappingData, shardMappingData) + }) + } +} diff --git a/controller/sharding/shuffle_test.go b/controller/sharding/shuffle_test.go new file mode 100644 index 0000000000000..9e089e31bad0f --- /dev/null +++ b/controller/sharding/shuffle_test.go @@ -0,0 +1,82 @@ +package sharding + +import ( + "fmt" + "math" + "testing" + + "github.com/argoproj/argo-cd/v2/common" + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + dbmocks "github.com/argoproj/argo-cd/v2/util/db/mocks" + 
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestLargeShuffle(t *testing.T) { + t.Skip() + db := dbmocks.ArgoDB{} + clusterList := &v1alpha1.ClusterList{Items: []v1alpha1.Cluster{}} + for i := 0; i < math.MaxInt/4096; i += 256 { + //fmt.Fprintf(os.Stdout, "%d", i) + cluster := createCluster(fmt.Sprintf("cluster-%d", i), fmt.Sprintf("%d", i)) + clusterList.Items = append(clusterList.Items, cluster) + } + db.On("ListClusters", mock.Anything).Return(clusterList, nil) + // Test with replicas set to 256 + t.Setenv(common.EnvControllerReplicas, "256") + distributionFunction := RoundRobinDistributionFunction(&db) + for i, c := range clusterList.Items { + assert.Equal(t, i%2567, distributionFunction(&c)) + } + +} + +func TestShuffle(t *testing.T) { + t.Skip() + db := dbmocks.ArgoDB{} + cluster1 := createCluster("cluster1", "10") + cluster2 := createCluster("cluster2", "20") + cluster3 := createCluster("cluster3", "30") + cluster4 := createCluster("cluster4", "40") + cluster5 := createCluster("cluster5", "50") + cluster6 := createCluster("cluster6", "60") + cluster25 := createCluster("cluster6", "25") + + clusterList := &v1alpha1.ClusterList{Items: []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5, cluster6}} + db.On("ListClusters", mock.Anything).Return(clusterList, nil) + + // Test with replicas set to 3 + t.Setenv(common.EnvControllerReplicas, "3") + distributionFunction := RoundRobinDistributionFunction(&db) + assert.Equal(t, 0, distributionFunction(nil)) + assert.Equal(t, 0, distributionFunction(&cluster1)) + assert.Equal(t, 1, distributionFunction(&cluster2)) + assert.Equal(t, 2, distributionFunction(&cluster3)) + assert.Equal(t, 0, distributionFunction(&cluster4)) + assert.Equal(t, 1, distributionFunction(&cluster5)) + assert.Equal(t, 2, distributionFunction(&cluster6)) + + // Now, we remove cluster1, it should be unassigned, and all the others should be reshuffled + clusterList.Items = Remove(clusterList.Items, 
0) + assert.Equal(t, -1, distributionFunction(&cluster1)) + assert.Equal(t, 0, distributionFunction(&cluster2)) + assert.Equal(t, 1, distributionFunction(&cluster3)) + assert.Equal(t, 2, distributionFunction(&cluster4)) + assert.Equal(t, 0, distributionFunction(&cluster5)) + assert.Equal(t, 1, distributionFunction(&cluster6)) + + // Now, we add a cluster with an id=25 so it will be placed right after cluster2 + clusterList.Items = append(clusterList.Items, cluster25) + assert.Equal(t, -1, distributionFunction(&cluster1)) + assert.Equal(t, 0, distributionFunction(&cluster2)) + assert.Equal(t, 1, distributionFunction(&cluster25)) + assert.Equal(t, 2, distributionFunction(&cluster3)) + assert.Equal(t, 0, distributionFunction(&cluster4)) + assert.Equal(t, 1, distributionFunction(&cluster5)) + assert.Equal(t, 2, distributionFunction(&cluster6)) + +} + +func Remove(slice []v1alpha1.Cluster, s int) []v1alpha1.Cluster { + return append(slice[:s], slice[s+1:]...) +} diff --git a/controller/state.go b/controller/state.go index 73d8123572319..19757510aa71d 100644 --- a/controller/state.go +++ b/controller/state.go @@ -4,7 +4,9 @@ import ( "context" "encoding/json" "fmt" + v1 "k8s.io/api/core/v1" "reflect" + "strings" "time" "github.com/argoproj/gitops-engine/pkg/diff" @@ -13,7 +15,7 @@ import ( hookutil "github.com/argoproj/gitops-engine/pkg/sync/hook" "github.com/argoproj/gitops-engine/pkg/sync/ignore" resourceutil "github.com/argoproj/gitops-engine/pkg/sync/resource" - "github.com/argoproj/gitops-engine/pkg/utils/kube" + "github.com/argoproj/gitops-engine/pkg/sync/syncwaves" kubeutil "github.com/argoproj/gitops-engine/pkg/utils/kube" log "github.com/sirupsen/logrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -26,14 +28,13 @@ import ( statecache "github.com/argoproj/argo-cd/v2/controller/cache" "github.com/argoproj/argo-cd/v2/controller/metrics" "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" - appv1 
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned" "github.com/argoproj/argo-cd/v2/reposerver/apiclient" "github.com/argoproj/argo-cd/v2/util/argo" + argodiff "github.com/argoproj/argo-cd/v2/util/argo/diff" appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate" "github.com/argoproj/argo-cd/v2/util/db" "github.com/argoproj/argo-cd/v2/util/gpg" - argohealth "github.com/argoproj/argo-cd/v2/util/health" "github.com/argoproj/argo-cd/v2/util/io" "github.com/argoproj/argo-cd/v2/util/settings" "github.com/argoproj/argo-cd/v2/util/stats" @@ -59,34 +60,23 @@ type managedResource struct { ResourceVersion string } -func GetLiveObjsForApplicationHealth(resources []managedResource, statuses []appv1.ResourceStatus) ([]*appv1.ResourceStatus, []*unstructured.Unstructured) { - liveObjs := make([]*unstructured.Unstructured, 0) - resStatuses := make([]*appv1.ResourceStatus, 0) - for i, resource := range resources { - if resource.Target != nil && hookutil.Skip(resource.Target) { - continue - } - - liveObjs = append(liveObjs, resource.Live) - resStatuses = append(resStatuses, &statuses[i]) - } - return resStatuses, liveObjs -} - // AppStateManager defines methods which allow to compare application spec and actual application state. 
type AppStateManager interface { - CompareAppState(app *v1alpha1.Application, project *appv1.AppProject, revision string, source v1alpha1.ApplicationSource, noCache bool, localObjects []string) *comparisonResult + CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localObjects []string, hasMultipleSources bool) *comparisonResult SyncAppState(app *v1alpha1.Application, state *v1alpha1.OperationState) } +// comparisonResult holds the state of an application after the reconciliation type comparisonResult struct { syncStatus *v1alpha1.SyncStatus healthStatus *v1alpha1.HealthStatus resources []v1alpha1.ResourceStatus managedResources []managedResource reconciliationResult sync.ReconciliationResult - diffNormalizer diff.Normalizer + diffConfig argodiff.DiffConfig appSourceType v1alpha1.ApplicationSourceType + // appSourceTypes stores the SourceType for each application source under sources field + appSourceTypes []v1alpha1.ApplicationSourceType // timings maps phases of comparison to the duration it took to complete (for statistical purposes) timings map[string]time.Duration diffResultList *diff.DiffResultList @@ -102,97 +92,140 @@ func (res *comparisonResult) GetHealthStatus() *v1alpha1.HealthStatus { // appStateManager allows to compare applications to git type appStateManager struct { - metricsServer *metrics.MetricsServer - db db.ArgoDB - settingsMgr *settings.SettingsManager - appclientset appclientset.Interface - projInformer cache.SharedIndexInformer - kubectl kubeutil.Kubectl - repoClientset apiclient.Clientset - liveStateCache statecache.LiveStateCache - cache *appstatecache.Cache - namespace string - statusRefreshTimeout time.Duration + metricsServer *metrics.MetricsServer + db db.ArgoDB + settingsMgr *settings.SettingsManager + appclientset appclientset.Interface + projInformer cache.SharedIndexInformer + kubectl kubeutil.Kubectl + repoClientset 
apiclient.Clientset + liveStateCache statecache.LiveStateCache + cache *appstatecache.Cache + namespace string + statusRefreshTimeout time.Duration + resourceTracking argo.ResourceTracking + persistResourceHealth bool } -func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, source v1alpha1.ApplicationSource, appLabelKey, revision string, noCache, verifySignature bool) ([]*unstructured.Unstructured, *apiclient.ManifestResponse, error) { +func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, error) { + ts := stats.NewTimingStats() helmRepos, err := m.db.ListHelmRepositories(context.Background()) if err != nil { - return nil, nil, err + return nil, nil, fmt.Errorf("failed to list Helm repositories: %w", err) } - ts.AddCheckpoint("helm_ms") - repo, err := m.db.GetRepository(context.Background(), source.RepoURL) + permittedHelmRepos, err := argo.GetPermittedRepos(proj, helmRepos) if err != nil { - return nil, nil, err + return nil, nil, fmt.Errorf("failed to get permitted Helm repositories for project %q: %w", proj.Name, err) } + ts.AddCheckpoint("repo_ms") - conn, repoClient, err := m.repoClientset.NewRepoServerClient() + helmRepositoryCredentials, err := m.db.GetAllHelmRepositoryCredentials(context.Background()) if err != nil { - return nil, nil, err + return nil, nil, fmt.Errorf("failed to get Helm credentials: %w", err) } - defer io.Close(conn) - - if revision == "" { - revision = source.TargetRevision + permittedHelmCredentials, err := argo.GetPermittedReposCredentials(proj, helmRepositoryCredentials) + if err != nil { + return nil, nil, fmt.Errorf("failed to get permitted Helm credentials for project %q: %w", proj.Name, err) } - plugins, err := m.settingsMgr.GetConfigManagementPlugins() + enabledSourceTypes, err := 
m.settingsMgr.GetEnabledSourceTypes() if err != nil { - return nil, nil, err + return nil, nil, fmt.Errorf("failed to get enabled source types: %w", err) } ts.AddCheckpoint("plugins_ms") - tools := make([]*appv1.ConfigManagementPlugin, len(plugins)) - for i := range plugins { - tools[i] = &plugins[i] - } kustomizeSettings, err := m.settingsMgr.GetKustomizeSettings() if err != nil { - return nil, nil, err + return nil, nil, fmt.Errorf("failed to get Kustomize settings: %w", err) } - kustomizeOptions, err := kustomizeSettings.GetOptions(app.Spec.Source) + + helmOptions, err := m.settingsMgr.GetHelmSettings() if err != nil { - return nil, nil, err + return nil, nil, fmt.Errorf("failed to get Helm settings: %w", err) } + ts.AddCheckpoint("build_options_ms") - serverVersion, apiGroups, err := m.liveStateCache.GetVersionsInfo(app.Spec.Destination.Server) + serverVersion, apiResources, err := m.liveStateCache.GetVersionsInfo(app.Spec.Destination.Server) if err != nil { - return nil, nil, err - } - ts.AddCheckpoint("version_ms") - manifestInfo, err := repoClient.GenerateManifest(context.Background(), &apiclient.ManifestRequest{ - Repo: repo, - Repos: helmRepos, - Revision: revision, - NoCache: noCache, - AppLabelKey: appLabelKey, - AppName: app.Name, - Namespace: app.Spec.Destination.Namespace, - ApplicationSource: &source, - Plugins: tools, - KustomizeOptions: kustomizeOptions, - KubeVersion: serverVersion, - ApiVersions: argo.APIGroupsToVersions(apiGroups), - VerifySignature: verifySignature, - }) + return nil, nil, fmt.Errorf("failed to get cluster version for cluster %q: %w", app.Spec.Destination.Server, err) + } + conn, repoClient, err := m.repoClientset.NewRepoServerClient() if err != nil { - return nil, nil, err + return nil, nil, fmt.Errorf("failed to connect to repo server: %w", err) } - targetObjs, err := unmarshalManifests(manifestInfo.Manifests) + defer io.Close(conn) + + manifestInfos := make([]*apiclient.ManifestResponse, 0) + targetObjs := 
make([]*unstructured.Unstructured, 0) + // Store the map of all sources having ref field into a map for applications with sources field + refSources, err := argo.GetRefSources(context.Background(), app.Spec, m.db) if err != nil { - return nil, nil, err + return nil, nil, fmt.Errorf("failed to get ref sources: %v", err) + } + + for i, source := range sources { + if len(revisions) < len(sources) || revisions[i] == "" { + revisions[i] = source.TargetRevision + } + ts.AddCheckpoint("helm_ms") + repo, err := m.db.GetRepository(context.Background(), source.RepoURL) + if err != nil { + return nil, nil, fmt.Errorf("failed to get repo %q: %w", source.RepoURL, err) + } + kustomizeOptions, err := kustomizeSettings.GetOptions(source) + if err != nil { + return nil, nil, fmt.Errorf("failed to get Kustomize options for source %d of %d: %w", i+1, len(sources), err) + } + + ts.AddCheckpoint("version_ms") + log.Debugf("Generating Manifest for source %s revision %s", source, revisions[i]) + manifestInfo, err := repoClient.GenerateManifest(context.Background(), &apiclient.ManifestRequest{ + Repo: repo, + Repos: permittedHelmRepos, + Revision: revisions[i], + NoCache: noCache, + NoRevisionCache: noRevisionCache, + AppLabelKey: appLabelKey, + AppName: app.InstanceName(m.namespace), + Namespace: app.Spec.Destination.Namespace, + ApplicationSource: &source, + KustomizeOptions: kustomizeOptions, + KubeVersion: serverVersion, + ApiVersions: argo.APIResourcesToStrings(apiResources, true), + VerifySignature: verifySignature, + HelmRepoCreds: permittedHelmCredentials, + TrackingMethod: string(argo.GetTrackingMethod(m.settingsMgr)), + EnabledSourceTypes: enabledSourceTypes, + HelmOptions: helmOptions, + HasMultipleSources: app.Spec.HasMultipleSources(), + RefSources: refSources, + ProjectName: proj.Name, + ProjectSourceRepos: proj.Spec.SourceRepos, + }) + if err != nil { + return nil, nil, fmt.Errorf("failed to generate manifest for source %d of %d: %w", i+1, len(sources), err) + } + + 
targetObj, err := unmarshalManifests(manifestInfo.Manifests) + + if err != nil { + return nil, nil, fmt.Errorf("failed to unmarshal manifests for source %d of %d: %w", i+1, len(sources), err) + } + targetObjs = append(targetObjs, targetObj...) + + manifestInfos = append(manifestInfos, manifestInfo) } ts.AddCheckpoint("unmarshal_ms") - logCtx := log.WithField("application", app.Name) + logCtx := log.WithField("application", app.QualifiedName()) for k, v := range ts.Timings() { logCtx = logCtx.WithField(k, v.Milliseconds()) } logCtx = logCtx.WithField("time_ms", time.Since(ts.StartTime).Milliseconds()) logCtx.Info("getRepoObjs stats") - return targetObjs, manifestInfo, nil + return targetObjs, manifestInfos, nil } func unmarshalManifests(manifests []string) ([]*unstructured.Unstructured, error) { @@ -236,8 +269,8 @@ func DeduplicateTargetObjects( for key, targets := range targetByKey { if len(targets) > 1 { now := metav1.Now() - conditions = append(conditions, appv1.ApplicationCondition{ - Type: appv1.ApplicationConditionRepeatedResourceWarning, + conditions = append(conditions, v1alpha1.ApplicationCondition{ + Type: v1alpha1.ApplicationConditionRepeatedResourceWarning, Message: fmt.Sprintf("Resource %s appeared %d times among application resources.", key.String(), len(targets)), LastTransitionTime: &now, }) @@ -248,31 +281,29 @@ func DeduplicateTargetObjects( return result, conditions, nil } -func (m *appStateManager) getComparisonSettings(app *appv1.Application) (string, map[string]v1alpha1.ResourceOverride, diff.Normalizer, *settings.ResourcesFilter, error) { +// getComparisonSettings will return the system level settings related to the +// diff/normalization process. 
+func (m *appStateManager) getComparisonSettings() (string, map[string]v1alpha1.ResourceOverride, *settings.ResourcesFilter, error) { resourceOverrides, err := m.settingsMgr.GetResourceOverrides() if err != nil { - return "", nil, nil, nil, err + return "", nil, nil, err } appLabelKey, err := m.settingsMgr.GetAppInstanceLabelKey() if err != nil { - return "", nil, nil, nil, err - } - diffNormalizer, err := argo.NewDiffNormalizer(app.Spec.IgnoreDifferences, resourceOverrides) - if err != nil { - return "", nil, nil, nil, err + return "", nil, nil, err } resFilter, err := m.settingsMgr.GetResourcesFilter() if err != nil { - return "", nil, nil, nil, err + return "", nil, nil, err } - return appLabelKey, resourceOverrides, diffNormalizer, resFilter, nil + return appLabelKey, resourceOverrides, resFilter, nil } // verifyGnuPGSignature verifies the result of a GnuPG operation for a given git // revision. -func verifyGnuPGSignature(revision string, project *appv1.AppProject, manifestInfo *apiclient.ManifestResponse) []appv1.ApplicationCondition { +func verifyGnuPGSignature(revision string, project *v1alpha1.AppProject, manifestInfo *apiclient.ManifestResponse) []v1alpha1.ApplicationCondition { now := metav1.Now() - conditions := make([]appv1.ApplicationCondition, 0) + conditions := make([]v1alpha1.ApplicationCondition, 0) // We need to have some data in the verification result to parse, otherwise there was no signature if manifestInfo.VerifyResult != "" { verifyResult := gpg.ParseGitCommitVerification(manifestInfo.VerifyResult) @@ -307,74 +338,39 @@ func verifyGnuPGSignature(revision string, project *appv1.AppProject, manifestIn return conditions } -func (m *appStateManager) diffArrayCached(configArray []*unstructured.Unstructured, liveArray []*unstructured.Unstructured, cachedDiff []*appv1.ResourceDiff, opts ...diff.Option) (*diff.DiffResultList, error) { - numItems := len(configArray) - if len(liveArray) != numItems { - return nil, fmt.Errorf("left and right arrays 
have mismatched lengths") - } - - diffByKey := map[kube.ResourceKey]*appv1.ResourceDiff{} - for i := range cachedDiff { - res := cachedDiff[i] - diffByKey[kube.NewResourceKey(res.Group, res.Kind, res.Namespace, res.Name)] = cachedDiff[i] - } - - diffResultList := diff.DiffResultList{ - Diffs: make([]diff.DiffResult, numItems), - } - - for i := 0; i < numItems; i++ { - config := configArray[i] - live := liveArray[i] - resourceVersion := "" - var key kube.ResourceKey - if live != nil { - key = kube.GetResourceKey(live) - resourceVersion = live.GetResourceVersion() - } else { - key = kube.GetResourceKey(config) - } - var dr *diff.DiffResult - if cachedDiff, ok := diffByKey[key]; ok && cachedDiff.ResourceVersion == resourceVersion { - dr = &diff.DiffResult{ - NormalizedLive: []byte(cachedDiff.NormalizedLiveState), - PredictedLive: []byte(cachedDiff.PredictedLiveState), - Modified: cachedDiff.Modified, - } - } else { - res, err := diff.Diff(configArray[i], liveArray[i], opts...) - if err != nil { - return nil, err - } - dr = res - } - if dr != nil { - diffResultList.Diffs[i] = *dr - if dr.Modified { - diffResultList.Modified = true - } - } - } - - return &diffResultList, nil +func isManagedNamespace(ns *unstructured.Unstructured, app *v1alpha1.Application) bool { + return ns != nil && ns.GetKind() == kubeutil.NamespaceKind && ns.GetName() == app.Spec.Destination.Namespace && app.Spec.SyncPolicy != nil && app.Spec.SyncPolicy.ManagedNamespaceMetadata != nil } // CompareAppState compares application git state to the live app state, using the specified // revision and supplied source. If revision or overrides are empty, then compares against // revision and overrides in the app spec. 
-func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *appv1.AppProject, revision string, source v1alpha1.ApplicationSource, noCache bool, localManifests []string) *comparisonResult { +func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localManifests []string, hasMultipleSources bool) *comparisonResult { ts := stats.NewTimingStats() - appLabelKey, resourceOverrides, diffNormalizer, resFilter, err := m.getComparisonSettings(app) + appLabelKey, resourceOverrides, resFilter, err := m.getComparisonSettings() + ts.AddCheckpoint("settings_ms") // return unknown comparison result if basic comparison settings cannot be loaded if err != nil { - return &comparisonResult{ - syncStatus: &v1alpha1.SyncStatus{ - ComparedTo: appv1.ComparedTo{Source: source, Destination: app.Spec.Destination}, - Status: appv1.SyncStatusCodeUnknown, - }, - healthStatus: &appv1.HealthStatus{Status: health.HealthStatusUnknown}, + if hasMultipleSources { + return &comparisonResult{ + syncStatus: &v1alpha1.SyncStatus{ + ComparedTo: v1alpha1.ComparedTo{Destination: app.Spec.Destination, Sources: sources, IgnoreDifferences: app.Spec.IgnoreDifferences}, + Status: v1alpha1.SyncStatusCodeUnknown, + Revisions: revisions, + }, + healthStatus: &v1alpha1.HealthStatus{Status: health.HealthStatusUnknown}, + } + } else { + return &comparisonResult{ + syncStatus: &v1alpha1.SyncStatus{ + ComparedTo: v1alpha1.ComparedTo{Source: sources[0], Destination: app.Spec.Destination, IgnoreDifferences: app.Spec.IgnoreDifferences}, + Status: v1alpha1.SyncStatusCodeUnknown, + Revision: revisions[0], + }, + healthStatus: &v1alpha1.HealthStatus{Status: health.HealthStatusUnknown}, + } } } @@ -388,18 +384,29 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap failedToLoadObjs := false conditions := make([]v1alpha1.ApplicationCondition, 
0) - logCtx := log.WithField("application", app.Name) + logCtx := log.WithField("application", app.QualifiedName()) logCtx.Infof("Comparing app state (cluster: %s, namespace: %s)", app.Spec.Destination.Server, app.Spec.Destination.Namespace) var targetObjs []*unstructured.Unstructured - var manifestInfo *apiclient.ManifestResponse now := metav1.Now() + var manifestInfos []*apiclient.ManifestResponse + if len(localManifests) == 0 { - targetObjs, manifestInfo, err = m.getRepoObjs(app, source, appLabelKey, revision, noCache, verifySignature) + // If the length of revisions is not same as the length of sources, + // we take the revisions from the sources directly for all the sources. + if len(revisions) != len(sources) { + revisions = make([]string, 0) + for _, source := range sources { + revisions = append(revisions, source.TargetRevision) + } + } + + targetObjs, manifestInfos, err = m.getRepoObjs(app, sources, appLabelKey, revisions, noCache, noRevisionCache, verifySignature, project) if err != nil { targetObjs = make([]*unstructured.Unstructured, 0) - conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error(), LastTransitionTime: &now}) + msg := fmt.Sprintf("Failed to load target state: %s", err.Error()) + conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now}) failedToLoadObjs = true } } else { @@ -414,11 +421,13 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap targetObjs, err = unmarshalManifests(localManifests) if err != nil { targetObjs = make([]*unstructured.Unstructured, 0) - conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error(), LastTransitionTime: &now}) + msg := fmt.Sprintf("Failed to load local manifests: %s", err.Error()) + conditions = append(conditions, 
v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now}) failedToLoadObjs = true } } - manifestInfo = nil + // empty out manifestInfoMap + manifestInfos = make([]*apiclient.ManifestResponse, 0) } ts.AddCheckpoint("git_ms") @@ -429,7 +438,8 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap } targetObjs, dedupConditions, err := DeduplicateTargetObjects(app.Spec.Destination.Namespace, targetObjs, infoProvider) if err != nil { - conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error(), LastTransitionTime: &now}) + msg := fmt.Sprintf("Failed to deduplicate target state: %s", err.Error()) + conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now}) } conditions = append(conditions, dedupConditions...) for i := len(targetObjs) - 1; i >= 0; i-- { @@ -449,28 +459,77 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap liveObjByKey, err := m.liveStateCache.GetManagedLiveObjs(app, targetObjs) if err != nil { liveObjByKey = make(map[kubeutil.ResourceKey]*unstructured.Unstructured) - conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error(), LastTransitionTime: &now}) + msg := fmt.Sprintf("Failed to load live state: %s", err.Error()) + conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now}) failedToLoadObjs = true } - logCtx.Debugf("Retrieved lived manifests") + + logCtx.Debugf("Retrieved live manifests") // filter out all resources which are not permitted in the application project for k, v := range liveObjByKey { - if !project.IsLiveResourcePermitted(v, app.Spec.Destination.Server) { + permitted, err := 
project.IsLiveResourcePermitted(v, app.Spec.Destination.Server, app.Spec.Destination.Name, func(project string) ([]*v1alpha1.Cluster, error) { + clusters, err := m.db.GetProjectClusters(context.TODO(), project) + if err != nil { + return nil, fmt.Errorf("failed to get clusters for project %q: %v", project, err) + } + return clusters, nil + }) + + if err != nil { + msg := fmt.Sprintf("Failed to check if live resource %q is permitted in project %q: %s", k.String(), app.Spec.Project, err.Error()) + conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now}) + failedToLoadObjs = true + continue + } + + if !permitted { delete(liveObjByKey, k) } } + trackingMethod := argo.GetTrackingMethod(m.settingsMgr) + for _, liveObj := range liveObjByKey { if liveObj != nil { - appInstanceName := kubeutil.GetAppInstanceLabel(liveObj, appLabelKey) - if appInstanceName != "" && appInstanceName != app.Name { + appInstanceName := m.resourceTracking.GetAppName(liveObj, appLabelKey, trackingMethod) + if appInstanceName != "" && appInstanceName != app.InstanceName(m.namespace) { + fqInstanceName := strings.ReplaceAll(appInstanceName, "_", "/") conditions = append(conditions, v1alpha1.ApplicationCondition{ Type: v1alpha1.ApplicationConditionSharedResourceWarning, - Message: fmt.Sprintf("%s/%s is part of applications %s and %s", liveObj.GetKind(), liveObj.GetName(), app.Name, appInstanceName), + Message: fmt.Sprintf("%s/%s is part of applications %s and %s", liveObj.GetKind(), liveObj.GetName(), app.QualifiedName(), fqInstanceName), LastTransitionTime: &now, }) } + + // For the case when a namespace is managed with `managedNamespaceMetadata` AND it has resource tracking + // enabled (e.g. someone manually adds resource tracking labels or annotations), we need to do some + // bookkeeping in order to prevent the managed namespace from being pruned. 
+ // + // Live namespaces which are managed namespaces (i.e. application namespaces which are managed with + // CreateNamespace=true and has non-nil managedNamespaceMetadata) will (usually) not have a corresponding + // entry in source control. In order for the namespace not to risk being pruned, we'll need to generate a + // namespace which we can compare the live namespace with. For that, we'll do the same as is done in + // gitops-engine, the difference here being that we create a managed namespace which is only used for comparison. + if isManagedNamespace(liveObj, app) { + nsSpec := &v1.Namespace{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: kubeutil.NamespaceKind}, ObjectMeta: metav1.ObjectMeta{Name: liveObj.GetName()}} + managedNs, err := kubeutil.ToUnstructured(nsSpec) + + if err != nil { + conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error(), LastTransitionTime: &now}) + failedToLoadObjs = true + continue + } + + // No need to care about the return value here, we just want the modified managedNs + _, err = syncNamespace(m.resourceTracking, appLabelKey, trackingMethod, app.Name, app.Spec.SyncPolicy)(managedNs, liveObj) + if err != nil { + conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error(), LastTransitionTime: &now}) + failedToLoadObjs = true + } else { + targetObjs = append(targetObjs, managedNs) + } + } } } @@ -482,33 +541,51 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap log.Warnf("Could not get compare options from ConfigMap (assuming defaults): %v", err) compareOptions = settings.GetDefaultDiffOptions() } + manifestRevisions := make([]string, 0) - logCtx.Debugf("built managed objects list") - var diffResults *diff.DiffResultList - - diffOpts := []diff.Option{ - diff.WithNormalizer(diffNormalizer), - 
diff.IgnoreAggregatedRoles(compareOptions.IgnoreAggregatedRoles), + for _, manifestInfo := range manifestInfos { + manifestRevisions = append(manifestRevisions, manifestInfo.Revision) } - cachedDiff := make([]*appv1.ResourceDiff, 0) + // restore comparison using cached diff result if previous comparison was performed for the same revision - revisionChanged := manifestInfo == nil || app.Status.Sync.Revision != manifestInfo.Revision - specChanged := !reflect.DeepEqual(app.Status.Sync.ComparedTo, appv1.ComparedTo{Source: app.Spec.Source, Destination: app.Spec.Destination}) + revisionChanged := len(manifestInfos) != len(sources) || !reflect.DeepEqual(app.Status.Sync.Revisions, manifestRevisions) + specChanged := !reflect.DeepEqual(app.Status.Sync.ComparedTo, v1alpha1.ComparedTo{Source: app.Spec.GetSource(), Destination: app.Spec.Destination, Sources: sources, IgnoreDifferences: app.Spec.IgnoreDifferences}) _, refreshRequested := app.IsRefreshRequested() - noCache = noCache || refreshRequested || app.Status.Expired(m.statusRefreshTimeout) + noCache = noCache || refreshRequested || app.Status.Expired(m.statusRefreshTimeout) || specChanged || revisionChanged - if noCache || specChanged || revisionChanged || m.cache.GetAppManagedResources(app.Name, &cachedDiff) != nil { - // (rare) cache miss - diffResults, err = diff.DiffArray(reconciliation.Target, reconciliation.Live, diffOpts...) + diffConfigBuilder := argodiff.NewDiffConfigBuilder(). + WithDiffSettings(app.Spec.IgnoreDifferences, resourceOverrides, compareOptions.IgnoreAggregatedRoles). + WithTracking(appLabelKey, string(trackingMethod)) + + if noCache { + diffConfigBuilder.WithNoCache() } else { - diffResults, err = m.diffArrayCached(reconciliation.Target, reconciliation.Live, cachedDiff, diffOpts...) 
+ diffConfigBuilder.WithCache(m.cache, app.GetName()) } + gvkParser, err := m.getGVKParser(app.Spec.Destination.Server) + if err != nil { + conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionUnknownError, Message: err.Error(), LastTransitionTime: &now}) + } + diffConfigBuilder.WithGVKParser(gvkParser) + diffConfigBuilder.WithManager(common.ArgoCDSSAManager) + + // enable structured merge diff if application syncs with server-side apply + if app.Spec.SyncPolicy != nil && app.Spec.SyncPolicy.SyncOptions.HasOption("ServerSideApply=true") { + diffConfigBuilder.WithStructuredMergeDiff(true) + } + + // it is necessary to ignore the error at this point to avoid creating duplicated + // application conditions as argo.StateDiffs will validate this diffConfig again. + diffConfig, _ := diffConfigBuilder.Build() + + diffResults, err := argodiff.StateDiffs(reconciliation.Live, reconciliation.Target, diffConfig) if err != nil { diffResults = &diff.DiffResultList{} failedToLoadObjs = true - conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error(), LastTransitionTime: &now}) + msg := fmt.Sprintf("Failed to compare desired state to live state: %s", err.Error()) + conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now}) } ts.AddCheckpoint("diff_ms") @@ -526,6 +603,8 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap } gvk := obj.GroupVersionKind() + isSelfReferencedObj := m.isSelfReferencedObj(liveObj, targetObj, app.GetName(), appLabelKey, trackingMethod) + resState := v1alpha1.ResourceStatus{ Namespace: obj.GetNamespace(), Name: obj.GetName(), @@ -533,7 +612,10 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap Version: gvk.Version, Group: gvk.Group, Hook: hookutil.IsHook(obj), - RequiresPruning: 
targetObj == nil && liveObj != nil, + RequiresPruning: targetObj == nil && liveObj != nil && isSelfReferencedObj, + } + if targetObj != nil { + resState.SyncWave = int64(syncwaves.Wave(targetObj)) } var diffResult diff.DiffResult @@ -542,9 +624,22 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap } else { diffResult = diff.DiffResult{Modified: false, NormalizedLive: []byte("{}"), PredictedLive: []byte("{}")} } - if resState.Hook || ignore.Ignore(obj) || (targetObj != nil && hookutil.Skip(targetObj)) { - // For resource hooks or skipped resources, don't store sync status, and do not affect overall sync status - } else if diffResult.Modified || targetObj == nil || liveObj == nil { + + // For the case when a namespace is managed with `managedNamespaceMetadata` AND it has resource tracking + // enabled (e.g. someone manually adds resource tracking labels or annotations), we need to do some + // bookkeeping in order to ensure that it's not considered `OutOfSync` (since it does not exist in source + // control). + // + // This is in addition to the bookkeeping we do (see `isManagedNamespace` and its references) to prevent said + // namespace from being pruned. 
+ isManagedNs := isManagedNamespace(targetObj, app) && liveObj == nil + + if resState.Hook || ignore.Ignore(obj) || (targetObj != nil && hookutil.Skip(targetObj)) || !isSelfReferencedObj { + // For resource hooks, skipped resources or objects that may have + // been created by another controller with annotations copied from + // the source object, don't store sync status, and do not affect + // overall sync status + } else if !isManagedNs && (diffResult.Modified || targetObj == nil || liveObj == nil) { // Set resource state to OutOfSync since one of the following is true: // * target and live resource are different // * target resource not defined and live resource is extra @@ -565,7 +660,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap } if isNamespaced && obj.GetNamespace() == "" { - conditions = append(conditions, appv1.ApplicationCondition{Type: v1alpha1.ApplicationConditionInvalidSpecError, Message: fmt.Sprintf("Namespace for %s %s is missing.", obj.GetName(), gvk.String()), LastTransitionTime: &now}) + conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionInvalidSpecError, Message: fmt.Sprintf("Namespace for %s %s is missing.", obj.GetName(), gvk.String()), LastTransitionTime: &now}) } // we can't say anything about the status if we were unable to get the target objects @@ -594,33 +689,51 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap if failedToLoadObjs { syncCode = v1alpha1.SyncStatusCodeUnknown + } else if app.HasChangedManagedNamespaceMetadata() { + syncCode = v1alpha1.SyncStatusCodeOutOfSync } - syncStatus := v1alpha1.SyncStatus{ - ComparedTo: appv1.ComparedTo{ - Source: source, - Destination: app.Spec.Destination, - }, - Status: syncCode, + var revision string + + if !hasMultipleSources && len(manifestRevisions) > 0 { + revision = manifestRevisions[0] } - if manifestInfo != nil { - syncStatus.Revision = manifestInfo.Revision + var 
syncStatus v1alpha1.SyncStatus + if hasMultipleSources { + syncStatus = v1alpha1.SyncStatus{ + ComparedTo: v1alpha1.ComparedTo{ + Destination: app.Spec.Destination, + Sources: sources, + IgnoreDifferences: app.Spec.IgnoreDifferences, + }, + Status: syncCode, + Revisions: manifestRevisions, + } + } else { + syncStatus = v1alpha1.SyncStatus{ + ComparedTo: v1alpha1.ComparedTo{ + Destination: app.Spec.Destination, + Source: app.Spec.GetSource(), + IgnoreDifferences: app.Spec.IgnoreDifferences, + }, + Status: syncCode, + Revision: revision, + } } - ts.AddCheckpoint("sync_ms") - resSumForAppHealth, liveObjsForAppHealth := GetLiveObjsForApplicationHealth(managedResources, resourceSummaries) - healthStatus, err := argohealth.SetApplicationHealth(resSumForAppHealth, liveObjsForAppHealth, resourceOverrides, func(obj *unstructured.Unstructured) bool { - return !isSelfReferencedApp(app, kubeutil.GetObjectRef(obj)) - }) + ts.AddCheckpoint("sync_ms") + healthStatus, err := setApplicationHealth(managedResources, resourceSummaries, resourceOverrides, app, m.persistResourceHealth) if err != nil { - conditions = append(conditions, appv1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error(), LastTransitionTime: &now}) + conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: fmt.Sprintf("error setting app health: %s", err.Error()), LastTransitionTime: &now}) } // Git has already performed the signature verification via its GPG interface, and the result is available // in the manifest info received from the repository server. We now need to form our opinion about the result // and stop processing if we do not agree about the outcome. - if gpg.IsGPGEnabled() && verifySignature && manifestInfo != nil { - conditions = append(conditions, verifyGnuPGSignature(revision, project, manifestInfo)...) 
+ for _, manifestInfo := range manifestInfos { + if gpg.IsGPGEnabled() && verifySignature && manifestInfo != nil { + conditions = append(conditions, verifyGnuPGSignature(manifestInfo.Revision, project, manifestInfo)...) + } } compRes := comparisonResult{ @@ -629,35 +742,55 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap resources: resourceSummaries, managedResources: managedResources, reconciliationResult: reconciliation, - diffNormalizer: diffNormalizer, + diffConfig: diffConfig, diffResultList: diffResults, } - if manifestInfo != nil { - compRes.appSourceType = v1alpha1.ApplicationSourceType(manifestInfo.SourceType) + + if hasMultipleSources { + for _, manifestInfo := range manifestInfos { + compRes.appSourceTypes = append(compRes.appSourceTypes, v1alpha1.ApplicationSourceType(manifestInfo.SourceType)) + } + } else { + for _, manifestInfo := range manifestInfos { + compRes.appSourceType = v1alpha1.ApplicationSourceType(manifestInfo.SourceType) + break + } } - app.Status.SetConditions(conditions, map[appv1.ApplicationConditionType]bool{ - appv1.ApplicationConditionComparisonError: true, - appv1.ApplicationConditionSharedResourceWarning: true, - appv1.ApplicationConditionRepeatedResourceWarning: true, - appv1.ApplicationConditionExcludedResourceWarning: true, + + app.Status.SetConditions(conditions, map[v1alpha1.ApplicationConditionType]bool{ + v1alpha1.ApplicationConditionComparisonError: true, + v1alpha1.ApplicationConditionSharedResourceWarning: true, + v1alpha1.ApplicationConditionRepeatedResourceWarning: true, + v1alpha1.ApplicationConditionExcludedResourceWarning: true, }) ts.AddCheckpoint("health_ms") compRes.timings = ts.Timings() return &compRes } -func (m *appStateManager) persistRevisionHistory(app *v1alpha1.Application, revision string, source v1alpha1.ApplicationSource, startedAt metav1.Time) error { +func (m *appStateManager) persistRevisionHistory(app *v1alpha1.Application, revision string, source 
v1alpha1.ApplicationSource, revisions []string, sources []v1alpha1.ApplicationSource, hasMultipleSources bool, startedAt metav1.Time) error { var nextID int64 if len(app.Status.History) > 0 { nextID = app.Status.History.LastRevisionHistory().ID + 1 } - app.Status.History = append(app.Status.History, v1alpha1.RevisionHistory{ - Revision: revision, - DeployedAt: metav1.NewTime(time.Now().UTC()), - DeployStartedAt: &startedAt, - ID: nextID, - Source: source, - }) + + if hasMultipleSources { + app.Status.History = append(app.Status.History, v1alpha1.RevisionHistory{ + DeployedAt: metav1.NewTime(time.Now().UTC()), + DeployStartedAt: &startedAt, + ID: nextID, + Sources: sources, + Revisions: revisions, + }) + } else { + app.Status.History = append(app.Status.History, v1alpha1.RevisionHistory{ + Revision: revision, + DeployedAt: metav1.NewTime(time.Now().UTC()), + DeployStartedAt: &startedAt, + ID: nextID, + Source: source, + }) + } app.Status.History = app.Status.History.Trunc(app.Spec.GetRevisionHistoryLimit()) @@ -667,9 +800,9 @@ func (m *appStateManager) persistRevisionHistory(app *v1alpha1.Application, revi }, }) if err != nil { - return err + return fmt.Errorf("error marshaling revision history patch: %w", err) } - _, err = m.appclientset.ArgoprojV1alpha1().Applications(m.namespace).Patch(context.Background(), app.Name, types.MergePatchType, patch, metav1.PatchOptions{}) + _, err = m.appclientset.ArgoprojV1alpha1().Applications(app.Namespace).Patch(context.Background(), app.Name, types.MergePatchType, patch, metav1.PatchOptions{}) return err } @@ -686,18 +819,79 @@ func NewAppStateManager( metricsServer *metrics.MetricsServer, cache *appstatecache.Cache, statusRefreshTimeout time.Duration, + resourceTracking argo.ResourceTracking, + persistResourceHealth bool, ) AppStateManager { return &appStateManager{ - liveStateCache: liveStateCache, - cache: cache, - db: db, - appclientset: appclientset, - kubectl: kubectl, - repoClientset: repoClientset, - namespace: 
namespace, - settingsMgr: settingsMgr, - projInformer: projInformer, - metricsServer: metricsServer, - statusRefreshTimeout: statusRefreshTimeout, + liveStateCache: liveStateCache, + cache: cache, + db: db, + appclientset: appclientset, + kubectl: kubectl, + repoClientset: repoClientset, + namespace: namespace, + settingsMgr: settingsMgr, + projInformer: projInformer, + metricsServer: metricsServer, + statusRefreshTimeout: statusRefreshTimeout, + resourceTracking: resourceTracking, + persistResourceHealth: persistResourceHealth, } } + +// isSelfReferencedObj returns whether the given obj is managed by the application +// according to the values of the tracking id (aka app instance value) annotation. +// It returns true when all of the properties of the tracking id (app name, namespace, +// group and kind) match the properties of the live object, or if the tracking method +// used does not provide the required properties for matching. +// Reference: https://github.com/argoproj/argo-cd/issues/8683 +func (m *appStateManager) isSelfReferencedObj(live, config *unstructured.Unstructured, appName, appLabelKey string, trackingMethod v1alpha1.TrackingMethod) bool { + if live == nil { + return true + } + + // If tracking method doesn't contain required metadata for this check, + // we are not able to determine and just assume the object to be managed. + if trackingMethod == argo.TrackingMethodLabel { + return true + } + + // config != nil is the best-case scenario for constructing an accurate + // Tracking ID. `config` is the "desired state" (from git/helm/etc.). + // Using the desired state is important when there is an ApiGroup upgrade. + // When upgrading, the comparison must be made with the new tracking ID. 
+ // Example: + // live resource annotation will be: + // ingress-app:extensions/Ingress:default/some-ingress + // when it should be: + // ingress-app:networking.k8s.io/Ingress:default/some-ingress + // More details in: https://github.com/argoproj/argo-cd/pull/11012 + var aiv argo.AppInstanceValue + if config != nil { + aiv = argo.UnstructuredToAppInstanceValue(config, appName, "") + return isSelfReferencedObj(live, aiv) + } + + // If config is nil then compare the live resource with the value + // of the annotation. In this case, in order to validate if obj is + // managed by this application, the values from the annotation have + // to match the properties from the live object. Cluster scoped objects + // carry the app's destination namespace in the tracking annotation, + // but are unique in GVK + name combination. + appInstance := m.resourceTracking.GetAppInstance(live, appLabelKey, trackingMethod) + if appInstance != nil { + return isSelfReferencedObj(live, *appInstance) + } + return true +} + +// isSelfReferencedObj returns true if the given Tracking ID (`aiv`) matches +// the given object. It returns false when the ID doesn't match. This sometimes +// happens when a tracking label or annotation gets accidentally copied to a +// different resource. +func isSelfReferencedObj(obj *unstructured.Unstructured, aiv argo.AppInstanceValue) bool { + return (obj.GetNamespace() == aiv.Namespace || obj.GetNamespace() == "") && + obj.GetName() == aiv.Name && + obj.GetObjectKind().GroupVersionKind().Group == aiv.Group && + obj.GetObjectKind().GroupVersionKind().Kind == aiv.Kind +} diff --git a/controller/state_test.go b/controller/state_test.go index a7df7f162b950..dcb48e87fce9b 100644 --- a/controller/state_test.go +++ b/controller/state_test.go @@ -2,7 +2,6 @@ package controller import ( "encoding/json" - "io/ioutil" "os" "testing" "time" @@ -13,6 +12,8 @@ import ( . 
"github.com/argoproj/gitops-engine/pkg/utils/testing" "github.com/stretchr/testify/assert" v1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -21,6 +22,7 @@ import ( argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" "github.com/argoproj/argo-cd/v2/reposerver/apiclient" "github.com/argoproj/argo-cd/v2/test" + "github.com/argoproj/argo-cd/v2/util/argo" ) // TestCompareAppStateEmpty tests comparison when both git and live have no objects @@ -36,7 +38,96 @@ func TestCompareAppStateEmpty(t *testing.T) { managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured), } ctrl := newFakeController(&data) - compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, nil) + sources := make([]argoappv1.ApplicationSource, 0) + sources = append(sources, app.Spec.GetSource()) + revisions := make([]string, 0) + revisions = append(revisions, "") + compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false) + assert.NotNil(t, compRes) + assert.NotNil(t, compRes.syncStatus) + assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status) + assert.Len(t, compRes.resources, 0) + assert.Len(t, compRes.managedResources, 0) + assert.Len(t, app.Status.Conditions, 0) +} + +// TestCompareAppStateNamespaceMetadataDiffers tests comparison when managed namespace metadata differs +func TestCompareAppStateNamespaceMetadataDiffers(t *testing.T) { + app := newFakeApp() + app.Spec.SyncPolicy.ManagedNamespaceMetadata = &argoappv1.ManagedNamespaceMetadata{ + Labels: map[string]string{ + "foo": "bar", + }, + Annotations: map[string]string{ + "foo": "bar", + }, + } + app.Status.OperationState = &argoappv1.OperationState{ + SyncResult: &argoappv1.SyncOperationResult{}, + } + + data := fakeData{ + 
manifestResponse: &apiclient.ManifestResponse{ + Manifests: []string{}, + Namespace: test.FakeDestNamespace, + Server: test.FakeClusterURL, + Revision: "abc123", + }, + managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured), + } + ctrl := newFakeController(&data) + sources := make([]argoappv1.ApplicationSource, 0) + sources = append(sources, app.Spec.GetSource()) + revisions := make([]string, 0) + revisions = append(revisions, "") + compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false) + assert.NotNil(t, compRes) + assert.NotNil(t, compRes.syncStatus) + assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status) + assert.Len(t, compRes.resources, 0) + assert.Len(t, compRes.managedResources, 0) + assert.Len(t, app.Status.Conditions, 0) +} + +// TestCompareAppStateNamespaceMetadataIsTheSame tests comparison when managed namespace metadata is the same +func TestCompareAppStateNamespaceMetadataIsTheSame(t *testing.T) { + app := newFakeApp() + app.Spec.SyncPolicy.ManagedNamespaceMetadata = &argoappv1.ManagedNamespaceMetadata{ + Labels: map[string]string{ + "foo": "bar", + }, + Annotations: map[string]string{ + "foo": "bar", + }, + } + app.Status.OperationState = &argoappv1.OperationState{ + SyncResult: &argoappv1.SyncOperationResult{ + ManagedNamespaceMetadata: &argoappv1.ManagedNamespaceMetadata{ + Labels: map[string]string{ + "foo": "bar", + }, + Annotations: map[string]string{ + "foo": "bar", + }, + }, + }, + } + + data := fakeData{ + manifestResponse: &apiclient.ManifestResponse{ + Manifests: []string{}, + Namespace: test.FakeDestNamespace, + Server: test.FakeClusterURL, + Revision: "abc123", + }, + managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured), + } + ctrl := newFakeController(&data) + sources := make([]argoappv1.ApplicationSource, 0) + sources = append(sources, app.Spec.GetSource()) + revisions := make([]string, 0) + revisions = append(revisions, 
"") + compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false) assert.NotNil(t, compRes) assert.NotNil(t, compRes.syncStatus) assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status) @@ -59,7 +150,11 @@ func TestCompareAppStateMissing(t *testing.T) { managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured), } ctrl := newFakeController(&data) - compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, nil) + sources := make([]argoappv1.ApplicationSource, 0) + sources = append(sources, app.Spec.GetSource()) + revisions := make([]string, 0) + revisions = append(revisions, "") + compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false) assert.NotNil(t, compRes) assert.NotNil(t, compRes.syncStatus) assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status) @@ -86,7 +181,11 @@ func TestCompareAppStateExtra(t *testing.T) { }, } ctrl := newFakeController(&data) - compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, nil) + sources := make([]argoappv1.ApplicationSource, 0) + sources = append(sources, app.Spec.GetSource()) + revisions := make([]string, 0) + revisions = append(revisions, "") + compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false) assert.NotNil(t, compRes) assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status) assert.Equal(t, 1, len(compRes.resources)) @@ -112,7 +211,11 @@ func TestCompareAppStateHook(t *testing.T) { managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured), } ctrl := newFakeController(&data) - compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, nil) + sources := make([]argoappv1.ApplicationSource, 0) + sources = append(sources, app.Spec.GetSource()) + revisions := 
make([]string, 0) + revisions = append(revisions, "") + compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false) assert.NotNil(t, compRes) assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status) assert.Equal(t, 0, len(compRes.resources)) @@ -139,7 +242,11 @@ func TestCompareAppStateSkipHook(t *testing.T) { managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured), } ctrl := newFakeController(&data) - compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, nil) + sources := make([]argoappv1.ApplicationSource, 0) + sources = append(sources, app.Spec.GetSource()) + revisions := make([]string, 0) + revisions = append(revisions, "") + compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false) assert.NotNil(t, compRes) assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status) assert.Equal(t, 1, len(compRes.resources)) @@ -165,7 +272,11 @@ func TestCompareAppStateCompareOptionIgnoreExtraneous(t *testing.T) { } ctrl := newFakeController(&data) - compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, nil) + sources := make([]argoappv1.ApplicationSource, 0) + sources = append(sources, app.Spec.GetSource()) + revisions := make([]string, 0) + revisions = append(revisions, "") + compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false) assert.NotNil(t, compRes) assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status) @@ -193,7 +304,11 @@ func TestCompareAppStateExtraHook(t *testing.T) { }, } ctrl := newFakeController(&data) - compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, nil) + sources := make([]argoappv1.ApplicationSource, 0) + sources = append(sources, app.Spec.GetSource()) + revisions := make([]string, 0) + revisions = 
append(revisions, "") + compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false) assert.NotNil(t, compRes) assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status) @@ -203,6 +318,73 @@ func TestCompareAppStateExtraHook(t *testing.T) { assert.Equal(t, 0, len(app.Status.Conditions)) } +// TestAppRevisions tests that revisions are properly propagated for a single source app +func TestAppRevisionsSingleSource(t *testing.T) { + obj1 := NewPod() + obj1.SetNamespace(test.FakeDestNamespace) + data := fakeData{ + manifestResponse: &apiclient.ManifestResponse{ + Manifests: []string{toJSON(t, obj1)}, + Namespace: test.FakeDestNamespace, + Server: test.FakeClusterURL, + Revision: "abc123", + }, + managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured), + } + ctrl := newFakeController(&data) + + app := newFakeApp() + revisions := make([]string, 0) + revisions = append(revisions, "") + compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, app.Spec.GetSources(), false, false, nil, app.Spec.HasMultipleSources()) + assert.NotNil(t, compRes) + assert.NotNil(t, compRes.syncStatus) + assert.NotEmpty(t, compRes.syncStatus.Revision) + assert.Len(t, compRes.syncStatus.Revisions, 0) +} + +// TestAppRevisions tests that revisions are properly propagated for a multi source app +func TestAppRevisionsMultiSource(t *testing.T) { + obj1 := NewPod() + obj1.SetNamespace(test.FakeDestNamespace) + data := fakeData{ + manifestResponses: []*apiclient.ManifestResponse{ + { + Manifests: []string{toJSON(t, obj1)}, + Namespace: test.FakeDestNamespace, + Server: test.FakeClusterURL, + Revision: "abc123", + }, + { + Manifests: []string{toJSON(t, obj1)}, + Namespace: test.FakeDestNamespace, + Server: test.FakeClusterURL, + Revision: "def456", + }, + { + Manifests: []string{}, + Namespace: test.FakeDestNamespace, + Server: test.FakeClusterURL, + Revision: "ghi789", + }, + }, + managedLiveObjs: 
make(map[kube.ResourceKey]*unstructured.Unstructured), + } + ctrl := newFakeController(&data) + + app := newFakeMultiSourceApp() + revisions := make([]string, 0) + revisions = append(revisions, "") + compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, app.Spec.GetSources(), false, false, nil, app.Spec.HasMultipleSources()) + assert.NotNil(t, compRes) + assert.NotNil(t, compRes.syncStatus) + assert.Empty(t, compRes.syncStatus.Revision) + assert.Len(t, compRes.syncStatus.Revisions, 3) + assert.Equal(t, "abc123", compRes.syncStatus.Revisions[0]) + assert.Equal(t, "def456", compRes.syncStatus.Revisions[1]) + assert.Equal(t, "ghi789", compRes.syncStatus.Revisions[2]) +} + func toJSON(t *testing.T, obj *unstructured.Unstructured) string { data, err := json.Marshal(obj) assert.NoError(t, err) @@ -236,7 +418,11 @@ func TestCompareAppStateDuplicatedNamespacedResources(t *testing.T) { }, } ctrl := newFakeController(&data) - compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, nil) + sources := make([]argoappv1.ApplicationSource, 0) + sources = append(sources, app.Spec.GetSource()) + revisions := make([]string, 0) + revisions = append(revisions, "") + compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false) assert.NotNil(t, compRes) assert.Equal(t, 1, len(app.Status.Conditions)) @@ -246,6 +432,47 @@ func TestCompareAppStateDuplicatedNamespacedResources(t *testing.T) { assert.Equal(t, 4, len(compRes.resources)) } +func TestCompareAppStateManagedNamespaceMetadataWithLiveNsDoesNotGetPruned(t *testing.T) { + app := newFakeApp() + app.Spec.SyncPolicy = &argoappv1.SyncPolicy{ + ManagedNamespaceMetadata: &argoappv1.ManagedNamespaceMetadata{ + Labels: nil, + Annotations: nil, + }, + } + + ns := NewNamespace() + ns.SetName(test.FakeDestNamespace) + ns.SetNamespace(test.FakeDestNamespace) + ns.SetAnnotations(map[string]string{"argocd.argoproj.io/sync-options": 
"ServerSideApply=true"}) + + data := fakeData{ + manifestResponse: &apiclient.ManifestResponse{ + Manifests: []string{}, + Namespace: test.FakeDestNamespace, + Server: test.FakeClusterURL, + Revision: "abc123", + }, + managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{ + kube.GetResourceKey(ns): ns, + }, + } + ctrl := newFakeController(&data) + compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, []string{}, app.Spec.Sources, false, false, nil, false) + + assert.NotNil(t, compRes) + assert.Equal(t, 0, len(app.Status.Conditions)) + assert.NotNil(t, compRes) + assert.NotNil(t, compRes.syncStatus) + // Ensure that ns does not get pruned + assert.NotNil(t, compRes.reconciliationResult.Target[0]) + assert.Equal(t, compRes.reconciliationResult.Target[0].GetName(), ns.GetName()) + assert.Equal(t, compRes.reconciliationResult.Target[0].GetAnnotations(), ns.GetAnnotations()) + assert.Equal(t, compRes.reconciliationResult.Target[0].GetLabels(), ns.GetLabels()) + assert.Len(t, compRes.resources, 1) + assert.Len(t, compRes.managedResources, 1) +} + var defaultProj = argoappv1.AppProject{ ObjectMeta: metav1.ObjectMeta{ Name: "default", @@ -266,7 +493,7 @@ func TestSetHealth(t *testing.T) { app := newFakeApp() deployment := kube.MustToUnstructured(&v1.Deployment{ TypeMeta: metav1.TypeMeta{ - APIVersion: "apps/v1beta1", + APIVersion: "apps/v1", Kind: "Deployment", }, ObjectMeta: metav1.ObjectMeta{ @@ -287,9 +514,13 @@ func TestSetHealth(t *testing.T) { }, }) - compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, nil) + sources := make([]argoappv1.ApplicationSource, 0) + sources = append(sources, app.Spec.GetSource()) + revisions := make([]string, 0) + revisions = append(revisions, "") + compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false) - assert.Equal(t, compRes.healthStatus.Status, health.HealthStatusHealthy) + assert.Equal(t, 
health.HealthStatusHealthy, compRes.healthStatus.Status) } func TestSetHealthSelfReferencedApp(t *testing.T) { @@ -297,7 +528,7 @@ func TestSetHealthSelfReferencedApp(t *testing.T) { unstructuredApp := kube.MustToUnstructured(app) deployment := kube.MustToUnstructured(&v1.Deployment{ TypeMeta: metav1.TypeMeta{ - APIVersion: "apps/v1beta1", + APIVersion: "apps/v1", Kind: "Deployment", }, ObjectMeta: metav1.ObjectMeta{ @@ -319,9 +550,13 @@ func TestSetHealthSelfReferencedApp(t *testing.T) { }, }) - compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, nil) + sources := make([]argoappv1.ApplicationSource, 0) + sources = append(sources, app.Spec.GetSource()) + revisions := make([]string, 0) + revisions = append(revisions, "") + compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false) - assert.Equal(t, compRes.healthStatus.Status, health.HealthStatusHealthy) + assert.Equal(t, health.HealthStatusHealthy, compRes.healthStatus.Status) } func TestSetManagedResourcesWithOrphanedResources(t *testing.T) { @@ -373,7 +608,7 @@ func TestSetManagedResourcesWithResourcesOfAnotherApp(t *testing.T) { tree, err := ctrl.setAppManagedResources(app1, &comparisonResult{managedResources: make([]managedResource, 0)}) assert.NoError(t, err) - assert.Equal(t, len(tree.OrphanedNodes), 0) + assert.Equal(t, 0, len(tree.OrphanedNodes)) } func TestReturnUnknownComparisonStateOnSettingLoadError(t *testing.T) { @@ -389,7 +624,11 @@ func TestReturnUnknownComparisonStateOnSettingLoadError(t *testing.T) { }, }) - compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, nil) + sources := make([]argoappv1.ApplicationSource, 0) + sources = append(sources, app.Spec.GetSource()) + revisions := make([]string, 0) + revisions = append(revisions, "") + compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false) assert.Equal(t, 
health.HealthStatusUnknown, compRes.healthStatus.Status) assert.Equal(t, argoappv1.SyncStatusCodeUnknown, compRes.syncStatus.Status) @@ -398,6 +637,7 @@ func TestReturnUnknownComparisonStateOnSettingLoadError(t *testing.T) { func TestSetManagedResourcesKnownOrphanedResourceExceptions(t *testing.T) { proj := defaultProj.DeepCopy() proj.Spec.OrphanedResources = &argoappv1.OrphanedResourcesMonitorSettings{} + proj.Spec.SourceNamespaces = []string{"default"} app := newFakeApp() app.Namespace = "default" @@ -435,7 +675,7 @@ func Test_appStateManager_persistRevisionHistory(t *testing.T) { app.Spec.RevisionHistoryLimit = &i } addHistory := func() { - err := manager.persistRevisionHistory(app, "my-revision", argoappv1.ApplicationSource{}, metav1.Time{}) + err := manager.persistRevisionHistory(app, "my-revision", argoappv1.ApplicationSource{}, []string{}, []argoappv1.ApplicationSource{}, false, metav1.Time{}) assert.NoError(t, err) } addHistory() @@ -471,7 +711,7 @@ func Test_appStateManager_persistRevisionHistory(t *testing.T) { assert.Len(t, app.Status.History, 9) metav1NowTime := metav1.NewTime(time.Now()) - err := manager.persistRevisionHistory(app, "my-revision", argoappv1.ApplicationSource{}, metav1NowTime) + err := manager.persistRevisionHistory(app, "my-revision", argoappv1.ApplicationSource{}, []string{}, []argoappv1.ApplicationSource{}, false, metav1NowTime) assert.NoError(t, err) assert.Equal(t, app.Status.History.LastRevisionHistory().DeployStartedAt, &metav1NowTime) } @@ -479,7 +719,7 @@ func Test_appStateManager_persistRevisionHistory(t *testing.T) { // helper function to read contents of a file to string // panics on error func mustReadFile(path string) string { - b, err := ioutil.ReadFile(path) + b, err := os.ReadFile(path) if err != nil { panic(err.Error()) } @@ -508,9 +748,8 @@ var signedProj = argoappv1.AppProject{ } func TestSignedResponseNoSignatureRequired(t *testing.T) { - oldval := os.Getenv("ARGOCD_GPG_ENABLED") - os.Setenv("ARGOCD_GPG_ENABLED", 
"true") - defer os.Setenv("ARGOCD_GPG_ENABLED", oldval) + t.Setenv("ARGOCD_GPG_ENABLED", "true") + // We have a good signature response, but project does not require signed commits { app := newFakeApp() @@ -525,7 +764,11 @@ func TestSignedResponseNoSignatureRequired(t *testing.T) { managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured), } ctrl := newFakeController(&data) - compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, nil) + sources := make([]argoappv1.ApplicationSource, 0) + sources = append(sources, app.Spec.GetSource()) + revisions := make([]string, 0) + revisions = append(revisions, "") + compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false) assert.NotNil(t, compRes) assert.NotNil(t, compRes.syncStatus) assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status) @@ -547,7 +790,11 @@ func TestSignedResponseNoSignatureRequired(t *testing.T) { managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured), } ctrl := newFakeController(&data) - compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, nil) + sources := make([]argoappv1.ApplicationSource, 0) + sources = append(sources, app.Spec.GetSource()) + revisions := make([]string, 0) + revisions = append(revisions, "") + compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false) assert.NotNil(t, compRes) assert.NotNil(t, compRes.syncStatus) assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status) @@ -558,9 +805,7 @@ func TestSignedResponseNoSignatureRequired(t *testing.T) { } func TestSignedResponseSignatureRequired(t *testing.T) { - oldval := os.Getenv("ARGOCD_GPG_ENABLED") - os.Setenv("ARGOCD_GPG_ENABLED", "true") - defer os.Setenv("ARGOCD_GPG_ENABLED", oldval) + t.Setenv("ARGOCD_GPG_ENABLED", "true") // We have a good signature response, valid key, and 
signing is required - sync! { @@ -576,7 +821,11 @@ func TestSignedResponseSignatureRequired(t *testing.T) { managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured), } ctrl := newFakeController(&data) - compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, "", app.Spec.Source, false, nil) + sources := make([]argoappv1.ApplicationSource, 0) + sources = append(sources, app.Spec.GetSource()) + revisions := make([]string, 0) + revisions = append(revisions, "") + compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false) assert.NotNil(t, compRes) assert.NotNil(t, compRes.syncStatus) assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status) @@ -598,7 +847,11 @@ func TestSignedResponseSignatureRequired(t *testing.T) { managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured), } ctrl := newFakeController(&data) - compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, "abc123", app.Spec.Source, false, nil) + sources := make([]argoappv1.ApplicationSource, 0) + sources = append(sources, app.Spec.GetSource()) + revisions := make([]string, 0) + revisions = append(revisions, "abc123") + compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false) assert.NotNil(t, compRes) assert.NotNil(t, compRes.syncStatus) assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status) @@ -620,7 +873,11 @@ func TestSignedResponseSignatureRequired(t *testing.T) { managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured), } ctrl := newFakeController(&data) - compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, "abc123", app.Spec.Source, false, nil) + sources := make([]argoappv1.ApplicationSource, 0) + sources = append(sources, app.Spec.GetSource()) + revisions := make([]string, 0) + revisions = append(revisions, "abc123") + compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, 
revisions, sources, false, false, nil, false) assert.NotNil(t, compRes) assert.NotNil(t, compRes.syncStatus) assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status) @@ -642,7 +899,11 @@ func TestSignedResponseSignatureRequired(t *testing.T) { managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured), } ctrl := newFakeController(&data) - compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, "abc123", app.Spec.Source, false, nil) + sources := make([]argoappv1.ApplicationSource, 0) + sources = append(sources, app.Spec.GetSource()) + revisions := make([]string, 0) + revisions = append(revisions, "abc123") + compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false) assert.NotNil(t, compRes) assert.NotNil(t, compRes.syncStatus) assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status) @@ -667,7 +928,11 @@ func TestSignedResponseSignatureRequired(t *testing.T) { ctrl := newFakeController(&data) testProj := signedProj testProj.Spec.SignatureKeys[0].KeyID = "4AEE18F83AFDEB24" - compRes := ctrl.appStateManager.CompareAppState(app, &testProj, "abc123", app.Spec.Source, false, nil) + sources := make([]argoappv1.ApplicationSource, 0) + sources = append(sources, app.Spec.GetSource()) + revisions := make([]string, 0) + revisions = append(revisions, "abc123") + compRes := ctrl.appStateManager.CompareAppState(app, &testProj, revisions, sources, false, false, nil, false) assert.NotNil(t, compRes) assert.NotNil(t, compRes.syncStatus) assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status) @@ -692,7 +957,11 @@ func TestSignedResponseSignatureRequired(t *testing.T) { // it doesn't matter for our test whether local manifests are valid localManifests := []string{"foobar"} ctrl := newFakeController(&data) - compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, "abc123", app.Spec.Source, false, localManifests) + sources := 
make([]argoappv1.ApplicationSource, 0) + sources = append(sources, app.Spec.GetSource()) + revisions := make([]string, 0) + revisions = append(revisions, "abc123") + compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, localManifests, false) assert.NotNil(t, compRes) assert.NotNil(t, compRes.syncStatus) assert.Equal(t, argoappv1.SyncStatusCodeUnknown, compRes.syncStatus.Status) @@ -702,7 +971,7 @@ func TestSignedResponseSignatureRequired(t *testing.T) { assert.Contains(t, app.Status.Conditions[0].Message, "Cannot use local manifests") } - os.Setenv("ARGOCD_GPG_ENABLED", "false") + t.Setenv("ARGOCD_GPG_ENABLED", "false") // We have a bad signature response and signing would be required, but GPG subsystem is disabled - sync { app := newFakeApp() @@ -717,7 +986,11 @@ func TestSignedResponseSignatureRequired(t *testing.T) { managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured), } ctrl := newFakeController(&data) - compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, "abc123", app.Spec.Source, false, nil) + sources := make([]argoappv1.ApplicationSource, 0) + sources = append(sources, app.Spec.GetSource()) + revisions := make([]string, 0) + revisions = append(revisions, "abc123") + compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false) assert.NotNil(t, compRes) assert.NotNil(t, compRes.syncStatus) assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status) @@ -726,7 +999,7 @@ func TestSignedResponseSignatureRequired(t *testing.T) { assert.Len(t, app.Status.Conditions, 0) } - // Signature required and local manifests supplied and GPG subystem is disabled - sync + // Signature required and local manifests supplied and GPG subsystem is disabled - sync { app := newFakeApp() data := fakeData{ @@ -742,7 +1015,11 @@ func TestSignedResponseSignatureRequired(t *testing.T) { // it doesn't matter for our test whether local manifests 
are valid localManifests := []string{""} ctrl := newFakeController(&data) - compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, "abc123", app.Spec.Source, false, localManifests) + sources := make([]argoappv1.ApplicationSource, 0) + sources = append(sources, app.Spec.GetSource()) + revisions := make([]string, 0) + revisions = append(revisions, "abc123") + compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, localManifests, false) assert.NotNil(t, compRes) assert.NotNil(t, compRes.syncStatus) assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status) @@ -750,7 +1027,6 @@ func TestSignedResponseSignatureRequired(t *testing.T) { assert.Len(t, compRes.managedResources, 0) assert.Len(t, app.Status.Conditions, 0) } - } func TestComparisonResult_GetHealthStatus(t *testing.T) { @@ -770,3 +1046,180 @@ func TestComparisonResult_GetSyncStatus(t *testing.T) { assert.Equal(t, status, res.GetSyncStatus()) } + +func TestIsLiveResourceManaged(t *testing.T) { + managedObj := kube.MustToUnstructured(&corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "configmap1", + Namespace: "default", + Annotations: map[string]string{ + common.AnnotationKeyAppInstance: "guestbook:/ConfigMap:default/configmap1", + }, + }, + }) + managedObjWithLabel := kube.MustToUnstructured(&corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "configmap1", + Namespace: "default", + Labels: map[string]string{ + common.LabelKeyAppInstance: "guestbook", + }, + }, + }) + unmanagedObjWrongName := kube.MustToUnstructured(&corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "configmap2", + Namespace: "default", + Annotations: map[string]string{ + common.AnnotationKeyAppInstance: 
"guestbook:/ConfigMap:default/configmap1", + }, + }, + }) + unmanagedObjWrongKind := kube.MustToUnstructured(&corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "configmap2", + Namespace: "default", + Annotations: map[string]string{ + common.AnnotationKeyAppInstance: "guestbook:/Service:default/configmap2", + }, + }, + }) + unmanagedObjWrongGroup := kube.MustToUnstructured(&corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "configmap2", + Namespace: "default", + Annotations: map[string]string{ + common.AnnotationKeyAppInstance: "guestbook:apps/ConfigMap:default/configmap2", + }, + }, + }) + unmanagedObjWrongNamespace := kube.MustToUnstructured(&corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "configmap2", + Namespace: "default", + Annotations: map[string]string{ + common.AnnotationKeyAppInstance: "guestbook:/ConfigMap:fakens/configmap2", + }, + }, + }) + managedWrongAPIGroup := kube.MustToUnstructured(&networkingv1.Ingress{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "networking.k8s.io/v1", + Kind: "Ingress", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "some-ingress", + Namespace: "default", + Annotations: map[string]string{ + common.AnnotationKeyAppInstance: "guestbook:extensions/Ingress:default/some-ingress", + }, + }, + }) + ctrl := newFakeController(&fakeData{ + apps: []runtime.Object{app, &defaultProj}, + manifestResponse: &apiclient.ManifestResponse{ + Manifests: []string{}, + Namespace: test.FakeDestNamespace, + Server: test.FakeClusterURL, + Revision: "abc123", + }, + managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{ + kube.GetResourceKey(managedObj): managedObj, + kube.GetResourceKey(unmanagedObjWrongName): unmanagedObjWrongName, + kube.GetResourceKey(unmanagedObjWrongKind): 
unmanagedObjWrongKind, + kube.GetResourceKey(unmanagedObjWrongGroup): unmanagedObjWrongGroup, + kube.GetResourceKey(unmanagedObjWrongNamespace): unmanagedObjWrongNamespace, + }, + }) + + manager := ctrl.appStateManager.(*appStateManager) + appName := "guestbook" + + t.Run("will return true if trackingid matches the resource", func(t *testing.T) { + // given + t.Parallel() + configObj := managedObj.DeepCopy() + + // then + assert.True(t, manager.isSelfReferencedObj(managedObj, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel)) + assert.True(t, manager.isSelfReferencedObj(managedObj, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation)) + }) + t.Run("will return true if tracked with label", func(t *testing.T) { + // given + t.Parallel() + configObj := managedObjWithLabel.DeepCopy() + + // then + assert.True(t, manager.isSelfReferencedObj(managedObjWithLabel, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel)) + }) + t.Run("will handle if trackingId has wrong resource name and config is nil", func(t *testing.T) { + // given + t.Parallel() + + // then + assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongName, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel)) + assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongName, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation)) + }) + t.Run("will handle if trackingId has wrong resource group and config is nil", func(t *testing.T) { + // given + t.Parallel() + + // then + assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongGroup, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel)) + assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongGroup, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation)) + }) + t.Run("will handle if trackingId has wrong kind and config is nil", func(t *testing.T) { + // given + t.Parallel() 
+ + // then + assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongKind, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel)) + assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongKind, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation)) + }) + t.Run("will handle if trackingId has wrong namespace and config is nil", func(t *testing.T) { + // given + t.Parallel() + + // then + assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongNamespace, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel)) + assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongNamespace, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotationAndLabel)) + }) + t.Run("will return true if live is nil", func(t *testing.T) { + t.Parallel() + assert.True(t, manager.isSelfReferencedObj(nil, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation)) + }) + + t.Run("will handle upgrade in desired state APIGroup", func(t *testing.T) { + // given + t.Parallel() + config := managedWrongAPIGroup.DeepCopy() + delete(config.GetAnnotations(), common.AnnotationKeyAppInstance) + + // then + assert.True(t, manager.isSelfReferencedObj(managedWrongAPIGroup, config, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation)) + }) +} diff --git a/controller/sync.go b/controller/sync.go index ea1f49e0fbe36..783183c17fc7c 100644 --- a/controller/sync.go +++ b/controller/sync.go @@ -2,25 +2,31 @@ package controller import ( "context" + "encoding/json" "fmt" "os" "strconv" "sync/atomic" "time" + cdcommon "github.com/argoproj/argo-cd/v2/common" + "github.com/argoproj/gitops-engine/pkg/sync" "github.com/argoproj/gitops-engine/pkg/sync/common" "github.com/argoproj/gitops-engine/pkg/utils/kube" + jsonpatch "github.com/evanphx/json-patch" log "github.com/sirupsen/logrus" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 
"k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/managedfields" + "k8s.io/kubectl/pkg/util/openapi" - cdcommon "github.com/argoproj/argo-cd/v2/common" "github.com/argoproj/argo-cd/v2/controller/metrics" "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" listersv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/client/listers/application/v1alpha1" "github.com/argoproj/argo-cd/v2/util/argo" + "github.com/argoproj/argo-cd/v2/util/argo/diff" logutils "github.com/argoproj/argo-cd/v2/util/log" "github.com/argoproj/argo-cd/v2/util/lua" "github.com/argoproj/argo-cd/v2/util/rand" @@ -34,6 +40,22 @@ const ( EnvVarSyncWaveDelay = "ARGOCD_SYNC_WAVE_DELAY" ) +func (m *appStateManager) getOpenAPISchema(server string) (openapi.Resources, error) { + cluster, err := m.liveStateCache.GetClusterCache(server) + if err != nil { + return nil, err + } + return cluster.GetOpenAPISchema(), nil +} + +func (m *appStateManager) getGVKParser(server string) (*managedfields.GvkParser, error) { + cluster, err := m.liveStateCache.GetClusterCache(server) + if err != nil { + return nil, err + } + return cluster.GetGVKParser(), nil +} + func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha1.OperationState) { // Sync requests might be requested with ambiguous revisions (e.g. master, HEAD, v1.2.3). // This can change meaning when resuming operations (e.g a hook sync). 
After calculating a @@ -44,6 +66,8 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha var syncOp v1alpha1.SyncOperation var syncRes *v1alpha1.SyncOperationResult var source v1alpha1.ApplicationSource + var sources []v1alpha1.ApplicationSource + revisions := make([]string, 0) if state.Operation.Sync == nil { state.Phase = common.OperationFailed @@ -51,44 +75,89 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha return } syncOp = *state.Operation.Sync - if syncOp.Source == nil { - // normal sync case (where source is taken from app.spec.source) - source = app.Spec.Source + + // validates if it should fail the sync if it finds shared resources + hasSharedResource, sharedResourceMessage := hasSharedResourceCondition(app) + if syncOp.SyncOptions.HasOption("FailOnSharedResource=true") && + hasSharedResource { + state.Phase = common.OperationFailed + state.Message = fmt.Sprintf("Shared resouce found: %s", sharedResourceMessage) + return + } + + if syncOp.Source == nil || (syncOp.Sources != nil && len(syncOp.Sources) > 0) { + // normal sync case (where source is taken from app.spec.sources) + if app.Spec.HasMultipleSources() { + sources = app.Spec.Sources + } else { + // normal sync case (where source is taken from app.spec.source) + source = app.Spec.GetSource() + sources = make([]v1alpha1.ApplicationSource, 0) + } } else { // rollback case - source = *state.Operation.Sync.Source + if app.Spec.HasMultipleSources() { + sources = state.Operation.Sync.Sources + } else { + source = *state.Operation.Sync.Source + sources = make([]v1alpha1.ApplicationSource, 0) + } } if state.SyncResult != nil { syncRes = state.SyncResult revision = state.SyncResult.Revision + revisions = append(revisions, state.SyncResult.Revisions...) } else { syncRes = &v1alpha1.SyncOperationResult{} // status.operationState.syncResult.source. 
must be set properly since auto-sync relies // on this information to decide if it should sync (if source is different than the last // sync attempt) - syncRes.Source = source + if app.Spec.HasMultipleSources() { + syncRes.Sources = sources + } else { + syncRes.Source = source + } state.SyncResult = syncRes } - if revision == "" { - // if we get here, it means we did not remember a commit SHA which we should be syncing to. - // This typically indicates we are just about to begin a brand new sync/rollback operation. - // Take the value in the requested operation. We will resolve this to a SHA later. - revision = syncOp.Revision + // if we get here, it means we did not remember a commit SHA which we should be syncing to. + // This typically indicates we are just about to begin a brand new sync/rollback operation. + // Take the value in the requested operation. We will resolve this to a SHA later. + if app.Spec.HasMultipleSources() { + if len(revisions) != len(sources) { + revisions = syncOp.Revisions + } + } else { + if revision == "" { + revision = syncOp.Revision + } } - proj, err := argo.GetAppProject(&app.Spec, listersv1alpha1.NewAppProjectLister(m.projInformer.GetIndexer()), m.namespace, m.settingsMgr) + proj, err := argo.GetAppProject(app, listersv1alpha1.NewAppProjectLister(m.projInformer.GetIndexer()), m.namespace, m.settingsMgr, m.db, context.TODO()) if err != nil { state.Phase = common.OperationError state.Message = fmt.Sprintf("Failed to load application project: %v", err) return } - compareResult := m.CompareAppState(app, proj, revision, source, false, syncOp.Manifests) + if app.Spec.HasMultipleSources() { + revisions = syncRes.Revisions + } else { + revisions = append(revisions, revision) + } + + if !app.Spec.HasMultipleSources() { + sources = []v1alpha1.ApplicationSource{source} + revisions = []string{revision} + } + + compareResult := m.CompareAppState(app, proj, revisions, sources, false, true, syncOp.Manifests, app.Spec.HasMultipleSources()) // We 
now have a concrete commit SHA. Save this in the sync result revision so that we remember // what we should be syncing to when resuming operations. + syncRes.Revision = compareResult.syncStatus.Revision + syncRes.Revisions = compareResult.syncStatus.Revisions // If there are any comparison or spec errors error conditions do not perform the operation if errConditions := app.Status.GetConditions(map[v1alpha1.ApplicationConditionType]bool{ @@ -118,9 +187,15 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha } atomic.AddUint64(&syncIdPrefix, 1) - syncId := fmt.Sprintf("%05d-%s", syncIdPrefix, rand.RandString(5)) + randSuffix, err := rand.String(5) + if err != nil { + state.Phase = common.OperationError + state.Message = fmt.Sprintf("Failed generate random sync ID: %v", err) + return + } + syncId := fmt.Sprintf("%05d-%s", syncIdPrefix, randSuffix) - logEntry := log.WithFields(log.Fields{"application": app.Name, "syncId": syncId}) + logEntry := log.WithFields(log.Fields{"application": app.QualifiedName(), "syncId": syncId}) initialResourcesRes := make([]common.ResourceSyncResult, 0) for i, res := range syncRes.Resources { key := kube.ResourceKey{Group: res.Group, Kind: res.Kind, Namespace: res.Namespace, Name: res.Name} @@ -146,49 +221,97 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha prunePropagationPolicy = v1.DeletePropagationOrphan } - syncCtx, err := sync.NewSyncContext( - compareResult.syncStatus.Revision, - compareResult.reconciliationResult, - restConfig, - rawConfig, - m.kubectl, - app.Spec.Destination.Namespace, + openAPISchema, err := m.getOpenAPISchema(clst.Server) + if err != nil { + state.Phase = common.OperationError + state.Message = fmt.Sprintf("failed to load openAPISchema: %v", err) + return + } + + reconciliationResult := compareResult.reconciliationResult + + // if RespectIgnoreDifferences is enabled, it should normalize the target + // resources which in this case applies the live 
values in the configured + // ignore differences fields. + if syncOp.SyncOptions.HasOption("RespectIgnoreDifferences=true") { + patchedTargets, err := normalizeTargetResources(compareResult) + if err != nil { + state.Phase = common.OperationError + state.Message = fmt.Sprintf("Failed to normalize target resources: %s", err) + return + } + reconciliationResult.Target = patchedTargets + } + + appLabelKey, err := m.settingsMgr.GetAppInstanceLabelKey() + if err != nil { + log.Errorf("Could not get appInstanceLabelKey: %v", err) + return + } + trackingMethod := argo.GetTrackingMethod(m.settingsMgr) + + opts := []sync.SyncOpt{ sync.WithLogr(logutils.NewLogrusLogger(logEntry)), sync.WithHealthOverride(lua.ResourceHealthOverrides(resourceOverrides)), sync.WithPermissionValidator(func(un *unstructured.Unstructured, res *v1.APIResource) error { if !proj.IsGroupKindPermitted(un.GroupVersionKind().GroupKind(), res.Namespaced) { - return fmt.Errorf("Resource %s:%s is not permitted in project %s.", un.GroupVersionKind().Group, un.GroupVersionKind().Kind, proj.Name) + return fmt.Errorf("resource %s:%s is not permitted in project %s", un.GroupVersionKind().Group, un.GroupVersionKind().Kind, proj.Name) } - if res.Namespaced && !proj.IsDestinationPermitted(v1alpha1.ApplicationDestination{Namespace: un.GetNamespace(), Server: app.Spec.Destination.Server}) { - return fmt.Errorf("namespace %v is not permitted in project '%s'", un.GetNamespace(), proj.Name) + if res.Namespaced { + permitted, err := proj.IsDestinationPermitted(v1alpha1.ApplicationDestination{Namespace: un.GetNamespace(), Server: app.Spec.Destination.Server, Name: app.Spec.Destination.Name}, func(project string) ([]*v1alpha1.Cluster, error) { + return m.db.GetProjectClusters(context.TODO(), project) + }) + + if err != nil { + return err + } + + if !permitted { + return fmt.Errorf("namespace %v is not permitted in project '%s'", un.GetNamespace(), proj.Name) + } } return nil }), sync.WithOperationSettings(syncOp.DryRun, 
syncOp.Prune, syncOp.SyncStrategy.Force(), syncOp.IsApplyStrategy() || len(syncOp.Resources) > 0), sync.WithInitialState(state.Phase, state.Message, initialResourcesRes, state.StartedAt), sync.WithResourcesFilter(func(key kube.ResourceKey, target *unstructured.Unstructured, live *unstructured.Unstructured) bool { - return len(syncOp.Resources) == 0 || argo.ContainsSyncResource(key.Name, key.Namespace, schema.GroupVersionKind{Kind: key.Kind, Group: key.Group}, syncOp.Resources) + return (len(syncOp.Resources) == 0 || + argo.ContainsSyncResource(key.Name, key.Namespace, schema.GroupVersionKind{Kind: key.Kind, Group: key.Group}, syncOp.Resources)) && + m.isSelfReferencedObj(live, target, app.GetName(), appLabelKey, trackingMethod) }), sync.WithManifestValidation(!syncOp.SyncOptions.HasOption(common.SyncOptionsDisableValidation)), - sync.WithNamespaceCreation(syncOp.SyncOptions.HasOption("CreateNamespace=true"), func(un *unstructured.Unstructured) bool { - if un != nil && kube.GetAppInstanceLabel(un, cdcommon.LabelKeyAppInstance) != "" { - kube.UnsetLabel(un, cdcommon.LabelKeyAppInstance) - return true - } - return false - }), sync.WithSyncWaveHook(delayBetweenSyncWaves), sync.WithPruneLast(syncOp.SyncOptions.HasOption(common.SyncOptionPruneLast)), sync.WithResourceModificationChecker(syncOp.SyncOptions.HasOption("ApplyOutOfSyncOnly=true"), compareResult.diffResultList), sync.WithPrunePropagationPolicy(&prunePropagationPolicy), sync.WithReplace(syncOp.SyncOptions.HasOption(common.SyncOptionReplace)), + sync.WithServerSideApply(syncOp.SyncOptions.HasOption(common.SyncOptionServerSideApply)), + sync.WithServerSideApplyManager(cdcommon.ArgoCDSSAManager), + } + + if syncOp.SyncOptions.HasOption("CreateNamespace=true") { + opts = append(opts, sync.WithNamespaceModifier(syncNamespace(m.resourceTracking, appLabelKey, trackingMethod, app.Name, app.Spec.SyncPolicy))) + } + + syncCtx, cleanup, err := sync.NewSyncContext( + compareResult.syncStatus.Revision, + 
reconciliationResult, + restConfig, + rawConfig, + m.kubectl, + app.Spec.Destination.Namespace, + openAPISchema, + opts..., ) if err != nil { state.Phase = common.OperationError - state.Message = fmt.Sprintf("failed to record sync to history: %v", err) + state.Message = fmt.Sprintf("failed to initialize sync context: %v", err) + return } + defer cleanup() + start := time.Now() if state.Phase == common.OperationTerminating { @@ -199,7 +322,29 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha var resState []common.ResourceSyncResult state.Phase, state.Message, resState = syncCtx.GetState() state.SyncResult.Resources = nil + + if app.Spec.SyncPolicy != nil { + state.SyncResult.ManagedNamespaceMetadata = app.Spec.SyncPolicy.ManagedNamespaceMetadata + } + + var apiVersion []kube.APIResourceInfo for _, res := range resState { + augmentedMsg, err := argo.AugmentSyncMsg(res, func() ([]kube.APIResourceInfo, error) { + if apiVersion == nil { + _, apiVersion, err = m.liveStateCache.GetVersionsInfo(app.Spec.Destination.Server) + if err != nil { + return nil, fmt.Errorf("failed to get version info from the target cluster %q", app.Spec.Destination.Server) + } + } + return apiVersion, nil + }) + + if err != nil { + log.Errorf("using the original message since: %v", err) + } else { + res.Message = augmentedMsg + } + state.SyncResult.Resources = append(state.SyncResult.Resources, &v1alpha1.ResourceResult{ HookType: res.HookType, Group: res.ResourceKey.Group, @@ -217,7 +362,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha logEntry.WithField("duration", time.Since(start)).Info("sync/terminate complete") if !syncOp.DryRun && len(syncOp.Resources) == 0 && state.Phase.Successful() { - err := m.persistRevisionHistory(app, compareResult.syncStatus.Revision, source, state.StartedAt) + err := m.persistRevisionHistory(app, compareResult.syncStatus.Revision, source, compareResult.syncStatus.Revisions, 
compareResult.syncStatus.ComparedTo.Sources, app.Spec.HasMultipleSources(), state.StartedAt) if err != nil { state.Phase = common.OperationError state.Message = fmt.Sprintf("failed to record sync to history: %v", err) @@ -225,6 +370,159 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha } } +// normalizeTargetResources will apply the diff normalization in all live and target resources. +// Then it calculates the merge patch between the normalized live and the current live resources. +// Finally it applies the merge patch in the normalized target resources. This is done to ensure +// that target resources have the same ignored diff fields values from live ones to avoid them to +// be applied in the cluster. Returns the list of normalized target resources. +func normalizeTargetResources(cr *comparisonResult) ([]*unstructured.Unstructured, error) { + // normalize live and target resources + normalized, err := diff.Normalize(cr.reconciliationResult.Live, cr.reconciliationResult.Target, cr.diffConfig) + if err != nil { + return nil, err + } + patchedTargets := []*unstructured.Unstructured{} + for idx, live := range cr.reconciliationResult.Live { + normalizedTarget := normalized.Targets[idx] + if normalizedTarget == nil { + patchedTargets = append(patchedTargets, nil) + continue + } + originalTarget := cr.reconciliationResult.Target[idx] + if live == nil { + patchedTargets = append(patchedTargets, originalTarget) + continue + } + // calculate targetPatch between normalized and target resource + targetPatch, err := getMergePatch(normalizedTarget, originalTarget) + if err != nil { + return nil, err + } + + // check if there is a patch to apply. An empty patch is identified by a '{}' string. 
+ if len(targetPatch) > 2 { + livePatch, err := getMergePatch(normalized.Lives[idx], live) + if err != nil { + return nil, err + } + // generate a minimal patch that uses the fields from targetPatch (template) + // with livePatch values + patch, err := compilePatch(targetPatch, livePatch) + if err != nil { + return nil, err + } + normalizedTarget, err = applyMergePatch(normalizedTarget, patch) + if err != nil { + return nil, err + } + } else { + // if there is no patch just use the original target + normalizedTarget = originalTarget + } + patchedTargets = append(patchedTargets, normalizedTarget) + } + return patchedTargets, nil +} + +// compilePatch will generate a patch using the fields from templatePatch with +// the values from valuePatch. +func compilePatch(templatePatch, valuePatch []byte) ([]byte, error) { + templateMap := make(map[string]interface{}) + err := json.Unmarshal(templatePatch, &templateMap) + if err != nil { + return nil, err + } + valueMap := make(map[string]interface{}) + err = json.Unmarshal(valuePatch, &valueMap) + if err != nil { + return nil, err + } + resultMap := intersectMap(templateMap, valueMap) + return json.Marshal(resultMap) +} + +// intersectMap will return map with the fields intersection from the 2 provided +// maps populated with the valueMap values. 
+func intersectMap(templateMap, valueMap map[string]interface{}) map[string]interface{} { + result := make(map[string]interface{}) + for k, v := range templateMap { + if innerTMap, ok := v.(map[string]interface{}); ok { + if innerVMap, ok := valueMap[k].(map[string]interface{}); ok { + result[k] = intersectMap(innerTMap, innerVMap) + } + } else if innerTSlice, ok := v.([]interface{}); ok { + if innerVSlice, ok := valueMap[k].([]interface{}); ok { + items := []interface{}{} + for idx, innerTSliceValue := range innerTSlice { + if idx < len(innerVSlice) { + if tSliceValueMap, ok := innerTSliceValue.(map[string]interface{}); ok { + if vSliceValueMap, ok := innerVSlice[idx].(map[string]interface{}); ok { + item := intersectMap(tSliceValueMap, vSliceValueMap) + items = append(items, item) + } + } else { + items = append(items, innerVSlice[idx]) + } + } + } + if len(items) > 0 { + result[k] = items + } + } + } else { + if _, ok := valueMap[k]; ok { + result[k] = valueMap[k] + } + } + } + return result +} + +// getMergePatch calculates and returns the patch between the original and the +// modified unstructured objects. +func getMergePatch(original, modified *unstructured.Unstructured) ([]byte, error) { + originalJSON, err := original.MarshalJSON() + if err != nil { + return nil, err + } + modifiedJSON, err := modified.MarshalJSON() + if err != nil { + return nil, err + } + return jsonpatch.CreateMergePatch(originalJSON, modifiedJSON) +} + +// applyMergePatch will apply the given patch to the obj and return the patched +// unstructured object. 
+func applyMergePatch(obj *unstructured.Unstructured, patch []byte) (*unstructured.Unstructured, error) { + originalJSON, err := obj.MarshalJSON() + if err != nil { + return nil, err + } + patchedJSON, err := jsonpatch.MergePatch(originalJSON, patch) + if err != nil { + return nil, err + } + patchedObj := &unstructured.Unstructured{} + _, _, err = unstructured.UnstructuredJSONScheme.Decode(patchedJSON, nil, patchedObj) + if err != nil { + return nil, err + } + return patchedObj, nil +} + +// hasSharedResourceCondition will check if the Application has any resource that has already +// been synced by another Application. If the resource is found in another Application it returns +// true along with a human readable message of which specific resource has this condition. +func hasSharedResourceCondition(app *v1alpha1.Application) (bool, string) { + for _, condition := range app.Status.Conditions { + if condition.Type == v1alpha1.ApplicationConditionSharedResourceWarning { + return true, condition.Message + } + } + return false, "" +} + // delayBetweenSyncWaves is a gitops-engine SyncWaveHook which introduces an artificial delay // between each sync wave. We introduce an artificial delay in order give other controllers a // _chance_ to react to the spec change that we just applied. This is important because without diff --git a/controller/sync_namespace.go b/controller/sync_namespace.go new file mode 100644 index 0000000000000..9203e27f502e7 --- /dev/null +++ b/controller/sync_namespace.go @@ -0,0 +1,55 @@ +package controller + +import ( + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/argoproj/argo-cd/v2/util/argo" + gitopscommon "github.com/argoproj/gitops-engine/pkg/sync/common" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +// syncNamespace determine if Argo CD should create and/or manage the namespace +// where the application will be deployed. 
+func syncNamespace(resourceTracking argo.ResourceTracking, appLabelKey string, trackingMethod v1alpha1.TrackingMethod, appName string, syncPolicy *v1alpha1.SyncPolicy) func(m, l *unstructured.Unstructured) (bool, error) { + // This function must return true for the managed namespace to be synced. + return func(managedNs, liveNs *unstructured.Unstructured) (bool, error) { + if managedNs == nil { + return false, nil + } + + isNewNamespace := liveNs == nil + isManagedNamespace := syncPolicy != nil && syncPolicy.ManagedNamespaceMetadata != nil + + // should only sync the namespace if it doesn't exist in k8s or if + // syncPolicy is defined to manage the metadata + if !isManagedNamespace && !isNewNamespace { + return false, nil + } + + if isManagedNamespace { + managedNamespaceMetadata := syncPolicy.ManagedNamespaceMetadata + managedNs.SetLabels(managedNamespaceMetadata.Labels) + // managedNamespaceMetadata relies on SSA in order to avoid overriding + // existing labels and annotations in namespaces + managedNs.SetAnnotations(appendSSAAnnotation(managedNamespaceMetadata.Annotations)) + } + + // TODO: https://github.com/argoproj/argo-cd/issues/11196 + // err := resourceTracking.SetAppInstance(managedNs, appLabelKey, appName, "", trackingMethod) + // if err != nil { + // return false, fmt.Errorf("failed to set app instance tracking on the namespace %s: %s", managedNs.GetName(), err) + // } + + return true, nil + } +} + +// appendSSAAnnotation will set the managed namespace to be synced +// with server-side apply +func appendSSAAnnotation(in map[string]string) map[string]string { + r := map[string]string{} + for k, v := range in { + r[k] = v + } + r[gitopscommon.AnnotationSyncOptions] = gitopscommon.SyncOptionServerSideApply + return r +} diff --git a/controller/sync_namespace_test.go b/controller/sync_namespace_test.go new file mode 100644 index 0000000000000..e18f52800bf03 --- /dev/null +++ b/controller/sync_namespace_test.go @@ -0,0 +1,261 @@ +package controller + 
+import ( + "github.com/argoproj/argo-cd/v2/common" + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/argoproj/argo-cd/v2/util/argo" + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + "testing" +) + +func createFakeNamespace(uid string, resourceVersion string, labels map[string]string, annotations map[string]string) *unstructured.Unstructured { + un := unstructured.Unstructured{} + un.SetUID(types.UID(uid)) + un.SetResourceVersion(resourceVersion) + un.SetLabels(labels) + un.SetAnnotations(annotations) + un.SetKind("Namespace") + un.SetName("some-namespace") + return &un +} + +func Test_shouldNamespaceSync(t *testing.T) { + tests := []struct { + name string + syncPolicy *v1alpha1.SyncPolicy + managedNs *unstructured.Unstructured + liveNs *unstructured.Unstructured + expected bool + expectedLabels map[string]string + expectedAnnotations map[string]string + }{ + { + name: "liveNs is nil and syncPolicy is nil", + expected: false, + managedNs: nil, + liveNs: nil, + syncPolicy: nil, + }, + { + name: "liveNs is nil and syncPolicy is not nil", + expected: false, + managedNs: nil, + liveNs: nil, + syncPolicy: &v1alpha1.SyncPolicy{ + ManagedNamespaceMetadata: nil, + }, + }, + { + name: "liveNs is nil and syncPolicy has labels and annotations", + expected: false, + managedNs: nil, + liveNs: nil, + expectedLabels: map[string]string{"my-cool-label": "some-value"}, + expectedAnnotations: map[string]string{"my-cool-annotation": "some-value"}, + syncPolicy: &v1alpha1.SyncPolicy{ + ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{ + Labels: map[string]string{"my-cool-label": "some-value"}, + Annotations: map[string]string{"my-cool-annotation": "some-value"}, + }, + }, + }, + { + name: "namespace does not yet exist and managedNamespaceMetadata nil", + expected: true, + expectedLabels: map[string]string{}, + expectedAnnotations: map[string]string{}, + managedNs: 
createFakeNamespace("", "", map[string]string{}, map[string]string{}), + liveNs: nil, + syncPolicy: &v1alpha1.SyncPolicy{ + ManagedNamespaceMetadata: nil, + }, + }, + { + name: "namespace does not yet exist and managedNamespaceMetadata not nil", + expected: true, + expectedAnnotations: map[string]string{"argocd.argoproj.io/sync-options": "ServerSideApply=true"}, + managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}), + liveNs: nil, + syncPolicy: &v1alpha1.SyncPolicy{ + ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{}, + }, + }, + { + name: "namespace does not yet exist and managedNamespaceMetadata has empty labels map", + expected: true, + expectedLabels: map[string]string{}, + expectedAnnotations: map[string]string{"argocd.argoproj.io/sync-options": "ServerSideApply=true"}, + managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}), + liveNs: nil, + syncPolicy: &v1alpha1.SyncPolicy{ + ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{ + Labels: map[string]string{}, + }, + }, + }, + { + name: "namespace does not yet exist and managedNamespaceMetadata has empty annotations map", + expected: true, + expectedAnnotations: map[string]string{"argocd.argoproj.io/sync-options": "ServerSideApply=true"}, + managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}), + liveNs: nil, + syncPolicy: &v1alpha1.SyncPolicy{ + ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{ + Annotations: map[string]string{}, + }, + }, + }, + { + name: "namespace does not yet exist and managedNamespaceMetadata has empty annotations and labels map", + expected: true, + expectedLabels: map[string]string{}, + expectedAnnotations: map[string]string{"argocd.argoproj.io/sync-options": "ServerSideApply=true"}, + managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}), + liveNs: nil, + syncPolicy: &v1alpha1.SyncPolicy{ + ManagedNamespaceMetadata: 
&v1alpha1.ManagedNamespaceMetadata{ + Labels: map[string]string{}, + Annotations: map[string]string{}, + }, + }, + }, + { + name: "namespace does not yet exist and managedNamespaceMetadata has labels", + expected: true, + expectedLabels: map[string]string{"my-cool-label": "some-value"}, + expectedAnnotations: map[string]string{"argocd.argoproj.io/sync-options": "ServerSideApply=true"}, + managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}), + liveNs: nil, + syncPolicy: &v1alpha1.SyncPolicy{ + ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{ + Labels: map[string]string{"my-cool-label": "some-value"}, + Annotations: nil, + }, + }, + }, + { + name: "namespace does not yet exist and managedNamespaceMetadata has annotations", + expected: true, + expectedAnnotations: map[string]string{"my-cool-annotation": "some-value", "argocd.argoproj.io/sync-options": "ServerSideApply=true"}, + managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}), + liveNs: nil, + syncPolicy: &v1alpha1.SyncPolicy{ + ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{ + Labels: nil, + Annotations: map[string]string{"my-cool-annotation": "some-value"}, + }, + }, + }, + { + name: "namespace does not yet exist and managedNamespaceMetadata has annotations and labels", + expected: true, + expectedLabels: map[string]string{"my-cool-label": "some-value"}, + expectedAnnotations: map[string]string{"my-cool-annotation": "some-value", "argocd.argoproj.io/sync-options": "ServerSideApply=true"}, + managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}), + liveNs: nil, + syncPolicy: &v1alpha1.SyncPolicy{ + ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{ + Labels: map[string]string{"my-cool-label": "some-value"}, + Annotations: map[string]string{"my-cool-annotation": "some-value"}, + }, + }, + }, + { + name: "namespace exists with no labels or annotations and managedNamespaceMetadata has labels", + 
expected: true, + expectedLabels: map[string]string{"my-cool-label": "some-value"}, + expectedAnnotations: map[string]string{"argocd.argoproj.io/sync-options": "ServerSideApply=true"}, + managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}), + liveNs: createFakeNamespace("something", "1", map[string]string{}, map[string]string{}), + syncPolicy: &v1alpha1.SyncPolicy{ + ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{ + Labels: map[string]string{"my-cool-label": "some-value"}, + }, + }, + }, + { + name: "namespace exists with no labels or annotations and managedNamespaceMetadata has annotations", + expected: true, + expectedAnnotations: map[string]string{"my-cool-annotation": "some-value", "argocd.argoproj.io/sync-options": "ServerSideApply=true"}, + managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}), + liveNs: createFakeNamespace("something", "1", map[string]string{}, map[string]string{}), + syncPolicy: &v1alpha1.SyncPolicy{ + ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{ + Annotations: map[string]string{"my-cool-annotation": "some-value"}, + }, + }, + }, + { + name: "namespace exists with no labels or annotations and managedNamespaceMetadata has annotations and labels", + expected: true, + expectedLabels: map[string]string{"my-cool-label": "some-value"}, + expectedAnnotations: map[string]string{"my-cool-annotation": "some-value", "argocd.argoproj.io/sync-options": "ServerSideApply=true"}, + managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}), + liveNs: createFakeNamespace("something", "1", map[string]string{}, map[string]string{}), + syncPolicy: &v1alpha1.SyncPolicy{ + ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{ + Labels: map[string]string{"my-cool-label": "some-value"}, + Annotations: map[string]string{"my-cool-annotation": "some-value"}, + }, + }, + }, + { + name: "namespace exists with labels and managedNamespaceMetadata has 
mismatching labels", + expected: true, + expectedAnnotations: map[string]string{"argocd.argoproj.io/sync-options": "ServerSideApply=true"}, + expectedLabels: map[string]string{"my-cool-label": "some-value", "my-other-label": "some-other-value"}, + managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}), + liveNs: createFakeNamespace("something", "1", map[string]string{"my-cool-label": "some-value"}, map[string]string{}), + syncPolicy: &v1alpha1.SyncPolicy{ + ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{ + Labels: map[string]string{"my-cool-label": "some-value", "my-other-label": "some-other-value"}, + Annotations: map[string]string{}, + }, + }, + }, + { + name: "namespace exists with annotations and managedNamespaceMetadata has mismatching annotations", + expected: true, + expectedLabels: map[string]string{}, + expectedAnnotations: map[string]string{"my-cool-annotation": "some-value", "argocd.argoproj.io/sync-options": "ServerSideApply=true"}, + managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}), + liveNs: createFakeNamespace("something", "1", map[string]string{}, map[string]string{"my-cool-annotation": "some-value", "my-other-annotation": "some-other-value"}), + syncPolicy: &v1alpha1.SyncPolicy{ + ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{ + Labels: map[string]string{}, + Annotations: map[string]string{"my-cool-annotation": "some-value"}, + }, + }, + }, + { + name: "namespace exists with annotations and labels managedNamespaceMetadata has mismatching annotations and labels", + expected: true, + expectedLabels: map[string]string{"my-cool-label": "some-value", "my-other-label": "some-other-value"}, + expectedAnnotations: map[string]string{"my-cool-annotation": "some-value", "my-other-annotation": "some-other-value", "argocd.argoproj.io/sync-options": "ServerSideApply=true"}, + managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}), + liveNs: 
createFakeNamespace("something", "1", map[string]string{"my-cool-label": "some-value"}, map[string]string{"my-cool-annotation": "some-value"}), + syncPolicy: &v1alpha1.SyncPolicy{ + ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{ + Labels: map[string]string{"my-cool-label": "some-value", "my-other-label": "some-other-value"}, + Annotations: map[string]string{"my-cool-annotation": "some-value", "my-other-annotation": "some-other-value"}, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual, err := syncNamespace(argo.NewResourceTracking(), common.LabelKeyAppInstance, argo.TrackingMethodAnnotation, "some-app", tt.syncPolicy)(tt.managedNs, tt.liveNs) + assert.NoError(t, err) + + if tt.managedNs != nil { + assert.Equal(t, tt.expectedLabels, tt.managedNs.GetLabels()) + assert.Equal(t, tt.expectedAnnotations, tt.managedNs.GetAnnotations()) + } + + assert.Equalf(t, tt.expected, actual, "syncNamespace(%v)", tt.syncPolicy) + }) + } +} diff --git a/controller/sync_test.go b/controller/sync_test.go index e42468d6a6dbf..da68e5d9a3dfe 100644 --- a/controller/sync_test.go +++ b/controller/sync_test.go @@ -2,18 +2,22 @@ package controller import ( "context" - "os" "testing" + "github.com/argoproj/gitops-engine/pkg/sync" + "github.com/argoproj/gitops-engine/pkg/sync/common" "github.com/argoproj/gitops-engine/pkg/utils/kube" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "github.com/argoproj/argo-cd/v2/controller/testdata" "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" "github.com/argoproj/argo-cd/v2/reposerver/apiclient" "github.com/argoproj/argo-cd/v2/test" + "github.com/argoproj/argo-cd/v2/util/argo/diff" ) func TestPersistRevisionHistory(t *testing.T) { @@ -45,15 +49,55 @@ func TestPersistRevisionHistory(t *testing.T) { }} 
ctrl.appStateManager.SyncAppState(app, opState) // Ensure we record spec.source into sync result - assert.Equal(t, app.Spec.Source, opState.SyncResult.Source) + assert.Equal(t, app.Spec.GetSource(), opState.SyncResult.Source) updatedApp, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Get(context.Background(), app.Name, v1.GetOptions{}) assert.Nil(t, err) assert.Equal(t, 1, len(updatedApp.Status.History)) - assert.Equal(t, app.Spec.Source, updatedApp.Status.History[0].Source) + assert.Equal(t, app.Spec.GetSource(), updatedApp.Status.History[0].Source) assert.Equal(t, "abc123", updatedApp.Status.History[0].Revision) } +func TestPersistManagedNamespaceMetadataState(t *testing.T) { + app := newFakeApp() + app.Status.OperationState = nil + app.Status.History = nil + app.Spec.SyncPolicy.ManagedNamespaceMetadata = &v1alpha1.ManagedNamespaceMetadata{ + Labels: map[string]string{ + "foo": "bar", + }, + Annotations: map[string]string{ + "foo": "bar", + }, + } + + defaultProject := &v1alpha1.AppProject{ + ObjectMeta: v1.ObjectMeta{ + Namespace: test.FakeArgoCDNamespace, + Name: "default", + }, + } + data := fakeData{ + apps: []runtime.Object{app, defaultProject}, + manifestResponse: &apiclient.ManifestResponse{ + Manifests: []string{}, + Namespace: test.FakeDestNamespace, + Server: test.FakeClusterURL, + Revision: "abc123", + }, + managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured), + } + ctrl := newFakeController(&data) + + // Sync with source unspecified + opState := &v1alpha1.OperationState{Operation: v1alpha1.Operation{ + Sync: &v1alpha1.SyncOperation{}, + }} + ctrl.appStateManager.SyncAppState(app, opState) + // Ensure we record spec.syncPolicy.managedNamespaceMetadata into sync result + assert.Equal(t, app.Spec.SyncPolicy.ManagedNamespaceMetadata, opState.SyncResult.ManagedNamespaceMetadata) +} + func TestPersistRevisionHistoryRollback(t *testing.T) { app := newFakeApp() app.Status.OperationState = nil @@ -134,11 
+178,211 @@ func TestSyncComparisonError(t *testing.T) { opState := &v1alpha1.OperationState{Operation: v1alpha1.Operation{ Sync: &v1alpha1.SyncOperation{}, }} - os.Setenv("ARGOCD_GPG_ENABLED", "true") - defer os.Setenv("ARGOCD_GPG_ENABLED", "false") + t.Setenv("ARGOCD_GPG_ENABLED", "true") ctrl.appStateManager.SyncAppState(app, opState) conditions := app.Status.GetConditions(map[v1alpha1.ApplicationConditionType]bool{v1alpha1.ApplicationConditionComparisonError: true}) assert.NotEmpty(t, conditions) assert.Equal(t, "abc123", opState.SyncResult.Revision) } + +func TestAppStateManager_SyncAppState(t *testing.T) { + type fixture struct { + project *v1alpha1.AppProject + application *v1alpha1.Application + controller *ApplicationController + } + + setup := func() *fixture { + app := newFakeApp() + app.Status.OperationState = nil + app.Status.History = nil + + project := &v1alpha1.AppProject{ + ObjectMeta: v1.ObjectMeta{ + Namespace: test.FakeArgoCDNamespace, + Name: "default", + }, + Spec: v1alpha1.AppProjectSpec{ + SignatureKeys: []v1alpha1.SignatureKey{{KeyID: "test"}}, + }, + } + data := fakeData{ + apps: []runtime.Object{app, project}, + manifestResponse: &apiclient.ManifestResponse{ + Manifests: []string{}, + Namespace: test.FakeDestNamespace, + Server: test.FakeClusterURL, + Revision: "abc123", + }, + managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured), + } + ctrl := newFakeController(&data) + + return &fixture{ + project: project, + application: app, + controller: ctrl, + } + } + + t.Run("will fail the sync if finds shared resources", func(t *testing.T) { + // given + t.Parallel() + f := setup() + syncErrorMsg := "deployment already applied by another application" + condition := v1alpha1.ApplicationCondition{ + Type: v1alpha1.ApplicationConditionSharedResourceWarning, + Message: syncErrorMsg, + } + f.application.Status.Conditions = append(f.application.Status.Conditions, condition) + + // Sync with source unspecified + opState := 
&v1alpha1.OperationState{Operation: v1alpha1.Operation{ + Sync: &v1alpha1.SyncOperation{ + Source: &v1alpha1.ApplicationSource{}, + SyncOptions: []string{"FailOnSharedResource=true"}, + }, + }} + + // when + f.controller.appStateManager.SyncAppState(f.application, opState) + + // then + assert.Equal(t, common.OperationFailed, opState.Phase) + assert.Contains(t, opState.Message, syncErrorMsg) + }) +} + +func TestNormalizeTargetResources(t *testing.T) { + type fixture struct { + comparisonResult *comparisonResult + } + setup := func(t *testing.T, ignores []v1alpha1.ResourceIgnoreDifferences) *fixture { + t.Helper() + dc, err := diff.NewDiffConfigBuilder(). + WithDiffSettings(ignores, nil, true). + WithNoCache(). + Build() + require.NoError(t, err) + live := test.YamlToUnstructured(testdata.LiveDeploymentYaml) + target := test.YamlToUnstructured(testdata.TargetDeploymentYaml) + return &fixture{ + &comparisonResult{ + reconciliationResult: sync.ReconciliationResult{ + Live: []*unstructured.Unstructured{live}, + Target: []*unstructured.Unstructured{target}, + }, + diffConfig: dc, + }, + } + } + t.Run("will modify target resource adding live state in fields it should ignore", func(t *testing.T) { + // given + ignore := v1alpha1.ResourceIgnoreDifferences{ + Group: "*", + Kind: "*", + ManagedFieldsManagers: []string{"janitor"}, + } + ignores := []v1alpha1.ResourceIgnoreDifferences{ignore} + f := setup(t, ignores) + + // when + targets, err := normalizeTargetResources(f.comparisonResult) + + // then + require.NoError(t, err) + require.Equal(t, 1, len(targets)) + iksmVersion := targets[0].GetAnnotations()["iksm-version"] + assert.Equal(t, "2.0", iksmVersion) + }) + t.Run("will not modify target resource if ignore difference is not configured", func(t *testing.T) { + // given + f := setup(t, []v1alpha1.ResourceIgnoreDifferences{}) + + // when + targets, err := normalizeTargetResources(f.comparisonResult) + + // then + require.NoError(t, err) + require.Equal(t, 1, 
len(targets)) + iksmVersion := targets[0].GetAnnotations()["iksm-version"] + assert.Equal(t, "1.0", iksmVersion) + }) + t.Run("will remove fields from target if not present in live", func(t *testing.T) { + ignore := v1alpha1.ResourceIgnoreDifferences{ + Group: "apps", + Kind: "Deployment", + JSONPointers: []string{"/metadata/annotations/iksm-version"}, + } + ignores := []v1alpha1.ResourceIgnoreDifferences{ignore} + f := setup(t, ignores) + live := f.comparisonResult.reconciliationResult.Live[0] + unstructured.RemoveNestedField(live.Object, "metadata", "annotations", "iksm-version") + + // when + targets, err := normalizeTargetResources(f.comparisonResult) + + // then + require.NoError(t, err) + require.Equal(t, 1, len(targets)) + _, ok := targets[0].GetAnnotations()["iksm-version"] + assert.False(t, ok) + }) + t.Run("will correctly normalize with multiple ignore configurations", func(t *testing.T) { + // given + ignores := []v1alpha1.ResourceIgnoreDifferences{ + { + Group: "apps", + Kind: "Deployment", + JSONPointers: []string{"/spec/replicas"}, + }, + { + Group: "*", + Kind: "*", + ManagedFieldsManagers: []string{"janitor"}, + }, + } + f := setup(t, ignores) + + // when + targets, err := normalizeTargetResources(f.comparisonResult) + + // then + require.NoError(t, err) + require.Equal(t, 1, len(targets)) + normalized := targets[0] + iksmVersion, ok := normalized.GetAnnotations()["iksm-version"] + require.True(t, ok) + assert.Equal(t, "2.0", iksmVersion) + replicas, ok, err := unstructured.NestedInt64(normalized.Object, "spec", "replicas") + require.NoError(t, err) + require.True(t, ok) + assert.Equal(t, int64(4), replicas) + }) + t.Run("will keep new array entries not found in live state if not ignored", func(t *testing.T) { + t.Skip("limitation in the current implementation") + // given + ignores := []v1alpha1.ResourceIgnoreDifferences{ + { + Group: "apps", + Kind: "Deployment", + JQPathExpressions: []string{".spec.template.spec.containers[] | select(.name == 
\"guestbook-ui\")"}, + }, + } + f := setup(t, ignores) + target := test.YamlToUnstructured(testdata.TargetDeploymentNewEntries) + f.comparisonResult.reconciliationResult.Target = []*unstructured.Unstructured{target} + + // when + targets, err := normalizeTargetResources(f.comparisonResult) + + // then + require.NoError(t, err) + require.Equal(t, 1, len(targets)) + containers, ok, err := unstructured.NestedSlice(targets[0].Object, "spec", "template", "spec", "containers") + require.NoError(t, err) + require.True(t, ok) + assert.Equal(t, 2, len(containers)) + }) +} diff --git a/controller/testdata/configmap.yaml b/controller/testdata/configmap.yaml new file mode 100644 index 0000000000000..b781028c394f5 --- /dev/null +++ b/controller/testdata/configmap.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: my-map +data: + foo: bar \ No newline at end of file diff --git a/controller/testdata/data.go b/controller/testdata/data.go new file mode 100644 index 0000000000000..a53c6a8a88b35 --- /dev/null +++ b/controller/testdata/data.go @@ -0,0 +1,14 @@ +package testdata + +import _ "embed" + +var ( + //go:embed live-deployment.yaml + LiveDeploymentYaml string + + //go:embed target-deployment.yaml + TargetDeploymentYaml string + + //go:embed target-deployment-new-entries.yaml + TargetDeploymentNewEntries string +) diff --git a/util/health/testdata/job-failed.yaml b/controller/testdata/job-failed.yaml similarity index 100% rename from util/health/testdata/job-failed.yaml rename to controller/testdata/job-failed.yaml diff --git a/controller/testdata/live-deployment.yaml b/controller/testdata/live-deployment.yaml new file mode 100644 index 0000000000000..731b5b720714c --- /dev/null +++ b/controller/testdata/live-deployment.yaml @@ -0,0 +1,177 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + argocd.argoproj.io/tracking-id: 'guestbook:apps/Deployment:default/kustomize-guestbook-ui' + deployment.kubernetes.io/revision: '9' + iksm-version: 
'2.0' + kubectl.kubernetes.io/last-applied-configuration: > + {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"argocd.argoproj.io/tracking-id":"guestbook:apps/Deployment:default/kustomize-guestbook-ui","iksm-version":"2.0"},"name":"kustomize-guestbook-ui","namespace":"default"},"spec":{"replicas":4,"revisionHistoryLimit":3,"selector":{"matchLabels":{"app":"guestbook-ui"}},"template":{"metadata":{"labels":{"app":"guestbook-ui"}},"spec":{"containers":[{"env":[{"name":"SOME_ENV_VAR","value":"some_value"}],"image":"gcr.io/heptio-images/ks-guestbook-demo:0.1","name":"guestbook-ui","ports":[{"containerPort":80}],"resources":{"requests":{"cpu":"50m","memory":"100Mi"}}}]}}}} + creationTimestamp: '2022-01-05T15:45:21Z' + generation: 119 + managedFields: + - apiVersion: apps/v1 + fieldsType: FieldsV1 + fieldsV1: + 'f:metadata': + 'f:annotations': + 'f:iksm-version': {} + manager: janitor + operation: Apply + time: '2022-01-06T18:21:04Z' + - apiVersion: apps/v1 + fieldsType: FieldsV1 + fieldsV1: + 'f:metadata': + 'f:annotations': + .: {} + 'f:argocd.argoproj.io/tracking-id': {} + 'f:kubectl.kubernetes.io/last-applied-configuration': {} + 'f:spec': + 'f:progressDeadlineSeconds': {} + 'f:replicas': {} + 'f:revisionHistoryLimit': {} + 'f:selector': {} + 'f:strategy': + 'f:rollingUpdate': + .: {} + 'f:maxSurge': {} + 'f:maxUnavailable': {} + 'f:type': {} + 'f:template': + 'f:metadata': + 'f:labels': + .: {} + 'f:app': {} + 'f:spec': + 'f:containers': + 'k:{"name":"guestbook-ui"}': + .: {} + 'f:env': + .: {} + 'k:{"name":"SOME_ENV_VAR"}': + .: {} + 'f:name': {} + 'f:value': {} + 'f:image': {} + 'f:imagePullPolicy': {} + 'f:name': {} + 'f:ports': + .: {} + 'k:{"containerPort":80,"protocol":"TCP"}': + .: {} + 'f:containerPort': {} + 'f:protocol': {} + 'f:resources': + .: {} + 'f:requests': + .: {} + 'f:cpu': {} + 'f:memory': {} + 'f:terminationMessagePath': {} + 'f:terminationMessagePolicy': {} + 'f:dnsPolicy': {} + 'f:restartPolicy': {} + 'f:schedulerName': 
{} + 'f:securityContext': {} + 'f:terminationGracePeriodSeconds': {} + manager: argocd + operation: Update + time: '2022-01-06T15:04:15Z' + - apiVersion: apps/v1 + fieldsType: FieldsV1 + fieldsV1: + 'f:metadata': + 'f:annotations': + 'f:deployment.kubernetes.io/revision': {} + 'f:status': + 'f:availableReplicas': {} + 'f:conditions': + .: {} + 'k:{"type":"Available"}': + .: {} + 'f:lastTransitionTime': {} + 'f:lastUpdateTime': {} + 'f:message': {} + 'f:reason': {} + 'f:status': {} + 'f:type': {} + 'k:{"type":"Progressing"}': + .: {} + 'f:lastTransitionTime': {} + 'f:lastUpdateTime': {} + 'f:message': {} + 'f:reason': {} + 'f:status': {} + 'f:type': {} + 'f:observedGeneration': {} + 'f:readyReplicas': {} + 'f:replicas': {} + 'f:updatedReplicas': {} + manager: kube-controller-manager + operation: Update + time: '2022-01-06T18:15:14Z' + name: kustomize-guestbook-ui + namespace: default + resourceVersion: '8289211' + uid: ef253575-ce44-4c5e-84ad-16e81d0df6eb +spec: + progressDeadlineSeconds: 600 + replicas: 4 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: guestbook-ui + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + app: guestbook-ui + spec: + containers: + - env: + - name: SOME_ENV_VAR + value: some_value + image: 'gcr.io/heptio-images/ks-guestbook-demo:0.1' + imagePullPolicy: IfNotPresent + name: guestbook-ui + ports: + - containerPort: 80 + protocol: TCP + resources: + requests: + cpu: 50m + memory: 100Mi + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 +status: + availableReplicas: 4 + conditions: + - lastTransitionTime: '2022-01-05T22:20:37Z' + lastUpdateTime: '2022-01-05T22:43:47Z' + message: >- + ReplicaSet "kustomize-guestbook-ui-6549d54677" has successfully + progressed. 
+ reason: NewReplicaSetAvailable + status: 'True' + type: Progressing + - lastTransitionTime: '2022-01-06T18:15:14Z' + lastUpdateTime: '2022-01-06T18:15:14Z' + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: 'True' + type: Available + observedGeneration: 119 + readyReplicas: 4 + replicas: 4 + updatedReplicas: 4 diff --git a/util/health/testdata/pod-running-restart-always.yaml b/controller/testdata/pod-running-restart-always.yaml similarity index 100% rename from util/health/testdata/pod-running-restart-always.yaml rename to controller/testdata/pod-running-restart-always.yaml diff --git a/controller/testdata/target-deployment-new-entries.yaml b/controller/testdata/target-deployment-new-entries.yaml new file mode 100644 index 0000000000000..b09ca1c9fbac8 --- /dev/null +++ b/controller/testdata/target-deployment-new-entries.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + argocd.argoproj.io/tracking-id: 'guestbook:apps/Deployment:default/kustomize-guestbook-ui' + iksm-version: '1.0' + name: kustomize-guestbook-ui + namespace: default +spec: + replicas: 1 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: guestbook-ui + template: + metadata: + labels: + app: guestbook-ui + spec: + containers: + - name: guestbook-ui + image: 'gcr.io/heptio-images/ks-guestbook-demo:0.1' + env: + - name: SOME_ENV_VAR + value: some_value + - name: NEW_ENV_VAR + value: new_value + ports: + - containerPort: 80 + - grpcPort: 8081 + resources: + requests: + cpu: 50m + memory: 100Mi + - name: new-container + image: 'new-image:1.0' diff --git a/controller/testdata/target-deployment.yaml b/controller/testdata/target-deployment.yaml new file mode 100644 index 0000000000000..111647f9ac2fd --- /dev/null +++ b/controller/testdata/target-deployment.yaml @@ -0,0 +1,31 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + argocd.argoproj.io/tracking-id: 
'guestbook:apps/Deployment:default/kustomize-guestbook-ui' + iksm-version: '1.0' + name: kustomize-guestbook-ui + namespace: default +spec: + replicas: 1 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: guestbook-ui + template: + metadata: + labels: + app: guestbook-ui + spec: + containers: + - env: + - name: SOME_ENV_VAR + value: some_value + image: 'gcr.io/heptio-images/ks-guestbook-demo:0.1' + name: guestbook-ui + ports: + - containerPort: 80 + resources: + requests: + cpu: 50m + memory: 100Mi diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index dd68b9251bd85..0cef196dca5a1 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -1 +1 @@ -Please refer to [the Contribution Guide](https://argo-cd.readthedocs.io/en/latest/developer-guide/contributing/) +Please refer to [the Contribution Guide](https://argo-cd.readthedocs.io/en/latest/developer-guide/code-contributions/) diff --git a/docs/SUPPORT.md b/docs/SUPPORT.md index 48fb337a78954..e0adecf12d38a 100644 --- a/docs/SUPPORT.md +++ b/docs/SUPPORT.md @@ -1,6 +1,7 @@ # Support -1. Make sure you've read [understanding the basics](understand_the_basics.md) the [getting started guide](getting_started.md). -2. Looked for an answer in [the frequently asked questions](faq.md). -3. Ask a question in [the Argo CD Slack channel ⧉](https://argoproj.github.io/community/join-slack). -4. [Read issues, report a bug, or request a feature ⧉](https://github.com/argoproj/argo-cd/issues). +1. Make sure you've read [understanding the basics](understand_the_basics.md) and the [getting started guide](getting_started.md). +1. Looked for an answer in [the frequently asked questions](faq.md). +1. [Read existing issues ⧉](https://github.com/argoproj/argo-cd/issues). +1. Ask a question in [the Argo CD Slack channel ⧉](https://argoproj.github.io/community/join-slack). +1. [Report a bug, or request a feature ⧉](https://github.com/argoproj/argo-cd/issues/new/choose). 
diff --git a/docs/assets/applicationset/Argo-CD-Integration/ApplicationSet-Argo-Diagram-v2.odp b/docs/assets/applicationset/Argo-CD-Integration/ApplicationSet-Argo-Diagram-v2.odp new file mode 100644 index 0000000000000..47c1acf2f24d5 Binary files /dev/null and b/docs/assets/applicationset/Argo-CD-Integration/ApplicationSet-Argo-Diagram-v2.odp differ diff --git a/docs/assets/applicationset/Argo-CD-Integration/ApplicationSet-Argo-Relationship-v2.png b/docs/assets/applicationset/Argo-CD-Integration/ApplicationSet-Argo-Relationship-v2.png new file mode 100644 index 0000000000000..b1bd6dfc5dd16 Binary files /dev/null and b/docs/assets/applicationset/Argo-CD-Integration/ApplicationSet-Argo-Relationship-v2.png differ diff --git a/docs/assets/applicationset/Argo-CD-Integration/ApplicationSet-Argo-Relationship.png b/docs/assets/applicationset/Argo-CD-Integration/ApplicationSet-Argo-Relationship.png new file mode 100644 index 0000000000000..cde00e6851582 Binary files /dev/null and b/docs/assets/applicationset/Argo-CD-Integration/ApplicationSet-Argo-Relationship.png differ diff --git a/docs/assets/applicationset/Introduction/List-Example-In-Argo-CD-Web-UI.png b/docs/assets/applicationset/Introduction/List-Example-In-Argo-CD-Web-UI.png new file mode 100644 index 0000000000000..faf63fe7848b0 Binary files /dev/null and b/docs/assets/applicationset/Introduction/List-Example-In-Argo-CD-Web-UI.png differ diff --git a/docs/assets/applicationset/Use-Cases/Cluster-Add-Ons.png b/docs/assets/applicationset/Use-Cases/Cluster-Add-Ons.png new file mode 100644 index 0000000000000..f6353cc29e340 Binary files /dev/null and b/docs/assets/applicationset/Use-Cases/Cluster-Add-Ons.png differ diff --git a/docs/assets/applicationset/Use-Cases/Monorepos.png b/docs/assets/applicationset/Use-Cases/Monorepos.png new file mode 100644 index 0000000000000..523af303c1d40 Binary files /dev/null and b/docs/assets/applicationset/Use-Cases/Monorepos.png differ diff --git a/docs/assets/applicationset/logo.png 
b/docs/assets/applicationset/logo.png new file mode 100644 index 0000000000000..b9cb18e63431b Binary files /dev/null and b/docs/assets/applicationset/logo.png differ diff --git a/docs/assets/applicationset/webhook-config-merge-request-gitlab.png b/docs/assets/applicationset/webhook-config-merge-request-gitlab.png new file mode 100644 index 0000000000000..3e2029b44e80c Binary files /dev/null and b/docs/assets/applicationset/webhook-config-merge-request-gitlab.png differ diff --git a/docs/assets/applicationset/webhook-config-pull-request.png b/docs/assets/applicationset/webhook-config-pull-request.png new file mode 100644 index 0000000000000..434c895a8ab30 Binary files /dev/null and b/docs/assets/applicationset/webhook-config-pull-request.png differ diff --git a/docs/assets/applicationset/webhook-config-pullrequest-generator.png b/docs/assets/applicationset/webhook-config-pullrequest-generator.png new file mode 100644 index 0000000000000..d042da812affe Binary files /dev/null and b/docs/assets/applicationset/webhook-config-pullrequest-generator.png differ diff --git a/docs/assets/applicationset/webhook-config.png b/docs/assets/applicationset/webhook-config.png new file mode 100644 index 0000000000000..76a6400e5c7eb Binary files /dev/null and b/docs/assets/applicationset/webhook-config.png differ diff --git a/docs/assets/argocd-arch-authn-authz.jpg b/docs/assets/argocd-arch-authn-authz.jpg new file mode 100644 index 0000000000000..323f95fce1d11 Binary files /dev/null and b/docs/assets/argocd-arch-authn-authz.jpg differ diff --git a/docs/assets/argocd-components.png b/docs/assets/argocd-components.png new file mode 100644 index 0000000000000..35e214a10ca7f Binary files /dev/null and b/docs/assets/argocd-components.png differ diff --git a/docs/assets/argocd-core-components.png b/docs/assets/argocd-core-components.png new file mode 100644 index 0000000000000..f274f7cb821bb Binary files /dev/null and b/docs/assets/argocd-core-components.png differ diff --git 
a/docs/assets/argocd-ui.gif b/docs/assets/argocd-ui.gif index 49a4d9fac801d..cdef7f2876a28 100644 Binary files a/docs/assets/argocd-ui.gif and b/docs/assets/argocd-ui.gif differ diff --git a/docs/assets/azure-app-registration-authentication.png b/docs/assets/azure-app-registration-authentication.png new file mode 100644 index 0000000000000..f1013ae2d1e04 Binary files /dev/null and b/docs/assets/azure-app-registration-authentication.png differ diff --git a/docs/assets/azure-app-registration-overview.png b/docs/assets/azure-app-registration-overview.png new file mode 100644 index 0000000000000..a352db4737b8b Binary files /dev/null and b/docs/assets/azure-app-registration-overview.png differ diff --git a/docs/assets/azure-app-registration-secret.png b/docs/assets/azure-app-registration-secret.png new file mode 100644 index 0000000000000..0444397e96a06 Binary files /dev/null and b/docs/assets/azure-app-registration-secret.png differ diff --git a/docs/assets/azure-devops-webhook-config.png b/docs/assets/azure-devops-webhook-config.png new file mode 100644 index 0000000000000..26fb6d0683d63 Binary files /dev/null and b/docs/assets/azure-devops-webhook-config.png differ diff --git a/docs/assets/azure-enterprise-claims.png b/docs/assets/azure-enterprise-claims.png old mode 100755 new mode 100644 diff --git a/docs/assets/azure-enterprise-saml-urls.png b/docs/assets/azure-enterprise-saml-urls.png old mode 100755 new mode 100644 diff --git a/docs/assets/azure-enterprise-users.png b/docs/assets/azure-enterprise-users.png old mode 100755 new mode 100644 diff --git a/docs/assets/azure-sso-cli-log-in-success.png b/docs/assets/azure-sso-cli-log-in-success.png new file mode 100644 index 0000000000000..469919e6b40c8 Binary files /dev/null and b/docs/assets/azure-sso-cli-log-in-success.png differ diff --git a/docs/assets/azure-sso-web-application.png b/docs/assets/azure-sso-web-application.png new file mode 100644 index 0000000000000..c517d9d29a65c Binary files /dev/null and 
b/docs/assets/azure-sso-web-application.png differ diff --git a/docs/assets/azure-sso-web-log-in-via-azure.png b/docs/assets/azure-sso-web-log-in-via-azure.png new file mode 100644 index 0000000000000..1042368ea36c1 Binary files /dev/null and b/docs/assets/azure-sso-web-log-in-via-azure.png differ diff --git a/docs/assets/azure-sso-web-user-info.png b/docs/assets/azure-sso-web-user-info.png new file mode 100644 index 0000000000000..cd27d5f010089 Binary files /dev/null and b/docs/assets/azure-sso-web-user-info.png differ diff --git a/docs/assets/banner.png b/docs/assets/banner.png new file mode 100644 index 0000000000000..522199e1d8ee8 Binary files /dev/null and b/docs/assets/banner.png differ diff --git a/docs/assets/extra_info-1.png b/docs/assets/extra_info-1.png new file mode 100644 index 0000000000000..8ccf597c0879e Binary files /dev/null and b/docs/assets/extra_info-1.png differ diff --git a/docs/assets/extra_info-2.png b/docs/assets/extra_info-2.png new file mode 100644 index 0000000000000..3dde4e61fa4e0 Binary files /dev/null and b/docs/assets/extra_info-2.png differ diff --git a/docs/assets/extra_info.png b/docs/assets/extra_info.png new file mode 100644 index 0000000000000..c5064762e5138 Binary files /dev/null and b/docs/assets/extra_info.png differ diff --git a/docs/assets/google-admin-oidc-uris.png b/docs/assets/google-admin-oidc-uris.png new file mode 100644 index 0000000000000..0fcedcc2f3f08 Binary files /dev/null and b/docs/assets/google-admin-oidc-uris.png differ diff --git a/docs/assets/google-groups-membership.png b/docs/assets/google-groups-membership.png new file mode 100644 index 0000000000000..98aa661d76699 Binary files /dev/null and b/docs/assets/google-groups-membership.png differ diff --git a/docs/assets/keycloak-add-client.png b/docs/assets/keycloak-add-client.png index 36d598318cbe3..acdb3e725b8bf 100644 Binary files a/docs/assets/keycloak-add-client.png and b/docs/assets/keycloak-add-client.png differ diff --git 
a/docs/assets/keycloak-add-client_2.png b/docs/assets/keycloak-add-client_2.png new file mode 100644 index 0000000000000..b765bf89e5698 Binary files /dev/null and b/docs/assets/keycloak-add-client_2.png differ diff --git a/docs/assets/keycloak-add-scope.png b/docs/assets/keycloak-add-scope.png index 200486315e372..b2b759394619d 100644 Binary files a/docs/assets/keycloak-add-scope.png and b/docs/assets/keycloak-add-scope.png differ diff --git a/docs/assets/keycloak-client-scope-selected.png b/docs/assets/keycloak-client-scope-selected.png deleted file mode 100644 index f3ec6ded54c69..0000000000000 Binary files a/docs/assets/keycloak-client-scope-selected.png and /dev/null differ diff --git a/docs/assets/keycloak-client-scope.png b/docs/assets/keycloak-client-scope.png index 04d56583ab926..cd9609b5419b7 100644 Binary files a/docs/assets/keycloak-client-scope.png and b/docs/assets/keycloak-client-scope.png differ diff --git a/docs/assets/keycloak-client-secret.png b/docs/assets/keycloak-client-secret.png index b4679b0e9d4eb..c1a71c3d97f20 100644 Binary files a/docs/assets/keycloak-client-secret.png and b/docs/assets/keycloak-client-secret.png differ diff --git a/docs/assets/keycloak-configure-client.png b/docs/assets/keycloak-configure-client.png index d3805ed05df6a..cd711dfd602bf 100644 Binary files a/docs/assets/keycloak-configure-client.png and b/docs/assets/keycloak-configure-client.png differ diff --git a/docs/assets/keycloak-groups-mapper.png b/docs/assets/keycloak-groups-mapper.png index 3610aa5737a1a..b1ccabb30013a 100644 Binary files a/docs/assets/keycloak-groups-mapper.png and b/docs/assets/keycloak-groups-mapper.png differ diff --git a/docs/assets/keycloak-user-group.png b/docs/assets/keycloak-user-group.png index ff9825b99d708..5c9c21d4f555f 100644 Binary files a/docs/assets/keycloak-user-group.png and b/docs/assets/keycloak-user-group.png differ diff --git a/docs/assets/release-action.png b/docs/assets/release-action.png new file mode 100644 index 
0000000000000..cadcda53fca09 Binary files /dev/null and b/docs/assets/release-action.png differ diff --git a/docs/assets/repo-add-google-cloud-source.png b/docs/assets/repo-add-google-cloud-source.png new file mode 100644 index 0000000000000..a201703592d14 Binary files /dev/null and b/docs/assets/repo-add-google-cloud-source.png differ diff --git a/docs/assets/repo-add-overview.png b/docs/assets/repo-add-overview.png index 303bf0dea7e5e..cd6cfda572506 100644 Binary files a/docs/assets/repo-add-overview.png and b/docs/assets/repo-add-overview.png differ diff --git a/docs/assets/terminal.png b/docs/assets/terminal.png new file mode 100644 index 0000000000000..2e89ddc4f6726 Binary files /dev/null and b/docs/assets/terminal.png differ diff --git a/docs/assets/versions.css b/docs/assets/versions.css index 49c3d830695d7..b8bb066929dd0 100644 --- a/docs/assets/versions.css +++ b/docs/assets/versions.css @@ -36,10 +36,13 @@ /* Version Warning */ div[data-md-component=announce] { - background-color: rgba(255,145,0,.1); + background-color: rgb(248, 243, 236); + position: sticky; + top: 0; + z-index: 2; } div[data-md-component=announce]>div#announce-msg{ - color: var(--md-admonition-fg-color); + color: var(--md-code-hl-number-color); font-size: .8rem; text-align: center; margin: 15px; diff --git a/docs/assets/versions.js b/docs/assets/versions.js index d67a39e72247c..7a2392a392dc8 100644 --- a/docs/assets/versions.js +++ b/docs/assets/versions.js @@ -9,16 +9,6 @@ setTimeout(function() { caret.innerHTML = "" caret.classList.add('dropdown-caret') div.querySelector('.rst-current-version').appendChild(caret); - div.querySelector('.rst-current-version').addEventListener('click', function() { - const classes = container.className.split(' '); - const index = classes.indexOf('shift-up'); - if (index === -1) { - classes.push('shift-up'); - } else { - classes.splice(index, 1); - } - container.className = classes.join(' '); - }); } var CSSLink = document.createElement('link'); @@ -35,10 
+25,24 @@ setTimeout(function() { // VERSION WARNINGS window.addEventListener("DOMContentLoaded", function() { var rtdData = window['READTHEDOCS_DATA'] || { version: 'latest' }; + var margin = 30; + var headerHeight = document.getElementsByClassName("md-header")[0].offsetHeight; if (rtdData.version === "latest") { document.querySelector("div[data-md-component=announce]").innerHTML = "
<div id='announce-msg'>You are viewing the docs for an unreleased version of Argo CD, <a href='https://argo-cd.readthedocs.io/en/stable/'>click here to go to the latest stable version.</a></div>
" + var bannerHeight = document.getElementById('announce-msg').offsetHeight + margin + document.querySelector("header.md-header").style.top = bannerHeight +"px"; + document.querySelector('style').textContent += + "@media screen and (min-width: 76.25em){ .md-sidebar { height: 0; top:"+ (bannerHeight+headerHeight)+"px !important; }}" + document.querySelector('style').textContent += + "@media screen and (min-width: 60em){ .md-sidebar--secondary { height: 0; top:"+ (bannerHeight+headerHeight)+"px !important; }}" } else if (rtdData.version !== "stable") { document.querySelector("div[data-md-component=announce]").innerHTML = "
<div id='announce-msg'>You are viewing the docs for a previous version of Argo CD, <a href='https://argo-cd.readthedocs.io/en/stable/'>click here to go to the latest stable version.</a></div>
" + var bannerHeight = document.getElementById('announce-msg').offsetHeight + margin + document.querySelector("header.md-header").style.top = bannerHeight +"px"; + document.querySelector('style').textContent += + "@media screen and (min-width: 76.25em){ .md-sidebar { height: 0; top:"+ (bannerHeight+headerHeight)+"px !important; }}" + document.querySelector('style').textContent += + "@media screen and (min-width: 60em){ .md-sidebar--secondary { height: 0; top:"+ (bannerHeight+headerHeight)+"px !important; }}" } }); diff --git a/docs/assets/zitadel-actions.png b/docs/assets/zitadel-actions.png new file mode 100644 index 0000000000000..db0b37245c49e Binary files /dev/null and b/docs/assets/zitadel-actions.png differ diff --git a/docs/assets/zitadel-application-1.png b/docs/assets/zitadel-application-1.png new file mode 100644 index 0000000000000..f69acf9b6939e Binary files /dev/null and b/docs/assets/zitadel-application-1.png differ diff --git a/docs/assets/zitadel-application-2.png b/docs/assets/zitadel-application-2.png new file mode 100644 index 0000000000000..f0b4ff34eabed Binary files /dev/null and b/docs/assets/zitadel-application-2.png differ diff --git a/docs/assets/zitadel-application-3.png b/docs/assets/zitadel-application-3.png new file mode 100644 index 0000000000000..7650217264cbc Binary files /dev/null and b/docs/assets/zitadel-application-3.png differ diff --git a/docs/assets/zitadel-application-4.png b/docs/assets/zitadel-application-4.png new file mode 100644 index 0000000000000..d5660fc4f5f0e Binary files /dev/null and b/docs/assets/zitadel-application-4.png differ diff --git a/docs/assets/zitadel-application-secrets.png b/docs/assets/zitadel-application-secrets.png new file mode 100644 index 0000000000000..9530dc7f1de50 Binary files /dev/null and b/docs/assets/zitadel-application-secrets.png differ diff --git a/docs/assets/zitadel-application-settings.png b/docs/assets/zitadel-application-settings.png new file mode 100644 index 
0000000000000..cbe2e62aee738 Binary files /dev/null and b/docs/assets/zitadel-application-settings.png differ diff --git a/docs/assets/zitadel-argocd-login.png b/docs/assets/zitadel-argocd-login.png new file mode 100644 index 0000000000000..b4f2e0b75ae77 Binary files /dev/null and b/docs/assets/zitadel-argocd-login.png differ diff --git a/docs/assets/zitadel-argocd-user-info.png b/docs/assets/zitadel-argocd-user-info.png new file mode 100644 index 0000000000000..88b5a2befa8f2 Binary files /dev/null and b/docs/assets/zitadel-argocd-user-info.png differ diff --git a/docs/assets/zitadel-project-authorizations.png b/docs/assets/zitadel-project-authorizations.png new file mode 100644 index 0000000000000..0ec01f7755dab Binary files /dev/null and b/docs/assets/zitadel-project-authorizations.png differ diff --git a/docs/assets/zitadel-project-roles.png b/docs/assets/zitadel-project-roles.png new file mode 100644 index 0000000000000..ee18d1e1f3dd0 Binary files /dev/null and b/docs/assets/zitadel-project-roles.png differ diff --git a/docs/assets/zitadel-project-settings.png b/docs/assets/zitadel-project-settings.png new file mode 100644 index 0000000000000..ac0c3fa8036c7 Binary files /dev/null and b/docs/assets/zitadel-project-settings.png differ diff --git a/docs/assets/zitadel-project.png b/docs/assets/zitadel-project.png new file mode 100644 index 0000000000000..4ed9b04f87541 Binary files /dev/null and b/docs/assets/zitadel-project.png differ diff --git a/docs/cli_installation.md b/docs/cli_installation.md index 3ed832bdaeeea..42938bcd751ba 100644 --- a/docs/cli_installation.md +++ b/docs/cli_installation.md @@ -2,7 +2,47 @@ You can download the latest Argo CD version from [the latest release page of this repository](https://github.com/argoproj/argo-cd/releases/latest), which will include the `argocd` CLI. 
-## Linux +## Linux and WSL + +### ArchLinux + +```bash +pacman -S argocd +``` + +### Homebrew + +```bash +brew install argocd +``` + +### Download With Curl + +#### Download latest version + +```bash +curl -sSL -o argocd-linux-amd64 https://github.com/argoproj/argo-cd/releases/latest/download/argocd-linux-amd64 +sudo install -m 555 argocd-linux-amd64 /usr/local/bin/argocd +rm argocd-linux-amd64 +``` + +#### Download concrete version + +Set `VERSION` replacing `` in the command below with the version of Argo CD you would like to download: + +```bash +VERSION= # Select desired TAG from https://github.com/argoproj/argo-cd/releases +curl -sSL -o argocd-linux-amd64 https://github.com/argoproj/argo-cd/releases/download/$VERSION/argocd-linux-amd64 +sudo install -m 555 argocd-linux-amd64 /usr/local/bin/argocd +rm argocd-linux-amd64 +``` + +You should now be able to run `argocd` commands. + + +## Mac (M1) + +### Download With Curl You can view the latest version of Argo CD at the link above or run the following command to grab the version: @@ -13,16 +53,16 @@ VERSION=$(curl --silent "https://api.github.com/repos/argoproj/argo-cd/releases/ Replace `VERSION` in the command below with the version of Argo CD you would like to download: ```bash -curl -sSL -o /usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/download/$VERSION/argocd-linux-amd64 +curl -sSL -o argocd-darwin-arm64 https://github.com/argoproj/argo-cd/releases/download/$VERSION/argocd-darwin-arm64 ``` -Make the `argocd` CLI executable: +Install the Argo CD CLI binary: ```bash -chmod +x /usr/local/bin/argocd +sudo install -m 555 argocd-darwin-arm64 /usr/local/bin/argocd +rm argocd-darwin-arm64 ``` -You should now be able to run `argocd` commands. 
## Mac @@ -43,13 +83,14 @@ VERSION=$(curl --silent "https://api.github.com/repos/argoproj/argo-cd/releases/ Replace `VERSION` in the command below with the version of Argo CD you would like to download: ```bash -curl -sSL -o /usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/download/$VERSION/argocd-darwin-amd64 +curl -sSL -o argocd-darwin-amd64 https://github.com/argoproj/argo-cd/releases/download/$VERSION/argocd-darwin-amd64 ``` -Make the `argocd` CLI executable: +Install the Argo CD CLI binary: ```bash -chmod +x /usr/local/bin/argocd +sudo install -m 555 argocd-darwin-amd64 /usr/local/bin/argocd +rm argocd-darwin-amd64 ``` After finishing either of the instructions above, you should now be able to run `argocd` commands. @@ -57,7 +98,7 @@ After finishing either of the instructions above, you should now be able to run ## Windows -### Download With Powershell: Invoke-WebRequest +### Download With PowerShell: Invoke-WebRequest You can view the latest version of Argo CD at the link above or run the following command to grab the version: @@ -74,6 +115,11 @@ $output = "argocd.exe" Invoke-WebRequest -Uri $url -OutFile $output ``` Also please note you will probably need to move the file into your PATH. +Use following command to add Argo CD into environment variables PATH + +```powershell +[Environment]::SetEnvironmentVariable("Path", "$env:Path;C:\Path\To\ArgoCD-CLI", "User") +``` After finishing the instructions above, you should now be able to run `argocd` commands. diff --git a/docs/core_concepts.md b/docs/core_concepts.md index 08495edf41f6f..628eeebd44c74 100644 --- a/docs/core_concepts.md +++ b/docs/core_concepts.md @@ -1,6 +1,7 @@ # Core Concepts -Let's assume you're familiar with core Git, Docker, Kubernetes, Continuous Delivery, and GitOps concepts. +Let's assume you're familiar with core Git, Docker, Kubernetes, Continuous Delivery, and GitOps concepts. +Below are some of the concepts that are specific to Argo CD. 
* **Application** A group of Kubernetes resources as defined by a manifest. This is a Custom Resource Definition (CRD). * **Application source type** Which **Tool** is used to build the application. @@ -11,6 +12,6 @@ Let's assume you're familiar with core Git, Docker, Kubernetes, Continuous Deliv * **Sync operation status** Whether or not a sync succeeded. * **Refresh** Compare the latest code in Git with the live state. Figure out what is different. * **Health** The health of the application, is it running correctly? Can it serve requests? -* **Tool** A tool to create manifests from a directory of files. E.g. Kustomize or Ksonnet. See **Application Source Type**. +* **Tool** A tool to create manifests from a directory of files. E.g. Kustomize. See **Application Source Type**. * **Configuration management tool** See **Tool**. * **Configuration management plugin** A custom tool. diff --git a/docs/developer-guide/api-docs.md b/docs/developer-guide/api-docs.md index b96d0c5b46a90..289e4d466652e 100644 --- a/docs/developer-guide/api-docs.md +++ b/docs/developer-guide/api-docs.md @@ -1,6 +1,6 @@ # API Docs -You can find the Swagger docs by setting the path to `/swagger-ui` in your Argo CD UI's. E.g. [http://localhost:8080/swagger-ui](http://localhost:8080/swagger-ui). +You can find the Swagger docs by setting the path to `/swagger-ui` in your Argo CD UI. E.g. [http://localhost:8080/swagger-ui](http://localhost:8080/swagger-ui). 
## Authorization @@ -11,21 +11,23 @@ $ curl $ARGOCD_SERVER/api/v1/session -d $'{"username":"admin","password":"passwo {"token":"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpYXQiOjE1Njc4MTIzODcsImlzcyI6ImFyZ29jZCIsIm5iZiI6MTU2NzgxMjM4Nywic3ViIjoiYWRtaW4ifQ.ejyTgFxLhuY9mOBtKhcnvobg3QZXJ4_RusN_KIdVwao"} ``` -> <=v1.2 - -Then pass using the HTTP `SetCookie` header, prefixing with `argocd.token`: +Then pass using the HTTP `Authorization` header, prefixing with `Bearer `: ```bash -$ curl $ARGOCD_SERVER/api/v1/applications --cookie "argocd.token=$ARGOCD_TOKEN" +$ curl $ARGOCD_SERVER/api/v1/applications -H "Authorization: Bearer $ARGOCD_TOKEN" {"metadata":{"selfLink":"/apis/argoproj.io/v1alpha1/namespaces/argocd/applications","resourceVersion":"37755"},"items":...} ``` -> v1.3 +## Services -Then pass using the HTTP `Authorization` header, prefixing with `Bearer `: +### Applications API -```bash -$ curl $ARGOCD_SERVER/api/v1/applications -H "Authorization: Bearer $ARGOCD_TOKEN" -{"metadata":{"selfLink":"/apis/argoproj.io/v1alpha1/namespaces/argocd/applications","resourceVersion":"37755"},"items":...} -``` - +#### How to Avoid 403 Errors for Missing Applications + +All endpoints of the Applications API accept an optional `project` query string parameter. If the parameter is +specified, and the specified Application does not exist, or if the Application does exist but is not in the given +project, the API will return a `404` error. + +If the `project` query string parameter is not specified, and the Application does not exist, the API will return a `403` +error. This is to prevent leaking information about the existence of Applications to users who do not have access to +them. 
diff --git a/docs/developer-guide/architecture/authz-authn.md b/docs/developer-guide/architecture/authz-authn.md new file mode 100644 index 0000000000000..af32a9176eec9 --- /dev/null +++ b/docs/developer-guide/architecture/authz-authn.md @@ -0,0 +1,109 @@ +# Authentication and Authorization + +This document describes how authentication (authn) and authorization +(authz) are implemented in Argo CD. There is a clear distinction in +the code base of when and how these two security concepts are +enforced. + +## Logical layers + +The diagram below suggests 4 different logical layers (represented by +4 boxes: HTTP, gRPC, AuthN and AuthZ) inside Argo CD API server that +collaborate to provide authentication and authorization. + +- **HTTP**: The HTTP layer groups the *logical elements* that + collaborate to handle HTTP requests. Every incoming request reaches + the same HTTP server at the same port (8080). This server will + analyze the request headers and dispatch to the proper internal + server: gRPC or standard HTTP. + +- **gRPC**: The [gRPC][4] layer groups the logical elements responsible for + the gRPC implementation. + +- **AuthN**: The AuthN represents the layer responsible for + authentication. + +- **AuthZ**: The AuthZ represents the layer responsible for + authorization. + +![Argo CD Architecture](../../assets/argocd-arch-authn-authz.jpg) + +## Logical elements + +The logical elements (identified by numbers) can represent an object, +a function or a component in the code base. Note that this particular +distinction is not represented in the diagram. + +Incoming requests can reach Argo CD API server from the web UI as well +as from the `argocd` CLI. The responsibility of the represented +elements are described below with their respective numbers: + +1. **Cmux**: Uses the [cmux][1] library to provide a connection + multiplexer capability making it possible to use the same port to + handle standard HTTP as well as gRPC requests. 
It is responsible + for inspecting incoming requests and dispatch to appropriate + internal servers. If the request version is `http1.x` it will + delegate to the *http mux*. If the request version is `http2` and + has the header `content-type: application/grpc`, it will delegate + to the *gRPC Server*. + +1. **HTTP mux**: A [standard HTTP multiplexer][8] that will handle non + gRPC requests. It is responsible for serving a unified [REST + API][3] to the web UI exposing all gRPC and non-gRPC services. + +1. **gRPC-gateway**: Uses the [grpc-gateway][2] library to translate + internal gRPC services and expose them as a [REST API][3]. The + great majority of API services in Argo CD are implemented in gRPC. + The grpc-gateway makes it possible to access gRPC services from the + web UI. + +1. **Server**: The internal gRPC Server responsible for handling gRPC + requests. + +1. **AuthN**: Is responsible for invoking the authentication logic. It + is registered as a gRPC interceptor which will automatically + trigger for every gRPC request. + +1. **Session Manager**: Is the object responsible for managing Argo CD + API server session. It provides the functionality to verify the + validity of the authentication token provided in the request. + Depending on how Argo CD is configured it may or may not delegate + to an external AuthN provider to verify the token. + +1. **AuthN Provider**: Describes the component that can be plugged in + Argo CD API server to provide the authentication functionality such + as the login and the token verification process. + +1. **Service Method**: represents the method implementing the business + logic (core functionality) requested. An example of business logic + is: `List Applications`. Service methods are also responsible for + invoking the [RBAC][7] enforcement function to validate if the + authenticated user has permission to execute this method. + +1. 
**RBAC**: Is a collection of functions to provide the capability to + verify if the user has permission to execute a specific action in + Argo CD. It does so by validating the incoming request action + against predefined [RBAC][7] rules that can be configured in Argo CD + API server as well as in Argo CD `Project` CRD. + +1. **Casbin**: Uses the [Casbin][5] library to enforce [RBAC][7] rules. + +1. **AuthN Middleware**: Is an [HTTP Middleware][6] configured to + invoke the logic to verify the token for HTTP services that are not + implemented as gRPC and requires authentication. + +1. **HTTP Handler**: represents the http handlers responsible for + invoking the business logic (core functionality) requested. An + example of business logic is: `List Applications`. Http handlers + are also responsible for invoking the [RBAC][7] enforcement function to + validate if the authenticated user has permission to execute this + business logic. + +[1]: https://github.com/soheilhy/cmux +[2]: https://github.com/grpc-ecosystem/grpc-gateway +[3]: https://en.wikipedia.org/wiki/Representational_state_transfer +[4]: https://grpc.io/ +[5]: https://casbin.org/ +[6]: https://github.com/golang/go/wiki/LearnServerProgramming#middleware +[7]: https://en.wikipedia.org/wiki/Role-based_access_control +[8]: https://pkg.go.dev/net/http#ServeMux diff --git a/docs/developer-guide/architecture/components.md b/docs/developer-guide/architecture/components.md new file mode 100644 index 0000000000000..eb2904b531ccb --- /dev/null +++ b/docs/developer-guide/architecture/components.md @@ -0,0 +1,118 @@ +# Component Architecture + +Argo CD is designed with a component based architecture. The goal is +to separate the responsibility in different deployable units in order +to have the following benefits: + +- **Modularity**: Provides great level of flexibility. Components + interact with each other via an interface. 
This means that as long + as the interface contract is respected, a given component can be + replaced without requiring the rest of the system to adapt. It is + also possible to run the system without certain components if a + specific group of functionality isn't desired. +- **Single responsibility**: Helps to determine where the different + types of functionality should be implemented which drives for + better system cohesiveness. +- **Reusability**: Clearly defined interfaces helps in functionality + discoverability which benefits reusability of services. + +The default Argo CD installation is composed by different components +and different Kubernetes controllers. The controllers aren't +categorized as components as they have proprietary interfaces (CRDs) +and therefore, miss the modular nature. There are more resources +created while installing Argo CD (ConfigMaps, Services, etc), but for +simplicity we are covering just the ones directly related with the +componentized architecture. + +## Dependencies + +The diagram below has represented all dependencies between the +different components used by the default Argo CD installation: + +![Components Diagram](../../assets/argocd-components.png) + +There are 4 logical layers represented in the diagram: + +- **UI**: This is the presentation layer. Users interact with Argo CD + mainly by components from this layer. +- **Application**: The capabilities required to support the components + from the UI layer. +- **Core**: The main Argo CD gitops functionality is implemented by + components and Kubernetes controllers from the Core layer. +- **Infra**: Represent the tools that Argo CD depends on as part of + its infrastructure. + +The logical layers also help making the diagram easier to follow as +dependencies are represented in a top-down relationship. This means +that components from the top layers will be allowed to depend on any +component from any of the bottom layers. 
However components from the +bottom layers will never depend on any ones from upper layers. + +## Responsibility + +Below you can refer to a brief description of Argo CD components and +its main responsibilities. + +### Webapp + +Argo CD ships with a powerful web interface that allows managing +applications deployed in a given Kubernetes cluster. + +### CLI + +Argo CD provides a CLI that can be used by users to interact with Argo +CD API. The CLI can also be used for automation and scripting. + +### API Server + +Defines the proprietary API exposed by Argo CD that powers the Webapp +and the CLI functionalities. + +### Application Controller + +The Application Controller is responsible for reconciling the +Application resource in Kubernetes synchronizing the desired +application state (provided in Git) with the live state (in +Kubernetes). The Application Controller is also responsible for +reconciling the Project resource. + +### ApplicationSet Controller + +The ApplicationSet Controller is responsible for reconciling the +ApplicationSet resource. + +### Repo Server + +Repo Server plays an important role in Argo CD architecture as it is +responsible for interacting with the Git repository to generate the +desired state for all Kubernetes resources that belong to a given +application. + +### Redis + +Redis is used by Argo CD to provide a cache layer reducing requests +sent to the Kube API as well as to the Git provider. It also supports +a few UI operations. + +### Kube API + +Argo CD controllers will connect to the Kubernetes API in order to run +the reconciliation loop. + +### Git + +As a gitops tool Argo CD requires that the desired state of the +Kubernetes resources to be provided in a Git repository. + +We use "git" here to stand in for an actual git repo, a Helm repo, +or an OCI artifact repo. Argo CD supports all those options. + +### Dex + +Argo CD relies on Dex to provide authentication with external OIDC +providers. 
However other tools can be used instead of Dex. Check the +[user management +documentation](../../operator-manual/user-management/index.md) for +more details. + + diff --git a/docs/developer-guide/ci.md b/docs/developer-guide/ci.md index 4d830332a911d..921f9d69d4c57 100644 --- a/docs/developer-guide/ci.md +++ b/docs/developer-guide/ci.md @@ -12,10 +12,10 @@ To read more about The GitHub actions are configured in [`ci-build.yaml`](https: Since the CI pipeline is triggered on Git commits, there is currently no (known) way on how to retrigger the CI checks without pushing a new commit to your branch. -If you are absolutely sure that the failure was due to a failure in the pipeline, and not an error within the changes you commited, you can push an empty commit to your branch, thus retriggering the pipeline without any code changes. To do so, issue +If you are absolutely sure that the failure was due to a failure in the pipeline, and not an error within the changes you committed, you can push an empty commit to your branch, thus retriggering the pipeline without any code changes. To do so, issue ```bash -git commit --allow-empty -m "Retrigger CI pipeline" +git commit -s --allow-empty -m "Retrigger CI pipeline" git push origin ``` @@ -23,11 +23,13 @@ git push origin First, make sure the failing build step succeeds on your machine. Remember the containerized build toolchain is available, too. -If the build is failing at the `Ensuring Gopkg.lock is up-to-date` step, you need to update the dependencies before you push your commits. Run `make dep-ensure` and `make dep` and commit the changes to `Gopkg.lock` to your branch. +If the build is failing at the `Ensure Go modules synchronicity` step, you need to first download all Go dependent modules locally via `go mod download` and then run `go mod tidy` to make sure the dependent Go modules are tidied up. Finally, commit and push your changes to `go.mod` and `go.sum` to your branch. 
+ +If the build is failing at the `Build & cache Go code`, you need to make sure `make build-local` runs successfully on your local machine. ### Why does the codegen step fail? -If the codegen step fails with "Check nothing has changed...", chances are high that you did not run `make codegen`, or did not commit the changes it made. You should double check by running `make codegen` followed by `git status` in the local working copy of your branch. Commit any changes and push them to your GH branch to have the CI check it again. +If the codegen step fails with "Check nothing has changed...", chances are high that you did not run `make codegen`, or did not commit the changes it made. You should double-check by running `make codegen` followed by `git status` in the local working copy of your branch. Commit any changes and push them to your GH branch to have the CI check it again. A second common case for this is, when you modified any of the auto generated assets, as these will be overwritten upon `make codegen`. @@ -63,12 +65,12 @@ make builder-image IMAGE_NAMESPACE=argoproj IMAGE_TAG=v1.0.0 ## Public CD -Every commit to master is built and published to `ghcr.io/argoproj/argocd:-`. The list of images is available at -https://github.com/argoproj/argo-cd/packages. +Every commit to master is built and published to `ghcr.io/argoproj/argo-cd/argocd:-`. The list of images is available at +[https://github.com/argoproj/argo-cd/packages](https://github.com/argoproj/argo-cd/packages). !!! note - Github docker registry [requires](https://github.community/t5/GitHub-Actions/docker-pull-from-public-GitHub-Package-Registry-fail-with-quot/m-p/32888#M1294) authentication to read + GitHub docker registry [requires](https://github.community/t5/GitHub-Actions/docker-pull-from-public-GitHub-Package-Registry-fail-with-quot/m-p/32888#M1294) authentication to read even publicly available packages. 
Follow the steps from Kubernetes [documentation](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry) - to configure image pull secret if you want to use `ghcr.io/argoproj/argocd` image. + to configure image pull secret if you want to use `ghcr.io/argoproj/argo-cd/argocd` image. The image is automatically deployed to the dev Argo CD instance: [https://cd.apps.argoproj.io/](https://cd.apps.argoproj.io/) diff --git a/docs/developer-guide/code-contributions.md b/docs/developer-guide/code-contributions.md new file mode 100644 index 0000000000000..b02bf64e15505 --- /dev/null +++ b/docs/developer-guide/code-contributions.md @@ -0,0 +1,112 @@ +# Submitting code contributions to Argo CD + +## Preface + +The Argo CD project continuously grows, both in terms of features and community size. It gets adopted by more and more organisations which entrust Argo CD to handle their critical production workloads. Thus, we need to take great care with any changes that affect compatibility, performance, scalability, stability and security of Argo CD. For this reason, every new feature or larger enhancement must be properly designed and discussed before it gets accepted into the code base. + +We do welcome and encourage everyone to participate in the Argo CD project, but please understand that we can't accept each and every contribution from the community, for various reasons. + +If you want to submit code for a great new feature or enhancement, we kindly ask you to take a look at the +enhancement process outlined below before you start to write code or submit a PR. This will ensure that your idea is well aligned with the project's strategy and technical requirements, and it will help greatly in getting your code merged into our code base. + +Before submitting code for a new feature (and also, to some extent, for more complex bug fixes) please +[raise an Enhancement Proposal or Bug Issue](https://github.com/argoproj/argo-cd/issues/new/choose) +first. 
+ +Each enhancement proposal needs to go through our +[triage process](#triage-process) +before we accept code contributions. To facilitate triage and to provide transparency, we use +[this GitHub project](https://github.com/orgs/argoproj/projects/18) to keep track of this process' outcome. + +_Please_ do not spend too much time on larger features or refactorings before the corresponding enhancement has been triaged. This may save everyone some amount of frustration and time, as the enhancement proposal might be rejected, and the code would never get merged. However, sometimes it's helpful to have some PoC code along with a proposal. + +We will do our best to triage incoming enhancement proposals quickly, with one of the following outcomes: + +* Accepted +* Rejected +* Proposal requires a design document to be further discussed + +Depending on how many enhancement proposals we receive at given times, it may take some time until we can look at yours. + +Also, please make sure you have read our +[Toolchain Guide](toolchain-guide.md) +to understand our toolchain and our continuous integration processes. It contains some invaluable information to get started with the complex code base that makes up Argo CD. + +## Quick start + +If you want a quick start contributing to Argo CD, take a look at issues that are labeled with +[help wanted](https://github.com/argoproj/argo-cd/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) +or +[good first issue](https://github.com/argoproj/argo-cd/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22). + +These are issues that were already triaged and accepted. + +If the issue is already attached to next +[version milestone](https://github.com/argoproj/argo-cd/milestones), +we have decided to also dedicate some of our time on reviews to PRs received for these issues. 
+ +We encourage our community to pick up issues that are labeled in this way *and* are attached to the next version's milestone, with a promise for them to get a proper review with the clear intention for the incoming PRs to get merged. + +## Triage process + +### Overview + +Our triage process for enhancement proposals ensures that we take a look at all incoming enhancements to determine whether we will accept code submissions to implement them. + +The process works as follows: + +* New Enhancement Proposals raised on our GitHub issue tracker are moved to the _Incoming_ column of the project's board. These are the proposals that are in the queue for triage. +* The _Active_ column holds the issues that are currently being triaged, or will be triaged next. +* The _Accepted_ column holds the issues that have been triaged and are considered good to be implemented (e.g. the project agreed that the feature would be great to have) +* The _Declined_ column holds the issues that were rejected during triage. The issue will be updated with information about why the proposal has been rejected. +* The _Needs discussion_ column holds the issues that were found to require additional information, or even a design document, during triage. + +### Triage cadence + +Triage of enhancement proposals is performed transparently, offline using issue comments and online in our weekly contributor's meeting. _Everyone_ is invited to participate in triaging, the process is not limited to participation only by maintainers. + +Usually, we will triage enhancement proposals in a First-In-First-Out order, which means that the oldest proposals will be triaged first. + +We aim to triage at least 10 proposals a week. Depending on our available time, we may be triaging a higher or lower number of proposals in any given week. 
+ +## Proposal states + +### Accepted proposals + +When a proposal is considered _Accepted_, it was decided that this enhancement would be valuable to the community at large and fits into the overall strategic roadmap of the project. + +Implementation of the issue may be started, either by the proposal's creator or another community member (including maintainers of the project). + +The issue should be refined enough by now to contain any concerns and guidelines to be taken into consideration during implementation. + +### Declined proposals + +We don't decline proposals lightly, and we will do our best to give a proper reasoning why we think that the proposal does not fit with the future of the project. Reasons for declining proposals may be - amongst others - that the change would be breaking for many, or that it does not meet the strategic direction of the project. Usually, discussion will be facilitated with the enhancement's creator before declining a proposal. + +Once a proposal is in _Declined_ state it's unlikely that we will accept code contributions for its implementation. + +### Proposals that need discussion + +Sometimes, we can't completely understand a proposal from its GitHub issue and require more information on the original intent or on more details about the implementation. If we are confronted with such an issue during the triage, we move this issue to the _Needs discussion_ column to indicate that we expect the issue's creator to supply more information on their idea. We may ask you to provide this information, either by adding that information to the issue itself or by joining one of our +[regular contributor's meeting](#regular-contributor-meeting) +to discuss the proposal with us. + +Also, issues that we find to require a more formal design document will be moved to this column. 
+ +## Design documents + +For some enhancement proposals (especially those that will change behavior of Argo CD substantially, are attached with some caveats or where upgrade/downgrade paths are not clear), a more formal design document will be required in order to fully discuss and understand the enhancement in the broader community. This requirement is usually determined during triage. If you submitted an enhancement proposal, we may ask you to provide this more formal write down, along with some concerns or topics that need to be addressed. + +Design documents are usually submitted as PR and use [this template](https://github.com/argoproj/argo-cd/blob/master/docs/proposals/001-proposal-template.md) as a guide what kind of information we're looking for. Discussion will take place in the review process. When a design document gets merged, we consider it as approved and code can be written and submitted to implement this specific design. + +## Regular contributor meeting + +Our community regularly meets virtually to discuss issues, ideas and enhancements around Argo CD. We do invite you to join this virtual meetings if you want to bring up certain things (including your enhancement proposals), participate in our triaging or just want to get to know other contributors. + +The current cadence of our meetings is weekly, every Thursday at 4:15pm UTC (8:15am Pacific, 11:15am Eastern, 5:15pm Central European, 9:45pm Indian). We use Zoom to conduct these meetings. + +* [Agenda document (Google Docs, includes Zoom link)](https://docs.google.com/document/d/1xkoFkVviB70YBzSEa4bDnu-rUZ1sIFtwKKG1Uw8XsY8) + +If you want to discuss something, we kindly ask you to put your item on the +[agenda](https://docs.google.com/document/d/1xkoFkVviB70YBzSEa4bDnu-rUZ1sIFtwKKG1Uw8XsY8) +for one of the upcoming meetings so that we can plan in the time for discussing it. 
\ No newline at end of file diff --git a/docs/developer-guide/contributing.md b/docs/developer-guide/contributing.md index a608260b71a48..d7524c8323f51 100644 --- a/docs/developer-guide/contributing.md +++ b/docs/developer-guide/contributing.md @@ -1,327 +1,2 @@ -# Contribution guide - -## Preface - -We want to make contributing to ArgoCD as simple and smooth as possible. - -This guide shall help you in setting up your build & test environment, so that you can start developing and testing bug fixes and feature enhancements without having to make too much effort in setting up a local toolchain. - -If you want to submit a PR, please read this document carefully, as it contains important information guiding you through our PR quality gates. - -As is the case with the development process, this document is under constant change. If you notice any error, or if you think this document is out-of-date, or if you think it is missing something: Feel free to submit a PR or submit a bug to our GitHub issue tracker. - -If you need guidance with submitting a PR, or have any other questions regarding development of ArgoCD, do not hesitate to [join our Slack](https://argoproj.github.io/community/join-slack) and get in touch with us in the `#argo-dev` channel! - -## Before you start - -You will need at least the following things in your toolchain in order to develop and test ArgoCD locally: - -* A Kubernetes cluster. You won't need a fully blown multi-master, multi-node cluster, but you will need something like K3S, Minikube or microk8s. You will also need a working Kubernetes client (`kubectl`) configuration in your development environment. The configuration must reside in `~/.kube/config` and the API server URL must point to the IP address of your local machine (or VM), and **not** to `localhost` or `127.0.0.1` if you are using the virtualized development toolchain (see below) - -* You will also need a working Docker runtime environment, to be able to build and run images. 
-The Docker version must be fairly recent, and support multi-stage builds. You should not work as root. Make your local user a member of the `docker` group to be able to control the Docker service on your machine. - -* Obviously, you will need a `git` client for pulling source code and pushing back your changes. - -* Last but not least, you will need a Go SDK and related tools (such as GNU `make`) installed and working on your development environment. The minimum required Go version for building ArgoCD is **v1.14.0**. - -* We will assume that your Go workspace is at `~/go`. - -!!! note - **Attention minikube users**: By default, minikube will create Kubernetes client configuration that uses authentication data from files. This is incompatible with the virtualized toolchain. So if you intend to use the virtualized toolchain, you have to embed this authentication data into the client configuration. To do so, issue `minikube config set embed-certs true` and restart your minikube. Please also note that minikube using the Docker driver is currently not supported with the virtualized toolchain, because the Docker driver exposes the API server on 127.0.0.1 hard-coded. If in doubt, run `make verify-kube-connect` to find out. - -## Submitting PRs - -When you submit a PR against ArgoCD's GitHub repository, a couple of CI checks will be run automatically to ensure your changes will build fine and meet certain quality standards. Your contribution needs to pass those checks in order to be merged into the repository. - -In general, it might be beneficial to only submit a PR for an existing issue. Especially for larger changes, an Enhancement Proposal should exist before. - -!!!note - - Please make sure that you always create PRs from a branch that is up-to-date with the latest changes from ArgoCD's master branch. Depending on how long it takes for the maintainers to review and merge your PR, it might be necessary to pull in latest changes into your branch again. 
- -Please understand that we, as an Open Source project, have limited capacities for reviewing and merging PRs to ArgoCD. We will do our best to review your PR and give you feedback as soon as possible, but please bear with us if it takes a little longer as expected. - -The following read will help you to submit a PR that meets the standards of our CI tests: - -### Title of the PR - -Please use a meaningful and concise title for your PR. This will help us to pick PRs for review quickly, and the PR title will also end up in the Changelog. - -We use the [Semantic PR title checker](https://github.com/zeke/semantic-pull-requests) to categorize your PR into one of the following categories: - -* `fix` - Your PR contains one or more code bug fixes -* `feat` - Your PR contains a new feature -* `docs` - Your PR improves the documentation -* `chore` - Your PR improves any internals of ArgoCD, such as the build process, unit tests, etc - -Please prefix the title of your PR with one of the valid categories. For example, if you chose the title your PR `Add documentation for GitHub SSO integration`, please use `docs: Add documentation for GitHub SSO integration` instead. - -### Contributor License Agreement - -Every contributor to ArgoCD must have signed the current Contributor License Agreement (CLA). You only have to sign the CLA when you are a first time contributor, or when the agreement has changed since your last time signing it. The main purpose of the CLA is to ensure that you hold the required rights for your contribution. The CLA signing is an automated process. - -You can read the current version of the CLA [here](https://cla-assistant.io/argoproj/argo-cd). - -### PR template checklist - -Upon opening a PR, the details will contain a checklist from a template. Please read the checklist, and tick those marks that apply to you. 
- -### Automated builds & tests - -After you have submitted your PR, and whenever you push new commits to that branch, GitHub will run a number of Continuous Integration checks against your code. It will execute the following actions, and each of them has to pass: - -* Build the Go code (`make build`) -* Generate API glue code and manifests (`make codegen`) -* Run a Go linter on the code (`make lint`) -* Run the unit tests (`make test`) -* Run the End-to-End tests (`make test-e2e`) -* Build and lint the UI code (`make lint-ui`) -* Build the `argocd` CLI (`make cli`) - -If any of these tests in the CI pipeline fail, it means that some of your contribution is considered faulty (or a test might be flaky, see below). - -### Code test coverage - -We use [CodeCov](https://codecov.io) in our CI pipeline to check for test coverage, and once you submit your PR, it will run and report on the coverage difference as a comment within your PR. If the difference is too high in the negative, i.e. your submission introduced a significant drop in code coverage, the CI check will fail. - -Whenever you develop a new feature or submit a bug fix, please also write appropriate unit tests for it. If you write a completely new module, please aim for at least 80% of coverage. -If you want to see how much coverage just a specific module (i.e. your new one) has, you can set the `TEST_MODULE` to the (fully qualified) name of that module with `make test`, i.e.: - -```bash - make test TEST_MODULE=github.com/argoproj/argo-cd/server/cache -... -ok github.com/argoproj/argo-cd/server/cache 0.029s coverage: 89.3% of statements -``` - -## Local vs Virtualized toolchain - -ArgoCD provides a fully virtualized development and testing toolchain using Docker images. It is recommended to use those images, as they provide the same runtime environment as the final product and it is much easier to keep up-to-date with changes to the toolchain and dependencies. 
But as using Docker comes with a slight performance penalty, you might want to set up a local toolchain.
- -### Clone the ArgoCD repository from your personal fork on GitHub - -* `mkdir -p ~/go/src/github.com/argoproj` -* `cd ~/go/src/github.com/argoproj` -* `git clone https://github.com/yourghuser/argo-cd` -* `cd argo-cd` - -### Optional: Setup an additional Git remote - -While everyone has their own Git workflow, the author of this document recommends to create a remote called `upstream` in your local copy pointing to the original ArgoCD repository. This way, you can easily keep your local branches up-to-date by merging in latest changes from the ArgoCD repository, i.e. by doing a `git pull upstream master` in your locally checked out branch. To create the remote, run `git remote add upstream https://github.com/argoproj/argo-cd` - -### Install the must-have requirements - -Make sure you fulfill the pre-requisites above and run some preliminary tests. Neither of them should report an error. - -* Run `kubectl version` -* Run `docker version` -* Run `go version` - -### Build (or pull) the required Docker image - -Build the required Docker image by running `make test-tools-image` or pull the latest version by issuing `docker pull argoproj/argocd-test-tools`. - -The `Dockerfile` used to build these images can be found at `test/container/Dockerfile`. - -### Test connection from build container to your K8s cluster - -Run `make verify-kube-connect`, it should execute without error. - -If you receive an error similar to the following: - -``` -The connection to the server 127.0.0.1:6443 was refused - did you specify the right host or port? -make: *** [Makefile:386: verify-kube-connect] Error 1 -``` - -you should edit your `~/.kube/config` and modify the `server` option to point to your correct K8s API (as described above). - -### Using k3d - -[k3d](https://github.com/rancher/k3d) is a lightweight wrapper to run [k3s](https://github.com/rancher/k3s), a minimal Kubernetes distribution, in docker. 
Because it's running in a docker container, you're dealing with docker's internal networking rules when using k3d. A typical Kubernetes cluster running on your local machine is part of the same network that you're on so you can access it using **kubectl**. However, a Kubernetes cluster running within a docker container (in this case, the one launched by make) cannot access 0.0.0.0 from inside the container itself, when 0.0.0.0 is a network resource outside the container itself (and/or the container's network). This is the cost of a fully self-contained, disposable Kubernetes cluster. The following steps should help with a successful `make verify-kube-connect` execution. - -1. Find your host IP by executing `ifconfig` on Mac/Linux and `ipconfig` on Windows. For most users, the following command works to find the IP address. - - * For Mac: - - ``` - IP=`ifconfig en0 | grep inet | grep -v inet6 | awk '{print $2}'` - echo $IP - ``` - - * For Linux: - - ``` - IP=`ifconfig eth0 | grep inet | grep -v inet6 | awk '{print $2}'` - echo $IP - ``` - - Keep in mind that this IP is dynamically assigned by the router so if your router restarts for any reason, your IP might change. - -2. Edit your ~/.kube/config and replace 0.0.0.0 with the above IP address. - -3. Execute a `kubectl version` to make sure you can still connect to the Kubernetes API server via this new IP. Run `make verify-kube-connect` and check if it works. - -4. Finally, so that you don't have to keep updating your kube-config whenever you spin up a new k3d cluster, add `--api-port $IP:6550` to your **k3d cluster create** command, where $IP is the value from step 1. An example command is provided here: - -``` -k3d cluster create my-cluster --wait --k3s-server-arg '--disable=traefik' --api-port $IP:6550 -p 443:443@loadbalancer -``` - -## The development cycle - -When you have developed and possibly manually tested the code you want to contribute, you should ensure that everything will build correctly. 
Commit your changes to the local copy of your Git branch and perform the following steps: - -### Pull in all build dependencies - -As build dependencies change over time, you have to synchronize your development environment with the current specification. In order to pull in all required dependencies, issue: - -* `make dep-ui` - -ArgoCD recently migrated to Go modules. Usually, dependencies will be downloaded on build time, but the Makefile provides two targets to download and vendor all dependencies: - -* `make mod-download` will download all required Go modules and -* `make mod-vendor` will vendor those dependencies into the ArgoCD source tree - -### Generate API glue code and other assets - -ArgoCD relies on Google's [Protocol Buffers](https://developers.google.com/protocol-buffers) for its API, and this makes heavy use of auto-generated glue code and stubs. Whenever you touched parts of the API code, you must re-generate the auto generated code. - -* Run `make codegen`, this might take a while -* Check if something has changed by running `git status` or `git diff` -* Commit any possible changes to your local Git branch, an appropriate commit message would be `Changes from codegen`, for example. - -!!!note - There are a few non-obvious assets that are auto-generated. You should not change the autogenerated assets, as they will be overwritten by a subsequent run of `make codegen`. Instead, change their source files. Prominent examples of non-obvious auto-generated code are `swagger.json` or the installation manifest YAMLs. - -### Build your code and run unit tests - -After the code glue has been generated, your code should build and the unit tests should run without any errors. Execute the following statements: - -* `make build` -* `make test` - -These steps are non-modifying, so there's no need to check for changes afterwards. 
- -### Lint your code base - -In order to keep a consistent code style in our source tree, your code must be well-formed in accordance to some widely accepted rules, which are applied by a Linter. - -The Linter might make some automatic changes to your code, such as indentation fixes. Some other errors reported by the Linter have to be fixed manually. - -* Run `make lint` and observe any errors reported by the Linter -* Fix any of the errors reported and commit to your local branch -* Finally, after the Linter reports no errors anymore, run `git status` or `git diff` to check for any changes made automatically by Lint -* If there were automatic changes, commit them to your local branch - -If you touched UI code, you should also run the Yarn linter on it: - -* Run `make lint-ui` -* Fix any of the errors reported by it - -## Contributing to Argo CD UI - -Argo CD, along with Argo Workflows, uses shared React components from [Argo UI](https://github.com/argoproj/argo-ui). Examples of some of these components include buttons, containers, form controls, -and others. Although you can make changes to these files and run them locally, in order to have these changes added to the Argo CD repo, you will need to follow these steps. - -1. Fork and clone the [Argo UI repository](https://github.com/argoproj/argo-ui). - -2. `cd` into your `argo-ui` directory, and then run `yarn install`. - -3. Make your file changes. - -4. Run `yarn start` to start a [storybook](https://storybook.js.org/) dev server and view the components in your browser. Make sure all your changes work as expected. - -5. Use [yarn link](https://classic.yarnpkg.com/en/docs/cli/link/) to link Argo UI package to your Argo CD repository. 
(Commands below assume that `argo-ui` and `argo-cd` are both located within the same parent folder) - - * `cd argo-ui` - * `yarn link` - * `cd ../argo-cd/ui` - * `yarn link argo-ui` - - Once `argo-ui` package has been successfully linked, test out changes in your local development environment. - -6. Commit changes and open a PR to [Argo UI](https://github.com/argoproj/argo-ui). - -7. Once your PR has been merged in Argo UI, `cd` into your `argo-cd` folder and run `yarn add https://github.com/argoproj/argo-ui.git`. This will update the commit SHA in the `ui/yarn.lock` file to use the lastest master commit for argo-ui. - -8. Submit changes to `ui/yarn.lock`in a PR to Argo CD. - -## Setting up a local toolchain - -For development, you can either use the fully virtualized toolchain provided as Docker images, or you can set up the toolchain on your local development machine. Due to the dynamic nature of requirements, you might want to stay with the virtualized environment. - -### Install required dependencies and build-tools - -!!!note - The installations instructions are valid for Linux hosts only. Mac instructions will follow shortly. - -For installing the tools required to build and test ArgoCD on your local system, we provide convenient installer scripts. By default, they will install binaries to `/usr/local/bin` on your system, which might require `root` privileges. - -You can change the target location by setting the `BIN` environment before running the installer scripts. For example, you can install the binaries into `~/go/bin` (which should then be the first component in your `PATH` environment, i.e. `export PATH=~/go/bin:$PATH`): - -```shell -make BIN=~/go/bin install-tools-local -``` - -Additionally, you have to install at least the following tools via your OS's package manager (this list might not be always up-to-date): - -* Git LFS plugin -* GnuPG version 2 - -### Install Go dependencies - -You need to pull in all required Go dependencies. 
To do so, run - -* `make mod-download-local` -* `make mod-vendor-local` - -### Test your build toolchain - -The first thing you can do whether your build toolchain is setup correctly is by generating the glue code for the API and after that, run a normal build: - -* `make codegen-local` -* `make build-local` - -This should return without any error. - -### Run unit-tests - -The next thing is to make sure that unit tests are running correctly on your system. These will require that all dependencies, such as Helm, Kustomize, Git, GnuPG, etc are correctly installed and fully functioning: - -* `make test-local` - -### Run end-to-end tests - -The final step is running the End-to-End testsuite, which makes sure that your Kubernetes dependencies are working properly. This will involve starting all of the ArgoCD components locally on your computer. The end-to-end tests consists of two parts: a server component, and a client component. - -* First, start the End-to-End server: `make start-e2e-local`. This will spawn a number of processes and services on your system. -* When all components have started, run `make test-e2e-local` to run the end-to-end tests against your local services. - -For more information about End-to-End tests, refer to the [End-to-End test documentation](test-e2e.md). - - -## Enhancement proposals - -If you are proposing a major feature, change in design or process refactor, please help define how it would look like with a new enhancement proposal as described in the enhancement proposal [template](/docs/proposals/001-proposal-template.md). 
- +The contents of this document have been moved to the +[Toolchain guide](toolchain-guide.md) \ No newline at end of file diff --git a/docs/developer-guide/contributors-quickstart.md b/docs/developer-guide/contributors-quickstart.md new file mode 100644 index 0000000000000..0e98fab7ec940 --- /dev/null +++ b/docs/developer-guide/contributors-quickstart.md @@ -0,0 +1,103 @@ +# Contributors Quick-Start + +This guide is a starting point for first-time contributors running Argo CD locally for the first time. + +It skips advanced topics such as codegen, which are covered in the [running locally guide](running-locally.md) +and the [toolchain guide](toolchain-guide.md). + +## Getting Started + +### Install Go + +Install version 1.18 or newer (Verify version by running `go version`) + +### Clone the Argo CD repo + +```shell +mkdir -p $GOPATH/src/github.com/argoproj/ && +cd $GOPATH/src/github.com/argoproj && +git clone https://github.com/argoproj/argo-cd.git +``` + +### Install Docker + + + +### Install or Upgrade `kind` (Optional - Should work with any local cluster) + + + +### Start Your Local Cluster + +```shell +kind create cluster +``` + +### Install Argo CD + +```shell +kubectl create namespace argocd && +kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/master/manifests/install.yaml +``` + +Set kubectl config to avoid specifying the namespace in every kubectl command. +All following commands in this guide assume the namespace is already set. + +```shell +kubectl config set-context --current --namespace=argocd +``` + +### Install `yarn` + + + +### Install `goreman` + + + +### Run Argo CD + +```shell +cd argo-cd +make start-local ARGOCD_GPG_ENABLED=false +``` + +- Navigate to [localhost:4000](http://localhost:4000) in your browser to load the Argo CD UI +- It may take a few minutes for the UI to be responsive + +!!! note + If the UI is not working, check the logs from `make start-local`. The logs are `DEBUG` level by default. 
If the logs are + too noisy to find the problem, try editing log levels for the commands in the `Procfile` in the root of the Argo CD repo. + +## Making Changes + +### UI Changes + +Modifying the User-Interface (by editing .tsx or .scss files) auto-reloads the changes on port 4000. + +### Backend Changes + +Modifying the API server, repo server, or a controller requires restarting the current `make start-local` session to reflect the changes. + +### CLI Changes + +Modifying the CLI requires restarting the current `make start-local` session to reflect the changes. + +To test most CLI commands, you will need to log in. + +First, get the auto-generated secret: + +```shell +kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d; echo +``` + +Then log in using that password and username `admin`: + +```shell +dist/argocd login localhost:8080 +``` + +--- +Congrats on making it to the end of this runbook! 🚀 + +For more on Argo CD, find us in Slack - [#argo-contributors](https://cloud-native.slack.com/archives/C020XM04CUW) diff --git a/docs/developer-guide/debugging-remote-environment.md b/docs/developer-guide/debugging-remote-environment.md index 46894f793e171..7f8102a75c502 100644 --- a/docs/developer-guide/debugging-remote-environment.md +++ b/docs/developer-guide/debugging-remote-environment.md @@ -20,6 +20,36 @@ curl -sSfL https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/i ## Connect Connect to one of the services, for example, to debug the main ArgoCD server run: ```shell +kubectl config set-context --current --namespace argocd +telepresence helm install # Installs telepresence into your cluster +telepresence connect # Starts the connection to your cluster (bound to the current namespace) +telepresence intercept argocd-server --port 8080:http --env-file .envrc.remote # Starts the interception +``` +* `--port` forwards traffic of remote port http to 8080 locally (use `--port 8080:https` if argocd-server 
terminates TLS) +* `--env-file` writes all the environment variables of the remote pod into a local file, the variables are also set on the subprocess of the `--run` command + +With this, any traffic that hits your argocd-server service in the cluster (e.g. through a LB / ingress) will be forwarded to your laptop on port 8080. So that you can now start argocd-server locally to debug or test new code. If you launch argocd-server using the environment variables in `.envrc.remote`, it is able to fetch all the configmaps, secrets and so on from the cluster and transparently connect to the other microservices so that no further configuration should be necessary, and it behaves exactly the same as in the cluster. + +List current status of Telepresence using: +```shell +telepresence status +``` + +Stop the intercept using: +```shell +telepresence leave argocd-server-argocd +``` + +And uninstall telepresence from your cluster: +```shell +telepresence helm uninstall +``` + +See [this quickstart](https://www.telepresence.io/docs/latest/howtos/intercepts/) for more information on how to intercept services using Telepresence. + +### Connect (telepresence v1) +Use the following command instead: +```shell telepresence --swap-deployment argocd-server --namespace argocd --env-file .envrc.remote --expose 8080:8080 --expose 8083:8083 --run bash ``` * `--swap-deployment` changes the argocd-server deployment @@ -27,7 +57,6 @@ telepresence --swap-deployment argocd-server --namespace argocd --env-file .envr * `--env-file` writes all the environment variables of the remote pod into a local file, the variables are also set on the subprocess of the `--run` command * `--run` defines which command to run once a connection is established, use `bash`, `zsh` or others - ## Debug Once a connection is established, use your favorite tools to start the server locally. 
@@ -36,22 +65,23 @@ Once a connection is established, use your favorite tools to start the server lo * Run `./dist/argocd-server` ### VSCode -In VSCode use the integrated terminal to run the Telepresence command to connect. Then, to run argocd-server service use the following configuration. -Make sure to run `packr` before starting the debugging session to generate the assets. -Update the configuration file to point to kubeconfig file: `KUBECONFIG=` (required) +In VSCode use the following launch configuration to run argocd-server: + ```json { - "name": "Launch", + "name": "Launch argocd-server", "type": "go", "request": "launch", "mode": "auto", - "program": "${workspaceFolder}/cmd/argocd-server", + "program": "${workspaceFolder}/cmd/main.go", "envFile": [ "${workspaceFolder}/.envrc.remote", ], "env": { + "ARGOCD_BINARY_NAME": "argocd-server", "CGO_ENABLED": "0", "KUBECONFIG": "/path/to/kube/config" } } -``` \ No newline at end of file +``` + diff --git a/docs/developer-guide/dependencies.md b/docs/developer-guide/dependencies.md index a6cee142e5f29..410fd1241b1b2 100644 --- a/docs/developer-guide/dependencies.md +++ b/docs/developer-guide/dependencies.md @@ -45,7 +45,7 @@ If you make changes to the Argo UI component, and your Argo CD changes depend on 1. Make changes to Argo UI and submit the PR request. 2. Also, prepare your Argo CD changes, but don't create the PR just yet. 3. **After** the Argo UI PR has been merged to master, then as part of your Argo CD changes: - - Run `yarn add git+https://github.com/argoproj/argo-ui.git`, and then, + - Run `yarn add git+https://github.com/argoproj/argo-ui.git` in the `ui/` directory, and then, - Check in the regenerated yarn.lock file as part of your Argo CD commit 4. Create the Argo CD PR when you are ready. The PR build and test checks should pass. 
diff --git a/docs/developer-guide/extensions/proxy-extensions.md b/docs/developer-guide/extensions/proxy-extensions.md new file mode 100644 index 0000000000000..9982a5cdee59a --- /dev/null +++ b/docs/developer-guide/extensions/proxy-extensions.md @@ -0,0 +1,335 @@ +# Proxy Extensions +*Current Status: [Alpha][1] (Since v2.7.0)* + +## Overview + +With UI extensions it is possible to enhance Argo CD web interface to +provide valuable data to the user. However the data is restricted to +the resources that belongs to the Application. With proxy extensions +it is also possible to add additional functionality that have access +to data provided by backend services. In this case Argo CD API server +acts as a reverse-proxy authenticating and authorizing incoming +requests before forwarding to the backend service. + +## Configuration + +As proxy extension is in [Alpha][1] phase, the feature is disabled by +default. To enable it, it is necessary to configure the feature flag +in Argo CD command parameters. The easiest way to to properly enable +this feature flag is by adding the `server.enable.proxy.extension` key +in the existing `argocd-cmd-params-cm`. For example: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-cmd-params-cm + namespace: argocd +data: + server.enable.proxy.extension: "true" +``` + +Once the proxy extension is enabled, it can be configured in the main +Argo CD configmap ([argocd-cm][2]). 
+ +The example below demonstrates all possible configurations available +for proxy extensions: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-cm + namespace: argocd +data: + extension.config: | + extensions: + - name: httpbin + backend: + connectionTimeout: 2s + keepAlive: 15s + idleConnectionTimeout: 60s + maxIdleConnections: 30 + services: + - url: http://httpbin.org + headers: + - name: some-header + value: '$some.argocd.secret.key' + cluster: + name: some-cluster + server: https://some-cluster +``` + +Note: There is no need to restart Argo CD Server after modifiying the +`extension.config` entry in Argo CD configmap. Changes will be +automatically applied. A new proxy registry will be built making +all new incoming extensions requests (`/extensions/*`) to +respect the new configuration. + +Every configuration entry is explained below: + +#### `extensions` (*list*) + +Defines configurations for all extensions enabled. + +#### `extensions.name` (*string*) +(mandatory) + +Defines the endpoint that will be used to register the extension +route. For example, if the value of the property is `extensions.name: +my-extension` then the backend service will be exposed under the +following url: + + /extensions/my-extension + +#### `extensions.backend.connectionTimeout` (*duration string*) +(optional. Default: 2s) + +Is the maximum amount of time a dial to the extension server will wait +for a connect to complete. + +#### `extensions.backend.keepAlive` (*duration string*) +(optional. Default: 15s) + +Specifies the interval between keep-alive probes for an active network +connection between the API server and the extension server. + +#### `extensions.backend.idleConnectionTimeout` (*duration string*) +(optional. Default: 60s) + +Is the maximum amount of time an idle (keep-alive) connection between +the API server and the extension server will remain idle before +closing itself. + +#### `extensions.backend.maxIdleConnections` (*int*) +(optional. 
Default: 30) + +Controls the maximum number of idle (keep-alive) connections between +the API server and the extension server. + +#### `extensions.backend.services` (*list*) + +Defines a list with backend url by cluster. + +#### `extensions.backend.services.url` (*string*) +(mandatory) + +Is the address where the extension backend must be available. + +#### `extensions.backend.services.headers` (*list*) + +If provided, the headers list will be added on all outgoing requests +for this service config. Existing headers in the incoming request with +the same name will be overriden by the one in this list. Reserved header +names will be ignored (see the [headers](#incoming-request-headers) below). + +#### `extensions.backend.services.headers.name` (*string*) +(mandatory) + +Defines the name of the header. It is a mandatory field if a header is +provided. + +#### `extensions.backend.services.headers.value` (*string*) +(mandatory) + +Defines the value of the header. It is a mandatory field if a header is +provided. The value can be provided as verbatim or as a reference to an +Argo CD secret key. In order to provide it as a reference, it is +necessary to prefix it with a dollar sign. + +Example: + + value: '$some.argocd.secret.key' + +In the example above, the value will be replaced with the one from +the argocd-secret with key 'some.argocd.secret.key'. + +#### `extensions.backend.services.cluster` (*object*) +(optional) + +If provided, and multiple services are configured, will have to match +the application destination name or server to have requests properly +forwarded to this service URL. If there are multiple backends for the +same extension this field is required. In this case at least one of +the two will be required: name or server. It is better to provide both +values to avoid problems with applications unable to send requests to +the proper backend service. 
If only one backend service is +configured, this field is ignored, and all requests are forwarded to +the configured one. + +#### `extensions.backend.services.cluster.name` (*string*) +(optional) + +It will be matched with the value from +`Application.Spec.Destination.Name` + +#### `extensions.backend.services.cluster.server` (*string*) +(optional) + +It will be matched with the value from +`Application.Spec.Destination.Server`. + +## Usage + +Once a proxy extension is configured it will be made available under +the `/extensions/` endpoint exposed by Argo CD API +server. The example above will proxy requests to +`/extensions/httpbin/` to `http://httpbin.org`. + +The diagram below illustrates an interaction possible with this +configuration: + +``` + ┌─────────────┐ + │ Argo CD UI │ + └────┬────────┘ + │ ▲ + GET /extensions/httpbin/anything │ │ 200 OK + + authn/authz headers │ │ + ▼ │ + ┌─────────┴────────┐ + │Argo CD API Server│ + └──────┬───────────┘ + │ ▲ + GET http://httpbin.org/anything │ │ 200 OK + │ │ + ▼ │ + ┌────────┴────────┐ + │ Backend Service │ + └─────────────────┘ +``` + +### Incoming Request Headers + +Note that Argo CD API Server requires additional HTTP headers to be +sent in order to enforce if the incoming request is authenticated and +authorized before being proxied to the backend service. The headers +are documented below: + +#### `Cookie` + +Argo CD UI keeps the authentication token stored in a cookie +(`argocd.token`). This value needs to be sent in the `Cookie` header +so the API server can validate its authenticity. + +Example: + + Cookie: argocd.token=eyJhbGciOiJIUzI1Ni... + +The entire Argo CD cookie list can also be sent. The API server will +only use the `argocd.token` attribute in this case. + +#### `Argocd-Application-Name` (mandatory) + +This is the name of the project for the application for which the +extension is being invoked. The header value must follow the format: +`":"`. 
+ +Example: + + Argocd-Application-Name: namespace:app-name + +#### `Argocd-Project-Name` (mandatory) + +The logged in user must have access to this project in order to be +authorized. + +Example: + + Argocd-Project-Name: default + +Argo CD API Server will ensure that the logged in user has the +permission to access the resources provided by the headers above. The +validation is based on pre-configured [Argo CD RBAC rules][3]. The +same headers are also sent to the backend service. The backend service +must also validate if the validated headers are compatible with the +rest of the incoming request. + +### Outgoing Requests Headers + +Requests sent to backend services will be decorated with additional +headers. The outgoing request headers are documented below: + +#### `Argocd-Target-Cluster-Name` + +Will be populated with the value from `app.Spec.Destination.Name` if +it is not empty string in the application resource. + +#### `Argocd-Target-Cluster-URL` + +Will be populated with the value from `app.Spec.Destination.Server` if +it is not empty string is the Application resource. + +Note that additional pre-configured headers can be added to outgoing +request. See [backend service headers](#extensionsbackendservicesheaders-list) +section for more details. + +### Multi Backend Use-Case + +In some cases when Argo CD is configured to sync with multiple remote +clusters, there might be a need to call a specific backend service in +each of those clusters. The proxy-extension can be configured to +address this use-case by defining multiple services for the same +extension. 
Consider the following configuration as an example: + +```yaml +extension.config: | + extensions: + - name: some-extension + backend: + services: + - url: http://extension-name.com:8080 + cluster + name: kubernetes.local + - url: https://extension-name.ppd.cluster.k8s.local:8080 + cluster + server: user@ppd.cluster.k8s.local +``` + +In the example above, the API server will inspect the Application +destination to verify which URL should be used to proxy the incoming +request to. + +## Security + +When a request to `/extensions/*` reaches the API Server, it will +first verify if it is authenticated with a valid token. It does so by +inspecting if the `Cookie` header is properly sent from Argo CD UI +extension. + +Once the request is authenticated it is then verified if the +user has permission to invoke this extension. The permission is +enforced by Argo CD RBAC configuration. The details about how to +configure the RBAC for proxy-extensions can be found in the [RBAC +documentation][3] page. + +Once the request is authenticated and authorized by the API server, it +is then sanitized before being sent to the backend service. The +request sanitization will remove sensitive information from the +request like the `Cookie` and `Authorization` headers. + +A new `Authorization` header can be added to the outgoing request by +defining it as a header in the `extensions.backend.services.headers` +configuration. Consider the following example: + +```yaml +extension.config: | + extensions: + - name: some-extension + backend: + services: + - url: http://extension-name.com:8080 + headers: + - name: Authorization + value: '$some-extension.authorization.header' +``` + +In the example above, all requests sent to +`http://extension-name.com:8080` will have an additional +`Authorization` header. 
The value of this header will be the one from +the [argocd-secret](../../operator-manual/argocd-secret-yaml.md) with +key `some-extension.authorization.header` + +[1]: https://github.com/argoproj/argoproj/blob/master/community/feature-status.md +[2]: https://argo-cd.readthedocs.io/en/stable/operator-manual/argocd-cm.yaml +[3]: ../../operator-manual/rbac.md#the-extensions-resource diff --git a/docs/developer-guide/extensions/ui-extensions.md b/docs/developer-guide/extensions/ui-extensions.md new file mode 100644 index 0000000000000..2c25748beb148 --- /dev/null +++ b/docs/developer-guide/extensions/ui-extensions.md @@ -0,0 +1,97 @@ +# UI Extensions + +Argo CD web user interface can be extended with additional UI elements. Extensions should be delivered as a javascript file +in the `argocd-server` Pods that are placed in the `/tmp/extensions` directory and starts with `extension` prefix ( matches to `^extension(.*)\.js$` regex ). + +``` +/tmp/extensions +├── example1 +│   └── extension-1.js +└── example2 + └── extension-2.js +``` + +Extensions are loaded during initial page rendering and should register themselves using API exposed in the `extensionsAPI` global variable. (See +corresponding extension type details for additional information). + +The extension should provide a React component that is responsible for rendering the UI element. Extension should not bundle the React library. +Instead extension should use the `react` global variable. You can leverage `externals` setting if you are using webpack: + +```js +externals: { + react: "React"; +} +``` + +## Resource Tab Extensions + +Resource Tab extensions is an extension that provides an additional tab for the resource sliding panel at the Argo CD Application details page. 
+ +The resource tab extension should be registered using the `extensionsAPI.registerResourceExtension` method: + +```typescript +registerResourceExtension(component: ExtensionComponent, group: string, kind: string, tabTitle: string, opts?: {icon?: string}) +``` + +- `component: ExtensionComponent` is a React component that receives the following properties: + + - application: Application - Argo CD Application resource; + - resource: State - the kubernetes resource object; + - tree: ApplicationTree - includes list of all resources that comprise the application; + + See properties interfaces in [models.ts](https://github.com/argoproj/argo-cd/blob/master/ui/src/app/shared/models.ts) + +- `group: string` - the glob expression that matches the group of the resource; note: use globstar (`**`) to match all groups including empty string; +- `kind: string` - the glob expression that matches the kind of the resource; +- `tabTitle: string` - the extension tab title. +- `opts: Object` - additional options: + - `icon: string` - the class name that represents the icon from the [https://fontawesome.com/](https://fontawesome.com/) library (e.g. 'fa-calendar-alt'); + +Below is an example of a resource tab extension: + +```javascript +((window) => { + const component = () => { + return React.createElement("div", {}, "Hello World"); + }; + window.extensionsAPI.registerResourceExtension( + component, + "*", + "*", + "Nice extension" + ); +})(window); +``` + +## System Level Extensions + +Argo CD allows you to add new items to the sidebar that will be displayed as a new page with a custom component when clicked.
The system level extension should be registered using the `extensionsAPI.registerSystemLevelExtension` method: + +```typescript +registerSystemLevelExtension(component: ExtensionComponent, title: string, path: string, iconClassName?: string) +``` + +Below is an example of a simple system level extension: + +```typescript +((window) => { + const component = () => { + return React.createElement( + "div", + { style: { padding: "10px" } }, + "Hello World" + ); + }; + window.extensionsAPI.registerSystemLevelExtension( + component, + "Test Ext", + "/hello", + "fa-flask" + ); +})(window); +``` + +## Application Tab Extensions + +Since the Argo CD Application is a Kubernetes resource, application tabs can be the same as any other resource tab. +Make sure to use 'argoproj.io'/'Application' as group/kind and an extension will be used to render the application-level tab. diff --git a/docs/developer-guide/faq.md b/docs/developer-guide/faq.md index 2614c5530acde..5d9dda31949f7 100644 --- a/docs/developer-guide/faq.md +++ b/docs/developer-guide/faq.md @@ -4,13 +4,22 @@ ### Can I discuss my contribution ideas somewhere? -Sure thing! You can either open an Enhancement Proposal in our GitHub issue tracker or you can [join us on Slack](https://argoproj.github.io/community/join-slack) in channel #argo-dev to discuss your ideas and get guidance for submitting a PR. +Sure thing! You can either open an Enhancement Proposal in our GitHub issue tracker or you can [join us on Slack](https://argoproj.github.io/community/join-slack) in channel #argo-contributors to discuss your ideas and get guidance for submitting a PR. + +!!! note + Regular [contributor meetings](https://argo-cd.readthedocs.io/en/latest/developer-guide/code-contributions/#regular-contributor-meeting) are held weekly. Please follow the link for more details. ### No one has looked at my PR yet. Why? -As we have limited manpower, it can sometimes take a while for someone to respond to your PR.
Especially, when your PR contains complex or non-obvious changes. Please bear with us, we try to look at every PR that we receive. +As we have limited resources, it can sometimes take a while for someone to respond to your PR. Especially, when your PR contains complex or non-obvious changes. Please bear with us, we try to look at every PR that we receive. Kindly ensure all applicable requirements have been met in your PR checklist. + +### How do I get my PR labeled `ready-for-review` ? + +Conventionally, an initial review is performed by an Argo member or reviewer. Once the initial review is approved, it can be labeled `ready-for-review` and then added to the [Argo CD Review](https://github.com/orgs/argoproj/projects/28) GitHub project. Details of the project dashboard can be found [here](https://github.com/orgs/argoproj/projects/28?pane=info). + +High-quality reviews are strongly encouraged from the community. A member/reviewer may work with a community reviewer to get a PR labeled `ready-for-review`. It can then be added to the project dashboard and marked `Community Reviewed`. -### Why has my PR been declined? I put much work in it! +### Why has my PR been declined? I put so much work into it! We appreciate that you have put your valuable time and know how into a contribution. Alas, some changes do not fit into the overall ArgoCD philosophy, and therefore can't be merged into the official ArgoCD source tree.
diff --git a/docs/developer-guide/release-process-and-cadence.md b/docs/developer-guide/release-process-and-cadence.md new file mode 100644 index 0000000000000..051de617f0776 --- /dev/null +++ b/docs/developer-guide/release-process-and-cadence.md @@ -0,0 +1,80 @@ +# Release Process And Cadence + +## Release Cycle + +### Schedule + +These are the upcoming releases dates: + +| Release | Release Planning Meeting | Release Candidate 1 | General Availability | Release Champion | Checklist | +|---------|--------------------------|-----------------------|----------------------|-------------------------------------------------------|---------------------------------------------------------------| +| v2.6 | Monday, Dec. 12, 2022 | Monday, Dec. 19, 2022 | Monday, Feb. 6, 2023 | [William Tam](https://github.com/wtam2018) | [checklist](https://github.com/argoproj/argo-cd/issues/11563) | +| v2.7 | Monday, Mar. 6, 2023 | Monday, Mar. 20, 2023 | Monday, May. 1, 2023 | [Pavel Kostohrys](https://github.com/pasha-codefresh) | [checklist](https://github.com/argoproj/argo-cd/issues/12762) | +| v2.8 | Monday, Jun. 20, 2023 | Monday, Jun. 26, 2023 | Monday, Aug. 7, 2023 | [Keith Chong](https://github.com/keithchong) | [checklist](https://github.com/argoproj/argo-cd/issues/13742) | +| v2.9 | Monday, Sep. 4, 2023 | Monday, Sep. 18, 2023 | Monday, Nov. 6, 2023 | [Leonardo Almeida](https://github.com/leoluz) | [checklist](https://github.com/argoproj/argo-cd/issues/14078) | +| v2.10 | Monday, Dec. 4, 2023 | Monday, Dec. 18, 2023 | Monday, Feb. 5, 2024 | + + +Actual release dates might differ from the plan by a few days. + +### Release Process + +#### Minor Releases (e.g. 2.x.0) + +A minor Argo CD release occurs four times a year, once every three months. Each General Availability (GA) release is +preceded by several Release Candidates (RCs). The first RC is released three weeks before the scheduled GA date. This +effectively means that there is a three-week feature freeze. 
+ +These are the approximate release dates: + +* The first Monday of February +* The first Monday of May +* The first Monday of August +* The first Monday of November + +Dates may be shifted slightly to accommodate holidays. Those shifts should be minimal. + +#### Patch Releases (e.g. 2.5.x) + +Argo CD patch releases occur on an as-needed basis. Only the three most recent minor versions are eligible for patch +releases. Versions older than the three most recent minor versions are considered EOL and will not receive bug fixes or +security updates. + +#### Minor Release Planning Meeting + +Roughly two weeks before the RC date, there will be a meeting to discuss which features are planned for the RC. This meeting is +for contributors to advocate for certain features. Features which have at least one approver (besides the contributor) +who can assure they will review/merge by the RC date will be included in the release milestone. All other features will +be dropped from the milestone (and potentially shifted to the next one). + +Since not everyone will be able to attend the meeting, there will be a meeting doc. Contributors can add their feature +to a table, and Approvers can add their name to the table. Features with a corresponding approver will remain in the +release milestone. + +#### Release Champion + +To help manage all the steps involved in a release, we will have a Release Champion. The Release Champion will be +responsible for a checklist of items for their release. The checklist is an issue template in the Argo CD repository. + +The Release Champion can be anyone in the Argo CD community. Some tasks (like cherry-picking bug fixes and cutting +releases) require [Approver](https://github.com/argoproj/argoproj/blob/master/community/membership.md#community-membership) +membership. The Release Champion can delegate tasks when necessary and will be responsible for coordinating with the +Approver. 
+ +### Feature Acceptance Criteria + +To be eligible for inclusion in a minor release, a new feature must meet the following criteria before the release’s RC +date. + +If it is a large feature that involves significant design decisions, that feature must be described in a Proposal, and +that Proposal must be reviewed and merged. + +The feature PR must include: + +* Tests (passing) +* Documentation +* If necessary, a note in the Upgrading docs for the planned minor release +* The PR must be reviewed, approved, and merged by an Approver. + +If these criteria are not met by the RC date, the feature will be ineligible for inclusion in the RC series or GA for +that minor release. It will have to wait for the next minor release. diff --git a/docs/developer-guide/releasing.md b/docs/developer-guide/releasing.md index ed8297fb0e5c5..bb51ebfa8d14b 100644 --- a/docs/developer-guide/releasing.md +++ b/docs/developer-guide/releasing.md @@ -1,113 +1,86 @@ # Releasing -## Automated release procedure +## Introduction -Starting from `release-1.6` branch, ArgoCD can be released in an automated fashion -using GitHub actions. The release process takes about 20 minutes, sometimes a -little less, depending on the performance of GitHub Actions runners. +Argo CD is released in a 2 step automated fashion using GitHub actions. The release process takes about 60 minutes, +sometimes a little less, depending on the performance of GitHub Actions runners. The target release branch must already exist in the GitHub repository. If you for -example want to create a release `v1.7.0`, the corresponding release branch -`release-1.7` needs to exist, otherwise, the release cannot be built. Also, +example want to create a release `v2.7.0`, the corresponding release branch +`release-2.7` needs to exist, otherwise, the release cannot be built. Also, the trigger tag should always be created in the release branch, checked out in your local repository clone. 
Before triggering the release automation, the `CHANGELOG.md` should be updated with the latest information, and this change should be committed and pushed to the GitHub repository to the release branch. Afterward, the automation can be -triggered. +triggered. This will be automated in the very near future. **Manual steps before release creation:** * Update `CHANGELOG.md` with changes for this release * Commit & push changes to `CHANGELOG.md` -* Prepare release notes (save to some file, or copy from Changelog) -**The automation will perform the following steps:** +**The `Init ARGOCD Release` workflow will perform the following steps:** * Update `VERSION` file in the release branch * Update manifests with image tags of the new version in the release branch -* Build the Docker image and push to Docker Hub -* Create a release tag in the GitHub repository -* Create a GitHub release and attach the required assets to it (CLI binaries, ...) +* Create a pull request to submit the above changes -Finally, it will the remove trigger tag from the repository again. +**The `Publish ArgoCD Release` workflow will perform the following steps:** -Automation supports both, GA and pre-releases. The automation is triggered by -pushing a tag to the repository. The tag must be in one of the following formats -to trigger the GH workflow: +* Build, push, and signs the container image to Quay.io +* Generate a provenance for the container image +* Builds the CLI binaries, release-notes, and then creates a GitHub release and attaches the required assets. +* Generate a provenance for the CLI binaries +* Generate and sign a sbom +* Update the stable tag when applicable +* Update `VERSION` file in the master branch when a new release is GA -* GA: `release-v..` -* Pre-release: `release-v..-rc` +## Steps -The tag must be an annotated tag, and it must contain the release notes in the -commit message. Please note that Markdown uses `#` character for formatting, but -Git uses it as comment char. 
To solve this, temporarily switch Git's comment char -to something else, the `;` character is recommended. +### Step 1 - Update Version and Manifest -For example, consider you have configured the Git remote for the repository to -`github.com/argoproj/argo-cd` to be named `upstream` and are in your locally -checked out repo: +1. Ensure that the TARGET_BRANCH already exist. +2. Visit the [Release GitHub Action](https://github.com/argoproj/argo-cd/actions/workflows/init-release.yaml) +and choose which branch you would like to work from. +3. Enter the TARGET_BRANCH to checkout. +4. Enter the TARGET_VERSION that will be used to build manifest and `VERSION` file. (e.g `2.7.0-rc1`) -```shell -git config core.commentChar ';' -git tag -a -F /path/to/release-notes.txt release-v1.6.0-rc2 -git push upstream release-v1.6.0-rc2 -git tag -d release-v1.6.0-rc2 -git config core.commentChar '#' +![GitHub Release Action](../assets/release-action.png) -``` +When the action is completed a pull request will be generated that contains the updated manifest and `Version` file. + +5. Merge the pull request and proceed to step 2. -For convenience, there is a shell script in the tree that ensures all the -pre-requisites are met and that the trigger is well-formed before pushing -it to the GitHub repo. +### Step 2 - Tag Release Branch -In summary, the modifications it does are: +The steps below need to be executed by someone with write access in Argo CD upstream repo. -* Create annotated trigger tag in your local repository -* Push the tag to the GitHub repository to trigger the workflow -* Remove trigger tag from your local repository +1. Checkout the release branch. Example: `git fetch upstream && git + checkout release-2.7` +2. 
Run the script found at `hack/trigger-release.sh` as follows: -The script can be found at `hack/trigger-release.sh` and is used as follows: +```shell +./hack/trigger-release.sh +``` +Example: ```shell -./hack/trigger-release.sh [] +./hack/trigger-release.sh v2.7.2 upstream ``` -The `` identifier needs to be specified **without** the `release-` -prefix, so just specify it as `v1.6.0-rc2` for example. The `` -specifies the name of the remote used to push to the GitHub repository. - -If you omit the ``, an editor will pop-up asking you to -enter the tag's annotation so you can paste the release notes, save, and exit. -It will also take care of temporarily configuring the `core.commentChar` and -setting it back to its original state. - -:warning: - It is strongly recommended to use this script to trigger the workflow - instead of manually pushing a tag to the repository. - -Once the trigger tag is pushed to the repo, the GitHub workflow will start -execution. You can follow its progress under the `Actions` tab, the name of the -action is `Create release`. Don't get confused by the name of the running -workflow, it will be the commit message of the latest commit to the `master` -branch, this is a limitation of GH actions. - -The workflow performs necessary checks so that the release can be successfully -built before the build actually starts. It will error when one of the -prerequisites is not met, or if the release cannot be built (i.e. already -exists, release notes invalid, etc etc). You can see a summary of what has -failed in the job's overview page and more detailed errors in the output -of the step that has failed. - -:warning: +!!! tip + The tag must be in one of the following formats to trigger the GH workflow:
+ * GA: `v<MAJOR>.<MINOR>.<PATCH>`
+ * Pre-release: `v<MAJOR>.<MINOR>.<PATCH>-rc<RC#>` + +Once the script is executed successfully, a GitHub workflow will start +execution. You can follow its progress under the [Actions](https://github.com/argoproj/argo-cd/actions/workflows/release.yaml) tab, the name of the action is `Publish ArgoCD Release`. + +!!! warning You cannot perform more than one release on the same release branch at the - same time. For example, both `v1.6.0` and `v1.6.1` would operate on the - `release-1.6` branch. If you submit `v1.6.1` while `v1.6.0` is still - executing, the release automation will not execute. You have to either - cancel `v1.6.0` before submitting `v1.6.1` or wait until it has finished. - You can execute releases on different release branches simultaneously, for - example, `v1.6.0` and `v1.7.0-rc1`, without problems. + same time. ### Verifying automated release @@ -118,119 +91,26 @@ checks to see if the release came out correctly: * Check [https://github.com/argoproj/argo-cd/releases](https://github.com/argoproj/argo-cd/releases) to see if the release has been correctly created and if all required assets are attached. -* Check whether the image has been published on DockerHub correctly +* Check whether the image has been published on Quay.io correctly ### If something went wrong If something went wrong, damage should be limited. Depending on the steps that have been performed, you will need to manually clean up. -* Delete the release tag (e.g. `v1.6.0-rc2`) created in the GitHub repository. This - will immediately set the release (if created) to `draft` status, invisible to the - general public. -* Delete the draft release (if created) from the `Releases` page on GitHub -* If Docker image has been pushed to DockerHub, delete it -* If commits have been performed to the release branch, revert them.
Paths that could have been committed to are: - * `VERSION` - * `manifests/*` - -### Post-process manual steps - -For now, the only manual steps left are to - -* update stable tag in the GitHub repository to point to new the release (if appropriate) -* update the `VERSION` file on `master` if this is a new major release - -These may be automated as well in the future. - -## Manual releasing - -The automatic release process does not interfere with the manual release process, since -the trigger tag does not match a normal release tag. If you prefer to perform, -manual release or if automatic release is for some reason broken, these are the -steps: - -Make sure you are logged into Docker Hub: - -```bash -docker login -``` - -Export the upstream repository and branch name, e.g.: - -```bash -REPO=upstream ;# or origin -BRANCH=release-1.3 -``` - -Set the `VERSION` environment variable: - -```bash -# release candidate -VERSION=v1.3.0-rc1 -# GA release -VERSION=v1.3.1 -``` - -Update `VERSION` and manifests with the new version: - -```bash -git checkout $BRANCH -echo ${VERSION:1} > VERSION -make dev-tools-image -make manifests IMAGE_TAG=$VERSION -git commit -am "Update manifests to $VERSION" -git tag $VERSION -``` - -Build, and push release to Docker Hub - -```bash -git clean -fd -make release IMAGE_NAMESPACE=argoproj IMAGE_TAG=$VERSION DOCKER_PUSH=true -git push $REPO $BRANCH -git push $REPO $VERSION -``` - -Update [GitHub releases](https://github.com/argoproj/argo-cd/releases) with: - -* Getting started (copy from the previous release) -* Changelog -* Binaries (e.g. `dist/argocd-darwin-amd64`). 
- -## Update brew formulae (manual) - -If GA, update the Brew formula: - -```bash -brew bump-formula-pr argocd --version ${VERSION:1} -``` - -## Update stable tag (manual) - -If GA, update `stable` tag: - -```bash -git tag stable --force && git push $REPO stable --force -``` - -## Verify release +* If the container image has been pushed to Quay.io, delete it +* Delete the release (if created) from the `Releases` page on GitHub -Locally: +### Manual releasing -```bash -kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/$VERSION/manifests/install.yaml -``` - -Follow the [Getting Started Guide](../getting_started/). - -If GA: - -```bash -brew upgrade argocd -/usr/local/bin/argocd version -``` +The release process does not allow a manual release process. Image signatures and provenance need to be created using GitHub Actions. -Sync Argo CD in [https://cd.apps.argoproj.io/applications/argo-cd](https://cd.apps.argoproj.io/applications/argo-cd). +## Notable files that involve the release process. -Deploy the [site](site.md). 
+| File | Description | +|------------------------------------|--------------------------------------------------------| +|goreleaser.yaml |Config to build CLI binaries, checksums, release-notes | +|.github/workflows/image-reuse.yaml |Reusable workflow used to generate container images | +|.github/workflows/init-release.yaml |Used to generate manifest and `VERSION` file | +|.github/workflows/release.yaml |Build image, CLI binaries, provenances, sbom, post jobs | +|./hack/trigger-release.sh |Ensures all pre-requistes are met and pushes the tag | diff --git a/docs/developer-guide/running-locally.md b/docs/developer-guide/running-locally.md index 0ea809f8bd999..25f4510e9e18a 100644 --- a/docs/developer-guide/running-locally.md +++ b/docs/developer-guide/running-locally.md @@ -1,14 +1,14 @@ -# Running ArgoCD locally +# Running Argo CD locally -## Run ArgoCD outside of Kubernetes +## Run Argo CD outside of Kubernetes -During development, it might be viable to run ArgoCD outside of a Kubernetes cluster. This will greatly speed up development, as you don't have to constantly build, push and install new ArgoCD Docker images with your latest changes. +During development, it might be viable to run Argo CD outside a Kubernetes cluster. This will greatly speed up development, as you don't have to constantly build, push and install new Argo CD Docker images with your latest changes. -You will still need a working Kubernetes cluster, as described in the [Contribution Guide](contributing.md), where ArgoCD will store all of its resources. +You will still need a working Kubernetes cluster, as described in the [Toolchain Guide](toolchain-guide.md), where Argo CD will store all of its resources and configuration. 
-If you followed the [Contribution Guide](contributing.md) in setting up your toolchain, you can run ArgoCD locally with these simple steps: +If you followed the [Toolchain Guide](toolchain-guide.md) in setting up your toolchain, you can run Argo CD locally with these simple steps: -### Install ArgoCD resources to your cluster +### Install Argo CD resources to your cluster First push the installation manifest into argocd namespace: @@ -17,9 +17,9 @@ kubectl create namespace argocd kubectl apply -n argocd --force -f manifests/install.yaml ``` -### Scale down any ArgoCD instance in your cluster +### Scale down any Argo CD instance in your cluster -Make sure that ArgoCD is not running in your development cluster by scaling down the deployments: +Make sure that Argo CD is not running in your development cluster by scaling down the deployments: ```shell kubectl -n argocd scale statefulset/argocd-application-controller --replicas 0 @@ -27,22 +27,39 @@ kubectl -n argocd scale deployment/argocd-dex-server --replicas 0 kubectl -n argocd scale deployment/argocd-repo-server --replicas 0 kubectl -n argocd scale deployment/argocd-server --replicas 0 kubectl -n argocd scale deployment/argocd-redis --replicas 0 +kubectl -n argocd scale deployment/argocd-applicationset-controller --replicas 0 +kubectl -n argocd scale deployment/argocd-notifications-controller --replicas 0 ``` -### Start local services +### Start local services (virtualized toolchain inside Docker) -Before starting local services, make sure you are present in `argocd` namespace. When you use the virtualized toolchain, starting local services is as simple as running +The started services assume you are running in the namespace where Argo CD is installed. 
You can set the current context default namespace as follows: + +```bash +kubectl config set-context --current --namespace=argocd +``` + +When you use the virtualized toolchain, starting local services is as simple as running ```bash make start ``` -This will start all ArgoCD services and the UI in a Docker container and expose the following ports to your host: +This will start all Argo CD services and the UI in a Docker container and expose the following ports to your host: -* The ArgoCD API server on port 8080 -* The ArgoCD UI server on port 4000 +* The Argo CD API server on port 8080 +* The Argo CD UI server on port 4000 +* The Helm registry server on port 5000 + +You may get an error listening on port 5000 on macOS: + +```text +docker: Error response from daemon: Ports are not available: exposing port TCP 0.0.0.0:5000 -> 0.0.0.0:0: listen tcp 0.0.0.0:5000: bind: address already in use. +``` -You can now use either the web UI by pointing your browser to `http://localhost:4000` or use the CLI against the API at `http://localhost:8080`. Be sure to use the `--insecure` and `--plaintext` options to the CLI. +In that case, you can disable "AirPlay Receiver" in macOS System Preferences. + +You can now use either the web UI by pointing your browser to `http://localhost:4000` or use the CLI against the API at `http://localhost:8080`. Be sure to use the `--insecure` and `--plaintext` options to the CLI. Webpack will take a while to bundle resources initially, so the first page load can take several seconds or minutes. 
As an alternative to using the above command line parameters each time you call `argocd` CLI, you can set the following environment variables: @@ -51,9 +68,102 @@ export ARGOCD_SERVER=127.0.0.1:8080 export ARGOCD_OPTS="--plaintext --insecure" ``` -### Scale up ArgoCD in your cluster +### Start local services (running on local machine) + +The `make start` command of the virtualized toolchain runs the build and programs inside a Docker container using the test tools image. That makes everything repeatable, but can slow down the development workflow. Particularly on macOS where Docker and the Linux kernel run inside a VM, you may want to try developing fully locally. + +Docker should be installed already. Assuming you manage installed software using [Homebrew](https://brew.sh/), you can install other prerequisites like this: + +```sh +# goreman is used to start all needed processes to get a working Argo CD development +# environment (defined in `Procfile`) +brew install goreman + +# You can use `kind` to run Kubernetes inside Docker. But pointing to any other +# development cluster works fine as well as long as Argo CD can reach it. +brew install kind +``` + +To set up Kubernetes, you can use kind: + +```sh +kind create cluster --kubeconfig ~/.kube/config-kind + +# The started services assume you are running in the namespace where Argo CD is +# installed. Set the current context default namespace. +export KUBECONFIG=~/.kube/config-kind +kubectl config set-context --current --namespace=argocd +``` + +Follow the above sections "Install Argo CD resources to your cluster" and "Scale down any Argo CD instance in your cluster" to deploy all needed manifests such as config maps. + +Start local services: + +```sh +# Ensure you point to the correct Kubernetes cluster as shown above. 
For example: +export KUBECONFIG=~/.kube/config-kind + +make start-local +``` + +This will start all Argo CD services and the UI in a Docker container and expose the following ports to your host: + +* The Argo CD API server on port 8080 +* The Argo CD UI server on port 4000 +* The Helm registry server on port 5000 + +If you get firewall dialogs, for example on macOS, you can click "Deny", since no access from outside your computer is typically desired. + +Check that all programs have started: + +```text +$ goreman run status +*controller +*api-server +[...] +``` + +If not all critical processes run (marked with `*`), check logs to see why they terminated. + +In case of an error like `gpg: key generation failed: Unknown elliptic curve` (a [gnupg bug](https://dev.gnupg.org/T5444)), disable GPG verification before running `make start-local`: + +```sh +export ARGOCD_GPG_ENABLED=false +``` + +You may get an error listening on port 5000 on macOS: + +```text +docker: Error response from daemon: Ports are not available: exposing port TCP 0.0.0.0:5000 -> 0.0.0.0:0: listen tcp 0.0.0.0:5000: bind: address already in use. +``` + +In that case, you can disable "AirPlay Receiver" in macOS System Preferences. + +You can now use either the web UI by pointing your browser to `http://localhost:4000` or use the CLI against the API at `http://localhost:8080`. Be sure to use the `--insecure` and `--plaintext` options to the CLI. Webpack will take a while to bundle resources initially, so the first page load can take several seconds or minutes. 
+ +As an alternative to using the above command line parameters each time you call `argocd` CLI, you can set the following environment variables: + +```bash +export ARGOCD_SERVER=127.0.0.1:8080 +export ARGOCD_OPTS="--plaintext --insecure" +``` + +After making a code change, ensure to rebuild and restart the respective service: + +```sh +# Example for working on the repo server Go code, see other service names in `Procfile` +goreman run restart repo-server +``` + +Clean up when you're done: + +```sh +kind delete cluster; rm -f ~/.kube/config-kind +``` + +### Scale up Argo CD in your cluster -Once you have finished testing your changes locally and want to bring back ArgoCD in your development cluster, simply scale the deployments up again: +Once you have finished testing your changes locally and want to bring back Argo CD in your development cluster, simply scale the deployments up again: ```bash kubectl -n argocd scale statefulset/argocd-application-controller --replicas 1 @@ -63,13 +173,13 @@ kubectl -n argocd scale deployment/argocd-server --replicas 1 kubectl -n argocd scale deployment/argocd-redis --replicas 1 ``` -## Run your own ArgoCD images on your cluster +## Run your own Argo CD images on your cluster For your final tests, it might be necessary to build your own images and run them in your development cluster. ### Create Docker account and login -You might need to create a account on [Docker Hub](https://hub.docker.com) if you don't have one already. Once you created your account, login from your development environment: +You might need to create an account on [Docker Hub](https://hub.docker.com) if you don't have one already. 
Once you created your account, login from your development environment: ```bash docker login diff --git a/docs/developer-guide/site.md b/docs/developer-guide/site.md index c1ff0cac6251e..47c1f57e29bb7 100644 --- a/docs/developer-guide/site.md +++ b/docs/developer-guide/site.md @@ -2,19 +2,14 @@ ## Developing And Testing -The web site is build using `mkdocs` and `mkdocs-material`. +The website is built using `mkdocs` and `mkdocs-material`. To test: ```bash make serve-docs ``` - -Check for broken external links: - -```bash -make lint-docs -``` +Once running, you can view your locally built documentation at [http://0.0.0.0:8000/](http://0.0.0.0:8000/). ## Deploying diff --git a/docs/developer-guide/test-e2e.md b/docs/developer-guide/test-e2e.md index bb7fc85be7e0a..477723016bd75 100644 --- a/docs/developer-guide/test-e2e.md +++ b/docs/developer-guide/test-e2e.md @@ -11,7 +11,7 @@ Git repository via file url: `file:///tmp/argocd-e2e***`. 1. Start the e2e version `make start-e2e` 1. Run the tests: `make test-e2e` -You can observe the tests by using the UI [http://localhost:4000/applications](http://localhost:4000/applications). +You can observe the tests by using the UI [http://localhost:8080/applications](http://localhost:8080/applications) with username `"admin"` and password `"password"`. ## Configuration of E2E Tests execution @@ -33,7 +33,7 @@ Some effort has been made to balance test isolation with speed. Tests are isolat * A random 5 character ID. * A unique Git repository containing the `testdata` in `/tmp/argocd-e2e/${id}`. * A namespace `argocd-e2e-ns-${id}`. -* An primary name for the app `argocd-e2e-${id}`. +* A primary name for the app `argocd-e2e-${id}`. 
## Troubleshooting diff --git a/docs/developer-guide/toolchain-guide.md b/docs/developer-guide/toolchain-guide.md new file mode 100644 index 0000000000000..42ca7fac87404 --- /dev/null +++ b/docs/developer-guide/toolchain-guide.md @@ -0,0 +1,344 @@ +# Development toolchain + +## Preface + +!!!note "Before you start" + The Argo CD project continuously grows, both in terms of features and community size. It gets adopted by more and more organisations which entrust Argo CD to handle their critical production workloads. Thus, we need to take great care with any changes that affect compatibility, performance, scalability, stability and security of Argo CD. For this reason, every new feature or larger enhancement must be properly designed and discussed before it gets accepted into the code base. + + We do welcome and encourage everyone to participate in the Argo CD project, but please understand that we can't accept each and every contribution from the community, for various reasons. If you want to submit code for a great new feature or enhancement, we kindly ask you to take a look at the + [code contribution guide](code-contributions.md#) before you start to write code or submit a PR. + +We want to make contributing to Argo CD as simple and smooth as possible. + +This guide shall help you in setting up your build & test environment, so that you can start developing and testing bug fixes and feature enhancements without having to make too much effort in setting up a local toolchain. + +If you want to submit a PR, please read this document carefully, as it contains important information guiding you through our PR quality gates. + +As is the case with the development process, this document is under constant change. If you notice any error, or if you think this document is out-of-date, or if you think it is missing something: Feel free to submit a PR or submit a bug to our GitHub issue tracker. 
+ +If you need guidance with submitting a PR, or have any other questions regarding development of Argo CD, do not hesitate to [join our Slack](https://argoproj.github.io/community/join-slack) and get in touch with us in the `#argo-contributors` channel! + +## Before you start + +You will need at least the following things in your toolchain in order to develop and test Argo CD locally: + +* A Kubernetes cluster. You won't need a fully blown multi-master, multi-node cluster, but you will need something like K3S, Minikube or microk8s. You will also need a working Kubernetes client (`kubectl`) configuration in your development environment. The configuration must reside in `~/.kube/config` and the API server URL must point to the IP address of your local machine (or VM), and **not** to `localhost` or `127.0.0.1` if you are using the virtualized development toolchain (see below) + +* You will also need a working Docker runtime environment, to be able to build and run images. The Docker version must be 17.05.0 or higher, to support multi-stage builds. + +* Obviously, you will need a `git` client for pulling source code and pushing back your changes. + +* Last but not least, you will need a Go SDK and related tools (such as GNU `make`) installed and working on your development environment. The minimum required Go version for building and testing Argo CD is **v1.17**. + +* We will assume that your Go workspace is at `~/go`. + +!!! note + **Attention minikube users**: By default, minikube will create Kubernetes client configuration that uses authentication data from files. This is incompatible with the virtualized toolchain. So if you intend to use the virtualized toolchain, you have to embed this authentication data into the client configuration. To do so, start minikube using `minikube start --embed-certs`. 
Please also note that minikube using the Docker driver is currently not supported with the virtualized toolchain, because the Docker driver exposes the API server on 127.0.0.1 hard-coded. If in doubt, run `make verify-kube-connect` to find out. + +## Submitting PRs + +### Continuous Integration process + +When you submit a PR against Argo CD's GitHub repository, a couple of CI checks will be run automatically to ensure your changes will build fine and meet certain quality standards. Your contribution needs to pass those checks in order to be merged into the repository. + +!!!note + + Please make sure that you always create PRs from a branch that is up-to-date with the latest changes from Argo CD's master branch. Depending on how long it takes for the maintainers to review and merge your PR, it might be necessary to pull in latest changes into your branch again. + +Please understand that we, as an Open Source project, have limited capacities for reviewing and merging PRs to Argo CD. We will do our best to review your PR and give you feedback as soon as possible, but please bear with us if it takes a little longer than expected. + +The following read will help you to submit a PR that meets the standards of our CI tests: + +### Title of the PR + +Please use a meaningful and concise title for your PR. This will help us to pick PRs for review quickly, and the PR title will also end up in the Changelog. + +We use [PR title checker](https://github.com/marketplace/actions/pr-title-checker) to categorize your PR into one of the following categories: + +* `fix` - Your PR contains one or more code bug fixes +* `feat` - Your PR contains a new feature +* `docs` - Your PR improves the documentation +* `chore` - Your PR improves any internals of Argo CD, such as the build process, unit tests, etc + +Please prefix the title of your PR with one of the valid categories. 
For example, if you chose the title `Add documentation for GitHub SSO integration` for your PR, please use `docs: Add documentation for GitHub SSO integration` instead. + +### PR template checklist + +Upon opening a PR, the details will contain a checklist from a template. Please read the checklist, and tick those marks that apply to you. + +### Automated builds & tests + +After you have submitted your PR, and whenever you push new commits to that branch, GitHub will run a number of Continuous Integration checks against your code. It will execute the following actions, and each of them has to pass: + +* Build the Go code (`make build`) +* Generate API glue code and manifests (`make codegen`) +* Run a Go linter on the code (`make lint`) +* Run the unit tests (`make test`) +* Run the End-to-End tests (`make test-e2e`) +* Build and lint the UI code (`make lint-ui`) +* Build the `argocd` CLI (`make cli`) + +If any of these tests in the CI pipeline fail, it means that some of your contribution is considered faulty (or a test might be flaky, see below). + +### Code test coverage + +We use [CodeCov](https://codecov.io) in our CI pipeline to check for test coverage, and once you submit your PR, it will run and report on the coverage difference as a comment within your PR. If the difference is too high in the negative, i.e. your submission introduced a significant drop in code coverage, the CI check will fail. + +Whenever you develop a new feature or submit a bug fix, please also write appropriate unit tests for it. If you write a completely new module, please aim for at least 80% of coverage. +If you want to see how much coverage just a specific module (i.e. your new one) has, you can set the `TEST_MODULE` to the (fully qualified) name of that module with `make test`, i.e.: + +```bash + make test TEST_MODULE=github.com/argoproj/argo-cd/server/cache +...
+ok github.com/argoproj/argo-cd/server/cache 0.029s coverage: 89.3% of statements +``` + +## Local vs Virtualized toolchain + +Argo CD provides a fully virtualized development and testing toolchain using Docker images. It is recommended to use those images, as they provide the same runtime environment as the final product and it is much easier to keep up-to-date with changes to the toolchain and dependencies. But as using Docker comes with a slight performance penalty, you might want to setup a local toolchain. + +Most relevant targets for the build & test cycles in the `Makefile` provide two variants, one of them suffixed with `-local`. For example, `make test` will run unit tests in the Docker container, `make test-local` will run it natively on your local system. + +If you are going to use the virtualized toolchain, please bear in mind the following things: + +* Your Kubernetes API server must listen on the interface of your local machine or VM, and not on `127.0.0.1` only. +* Your Kubernetes client configuration (`~/.kube/config`) must not use an API URL that points to `localhost` or `127.0.0.1`. + +You can test whether the virtualized toolchain has access to your Kubernetes cluster by running `make verify-kube-connect` (*after* you have setup your development environment, as described below), which will run `kubectl version` inside the Docker container used for running all tests. + +The Docker container for the virtualized toolchain will use the following local mounts from your workstation, and possibly modify its contents: + +* `~/go/src` - Your Go workspace's source directory (modifications expected) +* `~/.cache/go-build` - Your Go build cache (modifications expected) +* `~/.kube` - Your Kubernetes client configuration (no modifications) +* `/tmp` - Your system's temp directory (modifications expected) + +## Setting up your development environment + +The following steps are required no matter whether you chose to use a virtualized or a local toolchain. 
+ +!!!note "Docker privileges" + If you opt in to use the virtualized toolchain, you will need to have the + appropriate privileges to interact with the Docker daemon. It is not + recommended to work as the root user, and if your user does not have the + permissions to talk to the Docker user, but you have `sudo` setup on your + system, you can set the environment variable `SUDO` to `sudo` in order to + have the build scripts make any calls to the `docker` CLI using sudo, + without affecting the other parts of the build scripts (which should be + executed with your normal user privileges). + + You can either set this before calling `make`, like so for example: + + ``` + SUDO=sudo make sometarget + ``` + + Or you can opt to export this permanently to your environment, for example + ``` + export SUDO=sudo + ``` + +### Clone the Argo CD repository from your personal fork on GitHub + +* `mkdir -p ~/go/src/github.com/argoproj` +* `cd ~/go/src/github.com/argoproj` +* `git clone https://github.com/yourghuser/argo-cd` +* `cd argo-cd` + +### Optional: Setup an additional Git remote + +While everyone has their own Git workflow, the author of this document recommends to create a remote called `upstream` in your local copy pointing to the original Argo CD repository. This way, you can easily keep your local branches up-to-date by merging in latest changes from the Argo CD repository, i.e. by doing a `git pull upstream master` in your locally checked out branch. To create the remote, run `git remote add upstream https://github.com/argoproj/argo-cd` + +### Install the must-have requirements + +Make sure you fulfill the pre-requisites above and run some preliminary tests. Neither of them should report an error. + +* Run `kubectl version` +* Run `docker version` +* Run `go version` + +### Build the required Docker image + +Build the required Docker image by running `make test-tools-image`. This image offers the environment of the virtualized toolchain. 
+ +The `Dockerfile` used to build these images can be found at `test/container/Dockerfile`. + +### Test connection from build container to your K8s cluster + +Run `make verify-kube-connect`, it should execute without error. + +If you receive an error similar to the following: + +``` +The connection to the server 127.0.0.1:6443 was refused - did you specify the right host or port? +make: *** [Makefile:386: verify-kube-connect] Error 1 +``` + +you should edit your `~/.kube/config` and modify the `server` option to point to your correct K8s API (as described above). + +### Using k3d + +[k3d](https://github.com/rancher/k3d) is a lightweight wrapper to run [k3s](https://github.com/rancher/k3s), a minimal Kubernetes distribution, in docker. Because it's running in a docker container, you're dealing with docker's internal networking rules when using k3d. A typical Kubernetes cluster running on your local machine is part of the same network that you're on, so you can access it using **kubectl**. However, a Kubernetes cluster running within a docker container (in this case, the one launched by make) cannot access 0.0.0.0 from inside the container itself, when 0.0.0.0 is a network resource outside the container itself (and/or the container's network). This is the cost of a fully self-contained, disposable Kubernetes cluster. The following steps should help with a successful `make verify-kube-connect` execution. + +1. Find your host IP by executing `ifconfig` on Mac/Linux and `ipconfig` on Windows. For most users, the following command works to find the IP address. + + * For Mac: + + ``` + IP=`ifconfig en0 | grep inet | grep -v inet6 | awk '{print $2}'` + echo $IP + ``` + + * For Linux: + + ``` + IP=`ifconfig eth0 | grep inet | grep -v inet6 | awk '{print $2}'` + echo $IP + ``` + + Keep in mind that this IP is dynamically assigned by the router so if your router restarts for any reason, your IP might change. + +2. 
Edit your ~/.kube/config and replace 0.0.0.0 with the above IP address. + +3. Execute a `kubectl version` to make sure you can still connect to the Kubernetes API server via this new IP. Run `make verify-kube-connect` and check if it works. + +4. Finally, so that you don't have to keep updating your kube-config whenever you spin up a new k3d cluster, add `--api-port $IP:6550` to your **k3d cluster create** command, where $IP is the value from step 1. An example command is provided here: + +``` +k3d cluster create my-cluster --wait --k3s-arg '--disable=traefik@server:*' --api-port $IP:6550 -p 443:443@loadbalancer +``` + +!!!note +For k3d versions less than v5.0.0, the example command flags `--k3s-arg` and `'--disable=traefik@server:*'` should change to `--k3s-server-arg` and `'--disable=traefik'`, respectively. + +## The development cycle + +When you have developed and possibly manually tested the code you want to contribute, you should ensure that everything will build correctly. Commit your changes to the local copy of your Git branch and perform the following steps: + +### Pull in all build dependencies + +As build dependencies change over time, you have to synchronize your development environment with the current specification. In order to pull in all required dependencies, issue: + +* `make dep-ui` + +Argo CD recently migrated to Go modules. Usually, dependencies will be downloaded on build time, but the Makefile provides two targets to download and vendor all dependencies: + +* `make mod-download` will download all required Go modules and +* `make mod-vendor` will vendor those dependencies into the Argo CD source tree + +### Generate API glue code and other assets + +Argo CD relies on Google's [Protocol Buffers](https://developers.google.com/protocol-buffers) for its API, and this makes heavy use of auto-generated glue code and stubs. Whenever you touched parts of the API code, you must re-generate the auto generated code. 
+ +* Run `make codegen`, this might take a while +* Check if something has changed by running `git status` or `git diff` +* Commit any possible changes to your local Git branch, an appropriate commit message would be `Changes from codegen`, for example. + +!!!note + There are a few non-obvious assets that are auto-generated. You should not change the autogenerated assets, as they will be overwritten by a subsequent run of `make codegen`. Instead, change their source files. Prominent examples of non-obvious auto-generated code are `swagger.json` or the installation manifest YAMLs. + +### Build your code and run unit tests + +After the code glue has been generated, your code should build and the unit tests should run without any errors. Execute the following statements: + +* `make build` +* `make test` + +These steps are non-modifying, so there's no need to check for changes afterwards. + +### Lint your code base + +In order to keep a consistent code style in our source tree, your code must be well-formed in accordance to some widely accepted rules, which are applied by a Linter. + +The Linter might make some automatic changes to your code, such as indentation fixes. Some other errors reported by the Linter have to be fixed manually. + +* Run `make lint` and observe any errors reported by the Linter +* Fix any of the errors reported and commit to your local branch +* Finally, after the Linter reports no errors anymore, run `git status` or `git diff` to check for any changes made automatically by Lint +* If there were automatic changes, commit them to your local branch + +If you touched UI code, you should also run the Yarn linter on it: + +* Run `make lint-ui` +* Fix any of the errors reported by it + +## Contributing to Argo CD UI + +Argo CD, along with Argo Workflows, uses shared React components from [Argo UI](https://github.com/argoproj/argo-ui). Examples of some of these components include buttons, containers, form controls, +and others. 
Although you can make changes to these files and run them locally, in order to have these changes added to the Argo CD repo, you will need to follow these steps. + +1. Fork and clone the [Argo UI repository](https://github.com/argoproj/argo-ui). + +2. `cd` into your `argo-ui` directory, and then run `yarn install`. + +3. Make your file changes. + +4. Run `yarn start` to start a [storybook](https://storybook.js.org/) dev server and view the components in your browser. Make sure all your changes work as expected. + +5. Use [yarn link](https://classic.yarnpkg.com/en/docs/cli/link/) to link Argo UI package to your Argo CD repository. (Commands below assume that `argo-ui` and `argo-cd` are both located within the same parent folder) + + * `cd argo-ui` + * `yarn link` + * `cd ../argo-cd/ui` + * `yarn link argo-ui` + + Once `argo-ui` package has been successfully linked, test out changes in your local development environment. + +6. Commit changes and open a PR to [Argo UI](https://github.com/argoproj/argo-ui). + +7. Once your PR has been merged in Argo UI, `cd` into your `argo-cd/ui` folder and run `yarn add git+https://github.com/argoproj/argo-ui.git`. This will update the commit SHA in the `ui/yarn.lock` file to use the latest master commit for argo-ui. + +8. Submit changes to `ui/yarn.lock` in a PR to Argo CD. + +## Setting up a local toolchain + +For development, you can either use the fully virtualized toolchain provided as Docker images, or you can set up the toolchain on your local development machine. Due to the dynamic nature of requirements, you might want to stay with the virtualized environment. + +### Install required dependencies and build-tools + +!!!note + The installation instructions are valid for Linux hosts only. Mac instructions will follow shortly. + +For installing the tools required to build and test Argo CD on your local system, we provide convenient installer scripts. 
By default, they will install binaries to `/usr/local/bin` on your system, which might require `root` privileges. + +You can change the target location by setting the `BIN` environment variable before running the installer scripts. For example, you can install the binaries into `~/go/bin` (which should then be the first component in your `PATH` environment, i.e. `export PATH=~/go/bin:$PATH`): + +```shell +make BIN=~/go/bin install-tools-local +``` + +Additionally, you have to install at least the following tools via your OS's package manager (this list might not be always up-to-date): + +* Git LFS plugin +* GnuPG version 2 + +### Install Go dependencies + +You need to pull in all required Go dependencies. To do so, run + +* `make mod-download-local` +* `make mod-vendor-local` + +### Test your build toolchain + +The first thing you can do to test whether your build toolchain is set up correctly is by generating the glue code for the API and after that, run a normal build: + +* `make codegen-local` +* `make build-local` + +This should return without any error. + +### Run unit-tests + +The next thing is to make sure that unit tests are running correctly on your system. These will require that all dependencies, such as Helm, Kustomize, Git, GnuPG, etc are correctly installed and fully functioning: + +* `make test-local` + +### Run end-to-end tests + +The final step is running the End-to-End testsuite, which makes sure that your Kubernetes dependencies are working properly. This will involve starting all the Argo CD components locally on your computer. The end-to-end tests consist of two parts: a server component, and a client component. + +* First, start the End-to-End server: `make start-e2e-local`. This will spawn a number of processes and services on your system. +* When all components have started, run `make test-e2e-local` to run the end-to-end tests against your local services. 
+ +For more information about End-to-End tests, refer to the [End-to-End test documentation](test-e2e.md). diff --git a/docs/developer-guide/ui-extensions.md b/docs/developer-guide/ui-extensions.md new file mode 100644 index 0000000000000..dfabfb5574ead --- /dev/null +++ b/docs/developer-guide/ui-extensions.md @@ -0,0 +1,2 @@ +The contents of this document have been moved to the +[extensions guide](./extensions/ui-extensions.md) diff --git a/docs/developer-guide/use-gitpod.md b/docs/developer-guide/use-gitpod.md index a670baf9039c1..12b2c49eabf40 100644 --- a/docs/developer-guide/use-gitpod.md +++ b/docs/developer-guide/use-gitpod.md @@ -8,8 +8,8 @@ for Argo CD development. 1. Fork [https://github.com/argoproj/argo-cd](https://github.com/argoproj/argo-cd) repository 1. Create Gitpod workspace by opening the following url in the browser: - https://gitpod.io/#https://github.com//argo-cd - `The is your Github username` + `https://gitpod.io/#https://github.com//argo-cd` where + `` is your GitHub username. 1. Once workspace is created you should see VSCode editor in the browser as well as workspace initialization logs in the VSCode terminal. The initialization process downloads all backend and UI dependencies as well @@ -40,4 +40,4 @@ There are some known limitations: * Free plan provides 50 hours per month * [Envtest](https://book.kubebuilder.io/reference/envtest.html) based Kubernetes is only control plane. So you won't be able to deploy Argo CD applications that runs actual pods. -* Codegen tools are not available. E.g. you won't be able to use `make codegen-local`. \ No newline at end of file +* Codegen tools are not available. E.g. you won't be able to use `make codegen-local`. diff --git a/docs/faq.md b/docs/faq.md index 8c4ccc81b6dcd..19273acc04d23 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -14,7 +14,7 @@ to ignore fields when differences are expected. ## Why is my application stuck in `Progressing` state? 
-Argo CD provides health for several standard Kubernetes types. The `Ingress` and `StatefulSet` types have known issues +Argo CD provides health for several standard Kubernetes types. The `Ingress`, `StatefulSet` and `SealedSecret` types have known issues which might cause health check to return `Progressing` state instead of `Healthy`. * `Ingress` is considered healthy if `status.loadBalancer.ingress` list is non-empty, with at least one value @@ -31,6 +31,7 @@ which might cause health check to return `Progressing` state instead of `Healthy in `Progressing` state. * Your `StatefulSet` or `DaemonSet` is using `OnDelete` instead of `RollingUpdate` strategy. See [#1881](https://github.com/argoproj/argo-cd/issues/1881). +* For `SealedSecret`, see [Why are resources of type `SealedSecret` stuck in the `Progressing` state?](#sealed-secret-stuck-progressing) As workaround Argo CD allows providing [health check](operator-manual/health.md) customization which overrides default behavior. @@ -42,8 +43,13 @@ per [the getting started guide](getting_started.md). For Argo CD v1.9 and later, a secret named `argocd-initial-admin-secret`. To change the password, edit the `argocd-secret` secret and update the `admin.password` field with a new bcrypt hash. -You can use a site like [https://www.browserling.com/tools/bcrypt](https://www.browserling.com/tools/bcrypt) to generate -a new hash. For example: + +!!! note "Generating a bcrypt hash" + Use the following command to generate a bcrypt hash for `admin.password` + + argocd account bcrypt --password + +To apply the new password hash, use the following command (replacing the hash with your own): ```bash # bcrypt(password)=$2a$10$rRyBsGSHK6.uc8fntPwVIuLVHgsAhAX7TcdrqW/RADU0uh7CaChLa @@ -62,7 +68,7 @@ or a randomly generated password stored in a secret (Argo CD 1.9 and later). ## How to disable admin user? Add `admin.enabled: "false"` to the `argocd-cm` ConfigMap ( -see [user management](operator-manual/user-management/index.md)). 
+see [user management](./operator-manual/user-management/index.md)). ## Argo CD cannot deploy Helm Chart based applications without internet access, how can I solve it? @@ -74,17 +80,24 @@ might decide to refresh `stable` repo. As workaround override ```yaml data: - # v1.2 or earlier use `helm.repositories` - helm.repositories: | - - url: http://:8080 - name: stable - # v1.3 or later use `repositories` with `type: helm` repositories: | - type: helm url: http://:8080 name: stable ``` +## After deploying my Helm application with Argo CD I cannot see it with `helm ls` and other Helm commands + +When deploying a Helm application Argo CD is using Helm +only as a template mechanism. It runs `helm template` and +then deploys the resulting manifests on the cluster instead of doing `helm install`. This means that you cannot use any Helm command +to view/verify the application. It is fully managed by Argo CD. +Note that Argo CD supports natively some capabilities that you might miss in Helm (such as the history and rollback commands). + +This decision was made so that Argo CD is neutral +to all manifest generators. + + ## I've configured [cluster secret](./operator-manual/declarative-setup.md#clusters) but it does not show up in CLI/UI, how do I fix it? Check if cluster secret has `argocd.argoproj.io/secret-type: cluster` label. 
If secret has the label but the cluster is @@ -97,7 +110,7 @@ Use the following steps to reconstruct configured cluster config and connect to ```bash kubectl exec -it bash # ssh into any argocd server pod -argocd-util cluster kubeconfig https:// /tmp/config --namespace argocd # generate your cluster config +argocd admin cluster kubeconfig https:// /tmp/config --namespace argocd # generate your cluster config KUBECONFIG=/tmp/config kubectl get pods # test connection manually ``` @@ -109,20 +122,27 @@ To terminate the sync, click on the "synchronisation" then "terminate": ![Synchronization](assets/synchronization-button.png) ![Terminate](assets/terminate-button.png) -## Why Is My App Out Of Sync Even After Syncing? +## Why Is My App `Out Of Sync` Even After Syncing? -Is some cases, the tool you use may conflict with Argo CD by adding the `app.kubernetes.io/instance` label. E.g. using +In some cases, the tool you use may conflict with Argo CD by adding the `app.kubernetes.io/instance` label. E.g. using Kustomize common labels feature. Argo CD automatically sets the `app.kubernetes.io/instance` label and uses it to determine which resources form the app. If the tool does this too, this causes confusion. You can change this label by setting the `application.instanceLabelKey` value in the `argocd-cm`. We recommend that you use `argocd.argoproj.io/instance`. -!!! note When you make this change your applications will become out of sync and will need re-syncing. +!!! note + When you make this change your applications will become out of sync and will need re-syncing. See [#1482](https://github.com/argoproj/argo-cd/issues/1482). -## Why Are My Resource Limits Out Of Sync? +## How often does Argo CD check for changes to my Git or Helm repository ? + +The default polling interval is 3 minutes (180 seconds). 
+You can change the setting by updating the `timeout.reconciliation` value in the [argocd-cm](https://github.com/argoproj/argo-cd/blob/2d6ce088acd4fb29271ffb6f6023dbb27594d59b/docs/operator-manual/argocd-cm.yaml#L279-L282) config map. If there are any Git changes, Argo CD will only update applications with the [auto-sync setting](user-guide/auto_sync.md) enabled. If you set it to `0` then Argo CD will stop polling Git repositories automatically and you can only use alternative methods such as [webhooks](operator-manual/webhook.md) and/or manual syncs for deploying applications. + + +## Why Are My Resource Limits `Out Of Sync`? Kubernetes has normalized your resource limits when they are applied, and then Argo CD has then compared the version in your generated manifests to the normalized one is Kubernetes - they won't match. @@ -137,7 +157,7 @@ E.g. To fix this use diffing customizations [settings](./user-guide/diffing.md#known-kubernetes-types-in-crds-resource-limits-volume-mounts-etc). -## How Do I Fix "invalid cookie, longer than max length 4093"? +## How Do I Fix `invalid cookie, longer than max length 4093`? Argo CD uses a JWT as the auth token. You likely are part of many groups and have gone over the 4KB limit which is set for cookies. You can get the list of groups by opening "developer tools -> network" @@ -160,7 +180,9 @@ argocd ... --grpc-web ## Why Am I Getting `x509: certificate signed by unknown authority` When Using The CLI? -Your not running your server with correct certs. +The certificate created by default by Argo CD is not automatically recognised by the Argo CD CLI, in order +to create a secure system you must follow the instructions to [install a certificate](/operator-manual/tls/) +and configure your client OS to trust that certificate. If you're not running in a production system (e.g. you're testing Argo CD out), try the `--insecure` flag: @@ -172,8 +194,8 @@ argocd ... 
--insecure ## I have configured Dex via `dex.config` in `argocd-cm`, it still says Dex is unconfigured. Why? -Most likely you forgot to set the `url` in `argocd-cm` to point to your ArgoCD as well. See also -[the docs](/operator-manual/user-management/#2-configure-argo-cd-for-sso). +Most likely you forgot to set the `url` in `argocd-cm` to point to your Argo CD as well. See also +[the docs](./operator-manual/user-management/index.md#2-configure-argo-cd-for-sso). ## Why are `SealedSecret` resources reporting a `Status`? @@ -183,25 +205,57 @@ be exposed (on k8s `1.16+`). If your Kubernetes deployment is [modern]( https://www.openshift.com/blog/a-look-into-the-technical-details-of-kubernetes-1-16), ensure you're using a fixed CRD if you want this feature to work at all. -## Why are resources of type `SealedSecret` stuck in the `Progressing` state? +## Why are resources of type `SealedSecret` stuck in the `Progressing` state? The controller of the `SealedSecret` resource may expose the status condition on the resource it provisioned. Since -version `v2.0.0` ArgoCD picks up that status condition to derive a health status for the `SealedSecret`. +version `v2.0.0` Argo CD picks up that status condition to derive a health status for the `SealedSecret`. Versions before `v0.15.0` of the `SealedSecret` controller are affected by an issue regarding these status condition updates, which is why this feature is disabled by default in these versions. Status condition updates may be enabled by starting the `SealedSecret` controller with the `--update-status` command line parameter or by setting the `SEALED_SECRETS_UPDATE_STATUS` environment variable. 
-To disable ArgoCD from checking the status condition on `SealedSecret` resources, add the following resource -customization in your `argocd-cm` ConfigMap: +To disable Argo CD from checking the status condition on `SealedSecret` resources, add the following resource +customization in your `argocd-cm` ConfigMap via `resource.customizations.health.` key. ```yaml -resource.customizations: | - bitnami.com/SealedSecret: - health.lua: | - hs = {} - hs.status = "Healthy" - hs.message = "Controller doesn't report resource status" - return hs +resource.customizations.health.bitnami.com_SealedSecret: | + hs = {} + hs.status = "Healthy" + hs.message = "Controller doesn't report resource status" + return hs ``` + +## How do I fix `The order in patch list … doesn't match $setElementOrder list: …`? + +An application may trigger a sync error labeled a `ComparisonError` with a message like: + +> The order in patch list: [map[name:**KEY_BC** value:150] map[name:**KEY_BC** value:500] map[name:**KEY_BD** value:250] map[name:**KEY_BD** value:500] map[name:KEY_BI value:something]] doesn't match $setElementOrder list: [map[name:KEY_AA] map[name:KEY_AB] map[name:KEY_AC] map[name:KEY_AD] map[name:KEY_AE] map[name:KEY_AF] map[name:KEY_AG] map[name:KEY_AH] map[name:KEY_AI] map[name:KEY_AJ] map[name:KEY_AK] map[name:KEY_AL] map[name:KEY_AM] map[name:KEY_AN] map[name:KEY_AO] map[name:KEY_AP] map[name:KEY_AQ] map[name:KEY_AR] map[name:KEY_AS] map[name:KEY_AT] map[name:KEY_AU] map[name:KEY_AV] map[name:KEY_AW] map[name:KEY_AX] map[name:KEY_AY] map[name:KEY_AZ] map[name:KEY_BA] map[name:KEY_BB] map[name:**KEY_BC**] map[name:**KEY_BD**] map[name:KEY_BE] map[name:KEY_BF] map[name:KEY_BG] map[name:KEY_BH] map[name:KEY_BI] map[name:**KEY_BC**] map[name:**KEY_BD**]] + + +There are two parts to the message: + +1. 
`The order in patch list: [` + + This identifies values for items, especially items that appear multiple times: + + > map[name:**KEY_BC** value:150] map[name:**KEY_BC** value:500] map[name:**KEY_BD** value:250] map[name:**KEY_BD** value:500] map[name:KEY_BI value:something] + + You'll want to identify the keys that are duplicated -- you can focus on the first part, as each duplicated key will appear, once for each of its value with its value in the first list. The second list is really just + + `]` + +2. `doesn't match $setElementOrder list: [` + + This includes all of the keys. It's included for debugging purposes -- you don't need to pay much attention to it. It will give you a hint about the precise location in the list for the duplicated keys: + + > map[name:KEY_AA] map[name:KEY_AB] map[name:KEY_AC] map[name:KEY_AD] map[name:KEY_AE] map[name:KEY_AF] map[name:KEY_AG] map[name:KEY_AH] map[name:KEY_AI] map[name:KEY_AJ] map[name:KEY_AK] map[name:KEY_AL] map[name:KEY_AM] map[name:KEY_AN] map[name:KEY_AO] map[name:KEY_AP] map[name:KEY_AQ] map[name:KEY_AR] map[name:KEY_AS] map[name:KEY_AT] map[name:KEY_AU] map[name:KEY_AV] map[name:KEY_AW] map[name:KEY_AX] map[name:KEY_AY] map[name:KEY_AZ] map[name:KEY_BA] map[name:KEY_BB] map[name:**KEY_BC**] map[name:**KEY_BD**] map[name:KEY_BE] map[name:KEY_BF] map[name:KEY_BG] map[name:KEY_BH] map[name:KEY_BI] map[name:**KEY_BC**] map[name:**KEY_BD**] + + `]` + +In this case, the duplicated keys have been **emphasized** to help you identify the problematic keys. Many editors have the ability to highlight all instances of a string, using such an editor can help with such problems. + +The most common instance of this error is with `env:` fields for `containers`. + +!!! note "Dynamic applications" + It's possible that your application is being generated by a tool in which case the duplication might not be evident within the scope of a single file. 
If you have trouble debugging this problem, consider filing a ticket to the owner of the generator tool asking them to improve its validation and error reporting. diff --git a/docs/getting_started.md b/docs/getting_started.md index eec0a8e2c975a..d81bd08897ad8 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -7,6 +7,7 @@ * Installed [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) command-line tool. * Have a [kubeconfig](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) file (default location is `~/.kube/config`). +* CoreDNS. Can be enabled for microk8s by `microk8s enable dns && microk8s stop && microk8s start` ## 1. Install Argo CD @@ -17,18 +18,31 @@ kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/st This will create a new namespace, `argocd`, where Argo CD services and application resources will live. -!!! note - If you are not interested in UI, SSO, multi-cluster management and just want to pull changes into the cluster then you can disable - authentication using `--disable-auth` flag and access Argo CD via CLI using `--port-forward` or `--port-forward-namespace` flags - and proceed to step [#6](#6-create-an-application-from-a-git-repository): - - `kubectl patch deploy argocd-server -n argocd -p '[{"op": "add", "path": "/spec/template/spec/containers/0/command/-", "value": "--disable-auth"}]' --type json` +!!! warning + The installation manifests include `ClusterRoleBinding` resources that reference `argocd` namespace. If you are installing Argo CD into a different + namespace then make sure to update the namespace reference. 
If you are not interested in the UI, SSO, or multi-cluster features, then
This can be retrieved with the command: - -```bash -kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2 -``` - -Using the username `admin` and the password from above, login to Argo CD's IP or hostname: - -```bash -argocd login # e.g. localhost:8080 or argocd.example.com -``` - -Change the password using the command: - -```bash -argocd account update-password -``` - -!!! note - The initial password is set in a kubernetes secret, named `argocd-secret`, during ArgoCD's initial start up. This means if you edit - the deployment in any way which causes a new pod to be deployed, such as disabling TLS on the Argo CD API server. Take note of the initial - pod name when you first install Argo CD, or reset the password by following [these instructions](../faq/#i-forgot-the-admin-password-how-do-i-reset-it) - -> Argo CD v1.9 and later - The initial password for the `admin` account is auto-generated and stored as clear text in the field `password` in a secret named `argocd-initial-admin-secret` in your Argo CD installation namespace. You can simply retrieve this password -using `kubectl`: +using the `argocd` CLI: ```bash -kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d +argocd admin initial-password -n argocd ``` -For better readability, e.g. if you want to copy & paste the generated password, -you can simply append `&& echo` to above command, which will add a newline to -the output. +!!! warning + You should delete the `argocd-initial-admin-secret` from the Argo CD + namespace once you changed the password. The secret serves no other + purpose than to store the initially generated password in clear and can + safely be deleted at any time. It will be re-created on demand by Argo CD + if a new admin password must be re-generated. 
Using the username `admin` and the password from above, login to Argo CD's IP or hostname: @@ -115,19 +97,15 @@ Using the username `admin` and the password from above, login to Argo CD's IP or argocd login ``` +!!! note + The CLI environment must be able to communicate with the Argo CD API server. If it isn't directly accessible as described above in step 3, you can tell the CLI to access it using port forwarding through one of these mechanisms: 1) add `--port-forward-namespace argocd` flag to every CLI command; or 2) set `ARGOCD_OPTS` environment variable: `export ARGOCD_OPTS='--port-forward-namespace argocd'`. + Change the password using the command: ```bash argocd account update-password ``` -!!! note - You should delete the `argocd-initial-admin-secret` from the Argo CD - namespace once you changed the password. The secret serves no other - purpose than to store the initially generated password in clear and can - safely be deleted at any time. It will be re-created on demand by Argo CD - if a new admin password must be re-generated. - ## 5. Register A Cluster To Deploy Apps To (Optional) This step registers a cluster's credentials to Argo CD, and is only necessary when deploying to @@ -145,12 +123,12 @@ for docker-desktop context, run: argocd cluster add docker-desktop ``` -The above command installs a ServiceAccount (`argocd-manager`), into the kube-system namespace of +The above command installs a ServiceAccount (`argocd-manager`), into the kube-system namespace of that kubectl context, and binds the service account to an admin-level ClusterRole. Argo CD uses this service account token to perform its management tasks (i.e. deploy/monitoring). !!! note - The rules of the `argocd-manager-role` role can be modified such that it only has `create`, `update`, `patch`, `delete` privileges to a limited set of namespaces, groups, kinds. 
+ The rules of the `argocd-manager-role` role can be modified such that it only has `create`, `update`, `patch`, `delete` privileges to a limited set of namespaces, groups, kinds. However `get`, `list`, `watch` privileges are required at the cluster-scope for Argo CD to function. ## 6. Create An Application From A Git Repository @@ -160,10 +138,17 @@ An example repository containing a guestbook application is available at ### Creating Apps Via CLI -!!! note - You can access Argo CD using port forwarding: add `--port-forward-namespace argocd` flag to every CLI command or set `ARGOCD_OPTS` environment variable: `export ARGOCD_OPTS='--port-forward-namespace argocd'`: +First we need to set the current namespace to argocd running the following command: - `argocd app create guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path guestbook --dest-server https://kubernetes.default.svc --dest-namespace default` +```bash +kubectl config set-context --current --namespace=argocd +``` + +Create the example guestbook application with the following command: + +```bash +argocd app create guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path guestbook --dest-server https://kubernetes.default.svc --dest-namespace default +``` ### Creating Apps Via UI @@ -181,7 +166,7 @@ Connect the [https://github.com/argoproj/argocd-example-apps.git](https://github ![connect repo](assets/connect-repo.png) -For **Destination**, set cluster to `in-cluster` and namespace to `default`: +For **Destination**, set cluster URL to `https://kubernetes.default.svc` (or `in-cluster` for cluster name) and namespace to `default`: ![destination](assets/destination.png) diff --git a/docs/index.md b/docs/index.md index 454b8c64589c1..6315ced37efad 100644 --- a/docs/index.md +++ b/docs/index.md @@ -25,7 +25,7 @@ kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/st ``` Follow our [getting started guide](getting_started.md). 
Further user oriented [documentation](user-guide/) -is provided for additional features. If you are looking to upgrade ArgoCD, see the [upgrade guide](./operator-manual/upgrading/overview.md). +is provided for additional features. If you are looking to upgrade Argo CD, see the [upgrade guide](./operator-manual/upgrading/overview.md). Developer oriented [documentation](developer-guide/) is available for people interested in building third-party integrations. ## How it works @@ -35,7 +35,6 @@ the desired application state. Kubernetes manifests can be specified in several * [kustomize](https://kustomize.io) applications * [helm](https://helm.sh) charts -* [ksonnet](https://ksonnet.io) applications * [jsonnet](https://jsonnet.org) files * Plain directory of YAML/json manifests * Any custom config management tool configured as a config management plugin @@ -67,7 +66,7 @@ For additional details, see [architecture overview](operator-manual/architecture ## Features * Automated deployment of applications to specified target environments -* Support for multiple config management/templating tools (Kustomize, Helm, Ksonnet, Jsonnet, plain-YAML) +* Support for multiple config management/templating tools (Kustomize, Helm, Jsonnet, plain-YAML) * Ability to manage and deploy to multiple clusters * SSO Integration (OIDC, OAuth2, LDAP, SAML 2.0, GitHub, GitLab, Microsoft, LinkedIn) * Multi-tenancy and RBAC policies for authorization @@ -82,7 +81,7 @@ For additional details, see [architecture overview](operator-manual/architecture * PreSync, Sync, PostSync hooks to support complex application rollouts (e.g.blue/green & canary upgrades) * Audit trails for application events and API calls * Prometheus metrics -* Parameter overrides for overriding ksonnet/helm parameters in Git +* Parameter overrides for overriding helm parameters in Git ## Development Status diff --git a/docs/operator-manual/app-any-namespace.md b/docs/operator-manual/app-any-namespace.md new file mode 100644 index 
0000000000000..21743b7bc003d --- /dev/null +++ b/docs/operator-manual/app-any-namespace.md @@ -0,0 +1,222 @@ +# Applications in any namespace + +**Current feature state**: Beta + +!!! warning + Please read this documentation carefully before you enable this feature. Misconfiguration could lead to potential security issues. + +## Introduction + +As of version 2.5, Argo CD supports managing `Application` resources in namespaces other than the control plane's namespace (which is usually `argocd`), but this feature has to be explicitly enabled and configured appropriately. + +Argo CD administrators can define a certain set of namespaces where `Application` resources may be created, updated and reconciled in. However, applications in these additional namespaces will only be allowed to use certain `AppProjects`, as configured by the Argo CD administrators. This allows ordinary Argo CD users (e.g. application teams) to use patterns like declarative management of `Application` resources, implementing app-of-apps and others without the risk of a privilege escalation through usage of other `AppProjects` that would exceed the permissions granted to the application teams. + +Some manual steps will need to be performed by the Argo CD administrator in order to enable this feature. + +!!! note + This feature is considered beta as of now. Some of the implementation details may change over the course of time until it is promoted to a stable status. We will be happy if early adopters use this feature and provide us with bug reports and feedback. + +## Prerequisites + +### Cluster-scoped Argo CD installation + +This feature can only be enabled and used when your Argo CD is installed as a cluster-wide instance, so it has permissions to list and manipulate resources on a cluster scope. It will not work with an Argo CD installed in namespace-scoped mode. 
+ +### Switch resource tracking method + +Also, while technically not necessary, it is strongly suggested that you switch the application tracking method from the default `label` setting to either `annotation` or `annotation+label`. The reasoning for this is, that application names will be a composite of the namespace's name and the name of the `Application`, and this can easily exceed the 63 characters length limit imposed on label values. Annotations have a notably greater length limit. + +To enable annotation based resource tracking, refer to the documentation about [resource tracking methods](../../user-guide/resource_tracking/) + +## Implementation details + +### Overview + +In order for an application to be managed and reconciled outside the Argo CD's control plane namespace, two prerequisites must match: + +1. The `Application`'s namespace must be explicitly enabled using the `--application-namespaces` parameter for the `argocd-application-controller` and `argocd-server` workloads. This parameter controls the list of namespaces that Argo CD will be allowed to source `Application` resources from globally. Any namespace not configured here cannot be used from any `AppProject`. +1. The `AppProject` referenced by the `.spec.project` field of the `Application` must have the namespace listed in its `.spec.sourceNamespaces` field. This setting will determine whether an `Application` may use a certain `AppProject`. If an `Application` specifies an `AppProject` that is not allowed, Argo CD refuses to process this `Application`. As stated above, any namespace configured in the `.spec.sourceNamespaces` field must also be enabled globally. + +`Applications` in different namespaces can be created and managed just like any other `Application` in the `argocd` namespace previously, either declaratively or through the Argo CD API (e.g. using the CLI, the web UI, the REST API, etc). 
+ +### Reconfigure Argo CD to allow certain namespaces + +#### Change workload startup parameters + +In order to enable this feature, the Argo CD administrator must reconfigure the `argocd-server` and `argocd-application-controller` workloads to add the `--application-namespaces` parameter to the container's startup command. + +The `--application-namespaces` parameter takes a comma-separated list of namespaces where `Applications` are to be allowed in. Each entry of the list supports shell-style wildcards such as `*`, so for example the entry `app-team-*` would match `app-team-one` and `app-team-two`. To enable all namespaces on the cluster where Argo CD is running on, you can just specify `*`, i.e. `--application-namespaces=*`. + +The startup parameters for both, the `argocd-server` and the `argocd-application-controller` can also be conveniently set up and kept in sync by specifying the `application.namespaces` settings in the `argocd-cmd-params-cm` ConfigMap _instead_ of changing the manifests for the respective workloads. For example: + +```yaml +data: + application.namespaces: app-team-one, app-team-two +``` + +would allow the `app-team-one` and `app-team-two` namespaces for managing `Application` resources. After a change to the `argocd-cmd-params-cm` namespace, the appropriate workloads need to be restarted: + +```bash +kubectl rollout restart -n argocd deployment argocd-server +kubectl rollout restart -n argocd statefulset argocd-application-controller +``` + +#### Adapt Kubernetes RBAC + +We decided to not extend the Kubernetes RBAC for the `argocd-server` workload by default for the time being. If you want `Applications` in other namespaces to be managed by the Argo CD API (i.e. the CLI and UI), you need to extend the Kubernetes permissions for the `argocd-server` ServiceAccount. + +We supply a `ClusterRole` and `ClusterRoleBinding` suitable for this purpose in the `examples/k8s-rbac/argocd-server-applications` directory. 
For a default Argo CD installation (i.e. installed to the `argocd` namespace), you can just apply them as-is: + +```shell +kubectl apply -k examples/k8s-rbac/argocd-server-applications/ +``` + +`argocd-notifications-controller-rbac-clusterrole.yaml` and `argocd-notifications-controller-rbac-clusterrolebinding.yaml` are used to support notifications controller to notify apps in all namespaces. + +!!! note + At some later point in time, we may make this cluster role part of the default installation manifests. + +### Allowing additional namespaces in an AppProject + +Any user with Kubernetes access to the Argo CD control plane's namespace (`argocd`), especially those with permissions to create or update `Applications` in a declarative way, is to be considered an Argo CD admin. + +This prevented unprivileged Argo CD users from declaratively creating or managing `Applications` in the past. Those users were constrained to using the API instead, subject to Argo CD RBAC which ensures only `Applications` in allowed `AppProjects` were created. + +For an `Application` to be created outside the `argocd` namespace, the `AppProject` referred to in the `Application`'s `.spec.project` field must include the `Application`'s namespace in its `.spec.sourceNamespaces` field. + +For example, consider the two following (incomplete) `AppProject` specs: + +```yaml +kind: AppProject +apiVersion: argoproj.io/v1alpha1 +metadata: + name: project-one + namespace: argocd +spec: + sourceNamespaces: + - namespace-one +``` + +and + +```yaml +kind: AppProject +apiVersion: argoproj.io/v1alpha1 +metadata: + name: project-two + namespace: argocd +spec: + sourceNamespaces: + - namespace-two +``` + +In order for an Application to set `.spec.project` to `project-one`, it would have to be created in either namespace `namespace-one` or `argocd`. Likewise, in order for an Application to set `.spec.project` to `project-two`, it would have to be created in either namespace `namespace-two` or `argocd`. 
+ +If an Application in `namespace-two` would set their `.spec.project` to `project-one` or an Application in `namespace-one` would set their `.spec.project` to `project-two`, Argo CD would consider this as a permission violation and refuse to reconcile the Application. + +Also, the Argo CD API will enforce these constraints, regardless of the Argo CD RBAC permissions. + +The `.spec.sourceNamespaces` field of the `AppProject` is a list that can contain an arbitrary amount of namespaces, and each entry supports shell-style wildcard, so that you can allow namespaces with patterns like `team-one-*`. + +!!! warning + Do not add user controlled namespaces in the `.spec.sourceNamespaces` field of any privileged AppProject like the `default` project. Always make sure that the AppProject follows the principle of granting least required privileges. Never grant access to the `argocd` namespace within the AppProject. + +!!! note + For backwards compatibility, Applications in the Argo CD control plane's namespace (`argocd`) are allowed to set their `.spec.project` field to reference any AppProject, regardless of the restrictions placed by the AppProject's `.spec.sourceNamespaces` field. + +### Application names + +For the CLI and UI, applications are now referred to and displayed as in the format `/`. + +For backwards compatibility, if the namespace of the Application is the control plane's namespace (i.e. `argocd`), the `` can be omitted from the application name when referring to it. For example, the application names `argocd/someapp` and `someapp` are semantically the same and refer to the same application in the CLI and the UI. + +### Application RBAC + +The RBAC syntax for Application objects has been changed from `/` to `//` to accommodate the need to restrict access based on the source namespace of the Application to be managed. + +For backwards compatibility, Applications in the `argocd` namespace can still be refered to as `/` in the RBAC policy rules. 
+ +Wildcards do not make any distinction between project and application namespaces yet. For example, the following RBAC rule would match any application belonging to project `foo`, regardless of the namespace it is created in: + +``` +p, somerole, applications, get, foo/*, allow +``` + +If you want to restrict access to be granted only to `Applications` in project `foo` within namespace `bar`, the rule would need to be adapted as follows: + +``` +p, somerole, applications, get, foo/bar/*, allow +``` + +## Managing applications in other namespaces + +### Declaratively + +For declarative management of Applications, just create the Application from a YAML or JSON manifest in the desired namespace. Make sure that the `.spec.project` field refers to an AppProject that allows this namespace. For example, the following (incomplete) Application manifest creates an Application in the namespace `some-namespace`: + +```yaml +kind: Application +apiVersion: argoproj.io/v1alpha1 +metadata: + name: some-app + namespace: some-namespace +spec: + project: some-project + # ... +``` + +The project `some-project` will then need to specify `some-namespace` in the list of allowed source namespaces, e.g. + +```yaml +kind: AppProject +apiVersion: argoproj.io/v1alpha1 +metadata: + name: some-project + namespace: argocd +spec: + sourceNamespaces: + - some-namespace +``` + +### Using the CLI + +You can use all existing Argo CD CLI commands for managing applications in other namespaces, exactly as you would use the CLI to manage applications in the control plane's namespace. + +For example, to retrieve the `Application` named `foo` in the namespace `bar`, you can use the following CLI command: + +```shell +argocd app get foo/bar +``` + +Likewise, to manage this application, keep referring to it as `foo/bar`: + +```bash +# Create an application +argocd app create foo/bar ... 
+# Sync the application +argocd app sync foo/bar +# Delete the application +argocd app delete foo/bar +# Retrieve application's manifest +argocd app manifests foo/bar +``` + +As stated previously, for applications in the Argo CD's control plane namespace, you can omit the namespace from the application name. + +### Using the UI + +Similar to the CLI, you can refer to the application in the UI as `foo/bar`. + +For example, to create an application named `bar` in the namespace `foo` in the web UI, set the application name in the creation dialogue's _Application Name_ field to `foo/bar`. If the namespace is omitted, the control plane's namespace will be used. + +### Using the REST API + +If you are using the REST API, the namespace for `Application` cannot be specified as the application name, and resources need to be specified using the optional `appNamespace` query parameter. For example, to work with the `Application` resource named `foo` in the namespace `bar`, the request would look like follows: + +```bash +GET /api/v1/applications/foo?appNamespace=bar +``` + +For other operations such as `POST` and `PUT`, the `appNamespace` parameter must be part of the request's payload. + +For `Application` resources in the control plane namespace, this parameter can be omitted. diff --git a/docs/operator-manual/application.yaml b/docs/operator-manual/application.yaml index 4172e970d7fc9..75a0d3b0df8ae 100644 --- a/docs/operator-manual/application.yaml +++ b/docs/operator-manual/application.yaml @@ -4,21 +4,29 @@ metadata: name: guestbook # You'll usually want to add your resources to the argocd namespace. namespace: argocd - # Add a this finalizer ONLY if you want these to cascade delete. + # Add this finalizer ONLY if you want these to cascade delete. 
finalizers: + # The default behaviour is foreground cascading deletion - resources-finalizer.argocd.argoproj.io + # Alternatively, you can use background cascading deletion + # - resources-finalizer.argocd.argoproj.io/background + # Add labels to your application object. + labels: + name: guestbook spec: # The project the application belongs to. project: default # Source of the application manifests source: - repoURL: https://github.com/argoproj/argocd-example-apps.git - targetRevision: HEAD - path: guestbook + repoURL: https://github.com/argoproj/argocd-example-apps.git # Can point to either a Helm chart repo or a git repo. + targetRevision: HEAD # For Helm, this refers to the chart version. + path: guestbook # This has no meaning for Helm charts pulled directly from a Helm repo instead of git. # helm specific config + chart: chart-name # Set this when pulling directly from a Helm repo. DO NOT set for git-hosted Helm charts. helm: + passCredentials: false # If true then adds --pass-credentials to Helm commands to pass credentials to all domains # Extra parameters to set (same as setting through values.yaml, but these take precedence) parameters: - name: "nginx-ingress.controller.service.annotations.external-dns\\.alpha\\.kubernetes\\.io/hostname" @@ -27,6 +35,11 @@ spec: value: "true" forceString: true # ensures that value is treated as a string + # Use the contents of files as parameters (uses Helm's --set-file) + fileParameters: + - name: config + path: files/config.json + # Release name override (defaults to application name) releaseName: guestbook @@ -35,7 +48,10 @@ spec: valueFiles: - values-prod.yaml - # Values file as block file + # Ignore locally missing valueFiles when installing Helm chart. Defaults to false + ignoreMissingValueFiles: false + + # Values file as block file. Prefer to use valuesObject if possible (see below) values: | ingress: enabled: true @@ -51,7 +67,26 @@ spec: hosts: - mydomain.example.com - # Optional Helm version to template with. 
If omitted it will fallback to look at the 'apiVersion' in Chart.yaml + # Values file as block file. This takes precedence over values + valuesObject: + ingress: + enabled: true + path: / + hosts: + - mydomain.example.com + annotations: + kubernetes.io/ingress.class: nginx + kubernetes.io/tls-acme: "true" + labels: {} + tls: + - secretName: mydomain-tls + hosts: + - mydomain.example.com + + # Skip custom resource definition installation if chart contains custom resource definitions. Defaults to false + skipCrds: false + + # Optional Helm version to template with. If omitted it will fall back to look at the 'apiVersion' in Chart.yaml # and decide which Helm binary to use automatically. This field can be either 'v2' or 'v3'. version: v2 @@ -59,11 +94,22 @@ spec: kustomize: # Optional kustomize version. Note: version must be configured in argocd-cm ConfigMap version: v3.5.4 - # Optional image name prefix + # Supported kustomize transformers. https://kubectl.docs.kubernetes.io/references/kustomize/kustomization/ namePrefix: prod- - # Optional images passed to "kustomize edit set image". + nameSuffix: -some-suffix + commonLabels: + foo: bar + commonAnnotations: + beep: boop-${ARGOCD_APP_REVISION} + # Toggle which enables/disables env variables substitution in commonAnnotations + commonAnnotationsEnvsubst: true images: - gcr.io/heptio-images/ks-guestbook-demo:0.2 + - my-app=gcr.io/my-repo/my-app:0.1 + namespace: custom-namespace + replicas: + - name: kustomize-guestbook-ui + count: 4 # directory directory: @@ -82,20 +128,55 @@ spec: - code: false name: foo value: bar + # Exclude contains a glob pattern to match paths against that should be explicitly excluded from being used during + # manifest generation. This takes precedence over the `include` field. + # To match multiple patterns, wrap the patterns in {} and separate them with commas. 
For example: '{config.yaml,env-use2/*}' + exclude: 'config.yaml' + # Include contains a glob pattern to match paths against that should be explicitly included during manifest + # generation. If this field is set, only matching manifests will be included. + # To match multiple patterns, wrap the patterns in {} and separate them with commas. For example: '{*.yml,*.yaml}' + include: '*.yaml' # plugin specific config plugin: + # If the plugin is defined as a sidecar and name is not passed, the plugin will be automatically matched with the + # Application according to the plugin's discovery rules. name: mypluginname # environment variables passed to the plugin env: - name: FOO value: bar + # Plugin parameters are new in v2.5. + parameters: + - name: string-param + string: example-string + - name: array-param + array: [item1, item2] + - name: map-param + map: + param-name: param-value + + # Sources field specifies the list of sources for the application + sources: + - repoURL: https://github.com/argoproj/argocd-example-apps.git # Can point to either a Helm chart repo or a git repo. + targetRevision: HEAD # For Helm, this refers to the chart version. + path: guestbook # This has no meaning for Helm charts pulled directly from a Helm repo instead of git. + ref: my-repo # For Helm, acts as a reference to this source for fetching values files from this source. 
Has no meaning when under `source` field # Destination cluster and namespace to deploy the application destination: + # cluster API URL server: https://kubernetes.default.svc + # or cluster name + # name: in-cluster + # The namespace will only be set for namespace-scoped resources that have not set a value for .metadata.namespace namespace: guestbook - + + # Extra information to show in the Argo CD Application details tab + info: + - name: 'Example:' + value: 'https://example.com' + # Sync policy syncPolicy: automated: # automated sync by default retries failed attempts 5 times with following delays between attempts ( 5s, 10s, 20s, 40s, 80s ); retry controlled using `retry` field. @@ -107,6 +188,16 @@ spec: - CreateNamespace=true # Namespace Auto-Creation ensures that namespace specified as the application destination exists in the destination cluster. - PrunePropagationPolicy=foreground # Supported policies are background, foreground and orphan. - PruneLast=true # Allow the ability for resource pruning to happen as a final, implicit wave of a sync operation + - RespectIgnoreDifferences=true # When syncing changes, respect fields ignored by the ignoreDifferences configuration + managedNamespaceMetadata: # Sets the metadata for the application namespace. Only valid if CreateNamespace=true (see above), otherwise it's a no-op. 
+ labels: # The labels to set on the application namespace + any: label + you: like + annotations: # The annotations to set on the application namespace + the: same + applies: for + annotations: on-the-namespace + # The retry feature is available since v1.7 retry: limit: 5 # number of failed sync attempt retries; unlimited number of attempts if less than 0 @@ -115,9 +206,28 @@ spec: factor: 2 # a factor to multiply the base duration after each failed retry maxDuration: 3m # the maximum amount of time allowed for the backoff strategy - # Ignore differences at the specified json pointers + # Will ignore differences between live and desired states during the diff. Note that these configurations are not + # used during the sync process unless the `RespectIgnoreDifferences=true` sync option is enabled. ignoreDifferences: + # for the specified json pointers - group: apps kind: Deployment jsonPointers: - /spec/replicas + - kind: ConfigMap + jqPathExpressions: + - '.data["config.yaml"].auth' + # for the specified managedFields managers + - group: "*" + kind: "*" + managedFieldsManagers: + - kube-controller-manager + # Name and namespace are optional. If specified, they must match exactly, these are not glob patterns. + name: my-deployment + namespace: my-namespace + + # RevisionHistoryLimit limits the number of items kept in the application's revision history, which is used for + # informational purposes as well as for rollbacks to previous versions. This should only be changed in exceptional + # circumstances. Setting to zero will store no history. This will reduce storage used. Increasing will increase the + # space used to store the history, so we do not recommend increasing it. 
+ revisionHistoryLimit: 10 diff --git a/docs/operator-manual/applicationset.yaml b/docs/operator-manual/applicationset.yaml new file mode 100644 index 0000000000000..65935802c674a --- /dev/null +++ b/docs/operator-manual/applicationset.yaml @@ -0,0 +1,38 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: test-hello-world-appset + namespace: argocd +spec: + # See docs for available generators and their specs. + generators: + - list: + elements: + - cluster: https://kubernetes.default.svc + # Determines whether go templating will be used in the `template` field below. + goTemplate: false + # Optional list of go templating options, see https://pkg.go.dev/text/template#Template.Option + # This is only relevant if `goTemplate` is true + goTemplateOptions: ["missingkey="] + # These fields are identical to the Application spec. + template: + metadata: + name: test-hello-world-app + spec: + project: my-project + # This sync policy pertains to the ApplicationSet, not to the Applications it creates. + syncPolicy: + # Determines whether the controller will delete Applications when an ApplicationSet is deleted. + preserveResourcesOnDeletion: false + # Alpha feature to determine the order in which ApplicationSet applies changes. + strategy: + # This field lets you define fields which should be ignored when applying Application resources. This is helpful if you + # want to use ApplicationSets to create apps, but also want to allow users to modify those apps without having their + # changes overwritten by the ApplicationSet. 
+ ignoreApplicationDifferences: + - jsonPointers: + - /spec/source/targetRevision + - name: some-app + jqExpressions: + - .spec.source.helm.values + diff --git a/docs/operator-manual/applicationset/Application-Deletion.md b/docs/operator-manual/applicationset/Application-Deletion.md new file mode 100644 index 0000000000000..b59a556ec7f40 --- /dev/null +++ b/docs/operator-manual/applicationset/Application-Deletion.md @@ -0,0 +1,29 @@ +# Application Pruning & Resource Deletion + +All `Application` resources created by the ApplicationSet controller (from an ApplicationSet) will contain: + +- A `.metadata.ownerReferences` reference back to the *parent* `ApplicationSet` resource +- An Argo CD `resources-finalizer.argocd.argoproj.io` finalizer in `.metadata.finalizers` of the Application if `.syncPolicy.preserveResourcesOnDeletion` is set to false. + +The end result is that when an ApplicationSet is deleted, the following occurs (in rough order): + +- The `ApplicationSet` resource itself is deleted +- Any `Application` resources that were created from this `ApplicationSet` (as identified by owner reference) +- Any deployed resources (`Deployments`, `Services`, `ConfigMaps`, etc) on the managed cluster, that were created from that `Application` resource (by Argo CD), will be deleted. + - Argo CD is responsible for handling this deletion, via [the deletion finalizer](../../../user-guide/app_deletion/#about-the-deletion-finalizer). + - To preserve deployed resources, set `.syncPolicy.preserveResourcesOnDeletion` to true in the ApplicationSet. + +Thus the lifecycle of the `ApplicationSet`, the `Application`, and the `Application`'s resources, are equivalent. + +!!! note + See also the [controlling resource modification](Controlling-Resource-Modification.md) page for more information about how to prevent deletion or modification of Application resources by the ApplicationSet controller. 
+ +It *is* still possible to delete an `ApplicationSet` resource, while preventing `Application`s (and their deployed resources) from also being deleted, using a non-cascading delete: +``` +kubectl delete ApplicationSet (NAME) --cascade=orphan +``` + +!!! warning + Even if using a non-cascaded delete, the `resources-finalizer.argocd.argoproj.io` is still specified on the `Application`. Thus, when the `Application` is deleted, all of its deployed resources will also be deleted. (The lifecycle of the Application, and its *child* objects, are still equivalent.) + + To prevent the deletion of the resources of the Application, such as Services, Deployments, etc, set `.syncPolicy.preserveResourcesOnDeletion` to true in the ApplicationSet. This syncPolicy parameter prevents the finalizer from being added to the Application. \ No newline at end of file diff --git a/docs/operator-manual/applicationset/Appset-Any-Namespace.md b/docs/operator-manual/applicationset/Appset-Any-Namespace.md new file mode 100644 index 0000000000000..61716414aeb69 --- /dev/null +++ b/docs/operator-manual/applicationset/Appset-Any-Namespace.md @@ -0,0 +1,227 @@ +# ApplicationSet in any namespace + +**Current feature state**: Beta + +!!! warning + Please read this documentation carefully before you enable this feature. Misconfiguration could lead to potential security issues. + +## Introduction + +As of version 2.8, Argo CD supports managing `ApplicationSet` resources in namespaces other than the control plane's namespace (which is usually `argocd`), but this feature has to be explicitly enabled and configured appropriately. + +Argo CD administrators can define a certain set of namespaces where `ApplicationSet` resources may be created, updated and reconciled in. + +As Applications generated by an ApplicationSet are generated in the same namespace as the ApplicationSet itself, this works in combination with [App in any namespace](../app-any-namespace.md). 
+ +## Prerequisites + +### App in any namespace configured + +This feature needs [App in any namespace](../app-any-namespace.md) feature activated. The list of namespaces must be the same. + +### Cluster-scoped Argo CD installation + +This feature can only be enabled and used when your Argo CD ApplicationSet controller is installed as a cluster-wide instance, so it has permissions to list and manipulate resources on a cluster scope. It will *not* work with an Argo CD installed in namespace-scoped mode. + +### SCM Providers secrets consideration + +By allowing ApplicationSet in any namespace you must be aware that any secrets can be exfiltrated using `scmProvider` or `pullRequest` generators. + +Here is an example: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: myapps +spec: + generators: + - scmProvider: + gitea: + # The Gitea owner to scan. + owner: myorg + # With this malicious setting, user can send all request to a Pod that will log incoming requests including headers with tokens + api: http://my-service.my-namespace.svc.cluster.local + # If true, scan every branch of every repository. If false, scan only the default branch. Defaults to false. + allBranches: true + # By changing this token reference, user can exfiltrate any secrets + tokenRef: + secretName: gitea-token + key: token + template: +``` + +Therefore administrator must restrict the urls of the allowed SCM Providers (example: `https://git.mydomain.com/,https://gitlab.mydomain.com/`) by setting the environment variable `ARGOCD_APPLICATIONSET_CONTROLLER_ALLOWED_SCM_PROVIDERS` to argocd-cmd-params-cm `applicationsetcontroller.allowed.scm.providers`. If another url is used, it will be rejected by the applicationset controller. + +For example: +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-cmd-params-cm +data: + applicationsetcontroller.allowed.scm.providers: https://git.mydomain.com/,https://gitlab.mydomain.com/ +``` + +!!! 
note + Please note the url used in the `api` field of the `ApplicationSet` must match the url declared by the Administrator including the protocol + +!!! warning + The allow-list only applies to SCM providers for which the user may configure a custom `api`. Where an SCM or PR + generator does not accept a custom API URL, the provider is implicitly allowed. + +If you do not intend to allow users to use the SCM or PR generators, you can disable them entirely by setting the environment variable `ARGOCD_APPLICATIONSET_CONTROLLER_ALLOW_SCM_PROVIDERS` to argocd-cmd-params-cm `applicationsetcontroller.allow.scm.providers` to `false`. + +### Overview + +In order for an ApplicationSet to be managed and reconciled outside the Argo CD's control plane namespace, two prerequisites must match: + +1. The namespace list from which `argocd-applicationset-controller` can source `ApplicationSets` must be explicitly set using environment variable `ARGOCD_APPLICATIONSET_CONTROLLER_NAMESPACES` or alternatively using parameter `--applicationset-namespaces`. +2. The enabled namespaces must be entirely covered by the [App in any namespace](../app-any-namespace.md), otherwise the Applications generated outside the allowed Application namespaces won't be reconciled + +It can be achieved by setting the environment variable `ARGOCD_APPLICATIONSET_CONTROLLER_NAMESPACES` to argocd-cmd-params-cm `applicationsetcontroller.namespaces` + +`ApplicationSets` in different namespaces can be created and managed just like any other `ApplicationSet` in the `argocd` namespace previously, either declaratively or through the Argo CD API (e.g. using the CLI, the web UI, the REST API, etc). + +### Reconfigure Argo CD to allow certain namespaces + +#### Change workload startup parameters + +In order to enable this feature, the Argo CD administrator must reconfigure the `argocd-applicationset-controller` workloads to add the `--applicationset-namespaces` parameter to the container's startup command. 
+ +### Safely template project + +As [App in any namespace](../app-any-namespace.md) is a prerequisite, it is possible to safely template project. + +Let's take an example with two teams and an infra project: + +```yaml +kind: AppProject +apiVersion: argoproj.io/v1alpha1 +metadata: + name: infra-project + namespace: argocd +spec: + destinations: + - namespace: '*' +``` + +```yaml +kind: AppProject +apiVersion: argoproj.io/v1alpha1 +metadata: + name: team-one-project + namespace: argocd +spec: + sourceNamespaces: + - team-one-cd +``` + +```yaml +kind: AppProject +apiVersion: argoproj.io/v1alpha1 +metadata: + name: team-two-project + namespace: argocd +spec: + sourceNamespaces: + - team-two-cd +``` + +Creating following `ApplicationSet` generates two Applications `infra-escalation` and `team-two-escalation`. Both will be rejected as they are outside `argocd` namespace, therefore `sourceNamespaces` will be checked + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: team-one-product-one + namespace: team-one-cd +spec: + generators: + list: + - id: infra + project: infra-project + - id: team-two + project: team-two-project + template: + metadata: + name: '{{name}}-escalation' + spec: + project: "{{project}}" +``` + +### ApplicationSet names + +For the CLI, applicationSets are now referred to and displayed as in the format `/`. + +For backwards compatibility, if the namespace of the ApplicationSet is the control plane's namespace (i.e. `argocd`), the `` can be omitted from the applicationset name when referring to it. For example, the application names `argocd/someappset` and `someappset` are semantically the same and refer to the same application in the CLI and the UI. + +### Applicationsets RBAC + +The RBAC syntax for Application objects has been changed from `/` to `//` to accommodate the need to restrict access based on the source namespace of the Application to be managed. 
+ +For backwards compatibility, Applications in the argocd namespace can still be referred to as `/` in the RBAC policy rules. + +Wildcards do not make any distinction between project and applicationset namespaces yet. For example, the following RBAC rule would match any application belonging to project foo, regardless of the namespace it is created in: + + +``` +p, somerole, applicationsets, get, foo/*, allow +``` + +If you want to restrict access to be granted only to `ApplicationSets` with project `foo` within namespace `bar`, the rule would need to be adapted as follows: + +``` +p, somerole, applicationsets, get, foo/bar/*, allow +``` + +## Managing applicationSets in other namespaces + +### Using the CLI + +You can use all existing Argo CD CLI commands for managing applications in other namespaces, exactly as you would use the CLI to manage applications in the control plane's namespace. + +For example, to retrieve the `ApplicationSet` named `foo` in the namespace `bar`, you can use the following CLI command: + +```shell +argocd appset get foo/bar +``` + +Likewise, to manage this applicationSet, keep referring to it as `foo/bar`: + +```bash +# Delete the application +argocd appset delete foo/bar +``` + +There is no change on the create command as it is using a file. You just need to add the namespace in the `metadata.namespace` field. + +As stated previously, for applicationSets in the Argo CD's control plane namespace, you can omit the namespace from the application name. + +### Using the REST API + +If you are using the REST API, the namespace for `ApplicationSet` cannot be specified as the application name, and resources need to be specified using the optional `appNamespace` query parameter. 
For example, to work with the `ApplicationSet` resource named `foo` in the namespace `bar`, the request would look like follows: + +```bash +GET /api/v1/applicationsets/foo?appsetNamespace=bar +``` + +For other operations such as `POST` and `PUT`, the `appsetNamespace` parameter must be part of the request's payload. + +For `ApplicationSet` resources in the control plane namespace, this parameter can be omitted. + +## Clusters secrets consideration + +By allowing ApplicationSet in any namespace you must be aware that clusters can be discovered and used. + +Example: + +Following will discover all clusters + +```yaml +spec: + generators: + - clusters: {} # Automatically use all clusters defined within Argo CD +``` + +If you don't want to allow users to discover all clusters with ApplicationSets from other namespaces you may consider deploying ArgoCD in namespace scope or use OPA rules. \ No newline at end of file diff --git a/docs/operator-manual/applicationset/Argo-CD-Integration.md b/docs/operator-manual/applicationset/Argo-CD-Integration.md new file mode 100644 index 0000000000000..d6179b570eebb --- /dev/null +++ b/docs/operator-manual/applicationset/Argo-CD-Integration.md @@ -0,0 +1,29 @@ +# How ApplicationSet controller interacts with Argo CD + +When you create, update, or delete an `ApplicationSet` resource, the ApplicationSet controller responds by creating, updating, or deleting one or more corresponding Argo CD `Application` resources. + +In fact, the *sole* responsibility of the ApplicationSet controller is to create, update, and delete `Application` resources within the Argo CD namespace. The controller's only job is to ensure that the `Application` resources remain consistent with the defined declarative `ApplicationSet` resource, and nothing more. 
+ +Thus the ApplicationSet controller: + +- Does not create/modify/delete Kubernetes resources (other than the `Application` CR) +- Does not connect to clusters other than the one Argo CD is deployed to +- Does not interact with namespaces other than the one Argo CD is deployed within + +!!!important "Use the Argo CD namespace" + All ApplicationSet resources and the ApplicationSet controller must be installed in the same namespace as Argo CD. + ApplicationSet resources in a different namespace will be ignored. + +It is Argo CD itself that is responsible for the actual deployment of the generated child `Application` resources, such as Deployments, Services, and ConfigMaps. + +The ApplicationSet controller can thus be thought of as an `Application` 'factory', taking an `ApplicationSet` resource as input, and outputting one or more Argo CD `Application` resources that correspond to the parameters of that set. + +![ApplicationSet controller vs Argo CD, interaction diagram](../../assets/applicationset/Argo-CD-Integration/ApplicationSet-Argo-Relationship-v2.png) + +In this diagram an `ApplicationSet` resource is defined, and it is the responsibility of the ApplicationSet controller to create the corresponding `Application` resources. The resulting `Application` resources are then managed by Argo CD: that is, Argo CD is responsible for actually deploying the child resources. + +Argo CD generates the application's Kubernetes resources based on the contents of the Git repository defined within the Application `spec` field, deploying e.g. Deployments, Service, and other resources. + +Creation, update, or deletion of ApplicationSets will have a direct effect on the Applications present in the Argo CD namespace. Likewise, cluster events (the addition/deletion of Argo CD cluster secrets, when using Cluster generator), or changes in Git (when using Git generator), will be used as input to the ApplicationSet controller in constructing `Application` resources. 
+ +Argo CD and the ApplicationSet controller work together to ensure a consistent set of Application resources exist, and are deployed across the target clusters. diff --git a/docs/operator-manual/applicationset/Controlling-Resource-Modification.md b/docs/operator-manual/applicationset/Controlling-Resource-Modification.md new file mode 100644 index 0000000000000..73f8a5a3eeb50 --- /dev/null +++ b/docs/operator-manual/applicationset/Controlling-Resource-Modification.md @@ -0,0 +1,222 @@ +# Controlling if/when the ApplicationSet controller modifies `Application` resources + +The ApplicationSet controller supports a number of settings that limit the ability of the controller to make changes to generated Applications, for example, preventing the controller from deleting child Applications. + +These settings allow you to exert control over when, and how, changes are made to your Applications, and to their corresponding cluster resources (`Deployments`, `Services`, etc). + +Here are some of the controller settings that may be modified to alter the ApplicationSet controller's resource-handling behaviour. + +### Dry run: prevent ApplicationSet from creating, modifying, or deleting all Applications + +To prevent the ApplicationSet controller from creating, modifying, or deleting any `Application` resources, you may enable `dry-run` mode. This essentially switches the controller into a "read only" mode, where the controller Reconcile loop will run, but no resources will be modified. + +To enable dry-run, add `--dryrun true` to the ApplicationSet Deployment's container launch parameters. + +See 'How to modify ApplicationSet container parameters' below for detailed steps on how to add this parameter to the controller. 
+ +### Managed Applications modification Policies + +The ApplicationSet controller supports a parameter `--policy`, which is specified on launch (within the controller Deployment container), and which restricts what types of modifications will be made to managed Argo CD `Application` resources. + +The `--policy` parameter takes four values: `sync`, `create-only`, `create-delete`, and `create-update`. (`sync` is the default, which is used if the `--policy` parameter is not specified; the other policies are described below). + +It is also possible to set this policy per ApplicationSet. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +spec: + # (...) + syncPolicy: + applicationsSync: create-only # create-update, create-delete sync + +``` + +- Policy `create-only`: Prevents ApplicationSet controller from modifying or deleting Applications. +- Policy `create-update`: Prevents ApplicationSet controller from deleting Applications. Update is allowed. +- Policy `create-delete`: Prevents ApplicationSet controller from modifying Applications. Delete is allowed. +- Policy `sync`: Update and Delete are allowed. + +If the controller parameter `--policy` is set, it takes precedence on the field `applicationsSync`. It is possible to allow per ApplicationSet sync policy by setting variable `ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_POLICY_OVERRIDE` to argocd-cmd-params-cm `applicationsetcontroller.enable.policy.override` or directly with controller parameter `--enable-policy-override` (default to `false`). 
+ +This does not prevent deletion of Applications if the ApplicationSet is deleted + +#### Controller parameter + +To allow the ApplicationSet controller to *create* `Application` resources, but prevent any further modification, such as deletion, or modification of Application fields, add this parameter in the ApplicationSet controller: +``` +--policy create-only +``` + +At ApplicationSet level + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +spec: + # (...) + syncPolicy: + applicationsSync: create-only +``` + +### Policy - `create-update`: Prevent ApplicationSet controller from deleting Applications + +To allow the ApplicationSet controller to create or modify `Application` resources, but prevent Applications from being deleted, add the following parameter to the ApplicationSet controller `Deployment`: +``` +--policy create-update +``` + +This may be useful to users looking for additional protection against deletion of the Applications generated by the controller. + +At ApplicationSet level + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +spec: + # (...) + syncPolicy: + applicationsSync: create-update +``` + +### Ignore certain changes to Applications + +The ApplicationSet spec includes an `ignoreApplicationDifferences` field, which allows you to specify which fields of +the ApplicationSet should be ignored when comparing Applications. + +The field supports multiple ignore rules. Each ignore rule may specify a list of either `jsonPointers` or +`jqPathExpressions` to ignore. + +You may optionally also specify a `name` to apply the ignore rule to a specific Application, or omit the `name` to apply +the ignore rule to all Applications. 
+ +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +spec: + ignoreApplicationDifferences: + - jsonPointers: + - /spec/source/targetRevision + - name: some-app + jqExpressions: + - .spec.source.helm.values +``` + +### Prevent an `Application`'s child resources from being deleted, when the parent Application is deleted + +By default, when an `Application` resource is deleted by the ApplicationSet controller, all of the child resources of the Application will be deleted as well (such as, all of the Application's `Deployments`, `Services`, etc). + +To prevent an Application's child resources from being deleted when the parent Application is deleted, add the `preserveResourcesOnDeletion: true` field to the `syncPolicy` of the ApplicationSet: +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +spec: + # (...) + syncPolicy: + preserveResourcesOnDeletion: true +``` + +More information on the specific behaviour of `preserveResourcesOnDeletion`, and deletion in ApplicationSet controller and Argo CD in general, can be found on the [Application Deletion](Application-Deletion.md) page. + + +### Prevent an Application's child resources from being modified + +Changes made to the ApplicationSet will propagate to the Applications managed by the ApplicationSet, and then Argo CD will propagate the Application changes to the underlying cluster resources (as per [Argo CD Integration](Argo-CD-Integration.md)). + +The propagation of Application changes to the cluster is managed by the [automated sync settings](../../user-guide/auto_sync.md), which are referenced in the ApplicationSet `template` field: + +- `spec.template.syncPolicy.automated`: If enabled, changes to Applications will automatically propagate to the cluster resources of the cluster. + - Unset this within the ApplicationSet template to 'pause' updates to cluster resources managed by the `Application` resource. 
+- `spec.template.syncPolicy.automated.prune`: By default, Automated sync will not delete resources when Argo CD detects the resource is no longer defined in Git. + - For extra safety, set this to false to prevent unexpected changes to the backing Git repository from affecting cluster resources. + + +## How to modify ApplicationSet container launch parameters + +There are a couple of ways to modify the ApplicationSet container parameters, so as to enable the above settings. + +### A) Use `kubectl edit` to modify the deployment on the cluster + +Edit the applicationset-controller `Deployment` resource on the cluster: +``` +kubectl edit deployment/argocd-applicationset-controller -n argocd +``` + +Locate the `.spec.template.spec.containers[0].command` field, and add the required parameter(s): +```yaml +spec: + # (...) + template: + # (...) + spec: + containers: + - command: + - entrypoint.sh + - argocd-applicationset-controller + # Insert new parameters here, for example: + # --policy create-only + # (...) +``` + +Save and exit the editor. Wait for a new `Pod` to start containing the updated parameters. + +### Or, B) Edit the `install.yaml` manifest for the ApplicationSet installation + +Rather than directly editing the cluster resource, you may instead choose to modify the installation YAML that is used to install the ApplicationSet controller: + +Applicable for applicationset versions less than 0.4.0. +```bash +# Clone the repository + +git clone https://github.com/argoproj/applicationset + +# Checkout the version that corresponds to the one you have installed. +git checkout "(version of applicationset)" +# example: git checkout "0.1.0" + +cd applicationset/manifests + +# open 'install.yaml' in a text editor, make the same modifications to Deployment +# as described in the previous section. 
+ +# Apply the change to the cluster +kubectl apply -n argocd -f install.yaml +``` + +## Preserving changes made to an Applications annotations and labels + +It is common practice in Kubernetes to store state in annotations, operators will often make use of this. To allow for this, it is possible to configure a list of annotations that the ApplicationSet should preserve when reconciling. + +For example, imagine that we have an Application created from an ApplicationSet, but a custom annotation and label has since been added (to the Application) that does not exist in the `ApplicationSet` resource: +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + # This annotation and label exists only on this Application, and not in + # the parent ApplicationSet template: + annotations: + my-custom-annotation: some-value + labels: + my-custom-label: some-value +spec: + # (...) +``` + +To preserve this annotation and label we can use the `preservedFields` property of the `ApplicationSet` like so: +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +spec: + # (...) + preservedFields: + annotations: ["my-custom-annotation"] + labels: ["my-custom-label"] +``` + +The ApplicationSet controller will leave this annotation and label as-is when reconciling, even though it is not defined in the metadata of the ApplicationSet itself. + +By default, the Argo CD notifications and the Argo CD refresh type annotations are also preserved. + +!!!note + One can also set global preserved fields for the controller by passing a comma separated list of annotations and labels to + `ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_ANNOTATIONS` and `ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_LABELS` respectively. 
diff --git a/docs/operator-manual/applicationset/Generators-Cluster-Decision-Resource.md b/docs/operator-manual/applicationset/Generators-Cluster-Decision-Resource.md new file mode 100644 index 0000000000000..8f5bb491b8b44 --- /dev/null +++ b/docs/operator-manual/applicationset/Generators-Cluster-Decision-Resource.md @@ -0,0 +1,83 @@ +# Cluster Decision Resource Generator + +The cluster decision resource generates a list of Argo CD clusters. This is done using [duck-typing](https://pkg.go.dev/knative.dev/pkg/apis/duck), which does not require knowledge of the full shape of the referenced kubernetes resource. The following is an example of a cluster-decision-resource-based ApplicationSet generator: +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook + namespace: argocd +spec: + generators: + - clusterDecisionResource: + # ConfigMap with GVK information for the duck type resource + configMapRef: my-configmap + name: quak # Choose either "name" of the resource or "labelSelector" + labelSelector: + matchLabels: # OPTIONAL + duck: spotted + matchExpressions: # OPTIONAL + - key: duck + operator: In + values: + - "spotted" + - "canvasback" + # OPTIONAL: Checks for changes every 60sec (default 3min) + requeueAfterSeconds: 60 + template: + metadata: + name: '{{name}}-guestbook' + spec: + project: "default" + source: + repoURL: https://github.com/argoproj/argocd-example-apps/ + targetRevision: HEAD + path: guestbook + destination: + server: '{{clusterName}}' # 'server' field of the secret + namespace: guestbook +``` +The `quak` resource, referenced by the ApplicationSet `clusterDecisionResource` generator: +```yaml +apiVersion: mallard.io/v1beta1 +kind: Duck +metadata: + name: quak +spec: {} +status: + # Duck-typing ignores all other aspects of the resource except + # the "decisions" list + decisions: + - clusterName: cluster-01 + - clusterName: cluster-02 +``` +The `ApplicationSet` resource references a `ConfigMap` that defines the 
resource to be used in this duck-typing. Only one ConfigMap is required per `ArgoCD` instance, to identify a resource. You can support multiple resource types by creating a `ConfigMap` for each. +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: my-configmap +data: + # apiVersion of the target resource + apiVersion: mallard.io/v1beta1 + # kind of the target resource + kind: ducks + # status key name that holds the list of Argo CD clusters + statusListKey: decisions + # The key in the status list whose value is the cluster name found in Argo CD + matchKey: clusterName +``` + +(*The full example can be found [here](https://github.com/argoproj/argo-cd/tree/master/applicationset/examples/clusterDecisionResource).*) + +This example leverages the cluster management capabilities of the [open-cluster-management.io community](https://open-cluster-management.io/). By creating a `ConfigMap` with the GVK for the `open-cluster-management.io` Placement rule, your ApplicationSet can provision to different clusters in a number of novel ways. One example is to have the ApplicationSet maintain only two Argo CD Applications across 3 or more clusters. Then as maintenance or outages occur, the ApplicationSet will always maintain two Applications, moving the application to available clusters under the Placement rule's direction. + +## How it works +The ApplicationSet needs to be created in the Argo CD namespace, placing the `ConfigMap` in the same namespace allows the ClusterDecisionResource generator to read it. The `ConfigMap` stores the GVK information as well as the status key definitions. In the open-cluster-management example, the ApplicationSet generator will read the kind `placementrules` with an apiVersion of `apps.open-cluster-management.io/v1`. It will attempt to extract the **list** of clusters from the key `decisions`. 
It then validates the actual cluster name as defined in Argo CD against the **value** from the key `clusterName` in each of the elements in the list. + +The ClusterDecisionResource generator passes the 'name', 'server' and any other key/value in the duck-type resource's status list as parameters into the ApplicationSet template. In this example, the decision array contained an additional key `clusterName`, which is now available to the ApplicationSet template. + +!!! note "Clusters listed as `Status.Decisions` must be predefined in Argo CD" + The cluster names listed in the `Status.Decisions` *must* be defined within Argo CD, in order to generate applications for these values. The ApplicationSet controller does not create clusters within Argo CD. + + The Default Cluster list key is `clusters`. \ No newline at end of file diff --git a/docs/operator-manual/applicationset/Generators-Cluster.md b/docs/operator-manual/applicationset/Generators-Cluster.md new file mode 100644 index 0000000000000..92507645a4ffe --- /dev/null +++ b/docs/operator-manual/applicationset/Generators-Cluster.md @@ -0,0 +1,219 @@ +# Cluster Generator + +In Argo CD, managed clusters [are stored within Secrets](../../declarative-setup/#clusters) in the Argo CD namespace. The ApplicationSet controller uses those same Secrets to generate parameters to identify and target available clusters. + +For each cluster registered with Argo CD, the Cluster generator produces parameters based on the list of items found within the cluster secret. + +It automatically provides the following parameter values to the Application template for each cluster: + +- `name` +- `nameNormalized` *('name' but normalized to contain only lowercase alphanumeric characters, '-' or '.')* +- `server` +- `metadata.labels.` *(for each label in the Secret)* +- `metadata.annotations.` *(for each annotation in the Secret)* + +!!! 
note + Use the `nameNormalized` parameter if your cluster name contains characters (such as underscores) that are not valid for Kubernetes resource names. This prevents rendering invalid Kubernetes resources with names like `my_cluster-app1`, and instead would convert them to `my-cluster-app1`. + + +Within [Argo CD cluster Secrets](../../declarative-setup/#clusters) are data fields describing the cluster: +```yaml +kind: Secret +data: + # Within Kubernetes these fields are actually encoded in Base64; they are decoded here for convenience. + # (They are likewise decoded when passed as parameters by the Cluster generator) + config: "{'tlsClientConfig':{'insecure':false}}" + name: "in-cluster2" + server: "https://kubernetes.default.svc" +metadata: + labels: + argocd.argoproj.io/secret-type: cluster +# (...) +``` + +The Cluster generator will automatically identify clusters defined with Argo CD, and extract the cluster data as parameters: +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook + namespace: argocd +spec: + generators: + - clusters: {} # Automatically use all clusters defined within Argo CD + template: + metadata: + name: '{{name}}-guestbook' # 'name' field of the Secret + spec: + project: "my-project" + source: + repoURL: https://github.com/argoproj/argocd-example-apps/ + targetRevision: HEAD + path: guestbook + destination: + server: '{{server}}' # 'server' field of the secret + namespace: guestbook +``` +(*The full example can be found [here](https://github.com/argoproj/argo-cd/tree/master/applicationset/examples/cluster).*) + +In this example, the cluster secret's `name` and `server` fields are used to populate the `Application` resource `name` and `server` (which are then used to target that same cluster). 
+ +### Label selector + +A label selector may be used to narrow the scope of targeted clusters to only those matching a specific label: +```yaml +kind: ApplicationSet +metadata: + name: guestbook + namespace: argocd +spec: + generators: + - clusters: + selector: + matchLabels: + staging: "true" + # The cluster generator also supports matchExpressions. + #matchExpressions: + # - key: staging + # operator: In + # values: + # - "true" + template: + # (...) +``` + +This would match an Argo CD cluster secret containing: +```yaml +kind: Secret +data: + # (... fields as above ...) +metadata: + labels: + argocd.argoproj.io/secret-type: cluster + staging: "true" +# (...) +``` + +The cluster selector also supports set-based requirements, as used by [several core Kubernetes resources](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#resources-that-support-set-based-requirements). + +### Deploying to the local cluster + +In Argo CD, the 'local cluster' is the cluster upon which Argo CD (and the ApplicationSet controller) is installed. This is to distinguish it from 'remote clusters', which are those that are added to Argo CD [declaratively](../../declarative-setup/#clusters) or via the [Argo CD CLI](../../getting_started.md/#5-register-a-cluster-to-deploy-apps-to-optional). + +The cluster generator will automatically target both local and non-local clusters, for every cluster that matches the cluster selector. + +If you wish to target only remote clusters with your Applications (e.g. you want to exclude the local cluster), then use a cluster selector with labels, for example: +```yaml +spec: + generators: + - clusters: + selector: + matchLabels: + argocd.argoproj.io/secret-type: cluster + # The cluster generator also supports matchExpressions. 
+ #matchExpressions: + # - key: staging + # operator: In + # values: + # - "true" +``` + +This selector will not match the default local cluster, since the default local cluster does not have a Secret (and thus does not have the `argocd.argoproj.io/secret-type` label on that secret). Any cluster selector that selects on that label will automatically exclude the default local cluster. + +However, if you do wish to target both local and non-local clusters, while also using label matching, you can create a secret for the local cluster within the Argo CD web UI: + +1. Within the Argo CD web UI, select *Settings*, then *Clusters*. +2. Select your local cluster, usually named `in-cluster`. +3. Click the *Edit* button, and change the *NAME* of the cluster to another value, for example `in-cluster-local`. Any other value here is fine. +4. Leave all other fields unchanged. +5. Click *Save*. + +These steps might seem counterintuitive, but the act of changing one of the default values for the local cluster causes the Argo CD Web UI to create a new secret for this cluster. In the Argo CD namespace, you should now see a Secret resource named `cluster-(cluster suffix)` with the label `argocd.argoproj.io/secret-type: cluster`. You may also create a local [cluster secret declaratively](../../declarative-setup/#clusters), or with the CLI using `argocd cluster add "(context name)" --in-cluster`, rather than through the Web UI. + +### Pass additional key-value pairs via `values` field + +You may pass additional, arbitrary string key-value pairs via the `values` field of the cluster generator. 
Values added via the `values` field are added as `values.(field)` + +In this example, a `revision` parameter value is passed, based on matching labels on the cluster secret: +```yaml +spec: + generators: + - clusters: + selector: + matchLabels: + type: 'staging' + # A key-value map for arbitrary parameters + values: + revision: HEAD # staging clusters use HEAD branch + - clusters: + selector: + matchLabels: + type: 'production' + values: + # production uses a different revision value, for 'stable' branch + revision: stable + template: + metadata: + name: '{{name}}-guestbook' + spec: + project: "my-project" + source: + repoURL: https://github.com/argoproj/argocd-example-apps/ + # The cluster values field for each generator will be substituted here: + targetRevision: '{{values.revision}}' + path: guestbook + destination: + server: '{{server}}' + namespace: guestbook +``` + +In this example the `revision` value from the `generators.clusters` fields is passed into the template as `values.revision`, containing either `HEAD` or `stable` (based on which generator generated the set of parameters). + +!!! note + The `values.` prefix is always prepended to values provided via `generators.clusters.values` field. Ensure you include this prefix in the parameter name within the `template` when using it. + +In `values` we can also interpolate the following parameter values (i.e. 
the same values as presented in the beginning of this page) + +- `name` +- `nameNormalized` *('name' but normalized to contain only lowercase alphanumeric characters, '-' or '.')* +- `server` +- `metadata.labels.` *(for each label in the Secret)* +- `metadata.annotations.` *(for each annotation in the Secret)* + +Extending the example above, we could do something like this: + +```yaml +spec: + generators: + - clusters: + selector: + matchLabels: + type: 'staging' + # A key-value map for arbitrary parameters + values: + # If `my-custom-annotation` is in your cluster secret, `revision` will be substituted with it. + revision: '{{metadata.annotations.my-custom-annotation}}' + clusterName: '{{name}}' + - clusters: + selector: + matchLabels: + type: 'production' + values: + # production uses a different revision value, for 'stable' branch + revision: stable + clusterName: '{{name}}' + template: + metadata: + name: '{{name}}-guestbook' + spec: + project: "my-project" + source: + repoURL: https://github.com/argoproj/argocd-example-apps/ + # The cluster values field for each generator will be substituted here: + targetRevision: '{{values.revision}}' + path: guestbook + destination: + # In this case this is equivalent to just using {{name}} + server: '{{values.clusterName}}' + namespace: guestbook +``` diff --git a/docs/operator-manual/applicationset/Generators-Git-File-Globbing.md b/docs/operator-manual/applicationset/Generators-Git-File-Globbing.md new file mode 100644 index 0000000000000..4f8967b5937fa --- /dev/null +++ b/docs/operator-manual/applicationset/Generators-Git-File-Globbing.md @@ -0,0 +1,85 @@ +# Git File Generator Globbing + +## Problem Statement + +The original and default implementation of the Git file generator does very greedy globbing. This can trigger errors or catch users off-guard. 
For example, consider the following repository layout: + +``` +└── cluster-charts/ + ├── cluster1 + │ ├── mychart/ + │ │  ├── charts/ + │ │   │   └── mysubchart/ + │ │ │ ├── values.yaml + │ │   │   └── etc… + │ │   ├── values.yaml + │ │ └── etc… + │ └── myotherchart/ + │ ├── values.yaml + │ └── etc… + └── cluster2 + └── etc… +``` + +In `cluster1` we have two charts, one of them with a subchart. + +Assuming we need the ApplicationSet to template values in the `values.yaml`, then we need to use a Git file generator instead of a directory generator. The value of the `path` key of the Git file generator should be set to: + +``` +path: cluster-charts/*/*/values.yaml +``` + +However, the default implementation will interpret the above pattern as: + +``` +path: cluster-charts/**/values.yaml +``` + +Meaning, for `mychart` in `cluster1`, that it will pick up both the chart's `values.yaml` but also the one from its subchart. This will most likely fail, and even if it didn't it would be wrong. + +There are multiple other ways this undesirable globbing can fail. For example: + +``` +path: some-path/*.yaml +``` + +This will return all YAML files in any directory at any level under `some-path`, instead of only those directly under it. + +## Enabling the New Globbing + +Since some users may rely on the old behavior it was decided to make the fix optional and not enabled by default. + +It can be enabled in any of these ways: + +1. Pass `--enable-new-git-file-globbing` to the ApplicationSet controller args. +1. Set `ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_NEW_GIT_FILE_GLOBBING=true` in the ApplicationSet controller environment variables. +1. Set `applicationsetcontroller.enable.new.git.file.globbing: true` in the Argo CD ConfigMap. + +Note that the default may change in the future. + +## Usage + +The new Git file generator globbing uses the `doublestar` package. You can find it [here](https://github.com/bmatcuk/doublestar). + +Below is a short excerpt from its documentation. 
+ +doublestar patterns match files and directories recursively. For example, if +you had the following directory structure: + +```bash +grandparent +`-- parent + |-- child1 + `-- child2 +``` + +You could find the children with patterns such as: `**/child*`, +`grandparent/**/child?`, `**/parent/*`, or even just `**` by itself (which will +return all files and directories recursively). + +Bash's globstar is doublestar's inspiration and, as such, works similarly. +Note that the doublestar must appear as a path component by itself. A pattern +such as `/path**` is invalid and will be treated the same as `/path*`, but +`/path*/**` should achieve the desired result. Additionally, `/path/**` will +match all directories and files under the path directory, but `/path/**/` will +only match directories. diff --git a/docs/operator-manual/applicationset/Generators-Git.md b/docs/operator-manual/applicationset/Generators-Git.md new file mode 100644 index 0000000000000..1dcd85ea24b2a --- /dev/null +++ b/docs/operator-manual/applicationset/Generators-Git.md @@ -0,0 +1,439 @@ +# Git Generator + +The Git generator contains two subtypes: the Git directory generator, and Git file generator. + +!!! warning + Git generators are often used to make it easier for (non-admin) developers to create Applications. + If the `project` field in your ApplicationSet is templated, developers may be able to create Applications under Projects with excessive permissions. + For ApplicationSets with a templated `project` field, [the source of truth _must_ be controlled by admins](./Security.md#templated-project-field) + - in the case of git generators, PRs must require admin approval. + +## Git Generator: Directories + +The Git directory generator, one of two subtypes of the Git generator, generates parameters using the directory structure of a specified Git repository. 
+ +Suppose you have a Git repository with the following directory structure: +``` +├── argo-workflows +│ ├── kustomization.yaml +│ └── namespace-install.yaml +└── prometheus-operator + ├── Chart.yaml + ├── README.md + ├── requirements.yaml + └── values.yaml +``` + +This repository contains two directories, one for each of the workloads to deploy: + +- an Argo Workflow controller kustomization YAML file +- a Prometheus Operator Helm chart + +We can deploy both workloads, using this example: +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: cluster-addons + namespace: argocd +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - git: + repoURL: https://github.com/argoproj/argo-cd.git + revision: HEAD + directories: + - path: applicationset/examples/git-generator-directory/cluster-addons/* + template: + metadata: + name: '{{.path.basename}}' + spec: + project: "my-project" + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: '{{.path.path}}' + destination: + server: https://kubernetes.default.svc + namespace: '{{.path.basename}}' + syncPolicy: + syncOptions: + - CreateNamespace=true +``` +(*The full example can be found [here](https://github.com/argoproj/argo-cd/tree/master/applicationset/examples/git-generator-directory).*) + +The generator parameters are: + +- `{{.path.path}}`: The directory paths within the Git repository that match the `path` wildcard. +- `{{index .path.segments n}}`: The directory paths within the Git repository that match the `path` wildcard, split into array elements (`n` - array index) +- `{{.path.basename}}`: For any directory path within the Git repository that matches the `path` wildcard, the right-most path name is extracted (e.g. `/directory/directory2` would produce `directory2`). +- `{{.path.basenameNormalized}}`: This field is the same as `path.basename` with unsupported characters replaced with `-` (e.g. 
a `path` of `/directory/directory_2`, and `path.basename` of `directory_2` would produce `directory-2` here). + +**Note**: The right-most path name always becomes `{{.path.basename}}`. For example, for `- path: /one/two/three/four`, `{{.path.basename}}` is `four`. + +**Note**: If the `pathParamPrefix` option is specified, all `path`-related parameter names above will be prefixed with the specified value and a dot separator. E.g., if `pathParamPrefix` is `myRepo`, then the generated parameter name would be `.myRepo.path` instead of `.path`. Using this option is necessary in a Matrix generator where both child generators are Git generators (to avoid conflicts when merging the child generators’ items). + +Whenever a new Helm chart/Kustomize YAML/Application/plain subdirectory is added to the Git repository, the ApplicationSet controller will detect this change and automatically deploy the resulting manifests within new `Application` resources. + +As with other generators, clusters *must* already be defined within Argo CD, in order to generate Applications for them. + +### Exclude directories + +The Git directory generator will automatically exclude directories that begin with `.` (such as `.git`). 
+ +The Git directory generator also supports an `exclude` option in order to exclude directories in the repository from being scanned by the ApplicationSet controller: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: cluster-addons + namespace: argocd +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - git: + repoURL: https://github.com/argoproj/argo-cd.git + revision: HEAD + directories: + - path: applicationset/examples/git-generator-directory/excludes/cluster-addons/* + - path: applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook + exclude: true + template: + metadata: + name: '{{.path.basename}}' + spec: + project: "my-project" + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: '{{.path.path}}' + destination: + server: https://kubernetes.default.svc + namespace: '{{.path.basename}}' +``` +(*The full example can be found [here](https://github.com/argoproj/argo-cd/tree/master/applicationset/examples/git-generator-directory/excludes).*) + +This example excludes the `exclude-helm-guestbook` directory from the list of directories scanned for this `ApplicationSet` resource. + +!!! note "Exclude rules have higher priority than include rules" + + If a directory matches at least one `exclude` pattern, it will be excluded. Or, said another way, *exclude rules take precedence over include rules.* + + As a corollary, which directories are included/excluded is not affected by the order of `path`s in the `directories` field list (because, as above, exclude rules always take precedence over include rules). + +For example, with these directories: + +``` +. +└── d + ├── e + ├── f + └── g +``` +Say you want to include `/d/e`, but exclude `/d/f` and `/d/g`. This will *not* work: + +```yaml +- path: /d/e + exclude: false +- path: /d/* + exclude: true +``` +Why? 
Because the exclude `/d/*` exclude rule will take precedence over the `/d/e` include rule. When the `/d/e` path in the Git repository is processed by the ApplicationSet controller, the controller detects that at least one exclude rule is matched, and thus that directory should not be scanned. + +You would instead need to do: + +```yaml +- path: /d/* +- path: /d/f + exclude: true +- path: /d/g + exclude: true +``` + +Or, a shorter way (using [path.Match](https://golang.org/pkg/path/#Match) syntax) would be: + +```yaml +- path: /d/* +- path: /d/[fg] + exclude: true +``` + +### Root Of Git Repo + +The Git directory generator can be configured to deploy from the root of the git repository by providing `'*'` as the `path`. + +To exclude directories, you only need to put the name/[path.Match](https://golang.org/pkg/path/#Match) of the directory you do not want to deploy. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: cluster-addons + namespace: argocd +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - git: + repoURL: https://github.com/example/example-repo.git + revision: HEAD + directories: + - path: '*' + - path: donotdeploy + exclude: true + template: + metadata: + name: '{{.path.basename}}' + spec: + project: "my-project" + source: + repoURL: https://github.com/example/example-repo.git + targetRevision: HEAD + path: '{{.path.path}}' + destination: + server: https://kubernetes.default.svc + namespace: '{{.path.basename}}' +``` + +### Pass additional key-value pairs via `values` field + +You may pass additional, arbitrary string key-value pairs via the `values` field of the git directory generator. Values added via the `values` field are added as `values.(field)`. + +In this example, a `cluster` parameter value is passed. It is interpolated from the `branch` and `path` variable, to then be used to determine the destination namespace. 
+```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: cluster-addons + namespace: argocd +spec: + generators: + - git: + repoURL: https://github.com/example/example-repo.git + revision: HEAD + directories: + - path: '*' + values: + cluster: '{{branch}}-{{path}}' + template: + metadata: + name: '{{path.basename}}' + spec: + project: "my-project" + source: + repoURL: https://github.com/example/example-repo.git + targetRevision: HEAD + path: '{{path}}' + destination: + server: https://kubernetes.default.svc + namespace: '{{values.cluster}}' +``` + +!!! note + The `values.` prefix is always prepended to values provided via `generators.git.values` field. Ensure you include this prefix in the parameter name within the `template` when using it. + +In `values` we can also interpolate all fields set by the git directory generator as mentioned above. + +## Git Generator: Files + +The Git file generator is the second subtype of the Git generator. The Git file generator generates parameters using the contents of JSON/YAML files found within a specified repository. + +Suppose you have a Git repository with the following directory structure: +``` +├── apps +│ └── guestbook +│ ├── guestbook-ui-deployment.yaml +│ ├── guestbook-ui-svc.yaml +│ └── kustomization.yaml +├── cluster-config +│ └── engineering +│ ├── dev +│ │ └── config.json +│ └── prod +│ └── config.json +└── git-generator-files.yaml +``` + +The directories are: + +- `guestbook` contains the Kubernetes resources for a simple guestbook application +- `cluster-config` contains JSON/YAML files describing the individual engineering clusters: one for `dev` and one for `prod`. +- `git-generator-files.yaml` is the example `ApplicationSet` resource that deploys `guestbook` to the specified clusters. 
+ +The `config.json` files contain information describing the cluster (along with extra sample data): +```json +{ + "aws_account": "123456", + "asset_id": "11223344", + "cluster": { + "owner": "cluster-admin@company.com", + "name": "engineering-dev", + "address": "https://1.2.3.4" + } +} +``` + +Git commits containing changes to the `config.json` files are automatically discovered by the Git generator, and the contents of those files are parsed and converted into template parameters. Here are the parameters generated for the above JSON: +```text +aws_account: 123456 +asset_id: 11223344 +cluster.owner: cluster-admin@company.com +cluster.name: engineering-dev +cluster.address: https://1.2.3.4 +``` + + +And the generated parameters for all discovered `config.json` files will be substituted into ApplicationSet template: +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook + namespace: argocd +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - git: + repoURL: https://github.com/argoproj/argo-cd.git + revision: HEAD + files: + - path: "applicationset/examples/git-generator-files-discovery/cluster-config/**/config.json" + template: + metadata: + name: '{{.cluster.name}}-guestbook' + spec: + project: default + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: "applicationset/examples/git-generator-files-discovery/apps/guestbook" + destination: + server: '{{.cluster.address}}' + namespace: guestbook +``` +(*The full example can be found [here](https://github.com/argoproj/argo-cd/tree/master/applicationset/examples/git-generator-files-discovery).*) + +Any `config.json` files found under the `cluster-config` directory will be parameterized based on the `path` wildcard pattern specified. Within each file JSON fields are flattened into key/value pairs, with this ApplicationSet example using the `cluster.address` and `cluster.name` parameters in the template. 
+ +As with other generators, clusters *must* already be defined within Argo CD, in order to generate Applications for them. + +In addition to the flattened key/value pairs from the configuration file, the following generator parameters are provided: + +- `{{path}}`: The path to the directory containing matching configuration file within the Git repository. Example: `/clusters/clusterA`, if the config file was `/clusters/clusterA/config.json` +- `{{path[n]}}`: The path to the matching configuration file within the Git repository, split into array elements (`n` - array index). Example: `path[0]: clusters`, `path[1]: clusterA` +- `{{path.basename}}`: Basename of the path to the directory containing the configuration file (e.g. `clusterA`, with the above example.) +- `{{path.basenameNormalized}}`: This field is the same as `path.basename` with unsupported characters replaced with `-` (e.g. a `path` of `/directory/directory_2`, and `path.basename` of `directory_2` would produce `directory-2` here). +- `{{path.filename}}`: The matched filename. e.g., `config.json` in the above example. +- `{{path.filenameNormalized}}`: The matched filename with unsupported characters replaced with `-`. + +**Note**: The right-most *directory* name always becomes `{{path.basename}}`. For example, from `- path: /one/two/three/four/config.json`, `{{path.basename}}` will be `four`. +The filename can always be accessed using `{{path.filename}}`. + +**Note**: If the `pathParamPrefix` option is specified, all `path`-related parameter names above will be prefixed with the specified value and a dot separator. E.g., if `pathParamPrefix` is `myRepo`, then the generated parameter name would be `myRepo.path` instead of `path`. Using this option is necessary in a Matrix generator where both child generators are Git generators (to avoid conflicts when merging the child generators’ items). + +**Note**: The default behavior of the Git file generator is very greedy. 
Please see [Git File Generator Globbing](./Generators-Git-File-Globbing.md) for more information. + +### Pass additional key-value pairs via `values` field + +You may pass additional, arbitrary string key-value pairs via the `values` field of the git files generator. Values added via the `values` field are added as `values.(field)`. + +In this example, a `base_dir` parameter value is passed. It is interpolated from `path` segments, to then be used to determine the source path. +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook + namespace: argocd +spec: + generators: + - git: + repoURL: https://github.com/argoproj/argo-cd.git + revision: HEAD + files: + - path: "applicationset/examples/git-generator-files-discovery/cluster-config/**/config.json" + values: + base_dir: "{{path[0]}}/{{path[1]}}/{{path[2]}}" + template: + metadata: + name: '{{cluster.name}}-guestbook' + spec: + project: default + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: "{{values.base_dir}}/apps/guestbook" + destination: + server: '{{cluster.address}}' + namespace: guestbook +``` + +!!! note + The `values.` prefix is always prepended to values provided via `generators.git.values` field. Ensure you include this prefix in the parameter name within the `template` when using it. + +In `values` we can also interpolate all fields set by the git files generator as mentioned above. + +## Webhook Configuration + +When using a Git generator, ApplicationSet polls Git repositories every three minutes to detect changes. To eliminate +this delay from polling, the ApplicationSet webhook server can be configured to receive webhook events. ApplicationSet supports +Git webhook notifications from GitHub and GitLab. The following explains how to configure a Git webhook for GitHub, but the same process should be applicable to other providers. + +!!! 
note + The ApplicationSet controller webhook does not use the same webhook as the API server as defined [here](../webhook.md). ApplicationSet exposes a webhook server as a service of type ClusterIP. An ApplicationSet specific Ingress resource needs to be created to expose this service to the webhook source. + +### 1. Create the webhook in the Git provider + +In your Git provider, navigate to the settings page where webhooks can be configured. The payload +URL configured in the Git provider should use the `/api/webhook` endpoint of your ApplicationSet instance +(e.g. `https://applicationset.example.com/api/webhook`). If you wish to use a shared secret, input an +arbitrary value in the secret. This value will be used when configuring the webhook in the next step. + +![Add Webhook](../../assets/applicationset/webhook-config.png "Add Webhook") + +!!! note + When creating the webhook in GitHub, the "Content type" needs to be set to "application/json". The default value "application/x-www-form-urlencoded" is not supported by the library used to handle the hooks + +### 2. Configure ApplicationSet with the webhook secret (Optional) + +Configuring a webhook shared secret is optional, since ApplicationSet will still refresh applications +generated by Git generators, even with unauthenticated webhook events. This is safe to do since +the contents of webhook payloads are considered untrusted, and will only result in a refresh of the +application (a process which already occurs at three-minute intervals). If ApplicationSet is publicly +accessible, then configuring a webhook secret is recommended to prevent a DDoS attack. + +In the `argocd-secret` kubernetes secret, include the Git provider's webhook secret configured in step 1. 
+ +Edit the Argo CD kubernetes secret: + +```bash +kubectl edit secret argocd-secret -n argocd +``` + +TIP: for ease of entering secrets, kubernetes supports inputting secrets in the `stringData` field, +which saves you the trouble of base64 encoding the values and copying it to the `data` field. +Simply copy the shared webhook secret created in step 1, to the corresponding +GitHub/GitLab/BitBucket key under the `stringData` field: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: argocd-secret + namespace: argocd +type: Opaque +data: +... + +stringData: + # github webhook secret + webhook.github.secret: shhhh! it's a github secret + + # gitlab webhook secret + webhook.gitlab.secret: shhhh! it's a gitlab secret +``` + +After saving, please restart the ApplicationSet pod for the changes to take effect. diff --git a/docs/operator-manual/applicationset/Generators-List.md b/docs/operator-manual/applicationset/Generators-List.md new file mode 100644 index 0000000000000..a99229f858da4 --- /dev/null +++ b/docs/operator-manual/applicationset/Generators-List.md @@ -0,0 +1,115 @@ +# List Generator + +The List generator generates parameters based on an arbitrary list of key/value pairs (as long as the values are string values). 
In this example, we're targeting a local cluster named `engineering-dev`: +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook + namespace: argocd +spec: + generators: + - list: + elements: + - cluster: engineering-dev + url: https://kubernetes.default.svc +# - cluster: engineering-prod +# url: https://kubernetes.default.svc +# foo: bar + template: + metadata: + name: '{{cluster}}-guestbook' + spec: + project: "my-project" + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: applicationset/examples/list-generator/guestbook/{{cluster}} + destination: + server: '{{url}}' + namespace: guestbook +``` +(*The full example can be found [here](https://github.com/argoproj/argo-cd/tree/master/applicationset/examples/list-generator).*) + +In this example, the List generator passes the `url` and `cluster` fields as parameters into the template. If we wanted to add a second environment, we could uncomment the second element and the ApplicationSet controller would automatically target it with the defined application. + +With the ApplicationSet v0.1.0 release, one could *only* specify `url` and `cluster` element fields (plus arbitrary `values`). As of ApplicationSet v0.2.0, any key/value `element` pair is supported (which is also fully backwards compatible with the v0.1.0 form): +```yaml +spec: + generators: + - list: + elements: + # v0.1.0 form - requires cluster/url keys: + - cluster: engineering-dev + url: https://kubernetes.default.svc + values: + additional: value + # v0.2.0+ form - does not require cluster/URL keys + # (but they are still supported). + - staging: "true" + gitRepo: https://kubernetes.default.svc +# (...) +``` + +!!! note "Clusters must be predefined in Argo CD" + These clusters *must* already be defined within Argo CD, in order to generate applications for these values. 
The ApplicationSet controller does not create clusters within Argo CD (for instance, it does not have the credentials to do so). + +## Dynamically generated elements +The List generator can also dynamically generate its elements based on a yaml/json it gets from a previous generator like git by combining the two with a matrix generator. In this example we are using the matrix generator with a git followed by a list generator and pass the content of a file in git as input to the `elementsYaml` field of the list generator: +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: elementsYaml + namespace: argocd +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - matrix: + generators: + - git: + repoURL: https://github.com/argoproj/argo-cd.git + revision: HEAD + files: + - path: applicationset/examples/list-generator/list-elementsYaml-example.yaml + - list: + elementsYaml: "{{ .key.components | toJson }}" + template: + metadata: + name: '{{.name}}' + spec: + project: default + syncPolicy: + automated: + selfHeal: true + syncOptions: + - CreateNamespace=true + sources: + - chart: '{{.chart}}' + repoURL: '{{.repoUrl}}' + targetRevision: '{{.version}}' + helm: + releaseName: '{{.releaseName}}' + destination: + server: https://kubernetes.default.svc + namespace: '{{.namespace}}' +``` + +where `list-elementsYaml-example.yaml` content is: +```yaml +key: + components: + - name: component1 + chart: podinfo + version: "6.3.2" + releaseName: component1 + repoUrl: "https://stefanprodan.github.io/podinfo" + namespace: component1 + - name: component2 + chart: podinfo + version: "6.3.3" + releaseName: component2 + repoUrl: "ghcr.io/stefanprodan/charts" + namespace: component2 +``` diff --git a/docs/operator-manual/applicationset/Generators-Matrix.md b/docs/operator-manual/applicationset/Generators-Matrix.md new file mode 100644 index 0000000000000..6684cdc90f73b --- /dev/null +++ 
b/docs/operator-manual/applicationset/Generators-Matrix.md @@ -0,0 +1,427 @@ +# Matrix Generator + +The Matrix generator combines the parameters generated by two child generators, iterating through every combination of each generator's generated parameters. + +By combining both generators' parameters to produce every possible combination, this allows you to gain the intrinsic properties of both generators. For example, a small subset of the many possible use cases includes: + +- *SCM Provider Generator + Cluster Generator*: Scanning the repositories of a GitHub organization for application resources, and targeting those resources to all available clusters. +- *Git File Generator + List Generator*: Providing a list of applications to deploy via configuration files, with optional configuration options, and deploying them to a fixed list of clusters. +- *Git Directory Generator + Cluster Decision Resource Generator*: Locate application resources contained within folders of a Git repository, and deploy them to a list of clusters provided via an external custom resource. +- And so on... + +Any set of generators may be used, with the combined values of those generators inserted into the `template` parameters, as usual. + +**Note**: If both child generators are Git generators, one or both of them must use the `pathParamPrefix` option to avoid conflicts when merging the child generators’ items. 
+ +## Example: Git Directory generator + Cluster generator + +As an example, imagine that we have two clusters: + +- A `staging` cluster (at `https://1.2.3.4`) +- A `production` cluster (at `https://2.4.6.8`) + +And our application YAMLs are defined in a Git repository: + +- Argo Workflows controller (examples/git-generator-directory/cluster-addons/argo-workflows) +- Prometheus operator (/examples/git-generator-directory/cluster-addons/prometheus-operator) + +Our goal is to deploy both applications onto both clusters, and, more generally, in the future to automatically deploy new applications in the Git repository, and to new clusters defined within Argo CD, as well. + +For this we will use the Matrix generator, with the Git and the Cluster as child generators: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: cluster-git +spec: + generators: + # matrix 'parent' generator + - matrix: + generators: + # git generator, 'child' #1 + - git: + repoURL: https://github.com/argoproj/argo-cd.git + revision: HEAD + directories: + - path: applicationset/examples/matrix/cluster-addons/* + # cluster generator, 'child' #2 + - clusters: + selector: + matchLabels: + argocd.argoproj.io/secret-type: cluster + template: + metadata: + name: '{{path.basename}}-{{name}}' + spec: + project: '{{metadata.labels.environment}}' + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: '{{path}}' + destination: + server: '{{server}}' + namespace: '{{path.basename}}' +``` + +First, the Git directory generator will scan the Git repository, discovering directories under the specified path. 
It discovers the argo-workflows and prometheus-operator applications, and produces two corresponding sets of parameters: +```yaml +- path: /examples/git-generator-directory/cluster-addons/argo-workflows + path.basename: argo-workflows + +- path: /examples/git-generator-directory/cluster-addons/prometheus-operator + path.basename: prometheus-operator +``` + +Next, the Cluster generator scans the [set of clusters defined in Argo CD](Generators-Cluster.md), finds the staging and production cluster secrets, and produce two corresponding sets of parameters: +```yaml +- name: staging + server: https://1.2.3.4 + +- name: production + server: https://2.4.6.8 +``` + +Finally, the Matrix generator will combine both sets of outputs, and produce: +```yaml +- name: staging + server: https://1.2.3.4 + path: /examples/git-generator-directory/cluster-addons/argo-workflows + path.basename: argo-workflows + +- name: staging + server: https://1.2.3.4 + path: /examples/git-generator-directory/cluster-addons/prometheus-operator + path.basename: prometheus-operator + +- name: production + server: https://2.4.6.8 + path: /examples/git-generator-directory/cluster-addons/argo-workflows + path.basename: argo-workflows + +- name: production + server: https://2.4.6.8 + path: /examples/git-generator-directory/cluster-addons/prometheus-operator + path.basename: prometheus-operator +``` +(*The full example can be found [here](https://github.com/argoproj/argo-cd/tree/master/applicationset/examples/matrix).*) + +## Using Parameters from one child generator in another child generator + +The Matrix generator allows using the parameters generated by one child generator inside another child generator. +Below is an example that uses a git-files generator in conjunction with a cluster generator. 
+ +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: cluster-git +spec: + generators: + # matrix 'parent' generator + - matrix: + generators: + # git generator, 'child' #1 + - git: + repoURL: https://github.com/argoproj/applicationset.git + revision: HEAD + files: + - path: "examples/git-generator-files-discovery/cluster-config/**/config.json" + # cluster generator, 'child' #2 + - clusters: + selector: + matchLabels: + argocd.argoproj.io/secret-type: cluster + kubernetes.io/environment: '{{path.basename}}' + template: + metadata: + name: '{{name}}-guestbook' + spec: + project: default + source: + repoURL: https://github.com/argoproj/applicationset.git + targetRevision: HEAD + path: "examples/git-generator-files-discovery/apps/guestbook" + destination: + server: '{{server}}' + namespace: guestbook +``` +Here is the corresponding folder structure for the git repository used by the git-files generator: + +``` +├── apps +│ └── guestbook +│ ├── guestbook-ui-deployment.yaml +│ ├── guestbook-ui-svc.yaml +│ └── kustomization.yaml +├── cluster-config +│ └── engineering +│ ├── dev +│ │ └── config.json +│ └── prod +│ └── config.json +└── git-generator-files.yaml +``` +In the above example, the `{{path.basename}}` parameters produced by the git-files generator will resolve to `dev` and `prod`. +In the 2nd child generator, the label selector with label `kubernetes.io/environment: {{path.basename}}` will resolve with the values produced by the first child generator's parameters (`kubernetes.io/environment: prod` and `kubernetes.io/environment: dev`). + +So in the above example, clusters with the label `kubernetes.io/environment: prod` will have only prod-specific configuration (ie. `prod/config.json`) applied to them, whereas clusters +with the label `kubernetes.io/environment: dev` will have only dev-specific configuration (ie. 
`dev/config.json`) + +## Overriding parameters from one child generator in another child generator + +The Matrix Generator allows parameters with the same name to be defined in multiple child generators. This is useful, for example, to define default values for all stages in one generator and override them with stage-specific values in another generator. The example below generates a Helm-based application using a matrix generator with two git generators: the first provides stage-specific values (one directory per stage) and the second provides global values for all stages. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: parameter-override-example +spec: + generators: + - matrix: + generators: + - git: + repoURL: https://github.com/example/values.git + revision: HEAD + files: + - path: "**/stage.values.yaml" + - git: + repoURL: https://github.com/example/values.git + revision: HEAD + files: + - path: "global.values.yaml" + goTemplate: true + template: + metadata: + name: example + spec: + project: default + source: + repoURL: https://github.com/example/example-app.git + targetRevision: HEAD + path: . + helm: + values: | + {{ `{{ . 
| mustToPrettyJson }}` }} + destination: + server: in-cluster + namespace: default +``` + +Given the following structure/content of the example/values repository: + +``` +├── test +│ └── stage.values.yaml +│ stageName: test +│ cpuRequest: 100m +│ debugEnabled: true +├── staging +│ └── stage.values.yaml +│ stageName: staging +├── production +│ └── stage.values.yaml +│ stageName: production +│ memoryLimit: 512Mi +│ debugEnabled: false +└── global.values.yaml + cpuRequest: 200m + memoryLimit: 256Mi + debugEnabled: true +``` + +The matrix generator above would yield the following results: + +```yaml +- stageName: test + cpuRequest: 100m + memoryLimit: 256Mi + debugEnabled: true + +- stageName: staging + cpuRequest: 200m + memoryLimit: 256Mi + debugEnabled: true + +- stageName: production + cpuRequest: 200m + memoryLimit: 512Mi + debugEnabled: false +``` + +## Example: Two Git Generators Using `pathParamPrefix` + +The matrix generator will fail if its children produce results containing identical keys with differing values. +This poses a problem for matrix generators where both children are Git generators since they auto-populate `path`-related parameters in their outputs. +To avoid this problem, specify a `pathParamPrefix` on one or both of the child generators to avoid conflicting parameter keys in the output. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: two-gits-with-path-param-prefix +spec: + generators: + - matrix: + generators: + # git file generator referencing files containing details about each + # app to be deployed (e.g., `appName`). + - git: + repoURL: https://github.com/some-org/some-repo.git + revision: HEAD + files: + - path: "apps/*.json" + pathParamPrefix: app + # git file generator referencing files containing details about + # locations to which each app should deploy (e.g., `region` and + # `clusterName`). 
+ - git: + repoURL: https://github.com/some-org/some-repo.git + revision: HEAD + files: + - path: "targets/{{appName}}/*.json" + pathParamPrefix: target + template: {} # ... +``` + +Then, given the following file structure/content: + +``` +├── apps +│ ├── app-one.json +│ │ { "appName": "app-one" } +│ └── app-two.json +│ { "appName": "app-two" } +└── targets + ├── app-one + │ ├── east-cluster-one.json + │ │ { "region": "east", "clusterName": "cluster-one" } + │ └── east-cluster-two.json + │ { "region": "east", "clusterName": "cluster-two" } + └── app-two + ├── east-cluster-one.json + │ { "region": "east", "clusterName": "cluster-one" } + └── west-cluster-three.json + { "region": "west", "clusterName": "cluster-three" } +``` + +…the matrix generator above would yield the following results: + +```yaml +- appName: app-one + app.path: /apps + app.path.filename: app-one.json + # plus additional path-related parameters from the first child generator, all + # prefixed with "app". + region: east + clusterName: cluster-one + target.path: /targets/app-one + target.path.filename: east-cluster-one.json + # plus additional path-related parameters from the second child generator, all + # prefixed with "target". + +- appName: app-one + app.path: /apps + app.path.filename: app-one.json + region: east + clusterName: cluster-two + target.path: /targets/app-one + target.path.filename: east-cluster-two.json + +- appName: app-two + app.path: /apps + app.path.filename: app-two.json + region: east + clusterName: cluster-one + target.path: /targets/app-two + target.path.filename: east-cluster-one.json + +- appName: app-two + app.path: /apps + app.path.filename: app-two.json + region: west + clusterName: cluster-three + target.path: /targets/app-two + target.path.filename: west-cluster-three.json +``` + +## Restrictions + +1. The Matrix generator currently only supports combining the outputs of only two child generators (eg does not support generating combinations for 3 or more). + +1. 
You should specify only a single generator per array entry, eg this is not valid: + + - matrix: + generators: + - list: # (...) + git: # (...) + + - While this *will* be accepted by Kubernetes API validation, the controller will report an error on generation. Each generator should be specified in a separate array element, as in the examples above. + +1. The Matrix generator does not currently support [`template` overrides](Template.md#generator-templates) specified on child generators, eg this `template` will not be processed: + + - matrix: + generators: + - list: + elements: + - # (...) + template: { } # Not processed + +1. Combination-type generators (matrix or merge) can only be nested once. For example, this will not work: + + - matrix: + generators: + - matrix: + generators: + - matrix: # This third level is invalid. + generators: + - list: + elements: + - # (...) + +1. When using parameters from one child generator inside another child generator, the child generator that *consumes* the parameters **must come after** the child generator that *produces* the parameters. +For example, the below example would be invalid (cluster-generator must come after the git-files generator): + + - matrix: + generators: + # cluster generator, 'child' #1 + - clusters: + selector: + matchLabels: + argocd.argoproj.io/secret-type: cluster + kubernetes.io/environment: '{{path.basename}}' # {{path.basename}} is produced by git-files generator + # git generator, 'child' #2 + - git: + repoURL: https://github.com/argoproj/applicationset.git + revision: HEAD + files: + - path: "examples/git-generator-files-discovery/cluster-config/**/config.json" + +1. You cannot have both child generators consuming parameters from each other. In the example below, the cluster generator is consuming the `{{path.basename}}` parameter produced by the git-files generator, whereas the git-files generator is consuming the `{{name}}` parameter produced by the cluster generator. 
This will result in a circular dependency, which is invalid. + + - matrix: + generators: + # cluster generator, 'child' #1 + - clusters: + selector: + matchLabels: + argocd.argoproj.io/secret-type: cluster + kubernetes.io/environment: '{{path.basename}}' # {{path.basename}} is produced by git-files generator + # git generator, 'child' #2 + - git: + repoURL: https://github.com/argoproj/applicationset.git + revision: HEAD + files: + - path: "examples/git-generator-files-discovery/cluster-config/engineering/{{name}}**/config.json" # {{name}} is produced by cluster generator + +1. When using a Matrix generator nested inside another Matrix or Merge generator, [Post Selectors](Generators-Post-Selector.md) for this nested generator's generators will only be applied when enabled via `spec.applyNestedSelectors`. You may also need to enable this even if your Post Selectors are not within the nested matrix or Merge generator, but are instead a sibling of a nested Matrix or Merge generator. + + - matrix: + generators: + - matrix: + generators: + - list + elements: + - # (...) + selector: { } # Only applied when applyNestedSelectors is true diff --git a/docs/operator-manual/applicationset/Generators-Merge.md b/docs/operator-manual/applicationset/Generators-Merge.md new file mode 100644 index 0000000000000..50da174cf349a --- /dev/null +++ b/docs/operator-manual/applicationset/Generators-Merge.md @@ -0,0 +1,228 @@ +# Merge Generator + +The Merge generator combines parameters produced by the base (first) generator with matching parameter sets produced by subsequent generators. A _matching_ parameter set has the same values for the configured _merge keys_. _Non-matching_ parameter sets are discarded. Override precedence is bottom-to-top: the values from a matching parameter set produced by generator 3 will take precedence over the values from the corresponding parameter set produced by generator 2. 
+ +Using a Merge generator is appropriate when a subset of parameter sets require overriding. + +## Example: Base Cluster generator + override Cluster generator + List generator + +As an example, imagine that we have two clusters: + +- A `staging` cluster (at `https://1.2.3.4`) +- A `production` cluster (at `https://2.4.6.8`) + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: cluster-git +spec: + generators: + # merge 'parent' generator + - merge: + mergeKeys: + - server + generators: + - clusters: + values: + kafka: 'true' + redis: 'false' + # For clusters with a specific label, enable Kafka. + - clusters: + selector: + matchLabels: + use-kafka: 'false' + values: + kafka: 'false' + # For a specific cluster, enable Redis. + - list: + elements: + - server: https://2.4.6.8 + values.redis: 'true' + template: + metadata: + name: '{{name}}' + spec: + project: '{{metadata.labels.environment}}' + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: app + helm: + parameters: + - name: kafka + value: '{{values.kafka}}' + - name: redis + value: '{{values.redis}}' + destination: + server: '{{server}}' + namespace: default +``` + +The base Cluster generator scans the [set of clusters defined in Argo CD](Generators-Cluster.md), finds the staging and production cluster secrets, and produces two corresponding sets of parameters: +```yaml +- name: staging + server: https://1.2.3.4 + values.kafka: 'true' + values.redis: 'false' + +- name: production + server: https://2.4.6.8 + values.kafka: 'true' + values.redis: 'false' +``` + +The override Cluster generator scans the [set of clusters defined in Argo CD](Generators-Cluster.md), finds the staging cluster secret (which has the required label), and produces the following parameters: +```yaml +- name: staging + server: https://1.2.3.4 + values.kafka: 'false' +``` + +When merged with the base generator's parameters, the `values.kafka` value for the staging cluster 
is set to `'false'`. +```yaml +- name: staging + server: https://1.2.3.4 + values.kafka: 'false' + values.redis: 'false' + +- name: production + server: https://2.4.6.8 + values.kafka: 'true' + values.redis: 'false' +``` + +Finally, the List generator produces a single set of parameters: +```yaml +- server: https://2.4.6.8 + values.redis: 'true' +``` + +When merged with the updated base parameters, the `values.redis` value for the production cluster is set to `'true'`. This is the merge generator's final output: +```yaml +- name: staging + server: https://1.2.3.4 + values.kafka: 'false' + values.redis: 'false' + +- name: production + server: https://2.4.6.8 + values.kafka: 'true' + values.redis: 'true' +``` + +## Example: Use value interpolation in merge + +Some generators support additional values and interpolating from generated variables to selected values. This can be used to teach the merge generator which generated variables to use to combine different generators. + +The following example combines discovered clusters and a git repository by cluster labels and the branch name: +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: cluster-git +spec: + generators: + # merge 'parent' generator: + # Use the selector set by both child generators to combine them. + - merge: + mergeKeys: + # Note that this would not work with goTemplate enabled, + # nested merge keys are not supported there. + - values.selector + generators: + # Assuming all configured clusters have a label for their location: + # Set the selector to this location. + - clusters: + values: + selector: '{{ metadata.labels.location }}' + # The git repo may have different directories which correspond to the + # cluster locations, using these as a selector. 
+ - git: + repoURL: https://github.com/argoproj/argocd-example-apps/ + revision: HEAD + directories: + - path: '*' + values: + selector: '{{ path }}' + template: + metadata: + name: '{{name}}' + spec: + project: '{{metadata.labels.environment}}' + source: + repoURL: https://github.com/argoproj/argocd-example-apps/ + # The cluster values field for each generator will be substituted here: + targetRevision: HEAD + path: '{{path}}' + destination: + server: '{{server}}' + namespace: default +``` + +Assuming a cluster named `germany01` with the label `metadata.labels.location=Germany` and a git repository containing a directory called `Germany`, this could combine to values as follows: + +```yaml + # From the cluster generator +- name: germany01 + server: https://1.2.3.4 + # From the git generator + path: Germany + # Combining selector with the merge generator + values.selector: 'Germany' + # More values from cluster & git generator + # […] +``` + + +## Restrictions + +1. You should specify only a single generator per array entry. This is not valid: + + - merge: + generators: + - list: # (...) + git: # (...) + + - While this *will* be accepted by Kubernetes API validation, the controller will report an error on generation. Each generator should be specified in a separate array element, as in the examples above. + +1. The Merge generator does not support [`template` overrides](Template.md#generator-templates) specified on child generators. This `template` will not be processed: + + - merge: + generators: + - list: + elements: + - # (...) + template: { } # Not processed + +1. Combination-type generators (Matrix or Merge) can only be nested once. For example, this will not work: + + - merge: + generators: + - merge: + generators: + - merge: # This third level is invalid. + generators: + - list: + elements: + - # (...) + +1. 
Merging on nested values while using `goTemplate: true` is currently not supported, this will not work + + spec: + goTemplate: true + generators: + - merge: + mergeKeys: + - values.merge + +1. When using a Merge generator nested inside another Matrix or Merge generator, [Post Selectors](Generators-Post-Selector.md) for this nested generator's generators will only be applied when enabled via `spec.applyNestedSelectors`. + + - merge: + generators: + - merge: + generators: + - list + elements: + - # (...) + selector: { } # Only applied when applyNestedSelectors is true diff --git a/docs/operator-manual/applicationset/Generators-Plugin.md b/docs/operator-manual/applicationset/Generators-Plugin.md new file mode 100644 index 0000000000000..3747c38865df5 --- /dev/null +++ b/docs/operator-manual/applicationset/Generators-Plugin.md @@ -0,0 +1,341 @@ +# Plugin Generator + +Plugins allow you to provide your own generator. + +- You can write in any language +- Simple: a plugin just responds to RPC HTTP requests. +- You can use it in a sidecar, or standalone deployment. +- You can get your plugin running today, no need to wait 3-5 months for review, approval, merge and an Argo software + release. +- You can combine it with Matrix or Merge. + +To start working on your own plugin, you can generate a new repository based on the example +[applicationset-hello-plugin](https://github.com/argoproj-labs/applicationset-hello-plugin). + +## Simple example + +Using a generator plugin without combining it with Matrix or Merge. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: myplugin +spec: + generators: + - plugin: + # Specify the configMap where the plugin configuration is located. + configMapRef: + name: my-plugin + # You can pass arbitrary parameters to the plugin. `input.parameters` is a map, but values may be any type. + # These parameters will also be available on the generator's output under the `generator.input.parameters` key. 
+ input: + parameters: + key1: "value1" + key2: "value2" + list: ["list", "of", "values"] + boolean: true + map: + key1: "value1" + key2: "value2" + key3: "value3" + + # You can also attach arbitrary values to the generator's output under the `values` key. These values will be + # available in templates under the `values` key. + values: + value1: something + + # When using a Plugin generator, the ApplicationSet controller polls every `requeueAfterSeconds` interval (defaulting to every 30 minutes) to detect changes. + requeueAfterSeconds: 30 + template: + metadata: + name: myplugin + annotations: + example.from.input.parameters: "{{ generator.input.parameters.map.key1 }}" + example.from.values: "{{ values.value1 }}" + # The plugin determines what else it produces. + example.from.plugin.output: "{{ something.from.the.plugin }}" +``` + +- `configMapRef.name`: A `ConfigMap` name containing the plugin configuration to use for RPC call. +- `input.parameters`: Input parameters included in the RPC call to the plugin. (Optional) + +!!! note + The concept of the plugin should not undermine the spirit of GitOps by externalizing data outside of Git. The goal is to be complementary in specific contexts. + For example, when using one of the PullRequest generators, it's impossible to retrieve parameters related to the CI (only the commit hash is available), which limits the possibilities. By using a plugin, it's possible to retrieve the necessary parameters from a separate data source and use them to extend the functionality of the generator. + +### Add a ConfigMap to configure the access of the plugin + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: my-plugin + namespace: argocd +data: + token: "$plugin.myplugin.token" # Alternatively $:plugin.myplugin.token + baseUrl: "http://myplugin.plugin-ns.svc.cluster.local." 
+``` + +- `token`: Pre-shared token used to authenticate HTTP request (points to the right key you created in the `argocd-secret` Secret) +- `baseUrl`: BaseUrl of the k8s service exposing your plugin in the cluster. + +### Store credentials + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: argocd-secret + namespace: argocd + labels: + app.kubernetes.io/name: argocd-secret + app.kubernetes.io/part-of: argocd +type: Opaque +data: + # ... + # The secret value must be base64 encoded **once**. + # this value corresponds to: `printf "strong-password" | base64`. + plugin.myplugin.token: "c3Ryb25nLXBhc3N3b3Jk" + # ... +``` + +#### Alternative + +If you want to store sensitive data in **another** Kubernetes `Secret`, instead of `argocd-secret`, ArgoCD knows how to check the keys under `data` in your Kubernetes `Secret` for a corresponding key whenever a value in a configmap starts with `$`, then your Kubernetes `Secret` name and `:` (colon) followed by the key name. + +Syntax: `$:` + +> NOTE: Secret must have label `app.kubernetes.io/part-of: argocd` + +##### Example + +`another-secret`: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: another-secret + namespace: argocd + labels: + app.kubernetes.io/part-of: argocd +type: Opaque +data: + # ... + # Store client secret like below. + # The secret value must be base64 encoded **once**. + # This value corresponds to: `printf "strong-password" | base64`. + plugin.myplugin.token: "c3Ryb25nLXBhc3N3b3Jk" +``` + +### HTTP server + +#### A Simple Python Plugin + +You can deploy it either as a sidecar or as a standalone deployment (the latter is recommended). 
+ +In the example, the token is stored in a file at this location : `/var/run/argo/token` + +``` +strong-password +``` + +```python +import json +from http.server import BaseHTTPRequestHandler, HTTPServer + +with open("/var/run/argo/token") as f: + plugin_token = f.read().strip() + + +class Plugin(BaseHTTPRequestHandler): + + def args(self): + return json.loads(self.rfile.read(int(self.headers.get('Content-Length')))) + + def reply(self, reply): + self.send_response(200) + self.end_headers() + self.wfile.write(json.dumps(reply).encode("UTF-8")) + + def forbidden(self): + self.send_response(403) + self.end_headers() + + def unsupported(self): + self.send_response(404) + self.end_headers() + + def do_POST(self): + if self.headers.get("Authorization") != "Bearer " + plugin_token: + self.forbidden() + + if self.path == '/api/v1/getparams.execute': + args = self.args() + self.reply({ + "output": { + "parameters": [ + { + "key1": "val1", + "key2": "val2" + }, + { + "key1": "val2", + "key2": "val2" + } + ] + } + }) + else: + self.unsupported() + + +if __name__ == '__main__': + httpd = HTTPServer(('', 4355), Plugin) + httpd.serve_forever() +``` + +Execute getparams with curl : + +``` +curl http://localhost:4355/api/v1/getparams.execute -H "Authorization: Bearer strong-password" -d \ +'{ + "applicationSetName": "fake-appset", + "input": { + "parameters": { + "param1": "value1" + } + } +}' +``` + +Some things to note here: + +- You only need to implement the calls `/api/v1/getparams.execute` +- You should check that the `Authorization` header contains the same bearer value as `/var/run/argo/token`. Return 403 if not +- The input parameters are included in the request body and can be accessed using the `input.parameters` variable. +- The output must always be a list of object maps nested under the `output.parameters` key in a map. +- `generator.input.parameters` and `values` are reserved keys. 
If present in the plugin output, these keys will be overwritten by the + contents of the `input.parameters` and `values` keys in the ApplicationSet's plugin generator spec. + +## With matrix and pull request example + +In the following example, the plugin implementation is returning a set of image digests for the given branch. The returned list contains only one item corresponding to the latest built image for the branch. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: fb-matrix +spec: + goTemplate: true + generators: + - matrix: + generators: + - pullRequest: + github: ... + requeueAfterSeconds: 30 + - plugin: + configMapRef: + name: cm-plugin + input: + parameters: + branch: "{{.branch}}" # provided by generator pull request + values: + branchLink: "https://git.example.com/org/repo/tree/{{.branch}}" + template: + metadata: + name: "fb-matrix-{{.branch}}" + spec: + source: + repoURL: "https://github.com/myorg/myrepo.git" + targetRevision: "HEAD" + path: charts/my-chart + helm: + releaseName: fb-matrix-{{.branch}} + valueFiles: + - values.yaml + values: | + front: + image: myregistry:{{.branch}}@{{ .digestFront }} # digestFront is generated by the plugin + back: + image: myregistry:{{.branch}}@{{ .digestBack }} # digestBack is generated by the plugin + project: default + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true + destination: + server: https://kubernetes.default.svc + namespace: "{{.branch}}" + info: + - name: Link to the Application's branch + value: "{{values.branchLink}}" +``` + +To illustrate : + +- The generator pullRequest would return, for example, 2 branches: `feature-branch-1` and `feature-branch-2`. 
+ +- The generator plugin would then perform 2 requests as follows : + +```shell +curl http://localhost:4355/api/v1/getparams.execute -H "Authorization: Bearer strong-password" -d \ +'{ + "applicationSetName": "fb-matrix", + "input": { + "parameters": { + "branch": "feature-branch-1" + } + } +}' +``` + +Then, + +```shell +curl http://localhost:4355/api/v1/getparams.execute -H "Authorization: Bearer strong-password" -d \ +'{ + "applicationSetName": "fb-matrix", + "input": { + "parameters": { + "branch": "feature-branch-2" + } + } +}' +``` + +For each call, it would return a unique result such as : + +```json +{ + "output": { + "parameters": [ + { + "digestFront": "sha256:a3f18c17771cc1051b790b453a0217b585723b37f14b413ad7c5b12d4534d411", + "digestBack": "sha256:4411417d614d5b1b479933b7420079671facd434fd42db196dc1f4cc55ba13ce" + } + ] + } +} +``` + +Then, + +```json +{ + "output": { + "parameters": [ + { + "digestFront": "sha256:7c20b927946805124f67a0cb8848a8fb1344d16b4d0425d63aaa3f2427c20497", + "digestBack": "sha256:e55e7e40700bbab9e542aba56c593cb87d680cefdfba3dd2ab9cfcb27ec384c2" + } + ] + } +} +``` + +In this example, by combining the two, you ensure that one or more pull requests are available and that the generated tag has been properly generated. This wouldn't have been possible with just a commit hash because a hash alone does not certify the success of the build. diff --git a/docs/operator-manual/applicationset/Generators-Post-Selector.md b/docs/operator-manual/applicationset/Generators-Post-Selector.md new file mode 100644 index 0000000000000..d8570859084ff --- /dev/null +++ b/docs/operator-manual/applicationset/Generators-Post-Selector.md @@ -0,0 +1,59 @@ +# Post Selector all generators + +The Selector allows to post-filter based on generated values using the kubernetes common labelSelector format. 
In the example, the list generator generates a set of two applications, which are then filtered by the `env` key to select only the element with the value `staging`:
+
+## Example: List generator + Post Selector
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: ApplicationSet
+metadata:
+  name: guestbook
+spec:
+  generators:
+    - list:
+        elements:
+          - cluster: engineering-dev
+            url: https://kubernetes.default.svc
+            env: staging
+          - cluster: engineering-prod
+            url: https://kubernetes.default.svc
+            env: prod
+      selector:
+        matchLabels:
+          env: staging
+  template:
+    metadata:
+      name: '{{cluster}}-guestbook'
+    spec:
+      project: default
+      source:
+        repoURL: https://github.com/argoproj-labs/applicationset.git
+        targetRevision: HEAD
+        path: examples/list-generator/guestbook/{{cluster}}
+      destination:
+        server: '{{url}}'
+        namespace: guestbook
+```
+
+The List generator + Post Selector generates a single set of parameters:
+
+```yaml
+- cluster: engineering-dev
+  url: https://kubernetes.default.svc
+  env: staging
+```
+
+It is also possible to use `matchExpressions` for more powerful selectors.
+
+```yaml
+spec:
+  generators:
+    - clusters: {}
+      selector:
+        matchExpressions:
+          - key: server
+            operator: In
+            values:
+              - https://kubernetes.default.svc
+              - https://some-other-cluster
+```
diff --git a/docs/operator-manual/applicationset/Generators-Pull-Request.md b/docs/operator-manual/applicationset/Generators-Pull-Request.md
new file mode 100644
index 0000000000000..298e5135392ce
--- /dev/null
+++ b/docs/operator-manual/applicationset/Generators-Pull-Request.md
@@ -0,0 +1,433 @@
+# Pull Request Generator
+
+The Pull Request generator uses the API of an SCMaaS provider (GitHub, GitLab, Gitea, Bitbucket, or Azure DevOps) to automatically discover open pull requests within a repository. This fits well with the style of building a test environment when you create a pull request.
+ +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: myapps +spec: + generators: + - pullRequest: + # When using a Pull Request generator, the ApplicationSet controller polls every `requeueAfterSeconds` interval (defaulting to every 30 minutes) to detect changes. + requeueAfterSeconds: 1800 + # See below for provider specific options. + github: + # ... +``` + +!!! note + Know the security implications of PR generators in ApplicationSets. + [Only admins may create ApplicationSets](./Security.md#only-admins-may-createupdatedelete-applicationsets) to avoid + leaking Secrets, and [only admins may create PRs](./Security.md#templated-project-field) if the `project` field of + an ApplicationSet with a PR generator is templated, to avoid granting management of out-of-bounds resources. + +## GitHub + +Specify the repository from which to fetch the GitHub Pull requests. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: myapps +spec: + generators: + - pullRequest: + github: + # The GitHub organization or user. + owner: myorg + # The Github repository + repo: myrepository + # For GitHub Enterprise (optional) + api: https://git.example.com/ + # Reference to a Secret containing an access token. (optional) + tokenRef: + secretName: github-token + key: token + # (optional) use a GitHub App to access the API instead of a PAT. + appSecretName: github-app-repo-creds + # Labels is used to filter the PRs that you want to target. (optional) + labels: + - preview + requeueAfterSeconds: 1800 + template: + # ... +``` + +* `owner`: Required name of the GitHub organization or user. +* `repo`: Required name of the GitHub repository. +* `api`: If using GitHub Enterprise, the URL to access it. (Optional) +* `tokenRef`: A `Secret` name and key containing the GitHub access token to use for requests. If not specified, will make anonymous requests which have a lower rate limit and can only see public repositories. 
(Optional) +* `labels`: Filter the PRs to those containing **all** of the labels listed. (Optional) +* `appSecretName`: A `Secret` name containing a GitHub App secret in [repo-creds format][repo-creds]. + +[repo-creds]: ../declarative-setup.md#repository-credentials + +## GitLab + +Specify the project from which to fetch the GitLab merge requests. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: myapps +spec: + generators: + - pullRequest: + gitlab: + # The GitLab project. + project: myproject + # For self-hosted GitLab (optional) + api: https://git.example.com/ + # Reference to a Secret containing an access token. (optional) + tokenRef: + secretName: gitlab-token + key: token + # Labels is used to filter the MRs that you want to target. (optional) + labels: + - preview + # MR state is used to filter MRs only with a certain state. (optional) + pullRequestState: opened + # If true, skips validating the SCM provider's TLS certificate - useful for self-signed certificates. + insecure: false + requeueAfterSeconds: 1800 + template: + # ... +``` + +* `project`: Required name of the GitLab project. +* `api`: If using self-hosted GitLab, the URL to access it. (Optional) +* `tokenRef`: A `Secret` name and key containing the GitLab access token to use for requests. If not specified, will make anonymous requests which have a lower rate limit and can only see public repositories. (Optional) +* `labels`: Labels is used to filter the MRs that you want to target. (Optional) +* `pullRequestState`: PullRequestState is an additional MRs filter to get only those with a certain state. Default: "" (all states) +* `insecure`: By default (false) - Skip checking the validity of the SCM's certificate - useful for self-signed TLS certificates. 
+ +As a preferable alternative to setting `insecure` to true, you can configure self-signed TLS certificates for Gitlab by [mounting self-signed certificate to the applicationset controller](./Generators-SCM-Provider.md#self-signed-tls-certificates). + +## Gitea + +Specify the repository from which to fetch the Gitea Pull requests. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: myapps +spec: + generators: + - pullRequest: + gitea: + # The Gitea organization or user. + owner: myorg + # The Gitea repository + repo: myrepository + # The Gitea url to use + api: https://gitea.mydomain.com/ + # Reference to a Secret containing an access token. (optional) + tokenRef: + secretName: gitea-token + key: token + # many gitea deployments use TLS, but many are self-hosted and self-signed certificates + insecure: true + requeueAfterSeconds: 1800 + template: + # ... +``` + +* `owner`: Required name of the Gitea organization or user. +* `repo`: Required name of the Gitea repository. +* `api`: The url of the Gitea instance. +* `tokenRef`: A `Secret` name and key containing the Gitea access token to use for requests. If not specified, will make anonymous requests which have a lower rate limit and can only see public repositories. (Optional) +* `insecure`: `Allow for self-signed certificates, primarily for testing.` + +## Bitbucket Server + +Fetch pull requests from a repo hosted on a Bitbucket Server (not the same as Bitbucket Cloud). + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: myapps +spec: + generators: + - pullRequest: + bitbucketServer: + project: myproject + repo: myrepository + # URL of the Bitbucket Server. Required. + api: https://mycompany.bitbucket.org + # Credentials for Basic authentication. Required for private repositories. + basicAuth: + # The username to authenticate with + username: myuser + # Reference to a Secret containing the password or personal access token. 
+          passwordRef:
+            secretName: mypassword
+            key: password
+        # Labels are not supported by Bitbucket Server, so filtering by label is not possible.
+        # Filter PRs using the source branch name. (optional)
+        filters:
+        - branchMatch: ".*-argocd"
+  template:
+  # ...
+```
+
+* `project`: Required name of the Bitbucket project
+* `repo`: Required name of the Bitbucket repository.
+* `api`: Required URL to access the Bitbucket REST API. For the example above, an API request would be made to `https://mycompany.bitbucket.org/rest/api/1.0/projects/myproject/repos/myrepository/pull-requests`
+* `branchMatch`: Optional regexp filter which should match the source branch name. This is an alternative to labels which are not supported by Bitbucket server.
+
+If you want to access a private repository, you must also provide the credentials for Basic auth (this is the only auth supported currently):
+* `username`: The username to authenticate with. It only needs read access to the relevant repo.
+* `passwordRef`: A `Secret` name and key containing the password or personal access token to use for requests.
+
+## Bitbucket Cloud
+
+Fetch pull requests from a repo hosted on a Bitbucket Cloud.
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: ApplicationSet
+metadata:
+  name: myapps
+spec:
+  generators:
+  - pullRequest:
+      bitbucket:
+        # Workspace name where the repository is stored under. Required.
+        owner: myproject
+        # Repository slug. Required.
+        repo: myrepository
+        # URL of the Bitbucket API. (optional) Will default to 'https://api.bitbucket.org/2.0'.
+        api: https://api.bitbucket.org/2.0
+        # Credentials for Basic authentication (App Password). Either basicAuth or bearerToken
+        # authentication is required to access private repositories
+        basicAuth:
+          # The username to authenticate with
+          username: myuser
+          # Reference to a Secret containing the password or personal access token.
+ passwordRef: + secretName: mypassword + key: password + # Credentials for Bearer Token (App Token) authentication. Either basicAuth or bearerToken + # authentication is required to access private repositories + bearerToken: + tokenRef: + secretName: repotoken + key: token + # Labels are not supported by Bitbucket Cloud, so filtering by label is not possible. + # Filter PRs using the source branch name. (optional) + filters: + - branchMatch: ".*-argocd" + template: + # ... +``` + +- `owner`: Required name of the Bitbucket workspace +- `repo`: Required name of the Bitbucket repository. +- `api`: Optional URL to access the Bitbucket REST API. For the example above, an API request would be made to `https://api.bitbucket.org/2.0/repositories/{workspace}/{repo_slug}/pullrequests`. If not set, defaults to `https://api.bitbucket.org/2.0` +- `branchMatch`: Optional regexp filter which should match the source branch name. This is an alternative to labels which are not supported by Bitbucket server. + +If you want to access a private repository, Argo CD will need credentials to access repository in Bitbucket Cloud. You can use Bitbucket App Password (generated per user, with access to whole workspace), or Bitbucket App Token (generated per repository, with access limited to repository scope only). If both App Password and App Token are defined, App Token will be used. + +To use Bitbucket App Password, use `basicAuth` section. +- `username`: The username to authenticate with. It only needs read access to the relevant repo. +- `passwordRef`: A `Secret` name and key containing the password or personal access token to use for requests. + +In case of Bitbucket App Token, go with `bearerToken` section. +- `tokenRef`: A `Secret` name and key containing the app token to use for requests. + +## Azure DevOps + +Specify the organization, project and repository from which you want to fetch pull requests. 
+ +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: myapps +spec: + generators: + - pullRequest: + azuredevops: + # Azure DevOps org to scan. Required. + organization: myorg + # Azure DevOps project name to scan. Required. + project: myproject + # Azure DevOps repo name to scan. Required. + repo: myrepository + # The Azure DevOps API URL to talk to. If blank, use https://dev.azure.com/. + api: https://dev.azure.com/ + # Reference to a Secret containing an access token. (optional) + tokenRef: + secretName: azure-devops-token + key: token + # Labels is used to filter the PRs that you want to target. (optional) + labels: + - preview + requeueAfterSeconds: 1800 + template: + # ... +``` + +* `organization`: Required name of the Azure DevOps organization. +* `project`: Required name of the Azure DevOps project. +* `repo`: Required name of the Azure DevOps repository. +* `api`: If using self-hosted Azure DevOps Repos, the URL to access it. (Optional) +* `tokenRef`: A `Secret` name and key containing the Azure DevOps access token to use for requests. If not specified, will make anonymous requests which have a lower rate limit and can only see public repositories. (Optional) +* `labels`: Filter the PRs to those containing **all** of the labels listed. (Optional) + +## Filters + +Filters allow selecting which pull requests to generate for. Each filter can declare one or more conditions, all of which must pass. If multiple filters are present, any can match for a repository to be included. If no filters are specified, all pull requests will be processed. +Currently, only a subset of filters is available when comparing with [SCM provider](Generators-SCM-Provider.md) filters. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: myapps +spec: + generators: + - pullRequest: + # ... + # Include any pull request ending with "argocd". (optional) + filters: + - branchMatch: ".*-argocd" + template: + # ... 
+``` + +* `branchMatch`: A regexp matched against source branch names. +* `targetBranchMatch`: A regexp matched against target branch names. + +[GitHub](#github) and [GitLab](#gitlab) also support a `labels` filter. + +## Template + +As with all generators, several keys are available for replacement in the generated application. + +The following is a comprehensive Helm Application example; + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: myapps +spec: + generators: + - pullRequest: + # ... + template: + metadata: + name: 'myapp-{{branch}}-{{number}}' + spec: + source: + repoURL: 'https://github.com/myorg/myrepo.git' + targetRevision: '{{head_sha}}' + path: kubernetes/ + helm: + parameters: + - name: "image.tag" + value: "pull-{{head_sha}}" + project: "my-project" + destination: + server: https://kubernetes.default.svc + namespace: default +``` + +And, here is a robust Kustomize example; + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: myapps +spec: + generators: + - pullRequest: + # ... + template: + metadata: + name: 'myapp-{{branch}}-{{number}}' + spec: + source: + repoURL: 'https://github.com/myorg/myrepo.git' + targetRevision: '{{head_sha}}' + path: kubernetes/ + kustomize: + nameSuffix: {{branch}} + commonLabels: + app.kubernetes.io/instance: {{branch}}-{{number}} + images: + - ghcr.io/myorg/myrepo:{{head_sha}} + project: "my-project" + destination: + server: https://kubernetes.default.svc + namespace: default +``` + +* `number`: The ID number of the pull request. +* `branch`: The name of the branch of the pull request head. +* `branch_slug`: The branch name will be cleaned to be conform to the DNS label standard as defined in [RFC 1123](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names), and truncated to 50 characters to give room to append/suffix-ing it with 13 more characters. +* `target_branch`: The name of the target branch of the pull request. 
+* `target_branch_slug`: The target branch name will be cleaned to be conform to the DNS label standard as defined in [RFC 1123](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names), and truncated to 50 characters to give room to append/suffix-ing it with 13 more characters. +* `head_sha`: This is the SHA of the head of the pull request. +* `head_short_sha`: This is the short SHA of the head of the pull request (8 characters long or the length of the head SHA if it's shorter). +* `head_short_sha_7`: This is the short SHA of the head of the pull request (7 characters long or the length of the head SHA if it's shorter). +* `labels`: The array of pull request labels. (Supported only for Go Template ApplicationSet manifests.) + +## Webhook Configuration + +When using a Pull Request generator, the ApplicationSet controller polls every `requeueAfterSeconds` interval (defaulting to every 30 minutes) to detect changes. To eliminate this delay from polling, the ApplicationSet webhook server can be configured to receive webhook events, which will trigger Application generation by the Pull Request generator. + +The configuration is almost the same as the one described [in the Git generator](Generators-Git.md), but there is one difference: if you want to use the Pull Request Generator as well, additionally configure the following settings. + +!!! note + The ApplicationSet controller webhook does not use the same webhook as the API server as defined [here](../webhook.md). ApplicationSet exposes a webhook server as a service of type ClusterIP. An ApplicationSet specific Ingress resource needs to be created to expose this service to the webhook source. + +### Github webhook configuration + +In section 1, _"Create the webhook in the Git provider"_, add an event so that a webhook request will be sent when a pull request is created, closed, or label changed. 
+ +Add Webhook URL with uri `/api/webhook` and select content-type as json +![Add Webhook URL](../../assets/applicationset/webhook-config-pullrequest-generator.png "Add Webhook URL") + +Select `Let me select individual events` and enable the checkbox for `Pull requests`. + +![Add Webhook](../../assets/applicationset/webhook-config-pull-request.png "Add Webhook Pull Request") + +The Pull Request Generator will requeue when the next action occurs. + +- `opened` +- `closed` +- `reopened` +- `labeled` +- `unlabeled` +- `synchronized` + +For more information about each event, please refer to the [official documentation](https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads). + +### Gitlab webhook configuration + +Enable checkbox for "Merge request events" in triggers list. + +![Add Gitlab Webhook](../../assets/applicationset/webhook-config-merge-request-gitlab.png "Add Gitlab Merge request Webhook") + +The Pull Request Generator will requeue when the next action occurs. + +- `open` +- `close` +- `reopen` +- `update` +- `merge` + +For more information about each event, please refer to the [official documentation](https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#merge-request-events). + +## Lifecycle + +An Application will be generated when a Pull Request is discovered when the configured criteria is met - i.e. for GitHub when a Pull Request matches the specified `labels` and/or `pullRequestState`. Application will be removed when a Pull Request no longer meets the specified criteria. 
diff --git a/docs/operator-manual/applicationset/Generators-SCM-Provider.md b/docs/operator-manual/applicationset/Generators-SCM-Provider.md new file mode 100644 index 0000000000000..5e3c4a6ab8aa4 --- /dev/null +++ b/docs/operator-manual/applicationset/Generators-SCM-Provider.md @@ -0,0 +1,467 @@ +# SCM Provider Generator + +The SCM Provider generator uses the API of an SCMaaS provider (eg GitHub) to automatically discover repositories within an organization. This fits well with GitOps layout patterns that split microservices across many repositories. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: myapps +spec: + generators: + - scmProvider: + # Which protocol to clone using. + cloneProtocol: ssh + # See below for provider specific options. + github: + # ... +``` + +* `cloneProtocol`: Which protocol to use for the SCM URL. Default is provider-specific but ssh if possible. Not all providers necessarily support all protocols, see provider documentation below for available options. + +!!! note + Know the security implications of using SCM generators. [Only admins may create ApplicationSets](./Security.md#only-admins-may-createupdatedelete-applicationsets) + to avoid leaking Secrets, and [only admins may create repos/branches](./Security.md#templated-project-field) if the + `project` field of an ApplicationSet with an SCM generator is templated, to avoid granting management of + out-of-bounds resources. + +## GitHub + +The GitHub mode uses the GitHub API to scan an organization in either github.com or GitHub Enterprise. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: myapps +spec: + generators: + - scmProvider: + github: + # The GitHub organization to scan. + organization: myorg + # For GitHub Enterprise: + api: https://git.example.com/ + # If true, scan every branch of every repository. If false, scan only the default branch. Defaults to false. 
+        allBranches: true
+        # Reference to a Secret containing an access token. (optional)
+        tokenRef:
+          secretName: github-token
+          key: token
+        # (optional) use a GitHub App to access the API instead of a PAT.
+        appSecretName: gh-app-repo-creds
+  template:
+  # ...
+```
+
+* `organization`: Required name of the GitHub organization to scan. If you have multiple organizations, use multiple generators.
+* `api`: If using GitHub Enterprise, the URL to access it.
+* `allBranches`: By default (false) the template will only be evaluated for the default branch of each repo. If this is true, every branch of every repository will be passed to the filters. If using this flag, you likely want to use a `branchMatch` filter.
+* `tokenRef`: A `Secret` name and key containing the GitHub access token to use for requests. If not specified, will make anonymous requests which have a lower rate limit and can only see public repositories.
+* `appSecretName`: A `Secret` name containing a GitHub App secret in [repo-creds format][repo-creds].
+
+[repo-creds]: ../declarative-setup.md#repository-credentials
+
+For label filtering, the repository topics are used.
+
+Available clone protocols are `ssh` and `https`.
+
+## Gitlab
+
+The GitLab mode uses the GitLab API to scan an organization in either gitlab.com or self-hosted GitLab.
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: ApplicationSet
+metadata:
+  name: myapps
+spec:
+  generators:
+  - scmProvider:
+      gitlab:
+        # The base GitLab group to scan. You can either use the group id or the full namespaced path.
+        group: "8675309"
+        # For self-hosted GitLab:
+        api: https://gitlab.example.com/
+        # If true, scan every branch of every repository. If false, scan only the default branch. Defaults to false.
+        allBranches: true
+        # If true, recurses through subgroups. If false, it searches only in the base group. Defaults to false.
+ includeSubgroups: true + # If true and includeSubgroups is also true, include Shared Projects, which is gitlab API default. + # If false only search Projects under the same path. Defaults to true. + includeSharedProjects: false + # filter projects by topic. A single topic is supported by Gitlab API. Defaults to "" (all topics). + topic: "my-topic" + # Reference to a Secret containing an access token. (optional) + tokenRef: + secretName: gitlab-token + key: token + # If true, skips validating the SCM provider's TLS certificate - useful for self-signed certificates. + insecure: false + template: + # ... +``` + +* `group`: Required name of the base GitLab group to scan. If you have multiple base groups, use multiple generators. +* `api`: If using self-hosted GitLab, the URL to access it. +* `allBranches`: By default (false) the template will only be evaluated for the default branch of each repo. If this is true, every branch of every repository will be passed to the filters. If using this flag, you likely want to use a `branchMatch` filter. +* `includeSubgroups`: By default (false) the controller will only search for repos directly in the base group. If this is true, it will recurse through all the subgroups searching for repos to scan. +* `includeSharedProjects`: If true and includeSubgroups is also true, include Shared Projects, which is gitlab API default. If false only search Projects under the same path. In general most would want the behaviour when set to false. Defaults to true. +* `topic`: filter projects by topic. A single topic is supported by Gitlab API. Defaults to "" (all topics). +* `tokenRef`: A `Secret` name and key containing the GitLab access token to use for requests. If not specified, will make anonymous requests which have a lower rate limit and can only see public repositories. +* `insecure`: By default (false) - Skip checking the validity of the SCM's certificate - useful for self-signed TLS certificates. 
+ +For label filtering, the repository tags are used. + +Available clone protocols are `ssh` and `https`. + +### Self-signed TLS Certificates + +As a preferable alternative to setting `insecure` to true, you can configure self-signed TLS certificates for Gitlab. + +In order for a self-signed TLS certificate be used by an ApplicationSet's SCM / PR Gitlab Generator, the certificate needs to be mounted on the applicationset-controller. The path of the mounted certificate must be explicitly set using the environment variable `ARGOCD_APPLICATIONSET_CONTROLLER_SCM_ROOT_CA_PATH` or alternatively using parameter `--scm-root-ca-path`. The applicationset controller will read the mounted certificate to create the Gitlab client for SCM/PR Providers + +This can be achieved conveniently by setting `applicationsetcontroller.scm.root.ca.path` in the argocd-cmd-params-cm ConfigMap. Be sure to restart the ApplicationSet controller after setting this value. + +## Gitea + +The Gitea mode uses the Gitea API to scan organizations in your instance + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: myapps +spec: + generators: + - scmProvider: + gitea: + # The Gitea owner to scan. + owner: myorg + # The Gitea instance url + api: https://gitea.mydomain.com/ + # If true, scan every branch of every repository. If false, scan only the default branch. Defaults to false. + allBranches: true + # Reference to a Secret containing an access token. (optional) + tokenRef: + secretName: gitea-token + key: token + template: + # ... +``` + +* `owner`: Required name of the Gitea organization to scan. If you have multiple organizations, use multiple generators. +* `api`: The URL of the Gitea instance you are using. +* `allBranches`: By default (false) the template will only be evaluated for the default branch of each repo. If this is true, every branch of every repository will be passed to the filters. If using this flag, you likely want to use a `branchMatch` filter. 
+* `tokenRef`: A `Secret` name and key containing the Gitea access token to use for requests. If not specified, will make anonymous requests which have a lower rate limit and can only see public repositories.
+* `insecure`: Allow for self-signed TLS certificates.
+
+This SCM provider does not yet support label filtering
+
+Available clone protocols are `ssh` and `https`.
+
+## Bitbucket Server
+
+Use the Bitbucket Server API (1.0) to scan repos in a project. Note that Bitbucket Server is not the same as Bitbucket Cloud (API 2.0)
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: ApplicationSet
+metadata:
+  name: myapps
+spec:
+  generators:
+  - scmProvider:
+      bitbucketServer:
+        project: myproject
+        # URL of the Bitbucket Server. Required.
+        api: https://mycompany.bitbucket.org
+        # If true, scan every branch of every repository. If false, scan only the default branch. Defaults to false.
+        allBranches: true
+        # Credentials for Basic authentication. Required for private repositories.
+        basicAuth:
+          # The username to authenticate with
+          username: myuser
+          # Reference to a Secret containing the password or personal access token.
+          passwordRef:
+            secretName: mypassword
+            key: password
+        # Support for filtering by labels is TODO. Bitbucket server labels are not supported for PRs, but they are for repos
+  template:
+  # ...
+```
+
+* `project`: Required name of the Bitbucket project
+* `api`: Required URL to access the Bitbucket REST api.
+* `allBranches`: By default (false) the template will only be evaluated for the default branch of each repo. If this is true, every branch of every repository will be passed to the filters. If using this flag, you likely want to use a `branchMatch` filter.
+
+If you want to access a private repository, you must also provide the credentials for Basic auth (this is the only auth supported currently):
+* `username`: The username to authenticate with. It only needs read access to the relevant repo.
+* `passwordRef`: A `Secret` name and key containing the password or personal access token to use for requests. + +Available clone protocols are `ssh` and `https`. + +## Azure DevOps + +Uses the Azure DevOps API to look up eligible repositories based on a team project within an Azure DevOps organization. +The default Azure DevOps URL is `https://dev.azure.com`, but this can be overridden with the field `azureDevOps.api`. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: myapps +spec: + generators: + - scmProvider: + azureDevOps: + # The Azure DevOps organization. + organization: myorg + # URL to Azure DevOps. Optional. Defaults to https://dev.azure.com. + api: https://dev.azure.com + # If true, scan every branch of eligible repositories. If false, check only the default branch of the eligible repositories. Defaults to false. + allBranches: true + # The team project within the specified Azure DevOps organization. + teamProject: myProject + # Reference to a Secret containing the Azure DevOps Personal Access Token (PAT) used for accessing Azure DevOps. + accessTokenRef: + secretName: azure-devops-scm + key: accesstoken + template: + # ... +``` + +* `organization`: Required. Name of the Azure DevOps organization. +* `teamProject`: Required. The name of the team project within the specified `organization`. +* `accessTokenRef`: Required. A `Secret` name and key containing the Azure DevOps Personal Access Token (PAT) to use for requests. +* `api`: Optional. URL to Azure DevOps. If not set, `https://dev.azure.com` is used. +* `allBranches`: Optional, default `false`. If `true`, scans every branch of eligible repositories. If `false`, check only the default branch of the eligible repositories. + +## Bitbucket Cloud + +The Bitbucket mode uses the Bitbucket API V2 to scan a workspace in bitbucket.org. 
+ +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: myapps +spec: + generators: + - scmProvider: + bitbucket: + # The workspace id (slug). + owner: "example-owner" + # The user to use for basic authentication with an app password. + user: "example-user" + # If true, scan every branch of every repository. If false, scan only the main branch. Defaults to false. + allBranches: true + # Reference to a Secret containing an app password. + appPasswordRef: + secretName: appPassword + key: password + template: + # ... +``` + +* `owner`: The workspace ID (slug) to use when looking up repositories. +* `user`: The user to use for authentication to the Bitbucket API V2 at bitbucket.org. +* `allBranches`: By default (false) the template will only be evaluated for the main branch of each repo. If this is true, every branch of every repository will be passed to the filters. If using this flag, you likely want to use a `branchMatch` filter. +* `appPasswordRef`: A `Secret` name and key containing the bitbucket app password to use for requests. + +This SCM provider does not yet support label filtering + +Available clone protocols are `ssh` and `https`. + +## AWS CodeCommit (Alpha) + +Uses AWS ResourceGroupsTagging and AWS CodeCommit APIs to scan repos across AWS accounts and regions. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: myapps +spec: + generators: + - scmProvider: + awsCodeCommit: + # AWS region to scan repos. + # default to the environmental region from ApplicationSet controller. + region: us-east-1 + # AWS role to assume to scan repos. + # default to the environmental role from ApplicationSet controller. + role: arn:aws:iam::111111111111:role/argocd-application-set-discovery + # If true, scan every branch of every repository. If false, scan only the main branch. Defaults to false. + allBranches: true + # AWS resource tags to filter repos with. 
+ # see https://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/API_GetResources.html#resourcegrouptagging-GetResources-request-TagFilters for details + # default to no tagFilters, to include all repos in the region. + tagFilters: + - key: organization + value: platform-engineering + - key: argo-ready + template: + # ... +``` + +* `region`: (Optional) AWS region to scan repos. By default, use ApplicationSet controller's current region. +* `role`: (Optional) AWS role to assume to scan repos. By default, use ApplicationSet controller's current role. +* `allBranches`: (Optional) If `true`, scans every branch of eligible repositories. If `false`, check only the default branch of the eligible repositories. Default `false`. +* `tagFilters`: (Optional) A list of tagFilters to filter AWS CodeCommit repos with. See [AWS ResourceGroupsTagging API](https://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/API_GetResources.html#resourcegrouptagging-GetResources-request-TagFilters) for details. By default, no filter is included. + +This SCM provider does not support the following features + +* label filtering +* `sha`, `short_sha` and `short_sha_7` template parameters + +Available clone protocols are `ssh`, `https` and `https-fips`. + +### AWS IAM Permission Considerations + +In order to call AWS APIs to discover AWS CodeCommit repos, ApplicationSet controller must be configured with valid environmental AWS config, like current AWS region and AWS credentials. +AWS config can be provided via all standard options, like Instance Metadata Service (IMDS), config file, environment variables, or IAM roles for service accounts (IRSA). + +Depending on whether `role` is provided in `awsCodeCommit` property, AWS IAM permission requirement is different. + +#### Discover AWS CodeCommit Repositories in the same AWS Account as ApplicationSet Controller + +Without specifying `role`, ApplicationSet controller will use its own AWS identity to scan AWS CodeCommit repos. 
+This is suitable when you have a simple setup that all AWS CodeCommit repos reside in the same AWS account as your Argo CD. + +As the ApplicationSet controller AWS identity is used directly for repo discovery, it must be granted below AWS permissions. + +* `tag:GetResources` +* `codecommit:ListRepositories` +* `codecommit:GetRepository` +* `codecommit:GetFolder` +* `codecommit:ListBranches` + +#### Discover AWS CodeCommit Repositories across AWS Accounts and Regions + +By specifying `role`, ApplicationSet controller will first assume the `role`, and use it for repo discovery. +This enables more complicated use cases to discover repos from different AWS accounts and regions. + +The ApplicationSet controller AWS identity should be granted permission to assume target AWS roles. + +* `sts:AssumeRole` + +All AWS roles must have repo discovery related permissions. + +* `tag:GetResources` +* `codecommit:ListRepositories` +* `codecommit:GetRepository` +* `codecommit:GetFolder` +* `codecommit:ListBranches` + +## Filters + +Filters allow selecting which repositories to generate for. Each filter can declare one or more conditions, all of which must pass. If multiple filters are present, any can match for a repository to be included. If no filters are specified, all repositories will be processed. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: myapps +spec: + generators: + - scmProvider: + filters: + # Include any repository starting with "myapp" AND including a Kustomize config AND labeled with "deploy-ok" ... + - repositoryMatch: ^myapp + pathsExist: [kubernetes/kustomization.yaml] + labelMatch: deploy-ok + # ... OR include any repository starting with "otherapp" AND a Helm folder and doesn't have file disabledrepo.txt. + - repositoryMatch: ^otherapp + pathsExist: [helm] + pathsDoNotExist: [disabledrepo.txt] + template: + # ... +``` + +* `repositoryMatch`: A regexp matched against the repository name. 
+* `pathsExist`: An array of paths within the repository that must exist. Can be a file or directory. +* `pathsDoNotExist`: An array of paths within the repository that must not exist. Can be a file or directory. +* `labelMatch`: A regexp matched against repository labels. If any label matches, the repository is included. +* `branchMatch`: A regexp matched against branch names. + +## Template + +As with all generators, several parameters are generated for use within the `ApplicationSet` resource template. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: myapps +spec: + generators: + - scmProvider: + # ... + template: + metadata: + name: '{{ repository }}' + spec: + source: + repoURL: '{{ url }}' + targetRevision: '{{ branch }}' + path: kubernetes/ + project: default + destination: + server: https://kubernetes.default.svc + namespace: default +``` + +* `organization`: The name of the organization the repository is in. +* `repository`: The name of the repository. +* `url`: The clone URL for the repository. +* `branch`: The default branch of the repository. +* `sha`: The Git commit SHA for the branch. +* `short_sha`: The abbreviated Git commit SHA for the branch (8 chars or the length of the `sha` if it's shorter). +* `short_sha_7`: The abbreviated Git commit SHA for the branch (7 chars or the length of the `sha` if it's shorter). +* `labels`: A comma-separated list of repository labels in case of Gitea, repository topics in case of Gitlab and Github. Not supported by Bitbucket Cloud, Bitbucket Server, or Azure DevOps. +* `branchNormalized`: The value of `branch` normalized to contain only lowercase alphanumeric characters, '-' or '.'. + +## Pass additional key-value pairs via `values` field + +You may pass additional, arbitrary string key-value pairs via the `values` field of any SCM generator. Values added via the `values` field are added as `values.(field)`. + +In this example, a `name` parameter value is passed. 
It is interpolated from `organization` and `repository` to generate a different template name.
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: ApplicationSet
+metadata:
+  name: myapps
+spec:
+  generators:
+  - scmProvider:
+      bitbucketServer:
+        project: myproject
+        api: https://mycompany.bitbucket.org
+        allBranches: true
+        basicAuth:
+          username: myuser
+          passwordRef:
+            secretName: mypassword
+            key: password
+      values:
+        name: "{{organization}}-{{repository}}"
+
+  template:
+    metadata:
+      name: '{{ values.name }}'
+    spec:
+      source:
+        repoURL: '{{ url }}'
+        targetRevision: '{{ branch }}'
+        path: kubernetes/
+      project: default
+      destination:
+        server: https://kubernetes.default.svc
+        namespace: default
+```
+
+!!! note
+    The `values.` prefix is always prepended to values provided via `generators.scmProvider.values` field. Ensure you include this prefix in the parameter name within the `template` when using it.
+
+In `values` we can also interpolate all fields set by the SCM generator as mentioned above.
diff --git a/docs/operator-manual/applicationset/Generators.md b/docs/operator-manual/applicationset/Generators.md
new file mode 100644
index 0000000000000..78600c771fddd
--- /dev/null
+++ b/docs/operator-manual/applicationset/Generators.md
@@ -0,0 +1,21 @@
+# Generators
+
+Generators are responsible for generating *parameters*, which are then rendered into the `template:` fields of the ApplicationSet resource. See the [Introduction](index.md) for an example of how generators work with templates, to create Argo CD Applications.
+
+Generators are primarily based on the data source that they use to generate the template parameters. For example: the List generator provides a set of parameters from a *literal list*, the Cluster generator uses the *Argo CD cluster list* as a source, the Git generator uses files/directories from a *Git repository*, and so on.
+
+As of this writing there are nine generators:
+
+- [List generator](Generators-List.md): The List generator allows you to target Argo CD Applications to clusters based on a fixed list of any chosen key/value element pairs.
+- [Cluster generator](Generators-Cluster.md): The Cluster generator allows you to target Argo CD Applications to clusters, based on the list of clusters defined within (and managed by) Argo CD (which includes automatically responding to cluster addition/removal events from Argo CD).
+- [Git generator](Generators-Git.md): The Git generator allows you to create Applications based on files within a Git repository, or based on the directory structure of a Git repository.
+- [Matrix generator](Generators-Matrix.md): The Matrix generator may be used to combine the generated parameters of two separate generators.
+- [Merge generator](Generators-Merge.md): The Merge generator may be used to merge the generated parameters of two or more generators. Additional generators can override the values of the base generator.
+- [SCM Provider generator](Generators-SCM-Provider.md): The SCM Provider generator uses the API of an SCM provider (eg GitHub) to automatically discover repositories within an organization.
+- [Pull Request generator](Generators-Pull-Request.md): The Pull Request generator uses the API of an SCMaaS provider (eg GitHub) to automatically discover open pull requests within a repository.
+- [Cluster Decision Resource generator](Generators-Cluster-Decision-Resource.md): The Cluster Decision Resource generator is used to interface with Kubernetes custom resources that use custom resource-specific logic to decide which set of Argo CD clusters to deploy to.
+- [Plugin generator](Generators-Plugin.md): The Plugin generator makes RPC HTTP requests to provide parameters.
+
+All generators can be filtered by using the [Post Selector](Generators-Post-Selector.md).
+
+If you are new to generators, begin with the **List** and **Cluster** generators. 
For more advanced use cases, see the documentation for the remaining generators above. diff --git a/docs/operator-manual/applicationset/Getting-Started.md b/docs/operator-manual/applicationset/Getting-Started.md new file mode 100644 index 0000000000000..0f89f33124464 --- /dev/null +++ b/docs/operator-manual/applicationset/Getting-Started.md @@ -0,0 +1,107 @@ +# Getting Started + +This guide assumes you are familiar with Argo CD and its basic concepts. See the [Argo CD documentation](../../core_concepts.md) for more information. + +## Requirements + +* Installed [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) command-line tool +* Have a [kubeconfig](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) file (default location is `~/.kube/config`). + +## Installation + +There are a few options for installing the ApplicationSet controller. + + +### A) Install ApplicationSet as part of Argo CD + +Starting with Argo CD v2.3, the ApplicationSet controller is bundled with Argo CD. It is no longer necessary to install the ApplicationSet controller separately from Argo CD. + +Follow the [Argo CD Getting Started](../../getting_started.md) instructions for more information. + + + +### B) Install ApplicationSet into an existing Argo CD install (pre-Argo CD v2.3) + +**Note**: These instructions only apply to versions of Argo CD before v2.3.0. + +The ApplicationSet controller *must* be installed into the same namespace as the Argo CD it is targeting. + +Presuming that Argo CD is installed into the `argocd` namespace, run the following command: + +```bash +kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/applicationset/v0.4.0/manifests/install.yaml +``` + +Once installed, the ApplicationSet controller requires no additional setup. 
+ +The `manifests/install.yaml` file contains the Kubernetes manifests required to install the ApplicationSet controller: + +- CustomResourceDefinition for `ApplicationSet` resource +- Deployment for `argocd-applicationset-controller` +- ServiceAccount for use by ApplicationSet controller, to access Argo CD resources +- Role granting RBAC access to needed resources, for ServiceAccount +- RoleBinding to bind the ServiceAccount and Role + + + + + + +## Enabling high availability mode + +To enable high availability, you have to set the command ``` --enable-leader-election=true ``` in argocd-applicationset-controller container and increase the replicas. + +do following changes in manifests/install.yaml + +```bash + spec: + containers: + - command: + - entrypoint.sh + - argocd-applicationset-controller + - --enable-leader-election=true +``` + +### Optional: Additional Post-Upgrade Safeguards + +See the [Controlling Resource Modification](Controlling-Resource-Modification.md) page for information on additional parameters you may wish to add to the ApplicationSet Resource in `install.yaml`, to provide extra security against any initial, unexpected post-upgrade behaviour. + +For instance, to temporarily prevent the upgraded ApplicationSet controller from making any changes, you could: + +- Enable dry-run +- Use a create-only policy +- Enable `preserveResourcesOnDeletion` on your ApplicationSets +- Temporarily disable automated sync in your ApplicationSets' template + +These parameters would allow you to observe/control the behaviour of the new version of the ApplicationSet controller in your environment, to ensure you are happy with the result (see the ApplicationSet log file for details). Just don't forget to remove any temporary changes when you are done testing! 
+ +However, as mentioned above, these steps are not strictly necessary: upgrading the ApplicationSet controller should be a minimally invasive process, and these are only suggested as an optional precaution for extra safety. + +## Next Steps + +Once your ApplicationSet controller is up and running, proceed to [Use Cases](Use-Cases.md) to learn more about the supported scenarios, or proceed directly to [Generators](Generators.md) to see example `ApplicationSet` resources. diff --git a/docs/operator-manual/applicationset/GoTemplate.md b/docs/operator-manual/applicationset/GoTemplate.md new file mode 100644 index 0000000000000..08c1f3feb035a --- /dev/null +++ b/docs/operator-manual/applicationset/GoTemplate.md @@ -0,0 +1,266 @@ +# Go Template + +## Introduction + +ApplicationSet is able to use [Go Text Template](https://pkg.go.dev/text/template). To activate this feature, add +`goTemplate: true` to your ApplicationSet manifest. + +The [Sprig function library](https://masterminds.github.io/sprig/) (except for `env`, `expandenv` and `getHostByName`) +is available in addition to the default Go Text Template functions. + +An additional `normalize` function makes any string parameter usable as a valid DNS name by replacing invalid characters +with hyphens and truncating at 253 characters. This is useful when making parameters safe for things like Application +names. + +If you want to customize [options defined by text/template](https://pkg.go.dev/text/template#Template.Option), you can +add the `goTemplateOptions: ["opt1", "opt2", ...]` key to your ApplicationSet next to `goTemplate: true`. Note that at +the time of writing, there is only one useful option defined, which is `missingkey=error`. + +The recommended setting of `goTemplateOptions` is `["missingkey=error"]`, which ensures that if undefined values are +looked up by your template then an error is reported instead of being ignored silently. This is not currently the default +behavior, for backwards compatibility. 
+ +## Motivation + +Go Template is the Go Standard for string templating. It is also more powerful than fasttemplate (the default templating +engine) as it allows doing complex templating logic. + +## Limitations + +Go templates are applied on a per-field basis, and only on string fields. Here are some examples of what is **not** +possible with Go text templates: + +- Templating a boolean field. + + ::yaml + apiVersion: argoproj.io/v1alpha1 + kind: ApplicationSet + spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + template: + spec: + source: + helm: + useCredentials: "{{.useCredentials}}" # This field may NOT be templated, because it is a boolean field. + +- Templating an object field: + + ::yaml + apiVersion: argoproj.io/v1alpha1 + kind: ApplicationSet + spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + template: + spec: + syncPolicy: "{{.syncPolicy}}" # This field may NOT be templated, because it is an object field. + +- Using control keywords across fields: + + ::yaml + apiVersion: argoproj.io/v1alpha1 + kind: ApplicationSet + spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + template: + spec: + source: + helm: + parameters: + # Each of these fields is evaluated as an independent template, so the first one will fail with an error. + - name: "{{range .parameters}}" + - name: "{{.name}}" + value: "{{.value}}" + - name: throw-away + value: "{{end}}" + + +## Migration guide + +### Globals + +All your templates must replace parameters with GoTemplate Syntax: + +Example: `{{ some.value }}` becomes `{{ .some.value }}` + +### Cluster Generators + +By activating Go Templating, `{{ .metadata }}` becomes an object. + +- `{{ metadata.labels.my-label }}` becomes `{{ index .metadata.labels "my-label" }}` +- `{{ metadata.annotations.my/annotation }}` becomes `{{ index .metadata.annotations "my/annotation" }}` + +### Git Generators + +By activating Go Templating, `{{ .path }}` becomes an object. 
Therefore, some changes must be made to the Git +generators' templating: + +- `{{ path }}` becomes `{{ .path.path }}` +- `{{ path.basename }}` becomes `{{ .path.basename }}` +- `{{ path.basenameNormalized }}` becomes `{{ .path.basenameNormalized }}` +- `{{ path.filename }}` becomes `{{ .path.filename }}` +- `{{ path.filenameNormalized }}` becomes `{{ .path.filenameNormalized }}` +- `{{ path[n] }}` becomes `{{ index .path.segments n }}` +- `{{ values }}` if being used in the file generator becomes `{{ .values }}` + +Here is an example: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: cluster-addons +spec: + generators: + - git: + repoURL: https://github.com/argoproj/argo-cd.git + revision: HEAD + directories: + - path: applicationset/examples/git-generator-directory/cluster-addons/* + template: + metadata: + name: '{{path.basename}}' + spec: + project: default + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: '{{path}}' + destination: + server: https://kubernetes.default.svc + namespace: '{{path.basename}}' +``` + +becomes + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: cluster-addons +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - git: + repoURL: https://github.com/argoproj/argo-cd.git + revision: HEAD + directories: + - path: applicationset/examples/git-generator-directory/cluster-addons/* + template: + metadata: + name: '{{.path.basename}}' + spec: + project: default + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: '{{.path.path}}' + destination: + server: https://kubernetes.default.svc + namespace: '{{.path.basename}}' +``` + +It is also possible to use Sprig functions to construct the path variables manually: + +| with `goTemplate: false` | with `goTemplate: true` | with `goTemplate: true` + Sprig | +| ------------ | ----------- | --------------------- | +| 
`{{path}}` | `{{.path.path}}` | `{{.path.path}}` | +| `{{path.basename}}` | `{{.path.basename}}` | `{{base .path.path}}` | +| `{{path.filename}}` | `{{.path.filename}}` | `{{.path.filename}}` | +| `{{path.basenameNormalized}}` | `{{.path.basenameNormalized}}` | `{{normalize .path.path}}` | +| `{{path.filenameNormalized}}` | `{{.path.filenameNormalized}}` | `{{normalize .path.filename}}` | +| `{{path[N]}}` | `-` | `{{index .path.segments N}}` | + +## Available template functions + +ApplicationSet controller provides: + +- all [sprig](http://masterminds.github.io/sprig/) Go templates function except `env`, `expandenv` and `getHostByName` +- `normalize`: sanitizes the input so that it complies with the following rules: + 1. contains no more than 253 characters + 2. contains only lowercase alphanumeric characters, '-' or '.' + 3. starts and ends with an alphanumeric character +- `toYaml` / `fromYaml` / `fromYamlArray` helm like functions + + +## Examples + +### Basic Go template usage + +This example shows basic string parameter substitution. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - list: + elements: + - cluster: engineering-dev + url: https://1.2.3.4 + - cluster: engineering-prod + url: https://2.4.6.8 + - cluster: finance-preprod + url: https://9.8.7.6 + template: + metadata: + name: '{{.cluster}}-guestbook' + spec: + project: my-project + source: + repoURL: https://github.com/infra-team/cluster-deployments.git + targetRevision: HEAD + path: guestbook/{{.cluster}} + destination: + server: '{{.url}}' + namespace: guestbook +``` + +### Fallbacks for unset parameters + +For some generators, a parameter of a certain name might not always be populated (for example, with the values generator +or the git files generator). In these cases, you can use a Go template to provide a fallback value. 
+ +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + goTemplate: true + goTemplateOptions: ["missingkey=error"] + generators: + - list: + elements: + - cluster: engineering-dev + url: https://kubernetes.default.svc + - cluster: engineering-prod + url: https://kubernetes.default.svc + nameSuffix: -my-name-suffix + template: + metadata: + name: '{{.cluster}}{{dig "nameSuffix" "" .}}' + spec: + project: default + source: + repoURL: https://github.com/argoproj/argo-cd.git + targetRevision: HEAD + path: applicationset/examples/list-generator/guestbook/{{.cluster}} + destination: + server: '{{.url}}' + namespace: guestbook +``` + +This ApplicationSet will produce an Application called `engineering-dev` and another called +`engineering-prod-my-name-suffix`. + +Note that unset parameters are an error, so you need to avoid looking up a property that doesn't exist. Instead, use +template functions like `dig` to do the lookup with a default. If you prefer to have unset parameters default to zero, +you can remove `goTemplateOptions: ["missingkey=error"]` or set it to `goTemplateOptions: ["missingkey=invalid"]` diff --git a/docs/operator-manual/applicationset/Progressive-Syncs.md b/docs/operator-manual/applicationset/Progressive-Syncs.md new file mode 100644 index 0000000000000..8864151e9dcb7 --- /dev/null +++ b/docs/operator-manual/applicationset/Progressive-Syncs.md @@ -0,0 +1,112 @@ +# Progressive Syncs + +!!! warning "Alpha Feature" + This is an experimental, alpha-quality feature that allows you to control the order in which the ApplicationSet controller will create or update the Applications owned by an ApplicationSet resource. It may be removed in future releases or modified in backwards-incompatible ways. + +## Use Cases +The Progressive Syncs feature set is intended to be light and flexible. The feature only interacts with the health of managed Applications. 
It is not intended to support direct integrations with other Rollout controllers (such as the native ReplicaSet controller or Argo Rollouts). + +* Progressive Syncs watch for the managed Application resources to become "Healthy" before proceeding to the next stage. +* Deployments, DaemonSets, StatefulSets, and [Argo Rollouts](https://argoproj.github.io/argo-rollouts/) are all supported, because the Application enters a "Progressing" state while pods are being rolled out. In fact, any resource with a health check that can report a "Progressing" status is supported. +* [Argo CD Resource Hooks](../../user-guide/resource_hooks.md) are supported. We recommend this approach for users that need advanced functionality when an Argo Rollout cannot be used, such as smoke testing after a DaemonSet change. + +## Enabling Progressive Syncs +As an experimental feature, progressive syncs must be explicitly enabled, in one of these ways. + +1. Pass `--enable-progressive-syncs` to the ApplicationSet controller args. +1. Set `ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS=true` in the ApplicationSet controller environment variables. +1. Set `applicationsetcontroller.enable.progressive.syncs: true` in the Argo CD `argocd-cmd-params-cm` ConfigMap. + +## Strategies + +* AllAtOnce (default) +* RollingSync + +### AllAtOnce +This default Application update behavior is unchanged from the original ApplicationSet implementation. + +All Applications managed by the ApplicationSet resource are updated simultaneously when the ApplicationSet is updated. + +### RollingSync +This update strategy allows you to group Applications by labels present on the generated Application resources. +When the ApplicationSet changes, the changes will be applied to each group of Application resources sequentially. + +* Application groups are selected using their labels and `matchExpressions`. 
+* All `matchExpressions` must be true for an Application to be selected (multiple expressions match with AND behavior). +* The `In` and `NotIn` operators must match at least one value to be considered true (OR behavior). +* The `NotIn` operator has priority in the event that both a `NotIn` and `In` operator produce a match. +* All Applications in each group must become Healthy before the ApplicationSet controller will proceed to update the next group of Applications. +* The number of simultaneous Application updates in a group will not exceed its `maxUpdate` parameter (default is 100%, unbounded). +* RollingSync will capture external changes outside the ApplicationSet resource, since it relies on watching the OutOfSync status of the managed Applications. +* RollingSync will force all generated Applications to have autosync disabled. Warnings are printed in the applicationset-controller logs for any Application specs with an automated syncPolicy enabled. +* Sync operations are triggered the same way as if they were triggered by the UI or CLI (by directly setting the `operation` status field on the Application resource). This means that a RollingSync will respect sync windows just as if a user had clicked the "Sync" button in the Argo UI. +* When a sync is triggered, the sync is performed with the same syncPolicy configured for the Application. For example, this preserves the Application's retry settings. +* If an Application is considered "Pending" for `applicationsetcontroller.default.application.progressing.timeout` seconds, the Application is automatically moved to Healthy status (default 300). + +#### Example +The following example illustrates how to stage a progressive sync over Applications with explicitly configured environment labels. + +Once a change is pushed, the following will happen in order. + +* All `env-dev` Applications will be updated simultaneously. 
+* The rollout will wait for all `env-qa` Applications to be manually synced via the `argocd` CLI or by clicking the Sync button in the UI. +* 10% of all `env-prod` Applications will be updated at a time until all `env-prod` Applications have been updated. + +``` +--- +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + generators: + - list: + elements: + - cluster: engineering-dev + url: https://1.2.3.4 + env: env-dev + - cluster: engineering-qa + url: https://2.4.6.8 + env: env-qa + - cluster: engineering-prod + url: https://9.8.7.6/ + env: env-prod + strategy: + type: RollingSync + rollingSync: + steps: + - matchExpressions: + - key: envLabel + operator: In + values: + - env-dev + #maxUpdate: 100% # if undefined, all applications matched are updated together (default is 100%) + - matchExpressions: + - key: envLabel + operator: In + values: + - env-qa + maxUpdate: 0 # if 0, no matched applications will be updated + - matchExpressions: + - key: envLabel + operator: In + values: + - env-prod + maxUpdate: 10% # maxUpdate supports both integer and percentage string values (rounds down, but floored at 1 Application for >0%) + goTemplate: true + goTemplateOptions: ["missingkey=error"] + template: + metadata: + name: '{{.cluster}}-guestbook' + labels: + envLabel: '{{.env}}' + spec: + project: my-project + source: + repoURL: https://github.com/infra-team/cluster-deployments.git + targetRevision: HEAD + path: guestbook/{{.cluster}} + destination: + server: '{{.url}}' + namespace: guestbook +``` diff --git a/docs/operator-manual/applicationset/Security.md b/docs/operator-manual/applicationset/Security.md new file mode 100644 index 0000000000000..5df7a797de300 --- /dev/null +++ b/docs/operator-manual/applicationset/Security.md @@ -0,0 +1,38 @@ +# ApplicationSet Security + +ApplicationSet is a powerful tool, and it is crucial to understand its security implications before using it. 
+ +## Only admins may create/update/delete ApplicationSets + +ApplicationSets can create Applications under arbitrary [Projects](../../user-guide/projects.md). Argo CD setups often +include Projects (such as the `default`) with high levels of permissions, often including the ability to manage the +resources of Argo CD itself (like the RBAC ConfigMap). + +ApplicationSets can also quickly create an arbitrary number of Applications and just as quickly delete them. + +Finally, ApplicationSets can reveal privileged information. For example, the [git generator](./Generators-Git.md) can +read Secrets in the Argo CD namespace and send them to arbitrary URLs (e.g. URL provided for the `api` field) as auth headers. +(This functionality is intended for authorizing requests to SCM providers like GitHub, but it could be abused by a malicious user.) + +For these reasons, **only admins** may be given permission (via Kubernetes RBAC or any other mechanism) to create, +update, or delete ApplicationSets. + +## Admins must apply appropriate controls for ApplicationSets' sources of truth + +Even if non-admins can't create ApplicationSet resources, they may be able to affect the behavior of ApplicationSets. + +For example, if an ApplicationSet uses a [git generator](./Generators-Git.md), a malicious user with push access to the +source git repository could generate an excessively high number of Applications, putting strain on the ApplicationSet +and Application controllers. They could also cause the SCM provider's rate limiting to kick in, degrading ApplicationSet +service. + +### Templated `project` field + +It's important to pay special attention to ApplicationSets where the `project` field is templated. A malicious user with +write access to the generator's source of truth (for example, someone with push access to the git repo for a git +generator) could create Applications under Projects with insufficient restrictions. 
A malicious user with the ability to
+create an Application under an unrestricted Project (like the `default` Project) could take control of Argo CD itself
+by, for example, modifying its RBAC ConfigMap.
+
+If the `project` field is not hard-coded in an ApplicationSet's template, then admins _must_ control all sources of
+truth for the ApplicationSet's generators.
diff --git a/docs/operator-manual/applicationset/Template.md b/docs/operator-manual/applicationset/Template.md
new file mode 100644
index 0000000000000..f66a403586bbd
--- /dev/null
+++ b/docs/operator-manual/applicationset/Template.md
@@ -0,0 +1,110 @@
+# Templates
+
+The template fields of the ApplicationSet `spec` are used to generate Argo CD `Application` resources.
+
+ApplicationSet uses [fasttemplate](https://github.com/valyala/fasttemplate), but fasttemplate will soon be deprecated in favor of Go Template.
+
+## Template fields
+
+An Argo CD Application is created by combining the parameters from the generator with fields of the template (via `{{values}}`), and from that a concrete `Application` resource is produced and applied to the cluster.
+
+Here is the template subfield from a Cluster generator:
+```yaml
+# (...) 
+ template: + metadata: + name: '{{cluster}}-guestbook' + spec: + source: + repoURL: https://github.com/infra-team/cluster-deployments.git + targetRevision: HEAD + path: guestbook/{{cluster}} + destination: + server: '{{url}}' + namespace: guestbook +``` + +The template subfields correspond directly to [the spec of an Argo CD `Application` resource](../../declarative-setup/#applications): + +- `project` refers to the [Argo CD Project](../../user-guide/projects.md) in use (`default` may be used here to utilize the default Argo CD Project) +- `source` defines from which Git repository to extract the desired Application manifests + - **repoURL**: URL of the repository (eg `https://github.com/argoproj/argocd-example-apps.git`) + - **targetRevision**: Revision (tag/branch/commit) of the repository (eg `HEAD`) + - **path**: Path within the repository where Kubernetes manifests (and/or Helm, Kustomize, Jsonnet resources) are located +- `destination`: Defines which Kubernetes cluster/namespace to deploy to + - **name**: Name of the cluster (within Argo CD) to deploy to + - **server**: API Server URL for the cluster (Example: `https://kubernetes.default.svc`) + - **namespace**: Target namespace in which to deploy the manifests from `source` (Example: `my-app-namespace`) + +Note: + +- Referenced clusters must already be defined in Argo CD, for the ApplicationSet controller to use them +- Only **one** of `name` or `server` may be specified: if both are specified, an error is returned. + +The `metadata` field of template may also be used to set an Application `name`, or to add labels or annotations to the Application. + +While the ApplicationSet spec provides a basic form of templating, it is not intended to replace the full-fledged configuration management capabilities of tools such as Kustomize, Helm, or Jsonnet. + +### Deploying ApplicationSet resources as part of a Helm chart + +ApplicationSet uses the same templating notation as Helm (`{{}}`). 
If the ApplicationSet templates aren't written as
+Helm string literals, Helm will throw an error like `function "cluster" not defined`. To avoid that error, write the
+template as a Helm string literal. For example:
+
+```yaml
+  metadata:
+    name: '{{`{{.cluster}}`}}-guestbook'
+```
+
+This _only_ applies if you use Helm to deploy your ApplicationSet resources.
+
+## Generator templates
+
+In addition to specifying a template within the `.spec.template` of the `ApplicationSet` resource, templates may also be specified within generators. This is useful for overriding the values of the `spec`-level template.
+
+The generator's `template` field takes precedence over the `spec`'s template fields:
+
+- If both templates contain the same field, the generator's field value will be used.
+- If only one of those templates' fields has a value, that value will be used.
+
+Generator templates can thus be thought of as patches against the outer `spec`-level template fields.
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: ApplicationSet
+metadata:
+  name: guestbook
+spec:
+  generators:
+  - list:
+      elements:
+        - cluster: engineering-dev
+          url: https://kubernetes.default.svc
+      template:
+        metadata: {}
+        spec:
+          project: "default"
+          source:
+            revision: HEAD
+            repoURL: https://github.com/argoproj/argo-cd.git
+            # New path value is generated here:
+            path: 'applicationset/examples/template-override/{{cluster}}-override'
+          destination: {}
+
+  template:
+    metadata:
+      name: '{{cluster}}-guestbook'
+    spec:
+      project: "default"
+      source:
+        repoURL: https://github.com/argoproj/argo-cd.git
+        targetRevision: HEAD
+        # This 'default' value is not used: it is replaced by the generator's template path, above
+        path: applicationset/examples/template-override/default
+      destination:
+        server: '{{url}}'
+        namespace: guestbook
+```
+(*The full example can be found [here](https://github.com/argoproj/argo-cd/tree/master/applicationset/examples/template-override).*)
+
+In this example, the 
ApplicationSet controller will generate an `Application` resource using the `path` generated by the List generator, rather than the `path` value defined in `.spec.template`. diff --git a/docs/operator-manual/applicationset/Use-Cases.md b/docs/operator-manual/applicationset/Use-Cases.md new file mode 100644 index 0000000000000..0e9c65d3963ee --- /dev/null +++ b/docs/operator-manual/applicationset/Use-Cases.md @@ -0,0 +1,92 @@ +# Use cases supported by the ApplicationSet controller + +With the concept of generators, the ApplicationSet controller provides a powerful set of tools to automate the templating and modification of Argo CD Applications. Generators produce template parameter data from a variety of sources, including Argo CD clusters and Git repositories, supporting and enabling new use cases. + +While these tools may be utilized for whichever purpose is desired, here are some of the specific use cases that the ApplicationSet controller was designed to support. + +## Use case: cluster add-ons + +An initial design focus of the ApplicationSet controller was to allow an infrastructure team's Kubernetes cluster administrators the ability to automatically create a large, diverse set of Argo CD Applications, across a significant number of clusters, and manage those Applications as a single unit. One example of why this is needed is the *cluster add-on use case*. + +In the *cluster add-on use case*, an administrator is responsible for provisioning cluster add-ons to one or more Kubernetes clusters: cluster-addons are operators such as the [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator), or controllers such as the [argo-workflows controller](https://argoproj.github.io/argo-workflows/) (part of the [Argo ecosystem](https://argoproj.github.io/)). 
+ +Typically these add-ons are required by the applications of development teams (as tenants of a multi-tenant cluster, for instance, they may wish to provide metrics data to Prometheus or orchestrate workflows via Argo Workflows). + +Since installing these add-ons requires cluster-level permissions not held by individual development teams, installation is the responsibility of the infrastructure/ops team of an organization, and within a large organization this team might be responsible for tens, hundreds, or thousands of Kubernetes clusters (with new clusters being added/modified/removed on a regular basis). + +The need to scale across a large number of clusters, and automatically respond to the lifecycle of new clusters, necessarily mandates some form of automation. A further requirement would be allowing the targeting of add-ons to a subset of clusters using specific criteria (eg staging vs production). + +![Cluster add-on diagram](../../assets/applicationset/Use-Cases/Cluster-Add-Ons.png) + +In this example, the infrastructure team maintains a Git repository containing application manifests for the Argo Workflows controller, and Prometheus operator. + +The infrastructure team would like to deploy both these add-on to a large number of clusters, using Argo CD, and likewise wishes to easily manage the creation/deletion of new clusters. + +In this use case, we may use either the List, Cluster, or Git generators of the ApplicationSet controller to provide the required behaviour: + +- *List generator*: Administrators maintain two `ApplicationSet` resources, one for each application (Workflows and Prometheus), and include the list of clusters they wish to target within the List generator elements of each. + - With this generator, adding/removing clusters requires manually updating the `ApplicationSet` resource's list elements. 
+- *Cluster generator*: Administrators maintain two `ApplicationSet` resources, one for each application (Workflows and Prometheus), and ensure that all new clusters are defined within Argo CD.
+    - Since the Cluster generator automatically detects and targets the clusters defined within Argo CD, [adding/removing a cluster from Argo CD](../../declarative-setup/#clusters) will automatically cause Argo CD Application resources (for each application) to be created by the ApplicationSet controller.
+- *Git generator*: The Git generator is the most flexible/powerful of the generators, and thus there are a number of different ways to tackle this use case. Here are a couple:
+    - Using the Git generator `files` field: A list of clusters is kept as a JSON file within a Git repository. Updates to the JSON file, through Git commits, cause new clusters to be added/removed.
+    - Using the Git generator `directories` field: For each target cluster, a corresponding directory of that name exists in a Git repository. Adding/modifying a directory, through Git commits, would trigger an update for the cluster that shares the directory name.
+
+See the [generators section](Generators.md) for details on each of the generators.
+
+## Use case: monorepos
+
+In the *monorepo use case*, Kubernetes cluster administrators manage the entire state of a single Kubernetes cluster from a single Git repository.
+
+Manifest changes merged into the Git repository should automatically deploy to the cluster.
+
+![Monorepo diagram](../../assets/applicationset/Use-Cases/Monorepos.png)
+
+In this example, the infrastructure team maintains a Git repository containing application manifests for an Argo Workflows controller, and a Prometheus operator. Independent development teams also have added additional services they wish to deploy to the cluster. 
+
+Changes made to the Git repository -- for example, updating the version of a deployed artifact -- should automatically cause that update to be applied to the corresponding Kubernetes cluster by Argo CD.
+
+The Git generator may be used to support this use case:
+
+- The Git generator `directories` field may be used to specify particular subdirectories (using wildcards) containing the individual applications to deploy.
+- The Git generator `files` field may reference Git repository files containing JSON metadata, with that metadata describing the individual applications to deploy.
+- See the Git generator documentation for more details.
+
+## Use case: self-service of Argo CD Applications on multitenant clusters
+
+The *self-service use case* seeks to allow developers (as the end users of a multitenant Kubernetes cluster) greater flexibility to:
+
+- Deploy multiple applications to a single cluster, in an automated fashion, using Argo CD
+- Deploy to multiple clusters, in an automated fashion, using Argo CD
+- But, in both cases, to empower those developers to be able to do so without needing to involve a cluster administrator (to create the necessary Argo CD Applications/AppProject resources on their behalf)
+
+One potential solution to this use case is for development teams to define Argo CD `Application` resources within a Git repository (containing the manifests they wish to deploy), in an [app-of-apps pattern](../../cluster-bootstrapping/#app-of-apps-pattern), and for cluster administrators to then review/accept changes to this repository via merge requests.
+
+While this might sound like an effective solution, a major disadvantage is that a high degree of trust/scrutiny is needed to accept commits containing Argo CD `Application` spec changes. This is because there are many sensitive fields contained within the `Application` spec, including `project`, `cluster`, and `namespace`. 
An inadvertent merge might allow applications to access namespaces/clusters where they did not belong. + +Thus in the self-service use case, administrators desire to only allow some fields of the `Application` spec to be controlled by developers (eg the Git source repository) but not other fields (eg the target namespace, or target cluster, should be restricted). + +Fortunately, the ApplicationSet controller presents an alternative solution to this use case: cluster administrators may safely create an `ApplicationSet` resource containing a Git generator that restricts deployment of application resources to fixed values with the `template` field, while allowing customization of 'safe' fields by developers, at will. + +```yaml +kind: ApplicationSet +# (...) +spec: + generators: + - git: + repoURL: https://github.com/argoproj/argo-cd.git + files: + - path: "apps/**/config.json" + template: + spec: + project: dev-team-one # project is restricted + source: + # developers may customize app details using JSON files from above repo URL + repoURL: {{app.source}} + targetRevision: {{app.revision}} + path: {{app.path}} + destination: + name: production-cluster # cluster is restricted + namespace: dev-team-one # namespace is restricted +``` +See the [Git generator](Generators-Git.md) for more details. 
diff --git a/docs/operator-manual/applicationset/applicationset-specification.md b/docs/operator-manual/applicationset/applicationset-specification.md new file mode 100644 index 0000000000000..8899057bf7ff6 --- /dev/null +++ b/docs/operator-manual/applicationset/applicationset-specification.md @@ -0,0 +1,7 @@ +# ApplicationSet Specification + +The following describes all the available fields of an ApplicationSet: + +```yaml +{!docs/operator-manual/applicationset.yaml!} +``` diff --git a/docs/operator-manual/applicationset/index.md b/docs/operator-manual/applicationset/index.md new file mode 100644 index 0000000000000..1fe83fb2a0952 --- /dev/null +++ b/docs/operator-manual/applicationset/index.md @@ -0,0 +1,109 @@ +# Introduction to ApplicationSet controller + +## Introduction + +The ApplicationSet controller is a [Kubernetes controller](https://kubernetes.io/docs/concepts/architecture/controller/) that adds support for an `ApplicationSet` [CustomResourceDefinition](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/) (CRD). This controller/CRD enables both automation and greater flexibility managing [Argo CD](../../index.md) Applications across a large number of clusters and within monorepos, plus it makes self-service usage possible on multitenant Kubernetes clusters. + +The ApplicationSet controller works alongside an existing [Argo CD installation](../../index.md). Argo CD is a declarative, GitOps continuous delivery tool, which allows developers to define and control deployment of Kubernetes application resources from within their existing Git workflow. + +Starting with Argo CD v2.3, the ApplicationSet controller is bundled with Argo CD. + +The ApplicationSet controller, supplements Argo CD by adding additional features in support of cluster-administrator-focused scenarios. 
The `ApplicationSet` controller provides: + +- The ability to use a single Kubernetes manifest to target multiple Kubernetes clusters with Argo CD +- The ability to use a single Kubernetes manifest to deploy multiple applications from one or multiple Git repositories with Argo CD +- Improved support for monorepos: in the context of Argo CD, a monorepo is multiple Argo CD Application resources defined within a single Git repository +- Within multitenant clusters, improves the ability of individual cluster tenants to deploy applications using Argo CD (without needing to involve privileged cluster administrators in enabling the destination clusters/namespaces) + +!!! note + Be aware of the [security implications](./Security.md) of ApplicationSets before using them. + +## The ApplicationSet resource + +This example defines a new `guestbook` resource of kind `ApplicationSet`: +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + generators: + - list: + elements: + - cluster: engineering-dev + url: https://1.2.3.4 + - cluster: engineering-prod + url: https://2.4.6.8 + - cluster: finance-preprod + url: https://9.8.7.6 + template: + metadata: + name: '{{cluster}}-guestbook' + spec: + project: my-project + source: + repoURL: https://github.com/infra-team/cluster-deployments.git + targetRevision: HEAD + path: guestbook/{{cluster}} + destination: + server: '{{url}}' + namespace: guestbook +``` + +In this example, we want to deploy our `guestbook` application (with the Kubernetes resources for this application coming from Git, since this is GitOps) to a list of Kubernetes clusters (with the list of target clusters defined in the List items element of the `ApplicationSet` resource). + +While there are multiple types of *generators* that are available to use with the `ApplicationSet` resource, this example uses the List generator, which simply contains a fixed, literal list of clusters to target. 
This list of clusters will be the clusters upon which Argo CD deploys the `guestbook` application resources, once the ApplicationSet controller has processed the `ApplicationSet` resource.
+
+Generators, such as the List generator, are responsible for generating *parameters*. Parameters are key-value pairs that are substituted into the `template:` section of the ApplicationSet resource during template rendering.
+
+There are multiple generators currently supported by the ApplicationSet controller:
+
+- **List generator**: Generates parameters based on a fixed list of cluster name/URL values, as seen in the example above.
+- **Cluster generator**: Rather than a literal list of clusters (as with the list generator), the cluster generator automatically generates cluster parameters based on the clusters that are defined within Argo CD.
+- **Git generator**: The Git generator generates parameters based on files or folders that are contained within the Git repository defined within the generator resource.
+    - Files containing JSON values will be parsed and converted into template parameters.
+    - Individual directory paths within the Git repository may be used as parameter values, as well.
+- **Matrix generator**: The Matrix generator combines the generated parameters of two other generators.
+
+See the [generator section](Generators.md) for more information about individual generators, and the other generators not listed above.
+
+## Parameter substitution into templates
+
+Independent of which generator is used, parameters generated by a generator are substituted into `{{parameter name}}` values within the `template:` section of the `ApplicationSet` resource. In this example, the List generator defines `cluster` and `url` parameters, which are then substituted into the template's `{{cluster}}` and `{{url}}` values, respectively.
+
+After substitution, this `guestbook` `ApplicationSet` resource is applied to the Kubernetes cluster:
+
+1. 
The ApplicationSet controller processes the generator entries, producing a set of template parameters. +2. These parameters are substituted into the template, once for each set of parameters. +3. Each rendered template is converted into an Argo CD `Application` resource, which is then created (or updated) within the Argo CD namespace. +4. Finally, the Argo CD controller is notified of these `Application` resources and is responsible for handling them. + + +With the three different clusters defined in our example -- `engineering-dev`, `engineering-prod`, and `finance-preprod` -- this will produce three new Argo CD `Application` resources: one for each cluster. + +Here is an example of one of the `Application` resources that would be created, for the `engineering-dev` cluster at `1.2.3.4`: +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: engineering-dev-guestbook +spec: + source: + repoURL: https://github.com/infra-team/cluster-deployments.git + targetRevision: HEAD + path: guestbook/engineering-dev + destination: + server: https://1.2.3.4 + namespace: guestbook +``` +We can see that the generated values have been substituted into the `server` and `path` fields of the template, and the template has been rendered into a fully-fleshed out Argo CD Application. + +The Applications are now also visible from within the Argo CD UI: + +![List generator example in Argo CD Web UI](../../assets/applicationset/Introduction/List-Example-In-Argo-CD-Web-UI.png) + +The ApplicationSet controller will ensure that any changes, updates, or deletions made to `ApplicationSet` resources are automatically applied to the corresponding `Application`(s). + +For instance, if a new cluster/URL list entry was added to the List generator, a new Argo CD `Application` resource would be accordingly created for this new cluster. 
Any edits made to the `guestbook` `ApplicationSet` resource will affect all the Argo CD Applications that were instantiated by that resource, including the new Application. + +While the List generator's literal list of clusters is fairly simplistic, much more sophisticated scenarios are supported by the other available generators in the ApplicationSet controller. diff --git a/docs/operator-manual/architecture.md b/docs/operator-manual/architecture.md index 0edac18090daf..0c9d069624700 100644 --- a/docs/operator-manual/architecture.md +++ b/docs/operator-manual/architecture.md @@ -24,11 +24,10 @@ manifests when provided the following inputs: * repository URL * revision (commit, tag, branch) * application path -* template specific settings: parameters, ksonnet environments, helm values.yaml +* template specific settings: parameters, helm values.yaml ### Application Controller The application controller is a Kubernetes controller which continuously monitors running applications and compares the current, live state against the desired target state (as specified in the repo). It detects `OutOfSync` application state and optionally takes corrective action. 
It is responsible for invoking any user-defined hooks for lifecycle events (PreSync, Sync, PostSync) - diff --git a/docs/operator-manual/argocd-cm-yaml.md b/docs/operator-manual/argocd-cm-yaml.md new file mode 100644 index 0000000000000..666e78d03fc1b --- /dev/null +++ b/docs/operator-manual/argocd-cm-yaml.md @@ -0,0 +1,7 @@ +# argocd-cm.yaml example + +An example of an argocd-cm.yaml file: + +```yaml +{!docs/operator-manual/argocd-cm.yaml!} +``` diff --git a/docs/operator-manual/argocd-cm.yaml b/docs/operator-manual/argocd-cm.yaml index 5b949aee62b27..5e4ed095be56d 100644 --- a/docs/operator-manual/argocd-cm.yaml +++ b/docs/operator-manual/argocd-cm.yaml @@ -13,11 +13,20 @@ data: # Enables application status badge feature statusbadge.enabled: "true" + # Override the Argo CD hostname root URL for both the project and the application status badges. + # Here is an example of the application status badge for the app `myapp` to see what is replaced. + # api/badge?name=myapp&revision=true + # Provide custom URL to override. You must include the trailing forward slash: + statusbadge.url: "https://cd-status.apps.argoproj.io/" + # Enables anonymous user access. The anonymous users get default role permissions specified argocd-rbac-cm.yaml. users.anonymous.enabled: "true" # Specifies token expiration duration users.session.duration: "24h" + # Specifies regex expression for password + passwordPattern: "^.{8,32}$" + # Enables google analytics tracking is specified ga.trackingid: "UA-12345-1" # Unless set to 'false' then user ids are hashed before sending to google analytics @@ -27,9 +36,18 @@ data: help.chatUrl: "https://mycorp.slack.com/argo-cd" # the text for getting chat help, defaults to "Chat now!" help.chatText: "Chat now!" + # The URLs to download additional ArgoCD binaries (besides the Linux with current platform binary included by default) + # for different OS architectures. If provided, additional download buttons will be displayed on the help page. 
+ help.download.linux-amd64: "path-or-url-to-download" + help.download.linux-arm64: "path-or-url-to-download" + help.download.linux-ppc64le: "path-or-url-to-download" + help.download.linux-s390x: "path-or-url-to-download" + help.download.darwin-amd64: "path-or-url-to-download" + help.download.darwin-arm64: "path-or-url-to-download" + help.download.windows-amd64: "path-or-url-to-download" # A dex connector configuration (optional). See SSO configuration documentation: - # https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/sso + # https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/user-management/index.md#sso # https://dexidp.io/docs/connectors/ dex.config: | connectors: @@ -64,120 +82,117 @@ data: # Optional set of OIDC claims to request on the ID token. requestedIDTokenClaims: {"groups": {"essential": true}} - # Git repositories configure Argo CD with (optional). - # This list is updated when configuring/removing repos from the UI/CLI - # Note: 'type: helm' field is supported in v1.3+. Use 'helm.repositories' for older versions. - repositories: | - - url: https://github.com/argoproj/my-private-repository - passwordSecret: - name: my-secret - key: password - usernameSecret: - name: my-secret - key: username - sshPrivateKeySecret: - name: my-secret - key: sshPrivateKey - - type: helm - url: https://storage.googleapis.com/istio-prerelease/daily-build/master-latest-daily/charts - name: istio.io - - type: helm - url: https://my-private-chart-repo.internal - name: private-repo - usernameSecret: - name: my-secret - key: username - passwordSecret: - name: my-secret - key: password - - # Non-standard and private Helm repositories (deprecated in 1.3). 
- helm.repositories: | - - url: https://storage.googleapis.com/istio-prerelease/daily-build/master-latest-daily/charts - name: istio.io - - url: https://my-private-chart-repo.internal - name: private-repo - usernameSecret: - name: my-secret - key: username - passwordSecret: - name: my-secret - key: password - - # Configuration to customize resource behavior (optional). Keys are in the form: group/Kind. - resource.customizations: | - admissionregistration.k8s.io/MutatingWebhookConfiguration: - # List of json pointers in the object to ignore differences - ignoreDifferences: | - jsonPointers: - - /webhooks/0/clientConfig/caBundle - certmanager.k8s.io/Certificate: - # Lua script for customizing the health status assessment - health.lua: | - hs = {} - if obj.status ~= nil then - if obj.status.conditions ~= nil then - for i, condition in ipairs(obj.status.conditions) do - if condition.type == "Ready" and condition.status == "False" then - hs.status = "Degraded" - hs.message = condition.message - return hs - end - if condition.type == "Ready" and condition.status == "True" then - hs.status = "Healthy" - hs.message = condition.message - return hs - end - end + # Configuration to customize resource behavior (optional) can be configured via splitted sub keys. + # Keys are in the form: resource.customizations.ignoreDifferences., resource.customizations.health. + # resource.customizations.actions., resource.customizations.knownTypeFields. + # resource.customizations.ignoreResourceUpdates. + resource.customizations.ignoreDifferences.admissionregistration.k8s.io_MutatingWebhookConfiguration: | + jsonPointers: + - /webhooks/0/clientConfig/caBundle + jqPathExpressions: + - .webhooks[0].clientConfig.caBundle + managedFieldsManagers: + - kube-controller-manager + + # Configuration to define customizations ignoring differences between live and desired states for + # all resources (GK). 
+ resource.customizations.ignoreDifferences.all: | + managedFieldsManagers: + - kube-controller-manager + jsonPointers: + - /spec/replicas + + # Enable resource.customizations.ignoreResourceUpdates rules. If "false," those rules are not applied, and all updates + # to resources are applied to the cluster cache. Default is false. + resource.ignoreResourceUpdatesEnabled: "false" + + # Configuration to define customizations ignoring differences during watched resource updates to skip application reconciles. + resource.customizations.ignoreResourceUpdates.all: | + jsonPointers: + - /metadata/resourceVersion + + # Configuration to define customizations ignoring differences during watched resource updates can be configured via splitted sub key. + resource.customizations.ignoreResourceUpdates.argoproj.io_Application: | + jsonPointers: + - /status + + # jsonPointers and jqPathExpressions can be specified. + resource.customizations.ignoreResourceUpdates.autoscaling_HorizontalPodAutoscaler: | + jqPathExpressions: + - '.metadata.annotations."autoscaling.alpha.kubernetes.io/behavior"' + - '.metadata.annotations."autoscaling.alpha.kubernetes.io/conditions"' + - '.metadata.annotations."autoscaling.alpha.kubernetes.io/metrics"' + - '.metadata.annotations."autoscaling.alpha.kubernetes.io/current-metrics"' + jsonPointers: + - /metadata/annotations/autoscaling.alpha.kubernetes.io~1behavior + - /metadata/annotations/autoscaling.alpha.kubernetes.io~1conditions + - /metadata/annotations/autoscaling.alpha.kubernetes.io~1metrics + - /metadata/annotations/autoscaling.alpha.kubernetes.io~1current-metrics + + resource.customizations.health.certmanager.k8s.io-Certificate: | + hs = {} + if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + if condition.type == "Ready" and condition.status == "False" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + if condition.type == "Ready" and 
condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs end end - hs.status = "Progressing" - hs.message = "Waiting for certificate" - return hs - cert-manager.io/Certificate: - # Lua script for customizing the health status assessment - health.lua: | - hs = {} - if obj.status ~= nil then - if obj.status.conditions ~= nil then - for i, condition in ipairs(obj.status.conditions) do - if condition.type == "Ready" and condition.status == "False" then - hs.status = "Degraded" - hs.message = condition.message - return hs - end - if condition.type == "Ready" and condition.status == "True" then - hs.status = "Healthy" - hs.message = condition.message - return hs - end - end + end + end + hs.status = "Progressing" + hs.message = "Waiting for certificate" + return hs + + resource.customizations.health.cert-manager.io_Certificate: | + hs = {} + if obj.status ~= nil then + if obj.status.conditions ~= nil then + for i, condition in ipairs(obj.status.conditions) do + if condition.type == "Ready" and condition.status == "False" then + hs.status = "Degraded" + hs.message = condition.message + return hs + end + if condition.type == "Ready" and condition.status == "True" then + hs.status = "Healthy" + hs.message = condition.message + return hs end end - hs.status = "Progressing" - hs.message = "Waiting for certificate" - return hs - apps/Deployment: - # List of Lua Scripts to introduce custom actions - actions: | - # Lua Script to indicate which custom actions are available on the resource - discovery.lua: | - actions = {} - actions["restart"] = {} - return actions - definitions: - - name: restart - # Lua Script to modify the obj - action.lua: | - local os = require("os") - if obj.spec.template.metadata == nil then - obj.spec.template.metadata = {} - end - if obj.spec.template.metadata.annotations == nil then - obj.spec.template.metadata.annotations = {} - end - obj.spec.template.metadata.annotations["kubectl.kubernetes.io/restartedAt"] 
= os.date("!%Y-%m-%dT%XZ") - return obj + end + end + hs.status = "Progressing" + hs.message = "Waiting for certificate" + return hs + + # List of Lua Scripts to introduce custom actions + resource.customizations.actions.apps_Deployment: | + # Lua Script to indicate which custom actions are available on the resource + discovery.lua: | + actions = {} + actions["restart"] = {} + return actions + definitions: + - name: restart + # Lua Script to modify the obj + action.lua: | + local os = require("os") + if obj.spec.template.metadata == nil then + obj.spec.template.metadata = {} + end + if obj.spec.template.metadata.annotations == nil then + obj.spec.template.metadata.annotations = {} + end + obj.spec.template.metadata.annotations["kubectl.kubernetes.io/restartedAt"] = os.date("!%Y-%m-%dT%XZ") + return obj # Configuration to completely ignore entire classes of resource group/kinds (optional). # Excluding high-volume resources improves performance and memory usage, and reduces load and @@ -203,6 +218,9 @@ data: clusters: - "*.local" + # An optional comma-separated list of metadata.labels to observe in the UI. + resource.customLabels: tier + resource.compareoptions: | # if ignoreAggregatedRoles set to true then differences caused by aggregated roles in RBAC resources are ignored. ignoreAggregatedRoles: true @@ -213,6 +231,10 @@ data: # 'none' - disabled ignoreResourceStatusField: crd + # configuration to instruct controller to only watch for resources that it has permissions to list + # can be either empty, "normal" or "strict". By default, it is empty i.e. disabled. + resource.respectRBAC: "normal" + # Configuration to add a config management plugin. configManagementPlugins: | - name: kasane @@ -221,6 +243,12 @@ data: generate: command: [kasane, show] + # A set of settings that allow enabling or disabling the config management tool. + # If unset, each defaults to "true". 
+ kustomize.enabled: true + jsonnet.enabled: true + helm.enabled: true + # Build options/parameters to use with `kustomize build` (optional) kustomize.buildOptions: --load_restrictor none @@ -232,11 +260,23 @@ data: kustomize.version.v3.5.1: /custom-tools/kustomize_3_5_1 kustomize.version.v3.5.4: /custom-tools/kustomize_3_5_4 + # Comma delimited list of additional custom remote values file schemes (http and https are allowed by default). + # Change to empty value if you want to disable remote values files altogether. + helm.valuesFileSchemes: http, https + # The metadata.label key name where Argo CD injects the app name as a tracking label (optional). # Tracking labels are used to determine which resources need to be deleted when pruning. # If omitted, Argo CD injects the app name into the label: 'app.kubernetes.io/instance' application.instanceLabelKey: mycompany.com/appname + # You can change the resource tracking method Argo CD uses by changing the + # setting application.resourceTrackingMethod to the desired method. + # The following methods are available: + # - label : Uses the application.instanceLabelKey label for tracking + # - annotation : Uses an annotation with additional metadata for tracking instead of the label + # - annotation+label : Also uses an annotation for tracking, but additionally labels the resource with the application name + application.resourceTrackingMethod: annotation + # disables admin user. Admin is enabled by default admin.enabled: "false" # add an additional local user with apiKey and login capabilities @@ -262,3 +302,107 @@ data: # Optional link for banner. If set, the entire banner text will become a link. # You can have bannercontent without a bannerurl, but not the other way around. ui.bannerurl: "https://argoproj.github.io" + # Uncomment to make the banner not show the close buttons, thereby making the banner permanent.
+ # Because it is permanent, only one line of text is available to not take up too much real estate in the UI, + # so it is recommended that the length of the bannercontent text is kept reasonably short. Note that you can + # have either a permanent banner or a regular closeable banner, and NOT both. eg. A user can't dismiss a + # notification message (closeable) banner, to then immediately see a permanent banner. + # ui.bannerpermanent: "true" + # An option to specify the position of the banner, either the top or bottom of the page. The default is at the top. + # Uncomment to make the banner appear at the bottom of the page. Any value other than "bottom" will make the banner appear at the top. + # ui.bannerposition: "bottom" + + # Application reconciliation timeout is the max amount of time required to discover if a new manifests version got + # published to the repository. Reconciliation by timeout is disabled if timeout is set to 0. Three minutes by default. + # > Note: argocd-repo-server deployment must be manually restarted after changing the setting. + timeout.reconciliation: 180s + + # cluster.inClusterEnabled indicates whether to allow in-cluster server address. This is enabled by default. + cluster.inClusterEnabled: "true" + + # Application pod logs RBAC enforcement enables control over who can and who can't view application pod logs. + # When you enable the switch, pod logs will be visible only to admin role by default. Other roles/users will not be able to view them via cli and UI. + # When you enable the switch, viewing pod logs for other roles/users will require explicit RBAC allow policies (allow get on logs subresource). + # When you disable the switch (either add it to the configmap with a "false" value or do not add it to the configmap), no actual RBAC enforcement will take place. + server.rbac.log.enforce.enable: "false" + + # exec.enabled indicates whether the UI exec feature is enabled. It is disabled by default. 
+ exec.enabled: "false" + + # exec.shells restricts which shells are allowed for `exec`, and in which order they are attempted + exec.shells: "bash,sh,powershell,cmd" + + # oidc.tls.insecure.skip.verify determines whether certificate verification is skipped when verifying tokens with the + # configured OIDC provider (either external or the bundled Dex instance). Setting this to "true" will cause JWT + # token verification to pass despite the OIDC provider having an invalid certificate. Only set to "true" if you + # understand the risks. + oidc.tls.insecure.skip.verify: "false" + + # Add Deep Links to ArgoCD UI + # sample project level links + project.links: | + - url: https://myaudit-system.com?project={{.metadata.name}} + title: Audit + description: system audit logs + icon.class: "fa-book" + # sample application level links + application.links: | + # pkg.go.dev/text/template is used for evaluating url templates + - url: https://mycompany.splunk.com?search={{.spec.destination.namespace}} + title: Splunk + # conditionally show link e.g. for specific project + # github.com/antonmedv/expr is used for evaluation of conditions + - url: https://mycompany.splunk.com?search={{.spec.destination.namespace}} + title: Splunk + if: spec.project == "default" + - url: https://{{.metadata.annotations.splunkhost}}?search={{.spec.destination.namespace}} + title: Splunk + if: metadata.annotations.splunkhost + # sample resource level links + resource.links: | + - url: https://mycompany.splunk.com?search={{.metadata.namespace}} + title: Splunk + if: kind == "Pod" || kind == "Deployment" + + extension.config: | + extensions: + # Name defines the endpoint that will be used to register + # the extension route. + # Mandatory field. + - name: some-extension + backend: + # ConnectionTimeout is the maximum amount of time a dial to + # the extension server will wait for a connect to complete. + # Optional field. 
Default: 2 seconds + connectionTimeout: 2s + + # KeepAlive specifies the interval between keep-alive probes + # for an active network connection between the API server and + # the extension server. + # Optional field. Default: 15 seconds + keepAlive: 15s + + # IdleConnectionTimeout is the maximum amount of time an idle + # (keep-alive) connection between the API server and the extension + # server will remain idle before closing itself. + # Optional field. Default: 60 seconds + idleConnectionTimeout: 60s + + # MaxIdleConnections controls the maximum number of idle (keep-alive) + # connections between the API server and the extension server. + # Optional field. Default: 30 + maxIdleConnections: 30 + + services: + # URL is the address where the extension backend must be available. + # Mandatory field. + - url: http://httpbin.org + + # Cluster if provided, will have to match the application + # destination name or the destination server to have requests + # properly forwarded to this service URL. + # Optional field if only one service is specified. + # Mandatory if multiple services are specified. 
+ cluster: + name: some-cluster + server: https://some-cluster diff --git a/docs/operator-manual/argocd-cmd-params-cm-yaml.md b/docs/operator-manual/argocd-cmd-params-cm-yaml.md new file mode 100644 index 0000000000000..1cdba010fcfc6 --- /dev/null +++ b/docs/operator-manual/argocd-cmd-params-cm-yaml.md @@ -0,0 +1,7 @@ +# argocd-cmd-params-cm.yaml example + +An example of an argocd-cmd-params-cm.yaml file: + +```yaml +{!docs/operator-manual/argocd-cmd-params-cm.yaml!} +``` diff --git a/docs/operator-manual/argocd-cmd-params-cm.yaml b/docs/operator-manual/argocd-cmd-params-cm.yaml new file mode 100644 index 0000000000000..7d38506d0b7ec --- /dev/null +++ b/docs/operator-manual/argocd-cmd-params-cm.yaml @@ -0,0 +1,194 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-cmd-params-cm + labels: + app.kubernetes.io/name: argocd-cmd-params-cm + app.kubernetes.io/part-of: argocd +data: + # Repo server address. (default "argocd-repo-server:8081") + repo.server: "argocd-repo-server:8081" + + # Redis server hostname and port (e.g. argocd-redis:6379) + redis.server: "argocd-redis:6379" + # Enable compression for data sent to Redis with the required compression algorithm. (default 'gzip') + redis.compression: gzip + # Redis database + redis.db: + + # Open-Telemetry collector address: (e.g. "otel-collector:4317") + otlp.address: + + # List of additional namespaces where applications may be created in and + # reconciled from. The namespace where Argo CD is installed to will always + # be allowed. + # + # Feature state: Beta + application.namespaces: ns1, ns2, ns3 + + ## Controller Properties + # Repo server RPC call timeout seconds. 
+ controller.repo.server.timeout.seconds: "60" + # Disable TLS on connections to repo server + controller.repo.server.plaintext: "false" + # Whether to use strict validation of the TLS cert presented by the repo server + controller.repo.server.strict.tls: "false" + # Number of application status processors (default 20) + controller.status.processors: "20" + # Number of application operation processors (default 10) + controller.operation.processors: "10" + # Set the logging format. One of: text|json (default "text") + controller.log.format: "text" + # Set the logging level. One of: debug|info|warn|error (default "info") + controller.log.level: "info" + # Prometheus metrics cache expiration (disabled by default. e.g. 24h0m0s) + controller.metrics.cache.expiration: "24h0m0s" + # Specifies timeout between application self heal attempts (default 5) + controller.self.heal.timeout.seconds: "5" + # Cache expiration for app state (default 1h0m0s) + controller.app.state.cache.expiration: "1h0m0s" + # Specifies if resource health should be persisted in app CRD (default true) + # Changing this to `false` significantly reduces the number of Application CRD updates and improves controller performance. + # However, disabling resource health by default might affect applications that communicate with Applications CRD directly + # so we have to defer switching this to `false` by default till v3.0 release. + controller.resource.health.persist: "true" + # Cache expiration default (default 24h0m0s) + controller.default.cache.expiration: "24h0m0s" + # Sharding algorithm used to balance clusters across application controller shards (default "legacy") + controller.sharding.algorithm: legacy + # Number of allowed concurrent kubectl fork/execs. Any value less than 1 means no limit.
+ controller.kubectl.parallelism.limit: "20" + + ## Server properties + # Listen on given address for incoming connections (default "0.0.0.0") + server.listen.address: "0.0.0.0" + # Listen on given address for metrics (default "0.0.0.0") + server.metrics.listen.address: "0.0.0.0" + # Run server without TLS + server.insecure: "false" + # Value for base href in index.html. Used if Argo CD is running behind reverse proxy under subpath different from / (default "/") + server.basehref: "/" + # Used if Argo CD is running behind reverse proxy under subpath different from / + server.rootpath: "" + # Directory path that contains additional static assets + server.staticassets: "/shared/app" + + # Set the logging format. One of: text|json (default "text") + server.log.format: "text" + # Set the logging level. One of: debug|info|warn|error (default "info") + server.log.level: "info" + # Repo server RPC call timeout seconds. (default 60) + server.repo.server.timeout.seconds: "60" + # Use a plaintext client (non-TLS) to connect to repository server + server.repo.server.plaintext: "false" + # Perform strict validation of TLS certificates when connecting to repo server + server.repo.server.strict.tls: "false" + # Dex server address (default "http://argocd-dex-server:5556") + server.dex.server: "http://argocd-dex-server:5556" + # Use a plaintext client (non-TLS) to connect to dex server + server.dex.server.plaintext: "false" + # Perform strict validation of TLS certificates when connecting to dex server + server.dex.server.strict.tls: "false" + # Disable client authentication + server.disable.auth: "false" + # Toggle GZIP compression + server.enable.gzip: "true" + # Set X-Frame-Options header in HTTP responses to value. To disable, set to "". 
(default "sameorigin") + server.x.frame.options: "sameorigin" + # The minimum SSL/TLS version that is acceptable (one of: 1.0|1.1|1.2|1.3) (default "1.2") + server.tls.minversion: "1.2" + # The maximum SSL/TLS version that is acceptable (one of: 1.0|1.1|1.2|1.3) (default "1.3") + server.tls.maxversion: "1.3" + # The list of acceptable ciphers to be used when establishing TLS connections. Use 'list' to list available ciphers. (default "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:TLS_RSA_WITH_AES_256_GCM_SHA384") + server.tls.ciphers: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:TLS_RSA_WITH_AES_256_GCM_SHA384" + # Cache expiration for cluster/repo connection status (default 1h0m0s) + server.connection.status.cache.expiration: "1h0m0s" + # Cache expiration for OIDC state (default 3m0s) + server.oidc.cache.expiration: "3m0s" + # Cache expiration for failed login attempts (default 24h0m0s) + server.login.attempts.expiration: "24h0m0s" + # Cache expiration for app state (default 1h0m0s) + server.app.state.cache.expiration: "1h0m0s" + # Cache expiration default (default 24h0m0s) + server.default.cache.expiration: "24h0m0s" + # Enable the experimental proxy extension feature + server.enable.proxy.extension: "false" + + ## Repo-server properties + # Listen on given address for incoming connections (default "0.0.0.0") + reposerver.listen.address: "0.0.0.0" + # Listen on given address for metrics (default "0.0.0.0") + reposerver.metrics.listen.address: "0.0.0.0" + # Set the logging format. One of: text|json (default "text") + reposerver.log.format: "text" + # Set the logging level. One of: debug|info|warn|error (default "info") + reposerver.log.level: "info" + # Limit on number of concurrent manifest generation requests. Any value less than 1 means no limit.
+ reposerver.parallelism.limit: "1" + # Disable TLS on the gRPC endpoint + reposerver.disable.tls: "false" + # The minimum SSL/TLS version that is acceptable (one of: 1.0|1.1|1.2|1.3) (default "1.2") + reposerver.tls.minversion: "1.2" + # The maximum SSL/TLS version that is acceptable (one of: 1.0|1.1|1.2|1.3) (default "1.3") + reposerver.tls.maxversion: "1.3" + # The list of acceptable ciphers to be used when establishing TLS connections. Use 'list' to list available ciphers. (default "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:TLS_RSA_WITH_AES_256_GCM_SHA384") + reposerver.tls.ciphers: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:TLS_RSA_WITH_AES_256_GCM_SHA384" + # Cache expiration for repo state, incl. app lists, app details, manifest generation, revision meta-data (default 24h0m0s) + reposerver.repo.cache.expiration: "24h0m0s" + # Cache expiration default (default 24h0m0s) + reposerver.default.cache.expiration: "24h0m0s" + # Max combined manifest file size for a single directory-type Application. In-memory manifest representation may be as + # much as 300x the manifest file size. Limit this to stay within the memory limits of the repo-server while allowing + # for 300x memory expansion and N Applications running at the same time. + # (example 10M max * 300 expansion * 10 Apps = 30G max theoretical memory usage). + reposerver.max.combined.directory.manifests.size: '10M' + # Paths to be excluded from the tarball streamed to plugins. Separate with ; + reposerver.plugin.tar.exclusions: "" + # Allow repositories to contain symlinks that leave the boundaries of the repository. + # Changing this to "true" will not allow _all_ out-of-bounds symlinks. Those will still be blocked for things like values + # files in Helm charts. But symlinks which are not explicitly blocked by other checks will be allowed. 
+ reposerver.allow.oob.symlinks: "false" + # Maximum size of tarball when streaming manifests to the repo server for generation + reposerver.streamed.manifest.max.tar.size: "100M" + # Maximum size of extracted manifests when streaming manifests to the repo server for generation + reposerver.streamed.manifest.max.extracted.size: "1G" + # Enable git submodule support + reposerver.enable.git.submodule: "true" + + # Disable TLS on the HTTP endpoint + dexserver.disable.tls: "false" + + ## ApplicationSet Controller Properties + # Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager. + applicationsetcontroller.enable.leader.election: "false" + # "Modify how application is synced between the generator and the cluster. Default is 'sync' (create & update & delete), options: 'create-only', 'create-update' (no deletion), 'create-delete' (no update)" + applicationsetcontroller.policy: "sync" + # Print debug logs. Takes precedence over loglevel + applicationsetcontroller.debug: "false" + # Set the logging format. One of: text|json (default "text") + applicationsetcontroller.log.format: "text" + # Set the logging level. One of: debug|info|warn|error (default "info") + applicationsetcontroller.log.level: "info" + # Enable dry run mode + applicationsetcontroller.dryrun: "false" + # Enable git submodule support + applicationsetcontroller.enable.git.submodule: "true" + # Enables use of the Progressive Syncs capability + applicationsetcontroller.enable.progressive.syncs: "false" + # A list of glob patterns specifying where to look for ApplicationSet resources. (default is only the ns where the controller is installed) + applicationsetcontroller.namespaces: "argocd,argocd-appsets-*" + # Path of the self-signed TLS certificate for SCM/PR Gitlab Generator + applicationsetcontroller.scm.root.ca.path: "" + # A comma separated list of allowed SCM providers (default "" is all SCM providers). 
+ # Setting this field is required when using ApplicationSets-in-any-namespace, to prevent users from + # sending secrets from `tokenRef`s to disallowed `api` domains. + # The url used in the scm generator must exactly match one in the list + applicationsetcontroller.allowed.scm.providers: "https://git.example.com/,https://gitlab.example.com/" + # To disable SCM providers entirely (i.e. disable the SCM and PR generators), set this to "false". Default is "true". + applicationsetcontroller.enable.scm.providers: "false" + + ## Argo CD Notifications Controller Properties + # Set the logging level. One of: debug|info|warn|error (default "info") + notificationscontroller.log.level: "info" + # Set the logging format. One of: text|json (default "text") + notificationscontroller.log.format: "text" diff --git a/docs/operator-manual/argocd-rbac-cm-yaml.md b/docs/operator-manual/argocd-rbac-cm-yaml.md new file mode 100644 index 0000000000000..c0dbcde428543 --- /dev/null +++ b/docs/operator-manual/argocd-rbac-cm-yaml.md @@ -0,0 +1,7 @@ +# argocd-rbac-cm.yaml example + +An example of an argocd-rbac-cm.yaml file: + +```yaml +{!docs/operator-manual/argocd-rbac-cm.yaml!} +``` diff --git a/docs/operator-manual/argocd-rbac-cm.yaml b/docs/operator-manual/argocd-rbac-cm.yaml index 75ceb093779e5..b68d93ecc4f89 100644 --- a/docs/operator-manual/argocd-rbac-cm.yaml +++ b/docs/operator-manual/argocd-rbac-cm.yaml @@ -19,6 +19,15 @@ data: # Grant all members of 'my-org:team-beta' admins g, my-org:team-beta, role:admin + # it is possible to provide additional entries in this configmap to compose the final policy csv. + # In this case the key must follow the pattern 'policy.<any string>.csv'. Argo CD will concatenate + all additional policies it finds with this pattern below the main one ('policy.csv'). This is useful + to allow composing policies in config management tools like Kustomize, Helm, etc.
+ policy.overlay.csv: | + p, role:tester, applications, *, */*, allow + p, role:tester, projects, *, *, allow + g, my-org:team-qa, role:tester + # policy.default is the name of the default role which Argo CD will fall back to, when # authorizing API requests (optional). If omitted or empty, users may still be able to log in, # but will see no apps, projects, etc... @@ -28,3 +37,8 @@ data: # If omitted, defaults to: '[groups]'. The scope value can be a string, or a list of strings. scopes: '[cognito:groups, email]' + # matchMode configures the matchers function for casbin. + # There are two options for this, 'glob' for glob matcher or 'regex' for regex matcher. If omitted or mis-configured, + # will be set to 'glob' as default. + policy.matchMode: 'glob' + diff --git a/docs/operator-manual/argocd-repo-creds-yaml.md b/docs/operator-manual/argocd-repo-creds-yaml.md new file mode 100644 index 0000000000000..dca214068405c --- /dev/null +++ b/docs/operator-manual/argocd-repo-creds-yaml.md @@ -0,0 +1,7 @@ +# argocd-repo-creds.yaml example + +An example of an argocd-repo-creds.yaml file: + +```yaml +{!docs/operator-manual/argocd-repo-creds.yaml!} +``` diff --git a/docs/operator-manual/argocd-repo-creds.yaml b/docs/operator-manual/argocd-repo-creds.yaml new file mode 100644 index 0000000000000..731d8a8f6f67e --- /dev/null +++ b/docs/operator-manual/argocd-repo-creds.yaml @@ -0,0 +1,63 @@ +# Repository credentials, for using the same credentials in multiple repositories.
+apiVersion: v1 +kind: Secret +metadata: + name: argoproj-https-creds + namespace: argocd + labels: + argocd.argoproj.io/secret-type: repo-creds +stringData: + url: https://github.com/argoproj + type: helm + password: my-password + username: my-username +--- +apiVersion: v1 +kind: Secret +metadata: + name: argoproj-ssh-creds + namespace: argocd + labels: + argocd.argoproj.io/secret-type: repo-creds +stringData: + url: git@github.com:argoproj-labs + type: helm + sshPrivateKey: | + -----BEGIN OPENSSH PRIVATE KEY----- + ... + -----END OPENSSH PRIVATE KEY----- +--- +apiVersion: v1 +kind: Secret +metadata: + name: github-creds + namespace: argocd + labels: + argocd.argoproj.io/secret-type: repo-creds +stringData: + url: https://github.com/argoproj + type: helm + githubAppID: 1 + githubAppInstallationID: 2 + githubAppPrivateKey: | + -----BEGIN OPENSSH PRIVATE KEY----- + ... + -----END OPENSSH PRIVATE KEY----- +--- +apiVersion: v1 +kind: Secret +metadata: + name: github-enterprise-creds + namespace: argocd + labels: + argocd.argoproj.io/secret-type: repo-creds +stringData: + url: https://github.com/argoproj + type: helm + githubAppID: 1 + githubAppInstallationID: 2 + githubAppEnterpriseBaseUrl: https://ghe.example.com/api/v3 + githubAppPrivateKey: | + -----BEGIN OPENSSH PRIVATE KEY----- + ... 
+ -----END OPENSSH PRIVATE KEY----- \ No newline at end of file diff --git a/docs/operator-manual/argocd-repositories-yaml.md b/docs/operator-manual/argocd-repositories-yaml.md new file mode 100644 index 0000000000000..c9c99357c391a --- /dev/null +++ b/docs/operator-manual/argocd-repositories-yaml.md @@ -0,0 +1,7 @@ +# argocd-repositories.yaml example + +An example of an argocd-repositories.yaml file: + +```yaml +{!docs/operator-manual/argocd-repositories.yaml!} +``` diff --git a/docs/operator-manual/argocd-repositories.yaml b/docs/operator-manual/argocd-repositories.yaml new file mode 100644 index 0000000000000..b6aa0715c389d --- /dev/null +++ b/docs/operator-manual/argocd-repositories.yaml @@ -0,0 +1,69 @@ +# Git repositories configure Argo CD with (optional). +# This list is updated when configuring/removing repos from the UI/CLI +# Note: the last example in the list would use a repository credential template, configured under "argocd-repo-creds.yaml". +apiVersion: v1 +kind: Secret +metadata: + name: my-private-https-repo + namespace: argocd + labels: + argocd.argoproj.io/secret-type: repository +stringData: + url: https://github.com/argoproj/argocd-example-apps + password: my-password + username: my-username + insecure: "true" # Ignore validity of server's TLS certificate. Defaults to "false" + forceHttpBasicAuth: "true" # Skip auth method negotiation and force usage of HTTP basic auth. Defaults to "false" + enableLfs: "true" # Enable git-lfs for this repository. Defaults to "false" +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-private-ssh-repo + namespace: argocd + labels: + argocd.argoproj.io/secret-type: repository +stringData: + url: ssh://git@github.com/argoproj/argocd-example-apps + sshPrivateKey: | + -----BEGIN OPENSSH PRIVATE KEY----- + ... + -----END OPENSSH PRIVATE KEY----- + insecure: "true" # Do not perform a host key check for the server. Defaults to "false" + enableLfs: "true" # Enable git-lfs for this repository. 
Defaults to "false" +--- +apiVersion: v1 +kind: Secret +metadata: + name: istio-helm-repo + namespace: argocd + labels: + argocd.argoproj.io/secret-type: repository +stringData: + url: https://storage.googleapis.com/istio-prerelease/daily-build/master-latest-daily/charts + name: istio.io + type: helm +--- +apiVersion: v1 +kind: Secret +metadata: + name: private-helm-repo + namespace: argocd + labels: + argocd.argoproj.io/secret-type: repository +stringData: + url: https://my-private-chart-repo.internal + name: private-repo + type: helm + password: my-password + username: my-username +--- +apiVersion: v1 +kind: Secret +metadata: + name: private-repo + namespace: argocd + labels: + argocd.argoproj.io/secret-type: repository +stringData: + url: https://github.com/argoproj/private-repo \ No newline at end of file diff --git a/docs/operator-manual/argocd-secret-yaml.md b/docs/operator-manual/argocd-secret-yaml.md new file mode 100644 index 0000000000000..33a88a8e96ee2 --- /dev/null +++ b/docs/operator-manual/argocd-secret-yaml.md @@ -0,0 +1,7 @@ +# argocd-secret.yaml example + +An example of an argocd-secret.yaml file: + +```yaml +{!docs/operator-manual/argocd-secret.yaml!} +``` diff --git a/docs/operator-manual/argocd-ssh-known-hosts-cm-yaml.md b/docs/operator-manual/argocd-ssh-known-hosts-cm-yaml.md new file mode 100644 index 0000000000000..4a5977f61e842 --- /dev/null +++ b/docs/operator-manual/argocd-ssh-known-hosts-cm-yaml.md @@ -0,0 +1,7 @@ +# argocd-ssh-known-hosts-cm.yaml example + +An example of an argocd-ssh-known-hosts-cm.yaml file: + +```yaml +{!docs/operator-manual/argocd-ssh-known-hosts-cm.yaml!} +``` diff --git a/docs/operator-manual/argocd-ssh-known-hosts-cm.yaml b/docs/operator-manual/argocd-ssh-known-hosts-cm.yaml index 7f129a142836e..0f30fa5671662 100644 --- a/docs/operator-manual/argocd-ssh-known-hosts-cm.yaml +++ b/docs/operator-manual/argocd-ssh-known-hosts-cm.yaml @@ -7,8 +7,16 @@ metadata: name: argocd-ssh-known-hosts-cm data: ssh_known_hosts: | - 
bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw== - github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ== + # This file was automatically generated by hack/update-ssh-known-hosts.sh. DO NOT EDIT + [ssh.github.com]:443 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg= + [ssh.github.com]:443 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl + [ssh.github.com]:443 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk= + bitbucket.org ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPIQmuzMBuKdWeF4+a2sjSSpBK0iqitSQ+5BM9KhpexuGt20JpTVM7u5BDZngncgrqDMbWdxMWWOGtZ9UgbqgZE= + bitbucket.org ssh-ed25519 
AAAAC3NzaC1lZDI1NTE5AAAAIIazEu89wgQZ4bqs3d63QSMzYVa0MuJ2e2gKTKqu+UUO + bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDQeJzhupRu0u0cdegZIa8e86EG2qOCsIsD1Xw0xSeiPDlCr7kq97NLmMbpKTX6Esc30NuoqEEHCuc7yWtwp8dI76EEEB1VqY9QJq6vk+aySyboD5QF61I/1WeTwu+deCbgKMGbUijeXhtfbxSxm6JwGrXrhBdofTsbKRUsrN1WoNgUa8uqN1Vx6WAJw1JHPhglEGGHea6QICwJOAr/6mrui/oB7pkaWKHj3z7d1IC4KWLtY47elvjbaTlkN04Kc/5LFEirorGYVbt15kAUlqGM65pk6ZBxtaO3+30LVlORZkxOh+LKL/BvbZ/iRNhItLqNyieoQj/uh/7Iv4uyH/cV/0b4WDSd3DptigWq84lJubb9t/DnZlrJazxyDCulTmKdOR7vs9gMTo+uoIrPSb8ScTtvw65+odKAlBj59dhnVp9zd7QUojOpXlL62Aw56U4oO+FALuevvMjiWeavKhJqlR7i5n9srYcrNV7ttmDw7kf/97P5zauIhxcjX+xHv4M= + github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg= + github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl + github.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk= gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY= gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf gitlab.com ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9 diff --git a/docs/operator-manual/argocd-tls-certs-cm-yaml.md b/docs/operator-manual/argocd-tls-certs-cm-yaml.md new file mode 100644 index 0000000000000..e18b54d6e117e --- /dev/null +++ b/docs/operator-manual/argocd-tls-certs-cm-yaml.md @@ -0,0 +1,7 @@ +# argocd-tls-certs-cm.yaml example + +An example of an argocd-tls-certs-cm.yaml file: + +```yaml +{!docs/operator-manual/argocd-tls-certs-cm.yaml!} +``` diff --git a/docs/operator-manual/cluster-bootstrapping.md b/docs/operator-manual/cluster-bootstrapping.md index 3d9fe5e939c01..7a43800da2478 100644 --- a/docs/operator-manual/cluster-bootstrapping.md +++ b/docs/operator-manual/cluster-bootstrapping.md @@ -1,9 +1,16 @@ # Cluster Bootstrapping -This guide for operators who have already installed Argo CD, and have a new cluster and are looking to install many apps in that cluster. +This guide is for operators who have already installed Argo CD, and have a new cluster and are looking to install many apps in that cluster. There's no one particular pattern to solve this problem, e.g. you could write a script to create your apps, or you could even manually create them. However, users of Argo CD tend to use the **app of apps pattern**. +!!!warning "App of Apps is an admin-only tool" + The ability to create Applications in arbitrary [Projects](./declarative-setup.md#projects) + is an admin-level capability. Only admins should have push access to the parent Application's source repository. + Admins should review pull requests to that repository, paying particular attention to the `project` field in each + Application. 
Projects with access to the namespace in which Argo CD is installed effectively have admin-level + privileges. + ## App Of Apps Pattern [Declaratively](declarative-setup.md) specify one Argo CD app that consists only of other apps. @@ -78,6 +85,8 @@ The parent app will appear as in-sync but the child apps will be out of sync: ![New App Of Apps](../assets/new-app-of-apps.png) +> NOTE: You may want to modify this behavior to bootstrap your cluster in waves; see [v1.8 upgrade notes](upgrading/1.7-1.8.md) for information on changing this. + You can either sync via the UI, firstly filter by the correct label: ![Filter Apps](../assets/filter-apps.png) @@ -92,4 +101,22 @@ Or, via the CLI: argocd app sync -l app.kubernetes.io/instance=apps ``` -View [the example on Github](https://github.com/argoproj/argocd-example-apps/tree/master/apps). +View [the example on GitHub](https://github.com/argoproj/argocd-example-apps/tree/master/apps). + + + +### Cascading deletion + +If you want to ensure that child-apps and all of their resources are deleted when the parent-app is deleted make sure to add the appropriate [finalizer](../user-guide/app_deletion.md#about-the-deletion-finalizer) to your `Application` definition + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: guestbook + namespace: argocd + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + ... +``` diff --git a/docs/operator-manual/config-management-plugins.md b/docs/operator-manual/config-management-plugins.md new file mode 100644 index 0000000000000..ee805b71cd604 --- /dev/null +++ b/docs/operator-manual/config-management-plugins.md @@ -0,0 +1,491 @@ + +# Config Management Plugins + +Argo CD's "native" config management tools are Helm, Jsonnet, and Kustomize. If you want to use a different config +management tools, or if Argo CD's native tool support does not include a feature you need, you might need to turn to +a Config Management Plugin (CMP). 
+ +The Argo CD "repo server" component is in charge of building Kubernetes manifests based on some source files from a +Helm, OCI, or git repository. When a config management plugin is correctly configured, the repo server may delegate the +task of building manifests to the plugin. + +The following sections will describe how to create, install, and use plugins. Check out the +[example plugins](https://github.com/argoproj/argo-cd/tree/master/examples/plugins) for additional guidance. + +!!! warning + Plugins are granted a level of trust in the Argo CD system, so it is important to implement plugins securely. Argo + CD administrators should only install plugins from trusted sources, and they should audit plugins to weigh their + particular risks and benefits. + +## Installing a config management plugin + +### Sidecar plugin + +An operator can configure a plugin tool via a sidecar to repo-server. The following changes are required to configure a new plugin: + +#### Write the plugin configuration file + +Plugins will be configured via a ConfigManagementPlugin manifest located inside the plugin container. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ConfigManagementPlugin +metadata: + # The name of the plugin must be unique within a given Argo CD instance. + name: my-plugin +spec: + version: v1.0 + # The init command runs in the Application source directory at the beginning of each manifest generation. The init + # command can output anything. A non-zero status code will fail manifest generation. + init: + # Init always happens immediately before generate, but its output is not treated as manifests. + # This is a good place to, for example, download chart dependencies. + command: [sh] + args: [-c, 'echo "Initializing..."'] + # The generate command runs in the Application source directory each time manifests are generated. Standard output + # must be ONLY valid Kubernetes Objects in either YAML or JSON. A non-zero exit code will fail manifest generation. 
# This does the same thing as fileName, but it supports double-star (nested directory) glob patterns.
+ # Think of the `string`, `array`, and `map` values set here as "defaults". It is up to the plugin author to make + # sure that these default values actually reflect the plugin's behavior if the user doesn't explicitly set different + # values for those parameters. + static: + - name: string-param + title: Description of the string param + tooltip: Tooltip shown when the user hovers the + # If this field is set, the UI will indicate to the user that they must set the value. + required: false + # itemType tells the UI how to present the parameter's value (or, for arrays and maps, values). Default is + # "string". Examples of other types which may be supported in the future are "boolean" or "number". + # Even if the itemType is not "string", the parameter value from the Application spec will be sent to the plugin + # as a string. It's up to the plugin to do the appropriate conversion. + itemType: "" + # collectionType describes what type of value this parameter accepts (string, array, or map) and allows the UI + # to present a form to match that type. Default is "string". This field must be present for non-string types. + # It will not be inferred from the presence of an `array` or `map` field. + collectionType: "" + # This field communicates the parameter's default value to the UI. Setting this field is optional. + string: default-string-value + # All the fields above besides "string" apply to both the array and map type parameter announcements. + - name: array-param + # This field communicates the parameter's default value to the UI. Setting this field is optional. + array: [default, items] + collectionType: array + - name: map-param + # This field communicates the parameter's default value to the UI. Setting this field is optional. + map: + some: value + collectionType: map + # Dynamic parameter announcements are announcements specific to an Application handled by this plugin. 
For example, + # the values for a Helm chart's values.yaml file could be sent as parameter announcements. + dynamic: + # The command is run in an Application's source directory. Standard output must be JSON matching the schema of the + # static parameter announcements list. + command: [echo, '[{"name": "example-param", "string": "default-string-value"}]'] + + # If set to `true` then the plugin receives repository files with original file mode. Dangerous since the repository + # might have executable files. Set to true only if you trust the CMP plugin authors. + preserveFileMode: false +``` + +!!! note + While the ConfigManagementPlugin _looks like_ a Kubernetes object, it is not actually a custom resource. + It only follows kubernetes-style spec conventions. + +The `generate` command must print a valid Kubernetes YAML or JSON object stream to stdout. Both `init` and `generate` commands are executed inside the application source directory. + +The `discover.fileName` is used as [glob](https://pkg.go.dev/path/filepath#Glob) pattern to determine whether an +application repository is supported by the plugin or not. + +```yaml + discover: + find: + command: [sh, -c, find . -name env.yaml] +``` + +If `discover.fileName` is not provided, the `discover.find.command` is executed in order to determine whether an +application repository is supported by the plugin or not. The `find` command should return a non-error exit code +and produce output to stdout when the application source type is supported. + +#### Place the plugin configuration file in the sidecar + +Argo CD expects the plugin configuration file to be located at `/home/argocd/cmp-server/config/plugin.yaml` in the sidecar. + +If you use a custom image for the sidecar, you can add the file directly to that image. 
+ +```dockerfile +WORKDIR /home/argocd/cmp-server/config/ +COPY plugin.yaml ./ +``` + +If you use a stock image for the sidecar or would rather maintain the plugin configuration in a ConfigMap, just nest the +plugin config file in a ConfigMap under the `plugin.yaml` key and mount the ConfigMap in the sidecar (see next section). + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: my-plugin-config +data: + plugin.yaml: | + apiVersion: argoproj.io/v1alpha1 + kind: ConfigManagementPlugin + metadata: + name: my-plugin + spec: + version: v1.0 + init: + command: [sh, -c, 'echo "Initializing..."'] + generate: + command: [sh, -c, 'echo "{\"kind\": \"ConfigMap\", \"apiVersion\": \"v1\", \"metadata\": { \"name\": \"$ARGOCD_APP_NAME\", \"namespace\": \"$ARGOCD_APP_NAMESPACE\", \"annotations\": {\"Foo\": \"$ARGOCD_ENV_FOO\", \"KubeVersion\": \"$KUBE_VERSION\", \"KubeApiVersion\": \"$KUBE_API_VERSIONS\",\"Bar\": \"baz\"}}}"'] + discover: + fileName: "./subdir/s*.yaml" +``` + +#### Register the plugin sidecar + +To install a plugin, patch argocd-repo-server to run the plugin container as a sidecar, with argocd-cmp-server as its +entrypoint. You can use either off-the-shelf or custom-built plugin image as sidecar image. For example: + +```yaml +containers: +- name: my-plugin + command: [/var/run/argocd/argocd-cmp-server] # Entrypoint should be Argo CD lightweight CMP server i.e. argocd-cmp-server + image: busybox # This can be off-the-shelf or custom-built image + securityContext: + runAsNonRoot: true + runAsUser: 999 + volumeMounts: + - mountPath: /var/run/argocd + name: var-files + - mountPath: /home/argocd/cmp-server/plugins + name: plugins + # Remove this volumeMount if you've chosen to bake the config file into the sidecar image. + - mountPath: /home/argocd/cmp-server/config/plugin.yaml + subPath: plugin.yaml + name: my-plugin-config + # Starting with v2.4, do NOT mount the same tmp volume as the repo-server container. 
The filesystem separation helps + # mitigate path traversal attacks. + - mountPath: /tmp + name: cmp-tmp +volumes: +- configMap: + name: my-plugin-config + name: my-plugin-config +- emptyDir: {} + name: cmp-tmp +``` + +!!! important "Double-check these items" + 1. Make sure to use `/var/run/argocd/argocd-cmp-server` as an entrypoint. The `argocd-cmp-server` is a lightweight GRPC service that allows Argo CD to interact with the plugin. + 2. Make sure that sidecar container is running as user 999. + 3. Make sure that plugin configuration file is present at `/home/argocd/cmp-server/config/plugin.yaml`. It can either be volume mapped via configmap or baked into image. + +### Using environment variables in your plugin + +Plugin commands have access to + +1. The system environment variables of the sidecar +2. [Standard build environment variables](../user-guide/build-environment.md) +3. Variables in the Application spec (References to system and build variables will get interpolated in the variables' values): + + apiVersion: argoproj.io/v1alpha1 + kind: Application + spec: + source: + plugin: + env: + - name: FOO + value: bar + - name: REV + value: test-$ARGOCD_APP_REVISION + + Before reaching the `init.command`, `generate.command`, and `discover.find.command` commands, Argo CD prefixes all + user-supplied environment variables (#3 above) with `ARGOCD_ENV_`. This prevents users from directly setting + potentially-sensitive environment variables. + +4. Parameters in the Application spec: + + apiVersion: argoproj.io/v1alpha1 + kind: Application + spec: + source: + plugin: + parameters: + - name: values-files + array: [values-dev.yaml] + - name: helm-parameters + map: + image.tag: v1.2.3 + + The parameters are available as JSON in the `ARGOCD_APP_PARAMETERS` environment variable. The example above would + produce this JSON: + + [{"name": "values-files", "array": ["values-dev.yaml"]}, {"name": "helm-parameters", "map": {"image.tag": "v1.2.3"}}] + + !!! 
array: [item1, item2]
If a CMP renders blank manifests, and `prune` is set to `true`, Argo CD will automatically remove resources.
Use Go's [filepath.Match](https://pkg.go.dev/path/filepath#Match) syntax.
An argocd-cm plugin can be easily converted with the following steps.
To use the name instead of discovery, update the name in your application manifest to `<plugin name>-<plugin version>`
if a version was mentioned in the `ConfigManagementPlugin` spec, or else just use `<plugin name>`. For example:
+ +You can either use a stock image (like busybox, or alpine/k8s) or design your own base image with the tools your plugin needs. For +security, avoid using images with more binaries installed than what your plugin actually needs. + +### Test the plugin + +After installing the plugin as a sidecar [according to the directions above](#installing-a-config-management-plugin), +test it out on a few Applications before migrating all of them to the sidecar plugin. + +Once tests have checked out, remove the plugin entry from your argocd-cm ConfigMap. + +### Additional Settings + +#### Preserve repository files mode + +By default, config management plugin receives source repository files with reset file mode. This is done for security +reasons. If you want to preserve original file mode, you can set `preserveFileMode` to `true` in the plugin spec: + +!!! warning + Make sure you trust the plugin you are using. If you set `preserveFileMode` to `true` then the plugin might receive + files with executable permissions which can be a security risk. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ConfigManagementPlugin +metadata: + name: pluginName +spec: + init: + command: ["sample command"] + args: ["sample args"] + generate: + command: ["sample command"] + args: ["sample args"] + preserveFileMode: true +``` diff --git a/docs/operator-manual/core.md b/docs/operator-manual/core.md new file mode 100644 index 0000000000000..01b394d6e9d8c --- /dev/null +++ b/docs/operator-manual/core.md @@ -0,0 +1,99 @@ +# Argo CD Core + +## Introduction + +Argo CD Core is a different installation that runs Argo CD in headless +mode. With this installation, you will have a fully functional GitOps +engine capable of getting the desired state from Git repositories and +applying it in Kubernetes. 
- As a devops engineer, I don't want to learn a new API or depend on
  another CLI to automate my deployments. I want instead to rely on the
  Kubernetes API only.
By using those resources, +users will be able to deploy and manage applications in Kubernetes. + +It is still possible to use Argo CD CLI even when running Argo CD +Core. In this case, the CLI will spawn a local API server process that +will be used to handle the CLI command. Once the command is concluded, +the local API Server process will also be terminated. This happens +transparently for the user with no additional command required. Note +that Argo CD Core will rely only on Kubernetes RBAC and the user (or +the process) invoking the CLI needs to have access to the Argo CD +namespace with the proper permission in the `Application` and +`ApplicationSet` resources for executing a given command. + +To use Argo CD CLI in core mode, it is required to pass the `--core` +flag with the `login` subcommand. + +Example: + +```bash +kubectl config set-context --current --namespace=argocd # change current kube context to argocd namespace +argocd login --core +``` + +Similarly, users can also run the Web UI locally if they prefer to +interact with Argo CD using this method. The Web UI can be started +locally by running the following command: + +``` +argocd admin dashboard -n argocd +``` + +Argo CD Web UI will be available at `http://localhost:8080` + diff --git a/docs/operator-manual/custom-styles.md b/docs/operator-manual/custom-styles.md index c06fdae624e02..8f2499a2d636a 100644 --- a/docs/operator-manual/custom-styles.md +++ b/docs/operator-manual/custom-styles.md @@ -1,6 +1,6 @@ # Custom Styles -Argo CD has imports the majority of its UI stylesheets from the [argo-ui](https://github.com/argoproj/argo-ui) project. +Argo CD imports the majority of its UI stylesheets from the [argo-ui](https://github.com/argoproj/argo-ui) project. Sometimes, it may be desired to customize certain components of the UI for branding purposes or to help distinguish between multiple instances of Argo CD running in different environments. 
@@ -56,7 +56,7 @@ metadata: name: argocd-styles-cm data: my-styles.css: | - .nav-bar { + .sidebar { background: linear-gradient(to bottom, #999, #777, #333, #222, #111); } ``` @@ -88,11 +88,33 @@ spec: name: styles ``` -Note that the CSS file should be mounted within a subdirectory of the existing "/shared/app" directory +Note that the CSS file should be mounted within a subdirectory of the "/shared/app" directory (e.g. "/shared/app/custom"). Otherwise, the file will likely fail to be imported by the browser with an -"incorrect MIME type" error. +"incorrect MIME type" error. The subdirectory can be changed using `server.staticassets` key of the +[argocd-cmd-params-cm.yaml](./argocd-cmd-params-cm.yaml) ConfigMap. ## Developing Style Overlays The styles specified in the injected CSS file should be specific to components and classes defined in [argo-ui](https://github.com/argoproj/argo-ui). It is recommended to test out the styles you wish to apply first by making use of your browser's built-in developer tools. For a more full-featured -experience, you may wish to build a separate project using the [Argo CD UI dev server](https://webpack.js.org/configuration/dev-server/). \ No newline at end of file +experience, you may wish to build a separate project using the [Argo CD UI dev server](https://webpack.js.org/configuration/dev-server/). + +## Banners + +Argo CD can optionally display a banner that can be used to notify your users of upcoming maintenance and operational changes. This feature can be enabled by specifying the banner message using the `ui.bannercontent` field in the `argocd-cm` ConfigMap and Argo CD will display this message at the top of every UI page. You can optionally add a link to this message by setting `ui.bannerurl`. 
You can also make the banner sticky (permanent) by setting `ui.bannerpermanent` to `true` and change its position to the bottom by using `ui.bannerposition: "bottom"`
```Dockerfile -FROM argoproj/argocd:latest +FROM argoproj/argocd:v2.5.4 # Replace tag with the appropriate argo version # Switch to root for the ability to perform install USER root @@ -69,5 +69,5 @@ RUN apt-get update && \ chmod +x /usr/local/bin/sops # Switch back to non-root user -USER argocd +USER $ARGOCD_USER_ID ``` diff --git a/docs/operator-manual/declarative-setup.md b/docs/operator-manual/declarative-setup.md index 79b44c1bc068a..5353f70cf14ef 100644 --- a/docs/operator-manual/declarative-setup.md +++ b/docs/operator-manual/declarative-setup.md @@ -4,20 +4,35 @@ Argo CD applications, projects and settings can be defined declaratively using K ## Quick Reference -| File Name | Resource Name | Kind | Description | -|-----------|---------------|------|-------------| -| [`argocd-cm.yaml`](argocd-cm.yaml) | argocd-cm | ConfigMap | General Argo CD configuration | -| [`argocd-secret.yaml`](argocd-secret.yaml) | argocd-secret | Secret | Password, Certificates, Signing Key | -| [`argocd-rbac-cm.yaml`](argocd-rbac-cm.yaml) | argocd-rbac-cm | ConfigMap | RBAC Configuration | -| [`argocd-tls-certs-cm.yaml`](argocd-tls-certs-cm.yaml) | argocd-tls-certs-cm | ConfigMap | Custom TLS certificates for connecting Git repositories via HTTPS (v1.2 and later) | -| [`argocd-ssh-known-hosts-cm.yaml`](argocd-ssh-known-hosts-cm.yaml) | argocd-ssh-known-hosts-cm | ConfigMap | SSH known hosts data for connecting Git repositories via SSH (v1.2 and later) | -| [`application.yaml`](application.yaml) | - | Application | Example application spec | -| [`project.yaml`](project.yaml) | - | AppProject | Example project spec | - -All resources, including `Application` and `AppProject` specs, have to be installed in the ArgoCD namespace (by default `argocd`). Also, ConfigMap and Secret resources need to be named as shown in the table above. For `Application` and `AppProject` resources, the name of the resource equals the name of the application or project within ArgoCD. 
This also means that application and project names are unique within the same ArgoCD installation - you cannot i.e. have the same application name for two different applications. +All resources, including `Application` and `AppProject` specs, have to be installed in the Argo CD namespace (by default `argocd`). + +### Atomic configuration + +| Sample File | Resource Name | Kind | Description | +|-----------------------------------------------------------------------|------------------------------------------------------------------------------------|-----------|--------------------------------------------------------------------------------------| +| [`argocd-cm.yaml`](argocd-cm-yaml.md) | argocd-cm | ConfigMap | General Argo CD configuration | +| [`argocd-repositories.yaml`](argocd-repositories-yaml.md) | my-private-repo / istio-helm-repo / private-helm-repo / private-repo | Secrets | Sample repository connection details | +| [`argocd-repo-creds.yaml`](argocd-repo-creds-yaml.md) | argoproj-https-creds / argoproj-ssh-creds / github-creds / github-enterprise-creds | Secrets | Sample repository credential templates | +| [`argocd-cmd-params-cm.yaml`](argocd-cmd-params-cm-yaml.md) | argocd-cmd-params-cm | ConfigMap | Argo CD env variables configuration | +| [`argocd-secret.yaml`](argocd-secret-yaml.md) | argocd-secret | Secret | User Passwords, Certificates (deprecated), Signing Key, Dex secrets, Webhook secrets | +| [`argocd-rbac-cm.yaml`](argocd-rbac-cm-yaml.md) | argocd-rbac-cm | ConfigMap | RBAC Configuration | +| [`argocd-tls-certs-cm.yaml`](argocd-tls-certs-cm-yaml.md) | argocd-tls-certs-cm | ConfigMap | Custom TLS certificates for connecting Git repositories via HTTPS (v1.2 and later) | +| [`argocd-ssh-known-hosts-cm.yaml`](argocd-ssh-known-hosts-cm-yaml.md) | argocd-ssh-known-hosts-cm | ConfigMap | SSH known hosts data for connecting Git repositories via SSH (v1.2 and later) | + +For each specific kind of ConfigMap and Secret resource, there is only a single 
supported resource name (as listed in the above table) - if you need to merge things you need to do it before creating them. !!!warning "A note about ConfigMap resources" - Be sure to annotate your ConfigMap resources using the label `app.kubernetes.io/part-of: argocd`, otherwise ArgoCD will not be able to use them. + Be sure to annotate your ConfigMap resources using the label `app.kubernetes.io/part-of: argocd`, otherwise Argo CD will not be able to use them. + +### Multiple configuration objects + +| Sample File | Kind | Description | +|------------------------------------------------------------------|-------------|--------------------------| +| [`application.yaml`](../user-guide/application-specification.md) | Application | Example application spec | +| [`project.yaml`](./project-specification.md) | AppProject | Example project spec | +| - | Secret | Repository credentials | + +For `Application` and `AppProject` resources, the name of the resource equals the name of the application or project within Argo CD. This also means that application and project names are unique within a given Argo CD installation - you cannot have the same application name for two different applications. ## Applications @@ -25,7 +40,7 @@ The Application CRD is the Kubernetes resource object representing a deployed ap in an environment. It is defined by two key pieces of information: * `source` reference to the desired state in Git (repository, revision, path, environment) -* `destination` reference to the target cluster and namespace. For the cluster one of server or name can be used, but not both (which will result in an error). Behind the hood when the server is missing, it is being calculated based on the name and then the server is used for any operations. +* `destination` reference to the target cluster and namespace. For the cluster one of server or name can be used, but not both (which will result in an error). 
Under the hood when the server is missing, it is calculated based on the name and used for any operations. A minimal Application spec is as follows: @@ -46,10 +61,10 @@ spec: namespace: guestbook ``` -See [application.yaml](application.yaml) for additional fields. As long as you have completed the first step of [Getting Started](../getting_started.md#1-install-argo-cd), you can already apply this with `kubectl apply -n argocd -f application.yaml` and Argo CD will start deploying the guestbook application. +See [application.yaml](application.yaml) for additional fields. As long as you have completed the first step of [Getting Started](../getting_started.md#1-install-argo-cd), you can apply this with `kubectl apply -n argocd -f application.yaml` and Argo CD will start deploying the guestbook application. !!! note - The namespace must match the namespace of your Argo cd, typically this is `argocd`. + The namespace must match the namespace of your Argo CD instance - typically this is `argocd`. !!! note When creating an application from a Helm repository, the `chart` attribute must be specified instead of the `path` attribute within `spec.source`. @@ -62,7 +77,7 @@ spec: ``` !!! warning - By default, deleting an application will not perform a cascade delete, thereby deleting its resources. You must add the finalizer if you want this behaviour - which you may well not want. + Without the `resources-finalizer.argocd.argoproj.io` finalizer, deleting an application will not delete the resources it manages. To perform a cascading delete, you must add the finalizer. See [App Deletion](../user-guide/app_deletion.md#about-the-deletion-finalizer). ```yaml metadata: @@ -73,7 +88,7 @@ metadata: ### App of Apps You can create an app that creates other apps, which in turn can create other apps. -This allows you to declaratively manage a group of app that can be deployed and configured in concert. 
+This allows you to declaratively manage a group of apps that can be deployed and configured in concert. See [cluster bootstrapping](cluster-bootstrapping.md). @@ -83,9 +98,15 @@ The AppProject CRD is the Kubernetes resource object representing a logical grou It is defined by the following key pieces of information: * `sourceRepos` reference to the repositories that applications within the project can pull manifests from. -* `destinations` reference to clusters and namespaces that applications within the project can deploy into (don't use the name field, only server is being matched). +* `destinations` reference to clusters and namespaces that applications within the project can deploy into. * `roles` list of entities with definitions of their access to resources within the project. +!!!warning "Projects which can deploy to the Argo CD namespace grant admin access" + If a Project's `destinations` configuration allows deploying to the namespace in which Argo CD is installed, then + Applications under that project have admin-level access. [RBAC access](https://argo-cd.readthedocs.io/en/stable/operator-manual/rbac/) + to admin-level Projects should be carefully restricted, and push access to allowed `sourceRepos` should be limited + to only admins. + An example spec is as follows: ```yaml @@ -149,227 +170,168 @@ spec: !!!note Some Git hosters - notably GitLab and possibly on-premise GitLab instances as well - require you to specify the `.git` suffix in the repository URL, otherwise they will send a HTTP 301 redirect to the - repository URL suffixed with `.git`. ArgoCD will **not** follow these redirects, so you have to - adapt your repository URL to be suffixed with `.git`. + repository URL suffixed with `.git`. Argo CD will **not** follow these redirects, so you have to + adjust your repository URL to be suffixed with `.git`. -Repository credentials are stored in secret. Use following steps to configure a repo: +Repository details are stored in secrets. 
To configure a repo, create a secret which contains repository details. +Consider using [bitnami-labs/sealed-secrets](https://github.com/bitnami-labs/sealed-secrets) to store an encrypted secret definition as a Kubernetes manifest. +Each repository must have a `url` field and, depending on whether you connect using HTTPS, SSH, or GitHub App, `username` and `password` (for HTTPS), `sshPrivateKey` (for SSH), or `githubAppPrivateKey` (for GitHub App). -1. Create secret which contains repository credentials. Consider using [bitnami-labs/sealed-secrets](https://github.com/bitnami-labs/sealed-secrets) to store encrypted secret -definition as a Kubernetes manifest. -2. Register repository in the `argocd-cm` config map. Each repository must have `url` field and, depending on whether you connect using HTTPS, SSH, or GitHub App, `usernameSecret` and `passwordSecret` (for HTTPS), `sshPrivateKeySecret` (for SSH), `githubAppPrivateKeySecret` (for GitHub App). +!!!warning + When using [bitnami-labs/sealed-secrets](https://github.com/bitnami-labs/sealed-secrets) the labels will be removed and have to be readded as described here: https://github.com/bitnami-labs/sealed-secrets#sealedsecrets-as-templates-for-secrets Example for HTTPS: ```yaml apiVersion: v1 -kind: ConfigMap +kind: Secret metadata: - name: argocd-cm + name: private-repo namespace: argocd labels: - app.kubernetes.io/name: argocd-cm - app.kubernetes.io/part-of: argocd -data: - repositories: | - - url: https://github.com/argoproj/my-private-repository - passwordSecret: - name: my-secret - key: password - usernameSecret: - name: my-secret - key: username + argocd.argoproj.io/secret-type: repository +stringData: + type: git + url: https://github.com/argoproj/private-repo + password: my-password + username: my-username ``` Example for SSH: ```yaml apiVersion: v1 -kind: ConfigMap +kind: Secret metadata: - name: argocd-cm + name: private-repo namespace: argocd labels: - app.kubernetes.io/name: argocd-cm - 
app.kubernetes.io/part-of: argocd -data: - repositories: | - - url: git@github.com:argoproj/my-private-repository - sshPrivateKeySecret: - name: my-secret - key: sshPrivateKey + argocd.argoproj.io/secret-type: repository +stringData: + type: git + url: git@github.com:argoproj/my-private-repository.git + sshPrivateKey: | + -----BEGIN OPENSSH PRIVATE KEY----- + ... + -----END OPENSSH PRIVATE KEY----- ``` -> v1.9 or later - Example for GitHub App: ```yaml apiVersion: v1 -kind: ConfigMap +kind: Secret metadata: - name: argocd-cm + name: github-repo namespace: argocd labels: - app.kubernetes.io/name: argocd-cm - app.kubernetes.io/part-of: argocd -data: - repositories: | - - url: https://github.com/argoproj/my-private-repository - githubAppID: 1 - githubAppInstallationID: 2 - githubAppPrivateKeySecret: - name: my-secret - key: githubAppPrivateKey + argocd.argoproj.io/secret-type: repository +stringData: + type: git + url: https://github.com/argoproj/my-private-repository + githubAppID: 1 + githubAppInstallationID: 2 + githubAppPrivateKey: | + -----BEGIN OPENSSH PRIVATE KEY----- + ... + -----END OPENSSH PRIVATE KEY----- +--- +apiVersion: v1 +kind: Secret +metadata: + name: github-enterprise-repo + namespace: argocd + labels: + argocd.argoproj.io/secret-type: repository +stringData: + type: git + url: https://ghe.example.com/argoproj/my-private-repository + githubAppID: 1 + githubAppInstallationID: 2 + githubAppEnterpriseBaseUrl: https://ghe.example.com/api/v3 + githubAppPrivateKey: | + -----BEGIN OPENSSH PRIVATE KEY----- + ... 
+ -----END OPENSSH PRIVATE KEY----- +``` - - url: https://ghe.example.com/argoproj/my-private-repository - githubAppID: 1 - githubAppInstallationID: 2 - githubAppEnterpriseBaseUrl: https://ghe.example.com/api/v3 - githubAppPrivateKeySecret: - name: my-secret - key: githubAppPrivateKey +Example for Google Cloud Source repositories: + +```yaml +kind: Secret +metadata: + name: github-repo + namespace: argocd + labels: + argocd.argoproj.io/secret-type: repository +stringData: + type: git + repo: https://source.developers.google.com/p/my-google-project/r/my-repo + gcpServiceAccountKey: | + { + "type": "service_account", + "project_id": "my-google-project", + "private_key_id": "REDACTED", + "private_key": "-----BEGIN PRIVATE KEY-----\nREDACTED\n-----END PRIVATE KEY-----\n", + "client_email": "argocd-service-account@my-google-project.iam.gserviceaccount.com", + "client_id": "REDACTED", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://oauth2.googleapis.com/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/argocd-service-account%40my-google-project.iam.gserviceaccount.com" + } ``` !!! tip The Kubernetes documentation has [instructions for creating a secret containing a private key](https://kubernetes.io/docs/concepts/configuration/secret/#use-case-pod-with-ssh-keys). ### Repository Credentials - -> Earlier than v1.4 -If you want to use the same credentials for multiple repositories, you can use `repository.credentials`: +If you want to use the same credentials for multiple repositories, you can configure credential templates. Credential templates can carry the same credentials information as repositories. 
```yaml apiVersion: v1 -kind: ConfigMap +kind: Secret metadata: - name: argocd-cm + name: first-repo namespace: argocd labels: - app.kubernetes.io/name: argocd-cm - app.kubernetes.io/part-of: argocd -data: - repositories: | - - url: https://github.com/argoproj/private-repo - - url: https://github.com/argoproj/other-private-repo - repository.credentials: | - - url: https://github.com/argoproj - passwordSecret: - name: my-secret - key: password - usernameSecret: - name: my-secret - key: username - - url: git@github.com:argoproj-labs - sshPrivateKeySecret: - name: my-secret - key: sshPrivateKey - - url: https://github.com/argoproj - githubAppID: 1 - githubAppInstallationID: 2 - githubAppPrivateKeySecret: - name: my-secret - key: githubAppPrivateKey - - url: https://ghe.example.com/argoproj - githubAppID: 1 - githubAppInstallationID: 2 - githubAppEnterpriseBaseUrl: https://ghe.example.com/api/v3 - githubAppPrivateKeySecret: - name: my-secret - key: githubAppPrivateKey -``` - -Argo CD will only use the credentials if you omit `usernameSecret`, `passwordSecret`, and `sshPrivateKeySecret` fields (`insecureIgnoreHostKey` is ignored) or if your repository is not listed in `repositories`. - -A credential may be match if it's URL is the prefix of the repository's URL. The means that credentials may match, e.g in the above example both [https://github.com/argoproj](https://github.com/argoproj) and [https://github.com](https://github.com) would match. Argo CD selects the first one that matches. - -!!! tip - Order your credentials with the most specific at the top and the least specific at the bottom. - -A complete example. 
- -```yaml + argocd.argoproj.io/secret-type: repository +stringData: + type: git + url: https://github.com/argoproj/private-repo +--- apiVersion: v1 -kind: ConfigMap +kind: Secret metadata: - name: argocd-cm + name: second-repo namespace: argocd labels: - app.kubernetes.io/name: argocd-cm - app.kubernetes.io/part-of: argocd -data: - repositories: | - # this has it's own credentials - - url: https://github.com/argoproj/private-repo - passwordSecret: - name: private-repo-secret - key: password - usernameSecret: - name: private-repo-secret - key: username - sshPrivateKeySecret: - name: private-repo-secret - key: sshPrivateKey - - url: https://github.com/argoproj/other-private-repo - - url: https://github.com/otherproj/another-private-repo - repository.credentials: | - # this will be used for the second repo - - url: https://github.com/argoproj - passwordSecret: - name: other-private-repo-secret - key: password - usernameSecret: - name: other-private-repo-secret - key: username - sshPrivateKeySecret: - name: other-private-repo-secret - key: sshPrivateKey - # this will be used for the third repo - - url: https://github.com - passwordSecret: - name: another-private-repo-secret - key: password - usernameSecret: - name: another-private-repo-secret - key: username - sshPrivateKeySecret: - name: another-private-repo-secret - key: sshPrivateKey -``` - -> v1.4 or later - -If you want to use the same credentials for multiple repositories, you can use `repository.credentials` to configure credential templates. Credential templates can carry the same credentials information as repositories. 
- -```yaml + argocd.argoproj.io/secret-type: repository +stringData: + type: git + url: https://github.com/argoproj/other-private-repo +--- apiVersion: v1 -kind: ConfigMap +kind: Secret metadata: - name: argocd-cm + name: private-repo-creds namespace: argocd labels: - app.kubernetes.io/name: argocd-cm - app.kubernetes.io/part-of: argocd -data: - repositories: | - - url: https://github.com/argoproj/private-repo - - url: https://github.com/argoproj/other-private-repo - repository.credentials: | - - url: https://github.com/argoproj - passwordSecret: - name: my-secret - key: password - usernameSecret: - name: my-secret - key: username + argocd.argoproj.io/secret-type: repo-creds +stringData: + type: git + url: https://github.com/argoproj + password: my-password + username: my-username ``` -In the above example, every repository accessed via HTTPS whose URL is prefixed with `https://github.com/argoproj` would use a username stored in the key `username` and a password stored in the key `password` of the secret `my-secret` for connecting to Git. +In the above example, every repository accessed via HTTPS whose URL is prefixed with `https://github.com/argoproj` would use a username stored in the key `username` and a password stored in the key `password` of the secret `private-repo-creds` for connecting to Git. -In order for ArgoCD to use a credential template for any given repository, the following conditions must be met: +In order for Argo CD to use a credential template for any given repository, the following conditions must be met: -* The repository must either not be configured at all, or if configured, must not contain any credential information (i.e. contain none of `sshPrivateKeySecret`, `usernameSecret`, `passwordSecret` ) -* The URL configured for a credential template (e.g. `https://github.com/argoproj`) must match as prefix for the repository URL (e.g. `https://github.com/argoproj/argocd-example-apps`). 
+* The repository must either not be configured at all, or if configured, must not contain any credential information (i.e. contain none of `sshPrivateKey`, `username`, `password` ) +* The URL configured for a credential template (e.g. `https://github.com/argoproj`) must match as prefix for the repository URL (e.g. `https://github.com/argoproj/argocd-example-apps`). !!! note Matching credential template URL prefixes is done on a _best match_ effort, so the longest (best) match will take precedence. The order of definition is not important, as opposed to pre v1.4 configuration. @@ -378,16 +340,16 @@ The following keys are valid to refer to credential secrets: #### SSH repositories -* `sshPrivateKeySecret` refers to a secret where an SSH private key is stored for accessing the repositories +* `sshPrivateKey` refers to the SSH private key for accessing the repositories #### HTTPS repositories -* `usernameSecret` and `passwordSecret` refer to secrets where username and/or password are stored for accessing the repositories +* `username` and `password` refer to the username and/or password for accessing the repositories * `tlsClientCertData` and `tlsClientCertKey` refer to secrets where a TLS client certificate (`tlsClientCertData`) and the corresponding private key `tlsClientCertKey` are stored for accessing the repositories #### GitHub App repositories -* `githubAppPrivateKeySecret` refers to the secret where the GitHub App private key is stored for accessing the repositories +* `githubAppPrivateKey` refers to the GitHub App private key for accessing the repositories * `githubAppID` refers to the GitHub Application ID for the application you created. * `githubAppInstallationID` refers to the Installation ID of the GitHub app you created and installed. * `githubAppEnterpriseBaseUrl` refers to the base api URL for GitHub Enterprise (e.g. 
`https://ghe.example.com/api/v3`) @@ -395,8 +357,6 @@ The following keys are valid to refer to credential secrets: ### Repositories using self-signed TLS certificates (or are signed by custom CA) -> v1.2 or later - You can manage the TLS certificates used to verify the authenticity of your repository servers in a ConfigMap object named `argocd-tls-certs-cm`. The data section should contain a map, with the repository server's hostname part (not the complete URL) as key, and the certificate(s) in PEM format as data. So, if you connect to a repository with the URL `https://server.example.com/repos/my-repo`, you should use `server.example.com` as key. The certificate data should be either the server's certificate (in case of self-signed certificate) or the certificate of the CA that was used to sign the server's certificate. You can configure multiple certificates for each server, e.g. if you are having a certificate roll-over planned. If there are no dedicated certificates configured for a repository server, the system's default trust store is used for validating the server's repository. This should be good enough for most (if not all) public Git repository services such as GitLab, GitHub and Bitbucket as well as most privately hosted sites which use certificates from well-known CAs, including Let's Encrypt certificates. @@ -456,23 +416,46 @@ data: ### SSH known host public keys -If you are connecting repositories via SSH, ArgoCD will need to know the SSH known hosts public key of the repository servers. You can manage the SSH known hosts data in the ConfigMap named `argocd-ssh-known-hosts-cm`. This ConfigMap contains a single key/value pair, with `ssh_known_hosts` as the key and the actual public keys of the SSH servers as data. As opposed to TLS configuration, the public key(s) of each single repository server ArgoCD will connect via SSH must be configured, otherwise the connections to the repository will fail. There is no fallback. 
The data can be copied from any existing `ssh_known_hosts` file, or from the output of the `ssh-keyscan` utility. The basic format is ` `, one entry per line. +If you are configuring repositories to use SSH, Argo CD will need to know their SSH public keys. In order for Argo CD to connect via SSH the public key(s) for each repository server must be pre-configured in Argo CD (unlike TLS configuration), otherwise the connections to the repository will fail. + +You can manage the SSH known hosts data in the `argocd-ssh-known-hosts-cm` ConfigMap. This ConfigMap contains a single entry, `ssh_known_hosts`, with the public keys of the SSH servers as its value. The value can be filled in from any existing `ssh_known_hosts` file, or from the output of the `ssh-keyscan` utility (which is part of OpenSSH's client package). The basic format is ` `, one entry per line. + +Here is an example of running `ssh-keyscan`: +```bash +$ for host in bitbucket.org github.com gitlab.com ssh.dev.azure.com vs-ssh.visualstudio.com ; do ssh-keyscan $host 2> /dev/null ; done +bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDQeJzhupRu0u0cdegZIa8e86EG2qOCsIsD1Xw0xSeiPDlCr7kq97NLmMbpKTX6Esc30NuoqEEHCuc7yWtwp8dI76EEEB1VqY9QJq6vk+aySyboD5QF61I/1WeTwu+deCbgKMGbUijeXhtfbxSxm6JwGrXrhBdofTsbKRUsrN1WoNgUa8uqN1Vx6WAJw1JHPhglEGGHea6QICwJOAr/6mrui/oB7pkaWKHj3z7d1IC4KWLtY47elvjbaTlkN04Kc/5LFEirorGYVbt15kAUlqGM65pk6ZBxtaO3+30LVlORZkxOh+LKL/BvbZ/iRNhItLqNyieoQj/uh/7Iv4uyH/cV/0b4WDSd3DptigWq84lJubb9t/DnZlrJazxyDCulTmKdOR7vs9gMTo+uoIrPSb8ScTtvw65+odKAlBj59dhnVp9zd7QUojOpXlL62Aw56U4oO+FALuevvMjiWeavKhJqlR7i5n9srYcrNV7ttmDw7kf/97P5zauIhxcjX+xHv4M= +github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl +github.com ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk= +github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg= +gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY= +gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf +gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9 +ssh.dev.azure.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H +vs-ssh.visualstudio.com ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H +``` -An example ConfigMap object: +Here is an example `ConfigMap` object using the output from `ssh-keyscan` above: ```yaml apiVersion: v1 kind: ConfigMap metadata: - name: argocd-ssh-known-hosts-cm - namespace: argocd labels: - app.kubernetes.io/name: argocd-cm + app.kubernetes.io/name: argocd-ssh-known-hosts-cm app.kubernetes.io/part-of: argocd + name: argocd-ssh-known-hosts-cm data: ssh_known_hosts: | - bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw== - github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ== + # This file was automatically generated by hack/update-ssh-known-hosts.sh. 
DO NOT EDIT + [ssh.github.com]:443 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg= + [ssh.github.com]:443 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl + [ssh.github.com]:443 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk= + bitbucket.org ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPIQmuzMBuKdWeF4+a2sjSSpBK0iqitSQ+5BM9KhpexuGt20JpTVM7u5BDZngncgrqDMbWdxMWWOGtZ9UgbqgZE= + bitbucket.org ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIazEu89wgQZ4bqs3d63QSMzYVa0MuJ2e2gKTKqu+UUO + bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDQeJzhupRu0u0cdegZIa8e86EG2qOCsIsD1Xw0xSeiPDlCr7kq97NLmMbpKTX6Esc30NuoqEEHCuc7yWtwp8dI76EEEB1VqY9QJq6vk+aySyboD5QF61I/1WeTwu+deCbgKMGbUijeXhtfbxSxm6JwGrXrhBdofTsbKRUsrN1WoNgUa8uqN1Vx6WAJw1JHPhglEGGHea6QICwJOAr/6mrui/oB7pkaWKHj3z7d1IC4KWLtY47elvjbaTlkN04Kc/5LFEirorGYVbt15kAUlqGM65pk6ZBxtaO3+30LVlORZkxOh+LKL/BvbZ/iRNhItLqNyieoQj/uh/7Iv4uyH/cV/0b4WDSd3DptigWq84lJubb9t/DnZlrJazxyDCulTmKdOR7vs9gMTo+uoIrPSb8ScTtvw65+odKAlBj59dhnVp9zd7QUojOpXlL62Aw56U4oO+FALuevvMjiWeavKhJqlR7i5n9srYcrNV7ttmDw7kf/97P5zauIhxcjX+xHv4M= + github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg= + github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl + github.com 
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk= gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY= gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9 @@ -481,11 +464,70 @@ data: ``` !!! note - The `argocd-ssh-known-hosts-cm` ConfigMap will be mounted as a volume at the mount path `/app/config/ssh` in the pods of `argocd-server` and `argocd-repo-server`. It will create a file `ssh_known_hosts` in that directory, which contains the SSH known hosts data used by ArgoCD for connecting to Git repositories via SSH. It might take a while for changes in the ConfigMap to be reflected in your pods, depending on your Kubernetes configuration. + The `argocd-ssh-known-hosts-cm` ConfigMap will be mounted as a volume at the mount path `/app/config/ssh` in the pods of `argocd-server` and `argocd-repo-server`. 
It will create a file `ssh_known_hosts` in that directory, which contains the SSH known hosts data used by Argo CD for connecting to Git repositories via SSH. It might take a while for changes in the ConfigMap to be reflected in your pods, depending on your Kubernetes configuration. + +### Configure repositories with proxy + +Proxy for your repository can be specified in the `proxy` field of the repository secret, along with other repository configurations. Argo CD uses this proxy to access the repository. Argo CD looks for the standard proxy environment variables in the repository server if the custom proxy is absent. + +An example repository with proxy: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: private-repo + namespace: argocd + labels: + argocd.argoproj.io/secret-type: repository +stringData: + type: git + url: https://github.com/argoproj/private-repo + proxy: https://proxy-server-url:8888 + password: my-password + username: my-username +``` + +### Legacy behaviour + +In Argo CD version 2.0 and earlier, repositories were stored as part of the `argocd-cm` config map. For +backward-compatibility, Argo CD will still honor repositories in the config map, but this style of repository +configuration is deprecated and support for it will be removed in a future version. + +```yaml +apiVersion: v1 +kind: ConfigMap +data: + repositories: | + - url: https://github.com/argoproj/my-private-repository + passwordSecret: + name: my-secret + key: password + usernameSecret: + name: my-secret + key: username + repository.credentials: | + - url: https://github.com/argoproj + passwordSecret: + name: my-secret + key: password + usernameSecret: + name: my-secret + key: username +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-secret + namespace: argocd +stringData: + password: my-password + username: my-username +``` ## Clusters -Cluster credentials are stored in secrets same as repository credentials but does not require entry in `argocd-cm` config map.
Each secret must have label +Cluster credentials are stored in secrets same as repositories or repository credentials. Each secret must have label `argocd.argoproj.io/secret-type: cluster`. The secret data must include following fields: @@ -493,6 +535,8 @@ The secret data must include following fields: * `name` - cluster name * `server` - cluster api server url * `namespaces` - optional comma-separated list of namespaces which are accessible in that cluster. Cluster level resources would be ignored if namespace list is not empty. +* `clusterResources` - optional boolean string (`"true"` or `"false"`) determining whether Argo CD can manage cluster-level resources on this cluster. This setting is used only if the list of managed namespaces is not empty. +* `project` - optional string to designate this as a project-scoped cluster. * `config` - JSON representation of following data structure: ```yaml @@ -519,13 +563,13 @@ execProviderConfig: installHint: string # Transport layer security configuration settings tlsClientConfig: - # PEM-encoded bytes (typically read from a client certificate file). + # Base64 encoded PEM-encoded bytes (typically read from a client certificate file). caData: string - # PEM-encoded bytes (typically read from a client certificate file). + # Base64 encoded PEM-encoded bytes (typically read from a client certificate file). certData: string # Server should be accessed without verifying the TLS certificate insecure: boolean - # PEM-encoded bytes (typically read from a client certificate key file). + # Base64 encoded PEM-encoded bytes (typically read from a client certificate key file). keyData: string # ServerName is passed to the server for SNI and is used in the client to check server # certificates against. 
If ServerName is empty, the hostname used to contact the @@ -533,7 +577,7 @@ tlsClientConfig: serverName: string ``` -Note that if you specify a command to run under `execProviderConfig`, that command must be available in the ArgoCD image. See [BYOI (Build Your Own Image)](custom_tools.md#byoi-build-your-own-image). +Note that if you specify a command to run under `execProviderConfig`, that command must be available in the Argo CD image. See [BYOI (Build Your Own Image)](custom_tools.md#byoi-build-your-own-image). Cluster secret example: @@ -558,63 +602,304 @@ stringData: } ``` +### EKS + +EKS cluster secret example using argocd-k8s-auth and [IRSA](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html): + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: mycluster-secret + labels: + argocd.argoproj.io/secret-type: cluster +type: Opaque +stringData: + name: "mycluster.com" + server: "https://mycluster.com" + config: | + { + "awsAuthConfig": { + "clusterName": "my-eks-cluster-name", + "roleARN": "arn:aws:iam:::role/" + }, + "tlsClientConfig": { + "insecure": false, + "caData": "" + } + } +``` + +Note that you should have IRSA enabled on your EKS cluster, create an appropriate IAM role which allows it to assume +other IAM roles (whichever `roleARN`s that Argo CD needs to assume) and have an assume role policy which allows +the argocd-application-controller and argocd-server pods to assume said role via OIDC. + +Example trust relationship config for `:role/`, which +is required for Argo CD to perform actions via IAM. Ensure that the cluster has an [IAM OIDC provider configured](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html) +for it. 
+ +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Federated": "arn:aws:iam:::oidc-provider/oidc.eks..amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE" + }, + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringEquals": { + "oidc.eks..amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:sub": ["system:serviceaccount:argocd:argocd-application-controller", "system:serviceaccount:argocd:argocd-server"], + "oidc.eks..amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:aud": "sts.amazonaws.com" + } + } + } + ] +} +``` + +The Argo CD management role also needs to be allowed to assume other roles, in this case we want it to assume +`arn:aws:iam:::role/` so that it can manage the cluster mapped to that role. This can be +extended to allow assumption of multiple roles, either as an explicit array of role ARNs or by using `*` where appropriate. + +```json +{ + "Version" : "2012-10-17", + "Statement" : { + "Effect" : "Allow", + "Action" : "sts:AssumeRole", + "Principal" : { + "AWS" : ":role/" + } + } + } +``` + +Example service account configs for `argocd-application-controller` and `argocd-server`. Note that once the annotations +have been set on the service accounts, both the application controller and server pods need to be restarted. + +```yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + eks.amazonaws.com/role-arn: ":role/" + name: argocd-application-controller +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + eks.amazonaws.com/role-arn: ":role/" + name: argocd-server +``` + +In turn, the `roleARN` of each managed cluster needs to be added to each respective cluster's `aws-auth` config map (see +[Enabling IAM principal access to your cluster](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html)), as +well as having an assume role policy which allows it to be assumed by the Argo CD pod role. 
+ +Example assume role policy for a cluster which is managed by Argo CD: + +```json +{ + "Version" : "2012-10-17", + "Statement" : { + "Effect" : "Allow", + "Action" : "sts:AssumeRole", + "Principal" : { + "AWS" : ":role/" + } + } + } +``` + +Example kube-system/aws-auth configmap for your cluster managed by Argo CD: + +```yaml +apiVersion: v1 +data: + # Other groups and accounts omitted for brevity. Ensure that no other rolearns and/or groups are inadvertently removed, + # or you risk borking access to your cluster. + # + # The group name is a RoleBinding which you use to map to a [Cluster]Role. See https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-binding-examples + mapRoles: | + - "groups": + - "" + "rolearn": ":role/" + "username": "" +``` +### GKE + +GKE cluster secret example using argocd-k8s-auth and [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity): + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: mycluster-secret + labels: + argocd.argoproj.io/secret-type: cluster +type: Opaque +stringData: + name: mycluster.com + server: https://mycluster.com + config: | + { + "execProviderConfig": { + "command": "argocd-k8s-auth", + "args": ["gcp"], + "apiVersion": "client.authentication.k8s.io/v1beta1" + }, + "tlsClientConfig": { + "insecure": false, + "caData": "" + } + } +``` + +Note that you must enable Workload Identity on your GKE cluster, create GCP service account with appropriate IAM role and bind it to Kubernetes service account for argocd-application-controller and argocd-server (showing Pod logs on UI). See [Use Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) and [Authenticating to the Kubernetes API server](https://cloud.google.com/kubernetes-engine/docs/how-to/api-server-authentication). + +### AKS + +Azure cluster secret example using argocd-k8s-auth and [kubelogin](https://github.com/Azure/kubelogin). 
The option *azure* to the argocd-k8s-auth execProviderConfig encapsulates the *get-token* command for kubelogin. Depending upon which authentication flow is desired (devicecode, spn, ropc, msi, azurecli, workloadidentity), set the environment variable AAD_LOGIN_METHOD with this value. Set other appropriate environment variables depending upon which authentication flow is desired. + +|Variable Name|Description| +|-------------|-----------| +|AAD_LOGIN_METHOD|One of devicecode, spn, ropc, msi, azurecli, or workloadidentity| +|AAD_SERVICE_PRINCIPAL_CLIENT_CERTIFICATE|AAD client cert in pfx. Used in spn login| +|AAD_SERVICE_PRINCIPAL_CLIENT_ID|AAD client application ID| +|AAD_SERVICE_PRINCIPAL_CLIENT_SECRET|AAD client application secret| +|AAD_USER_PRINCIPAL_NAME|Used in the ropc flow| +|AAD_USER_PRINCIPAL_PASSWORD|Used in the ropc flow| +|AZURE_TENANT_ID|The AAD tenant ID.| +|AZURE_AUTHORITY_HOST|Used in the WorkloadIdentityLogin flow| +|AZURE_FEDERATED_TOKEN_FILE|Used in the WorkloadIdentityLogin flow| +|AZURE_CLIENT_ID|Used in the WorkloadIdentityLogin flow| + +In addition to the environment variables above, argocd-k8s-auth accepts two extra environment variables to set the AAD environment, and to set the AAD server application ID. The AAD server application ID will default to 6dae42f8-4368-4678-94ff-3960e28e3630 if not specified. See [here](https://github.com/azure/kubelogin#exec-plugin-format) for details. + +|Variable Name|Description| +|-------------|-----------| +|AAD_ENVIRONMENT_NAME|The azure environment to use, default of AzurePublicCloud| +|AAD_SERVER_APPLICATION_ID|The optional AAD server application ID, defaults to 6dae42f8-4368-4678-94ff-3960e28e3630| + +This is an example of using the [federated workload login flow](https://github.com/Azure/kubelogin#azure-workload-federated-identity-non-interactive). The federated token file needs to be mounted as a secret into argoCD, so it can be used in the flow. 
The location of the token file needs to be set in the environment variable AZURE_FEDERATED_TOKEN_FILE. + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: mycluster-secret + labels: + argocd.argoproj.io/secret-type: cluster +type: Opaque +stringData: + name: mycluster.com + server: https://mycluster.com + config: | + { + "execProviderConfig": { + "command": "argocd-k8s-auth", + "env": { + "AAD_ENVIRONMENT_NAME": "AzurePublicCloud", + "AZURE_CLIENT_ID": "fill in client id", + "AZURE_TENANT_ID": "fill in tenant id", + "AZURE_FEDERATED_TOKEN_FILE": "/opt/path/to/federated_file.json", + "AZURE_AUTHORITY_HOST": "https://login.microsoftonline.com/", + "AAD_LOGIN_METHOD": "workloadidentity" + }, + "args": ["azure"], + "apiVersion": "client.authentication.k8s.io/v1beta1" + }, + "tlsClientConfig": { + "insecure": false, + "caData": "" + } + } +``` + +This is an example of using the spn (service principal name) flow. + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: mycluster-secret + labels: + argocd.argoproj.io/secret-type: cluster +type: Opaque +stringData: + name: mycluster.com + server: https://mycluster.com + config: | + { + "execProviderConfig": { + "command": "argocd-k8s-auth", + "env": { + "AAD_ENVIRONMENT_NAME": "AzurePublicCloud", + "AAD_SERVICE_PRINCIPAL_CLIENT_SECRET": "fill in your service principal client secret", + "AZURE_TENANT_ID": "fill in tenant id", + "AAD_SERVICE_PRINCIPAL_CLIENT_ID": "fill in your service principal client id", + "AAD_LOGIN_METHOD": "spn" + }, + "args": ["azure"], + "apiVersion": "client.authentication.k8s.io/v1beta1" + }, + "tlsClientConfig": { + "insecure": false, + "caData": "" + } + } +``` + ## Helm Chart Repositories -Non standard Helm Chart repositories have to be registered under the `repositories` key in the -`argocd-cm` ConfigMap. Each repository must have `url`, `type` and `name` fields. 
For private Helm repos you -may need to configure access credentials and HTTPS settings using `usernameSecret`, `passwordSecret`, -`caSecret`, `certSecret` and `keySecret` fields. +Non standard Helm Chart repositories have to be registered explicitly. +Each repository must have `url`, `type` and `name` fields. For private Helm repos you may need to configure access credentials and HTTPS settings using `username`, `password`, +`tlsClientCertData` and `tlsClientCertKey` fields. Example: ```yaml apiVersion: v1 -kind: ConfigMap +kind: Secret metadata: - name: argocd-cm + name: istio namespace: argocd labels: - app.kubernetes.io/name: argocd-cm - app.kubernetes.io/part-of: argocd -data: - # v1.2 or earlier use `helm.repositories` - helm.repositories: | - - url: https://storage.googleapis.com/istio-prerelease/daily-build/master-latest-daily/charts - name: istio.io - # v1.3 or later use `repositories` with `type: helm` - repositories: | - - type: helm - url: https://storage.googleapis.com/istio-prerelease/daily-build/master-latest-daily/charts - name: istio.io - - type: helm - url: https://argoproj.github.io/argo-helm - name: argo - usernameSecret: - name: my-secret - key: username - passwordSecret: - name: my-secret - key: password - caSecret: - name: my-secret - key: ca - certSecret: - name: my-secret - key: cert - keySecret: - name: my-secret - key: key + argocd.argoproj.io/secret-type: repository +stringData: + name: istio.io + url: https://storage.googleapis.com/istio-prerelease/daily-build/master-latest-daily/charts + type: helm +--- +apiVersion: v1 +kind: Secret +metadata: + name: argo-helm + namespace: argocd + labels: + argocd.argoproj.io/secret-type: repository +stringData: + name: argo + url: https://argoproj.github.io/argo-helm + type: helm + username: my-username + password: my-password + tlsClientCertData: ... + tlsClientCertKey: ... ``` ## Resource Exclusion/Inclusion -Resources can be excluded from discovery and sync so that ArgoCD is unaware of them. 
For example, `events.k8s.io` and `metrics.k8s.io` are always excluded. Use cases: +Resources can be excluded from discovery and sync so that Argo CD is unaware of them. For example, the apiGroup/kind `events.k8s.io/*`, `metrics.k8s.io/*`, `coordination.k8s.io/Lease`, and `""/Endpoints` are always excluded. Use cases: * You have temporal issues and you want to exclude problematic resources. -* There are many of a kind of resources that impacts ArgoCD's performance. -* Restrict ArgoCD's access to certain kinds of resources, e.g. secrets. See [security.md#cluster-rbac](security.md#cluster-rbac). +* There are many of a kind of resources that impacts Argo CD's performance. +* Restrict Argo CD's access to certain kinds of resources, e.g. secrets. See [security.md#cluster-rbac](security.md#cluster-rbac). -To configure this, edit the `argcd-cm` config map: +To configure this, edit the `argocd-cm` config map: ```shell kubectl edit configmap argocd-cm -n argocd @@ -638,13 +923,13 @@ kind: ConfigMap The `resource.exclusions` node is a list of objects. Each object can have: * `apiGroups` A list of globs to match the API group. -* `kinds` A list of kinds to match. Can be "*" to match all. -* `cluster` A list of globs to match the cluster. +* `kinds` A list of kinds to match. Can be `"*"` to match all. +* `clusters` A list of globs to match the cluster. If all three match, then the resource is ignored. In addition to exclusions, you might configure the list of included resources using the `resource.inclusions` setting. -By default, all resource group/kinds are included. The `resource.inclusions` setting allows customizing the list of included group/kinds: +By default, all resource group/kinds are included. The `resource.inclusions` setting allows customizing the list of included group/kinds: ```yaml apiVersion: v1 @@ -668,6 +953,37 @@ Notes: * Invalid globs result in the whole rule being ignored. 
* If you add a rule that matches existing resources, these will appear in the interface as `OutOfSync`. +## Auto respect RBAC for controller + +Argocd controller can be restricted from discovering/syncing specific resources using just controller rbac, without having to manually configure resource exclusions. +This feature can be enabled by setting `resource.respectRBAC` key in argocd cm, once it is set the controller will automatically stop watching for resources +that it does not have the permission to list/access. Possible values for `resource.respectRBAC` are: + - `strict` : This setting checks whether the list call made by controller is forbidden/unauthorized and if it is, it will cross-check the permission by making a `SelfSubjectAccessReview` call for the resource. + - `normal` : This will only check whether the list call response is forbidden/unauthorized and skip `SelfSubjectAccessReview` call, to minimize any extra api-server calls. + - unset/empty (default) : This will disable the feature and controller will continue to monitor all resources. + +Users who are comfortable with an increase in kube api-server calls can opt for `strict` option while users who are concerned with higher api calls and are willing to compromise on the accuracy can opt for the `normal` option. + +Notes: + +* When set to use `strict` mode controller must have rbac permission to `create` a `SelfSubjectAccessReview` resource +* The `SelfSubjectAccessReview` request will be only made for the `list` verb, it is assumed that if `list` is allowed for a resource then all other permissions are also available to the controller. + +Example argocd cm with `resource.respectRBAC` set to `strict`: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-cm +data: + resource.respectRBAC: "strict" +``` + +## Resource Custom Labels + +Custom Labels configured with `resource.customLabels` (comma separated string) will be displayed in the UI (for any resource that defines them). 
+ ## SSO & RBAC * SSO configuration details: [SSO](./user-management/index.md) @@ -681,21 +997,19 @@ based application which uses base Argo CD manifests from [https://github.com/arg Example of `kustomization.yaml`: ```yaml -bases: -- github.com/argoproj/argo-cd//manifests/cluster-install?ref=v1.0.1 - # additional resources like ingress rules, cluster and repository secrets. resources: +- github.com/argoproj/argo-cd//manifests/cluster-install?ref=v1.0.1 - clusters-secrets.yaml - repos-secrets.yaml # changes to config maps -patchesStrategicMerge: -- overlays/argo-cd-cm.yaml +patches: +- path: overlays/argo-cd-cm.yaml ``` The live example of self managed Argo CD config is available at [https://cd.apps.argoproj.io](https://cd.apps.argoproj.io) and with configuration stored at [argoproj/argoproj-deployments](https://github.com/argoproj/argoproj-deployments/tree/master/argocd). !!! note - You will need to sign-in using your github account to get access to [https://cd.apps.argoproj.io](https://cd.apps.argoproj.io) + You will need to sign-in using your GitHub account to get access to [https://cd.apps.argoproj.io](https://cd.apps.argoproj.io) diff --git a/docs/operator-manual/deep_links.md b/docs/operator-manual/deep_links.md new file mode 100644 index 0000000000000..c166a1d25d75d --- /dev/null +++ b/docs/operator-manual/deep_links.md @@ -0,0 +1,78 @@ +# Deep Links + +Deep links allow users to quickly redirect to third-party systems, such as Splunk, Datadog, etc. from the Argo CD +user interface. + +Argo CD administrator will be able to configure links to third-party systems by providing +deep link templates configured in `argocd-cm`. The templates can be conditionally rendered and are able +to reference different types of resources relating to where the links show up, this includes projects, applications, +or individual resources (pods, services, etc.). 
+ +## Configuring Deep Links + +The configuration for Deep Links is present in `argocd-cm` as `.links` fields where +`` determines where it will be displayed. The possible values for `` are: + +- `project`: all links under this field will show up in the project tab in the Argo CD UI +- `application`: all links under this field will show up in the application summary tab +- `resource`: all links under this field will show up in the resource (deployments, pods, services, etc.) summary tab + +Each link in the list has five subfields: + +1. `title`: title/tag that will be displayed in the UI corresponding to that link +2. `url`: the actual URL where the deep link will redirect to, this field can be templated to use data from the + corresponding application, project or resource objects (depending on where it is located). This uses [text/template](https://pkg.go.dev/text/template) pkg for templating +3. `description` (optional): a description for what the deep link is about +4. `icon.class` (optional): a font-awesome icon class to be used when displaying the links in dropdown menus +5. `if` (optional): a conditional statement that results in either `true` or `false`, it also has access to the same + data as the `url` field. If the condition resolves to `true` the deep link will be displayed - else it will be hidden. If + the field is omitted, by default the deep links will be displayed. This uses [antonmedv/expr](https://github.com/antonmedv/expr/tree/master/docs) for evaluating conditions + +!!!note + For resources of kind Secret the data fields are redacted but other fields are accessible for templating the deep links. + +!!!warning + Make sure to validate the url templates and inputs to prevent data leaks or possible generation of any malicious links. + +As mentioned earlier the links and conditions can be templated to use data from the resource, each category of links can access different types of data linked to that resource. 
+Overall we have these 4 resources available for templating in the system: + +- `app` or `application`: this key is used to access the application resource data. +- `resource`: this key is used to access values for the actual k8s resource. +- `cluster`: this key is used to access the related destination cluster data like name, server, namespaces etc. +- `project`: this key is used to access the project resource data. + +The above resources are accessible in particular link categories, here's a list of resources available in each category: + +- `resource.links`: `resource`, `application`, `cluster` and `project` +- `application.links`: `app`/`application` and `cluster` +- `project.links`: `project` + +An example `argocd-cm.yaml` file with deep links and their variations : + +```yaml + # sample project level links + project.links: | + - url: https://myaudit-system.com?project={{.project.metadata.name}} + title: Audit + description: system audit logs + icon.class: "fa-book" + # sample application level links + application.links: | + # pkg.go.dev/text/template is used for evaluating url templates + - url: https://mycompany.splunk.com?search={{.app.spec.destination.namespace}}&env={{.project.metadata.labels.env}} + title: Splunk + # conditionally show link e.g. 
for specific project + # github.com/antonmedv/expr is used for evaluation of conditions + - url: https://mycompany.splunk.com?search={{.app.spec.destination.namespace}} + title: Splunk + if: application.spec.project == "default" + - url: https://{{.app.metadata.annotations.splunkhost}}?search={{.app.spec.destination.namespace}} + title: Splunk + if: app.metadata.annotations.splunkhost != "" + # sample resource level links + resource.links: | + - url: https://mycompany.splunk.com?search={{.resource.metadata.name}}&env={{.project.metadata.labels.env}} + title: Splunk + if: resource.kind == "Pod" || resource.kind == "Deployment" +``` diff --git a/docs/operator-manual/disaster_recovery.md b/docs/operator-manual/disaster_recovery.md index b6a17885b2345..97d2868051d65 100644 --- a/docs/operator-manual/disaster_recovery.md +++ b/docs/operator-manual/disaster_recovery.md @@ -1,6 +1,6 @@ # Disaster Recovery -You can use `argocd-util` to import and export all Argo CD data. +You can use `argocd admin` to import and export all Argo CD data. Make sure you have `~/.kube/config` pointing to your Argo CD cluster. @@ -15,14 +15,14 @@ export VERSION=v1.0.1 Export to a backup: ```bash -docker run -v ~/.kube:/home/argocd/.kube --rm argoproj/argocd:$VERSION argocd-util export > backup.yaml +docker run -v ~/.kube:/home/argocd/.kube --rm quay.io/argoproj/argocd:$VERSION argocd admin export > backup.yaml ``` Import from a backup: ```bash -docker run -i -v ~/.kube:/home/argocd/.kube --rm argoproj/argocd:$VERSION argocd-util import - < backup.yaml +docker run -i -v ~/.kube:/home/argocd/.kube --rm quay.io/argoproj/argocd:$VERSION argocd admin import - < backup.yaml ``` !!! note - If you are running Argo CD on a namespace different than default remember to pass the namespace parameter (-n ). 'argocd-util export' will not fail if you run it in the wrong namespace. + If you are running Argo CD on a namespace different than default remember to pass the namespace parameter (-n ). 
'argocd admin export' will not fail if you run it in the wrong namespace. diff --git a/docs/operator-manual/dynamic-cluster-distribution.md b/docs/operator-manual/dynamic-cluster-distribution.md new file mode 100644 index 0000000000000..a32258c3f2f0a --- /dev/null +++ b/docs/operator-manual/dynamic-cluster-distribution.md @@ -0,0 +1,59 @@ +# Dynamic Cluster Distribution + +*Current Status: [Alpha][1] (Since v2.9.0)* + +By default, clusters are assigned to shards indefinitely. For users of the default, hash-based sharding algorithm, this +static assignment is fine: shards will always be roughly-balanced by the hash-based algorithm. But for users of the +[round-robin](high_availability.md#argocd-application-controller) or other custom shard assignment algorithms, this +static assignment can lead to unbalanced shards when replicas are added or removed. + +Starting v2.9, Argo CD supports a dynamic cluster distribution feature. When replicas are added or removed, the sharding +algorithm is re-run to ensure that the clusters are distributed according to the algorithm. If the algorithm is +well-balanced, like round-robin, then the shards will be well-balanced. + +Previously, the shard count was set via the `ARGOCD_CONTROLLER_REPLICAS` environment variable. Changing the environment +variable forced a restart of all application controller pods. Now, the shard count is set via the `replicas` field of the deployment, +which does not require a restart of the application controller pods. + +## Enabling Dynamic Distribution of Clusters + +This feature is disabled by default while it is in alpha. To enable it, you must set the environment variable `ARGOCD_ENABLE_DYNAMIC_CLUSTER_DISTRIBUTION` to true when running the Application Controller. + +In order to utilize the feature, the manifests `manifests/ha/base/controller-deployment/` can be applied as a Kustomize +overlay. This overlay sets the StatefulSet replicas to `0` and deploys the application controller as a Deployment.
The +dynamic distribution code automatically kicks in when the controller is deployed as a Deployment. + +!!! important + The use of a Deployment instead of a StatefulSet is an implementation detail which may change in future versions of + this feature. Therefore, the directory name of the Kustomize overlay may change as well. Monitor the release notes + to avoid issues. + +Note the introduction of new environment variable `ARGOCD_CONTROLLER_HEARTBEAT_TIME`. The environment variable is explained in [working of Dynamic Distribution Heartbeat Process](#working-of-dynamic-distribution) + +## Working of Dynamic Distribution + +To accomplish runtime distribution of clusters, the Application Controller uses a ConfigMap to associate a controller +pod with a shard number and a heartbeat to ensure that controller pods are still alive and handling their shard, in +effect, their share of the work. + +The Application Controller will create a new ConfigMap named `argocd-app-controller-shard-cm` to store the Controller <-> Shard mapping. The mapping would look like below for each shard: + +```yaml +ShardNumber : 0 +ControllerName : "argocd-application-controller-hydrxyt" +HeartbeatTime : "2009-11-17 20:34:58.651387237 +0000 UTC" +``` + +* `ControllerName`: Stores the hostname of the Application Controller pod +* `ShardNumber` : Stores the shard number managed by the controller pod +* `HeartbeatTime`: Stores the last time this heartbeat was updated. + +Controller Shard Mapping is updated in the ConfigMap during each readiness probe check of the pod, that is every 10 seconds (otherwise as configured). The controller will acquire the shard during every iteration of readiness probe check and try to update the ConfigMap with the `HeartbeatTime`. The default `HeartbeatDuration` after which the heartbeat should be updated is `10` seconds. 
If the ConfigMap was not updated for any controller pod for more than `3 * HeartbeatDuration`, then the readiness probe for the application pod is marked as `Unhealthy`. To increase the default `HeartbeatDuration`, you can set the environment variable `ARGOCD_CONTROLLER_HEARTBEAT_TIME` with the desired value. + +The new sharding mechanism does not monitor the environment variable `ARGOCD_CONTROLLER_REPLICAS` but instead reads the replica count directly from the Application Controller Deployment. The controller identifies the change in the number of replicas by comparing the replica count in the Application Controller Deployment and the number of mappings in the `argocd-app-controller-shard-cm` ConfigMap. + +In the scenario when the number of Application Controller replicas increases, a new entry is added to the list of mappings in the `argocd-app-controller-shard-cm` ConfigMap and the cluster distribution is triggered to re-distribute the clusters. + +In the scenario when the number of Application Controller replicas decreases, the mappings in the `argocd-app-controller-shard-cm` ConfigMap are reset and every controller acquires the shard again thus triggering the re-distribution of the clusters. + +[1]: https://github.com/argoproj/argoproj/blob/master/community/feature-status.md diff --git a/docs/operator-manual/health.md b/docs/operator-manual/health.md index a772915ca8152..5cc80de6538c5 100644 --- a/docs/operator-manual/health.md +++ b/docs/operator-manual/health.md @@ -5,7 +5,7 @@ Argo CD provides built-in health assessment for several standard Kubernetes type surfaced to the overall Application health status as a whole. The following checks are made for specific types of kubernetes resources: -### Deployment, ReplicaSet, StatefulSet DaemonSet +### Deployment, ReplicaSet, StatefulSet, DaemonSet * Observed generation is equal to desired generation. * Number of **updated** replicas equals the number of desired replicas. 
@@ -16,9 +16,45 @@ with at least one value for `hostname` or `IP`. ### Ingress * The `status.loadBalancer.ingress` list is non-empty, with at least one value for `hostname` or `IP`. +### Job +* If job `.spec.suspended` is set to 'true', then the job and app health will be marked as suspended. ### PersistentVolumeClaim * The `status.phase` is `Bound` +### Argocd App + +The health assessment of `argoproj.io/Application` CRD has been removed in argocd 1.8 (see [#3781](https://github.com/argoproj/argo-cd/issues/3781) for more information). +You might need to restore it if you are using app-of-apps pattern and orchestrating synchronization using sync waves. Add the following resource customization in +`argocd-cm` ConfigMap: + +```yaml +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-cm + namespace: argocd + labels: + app.kubernetes.io/name: argocd-cm + app.kubernetes.io/part-of: argocd +data: + resource.customizations: | + argoproj.io/Application: + health.lua: | + hs = {} + hs.status = "Progressing" + hs.message = "" + if obj.status ~= nil then + if obj.status.health ~= nil then + hs.status = obj.status.health.status + if obj.status.health.message ~= nil then + hs.message = obj.status.health.message + end + end + end + return hs +``` + ## Custom Health Checks Argo CD supports custom health checks written in [Lua](https://www.lua.org/). This is useful if you: @@ -30,7 +66,15 @@ There are two ways to configure a custom health check. The next two sections des ### Way 1. Define a Custom Health Check in `argocd-cm` ConfigMap -Custom health checks can be defined in `resource.customizations` field of `argocd-cm`. Following example demonstrates a health check for `cert-manager.io/Certificate`. +Custom health checks can be defined in +```yaml + resource.customizations: | + : + health.lua: | +``` +field of `argocd-cm`. 
If you are using argocd-operator, this is overridden by [the argocd-operator resourceCustomizations](https://argocd-operator.readthedocs.io/en/latest/reference/argocd/#resource-customizations). + +The following example demonstrates a health check for `cert-manager.io/Certificate`. ```yaml data: @@ -54,14 +98,51 @@ data: end end end - + hs.status = "Progressing" hs.message = "Waiting for certificate" return hs ``` +In order to prevent duplication of the custom health check for potentially multiple resources, it is also possible to specify a wildcard in the resource kind, and anywhere in the resource group, like this: + +```yaml + resource.customizations: | + ec2.aws.crossplane.io/*: + health.lua: | + ... +``` + +```yaml + resource.customizations: | + "*.aws.crossplane.io/*": + health.lua: | + ... +``` + +!!!important + Please note the required quotes in the resource customization health section, if the wildcard starts with `*`. + The `obj` is a global variable which contains the resource. The script must return an object with status and optional message field. +The custom health check might return one of the following health statuses: + + * `Healthy` - the resource is healthy + * `Progressing` - the resource is not healthy yet but still making progress and might be healthy soon + * `Degraded` - the resource is degraded + * `Suspended` - the resource is suspended and waiting for some external event to resume (e.g. suspended CronJob or paused Deployment) + +By default health typically returns `Progressing` status. -NOTE: as a security measure you don't have access to most of the standard Lua libraries. +NOTE: As a security measure, access to the standard Lua libraries will be disabled by default. Admins can control access by +setting `resource.customizations.useOpenLibs.`. In the following example, standard libraries are enabled for health check of `cert-manager.io/Certificate`. 
+ +```yaml +data: + resource.customizations: | + cert-manager.io/Certificate: + health.lua.useOpenLibs: true + health.lua: | + # Lua standard libraries are enabled for this script +``` ### Way 2. Contribute a Custom Health Check @@ -87,4 +168,41 @@ tests: inputPath: testdata/test-resource-definition.yaml ``` +To test the implemented custom health checks, run `go test -v ./util/lua/`. + The [PR#1139](https://github.com/argoproj/argo-cd/pull/1139) is an example of Cert Manager CRDs custom health check. + +Please note that bundled health checks with wildcards are not supported. + +## Health Checks + +An Argo CD App's health is inferred from the health of its immediate child resources (the resources represented in +source control). + +But the health of a resource is not inherited from child resources - it is calculated using only information about the +resource itself. A resource's status field may or may not contain information about the health of a child resource, and +the resource's health check may or may not take that information into account. + +The lack of inheritance is by design. A resource's health can't be inferred from its children because the health of a +child resource may not be relevant to the health of the parent resource. For example, a Deployment's health is not +necessarily affected by the health of its Pods. + +``` +App (healthy) +└── Deployment (healthy) + └── ReplicaSet (healthy) + └── Pod (healthy) + └── ReplicaSet (unhealthy) + └── Pod (unhealthy) +``` + +If you want the health of a child resource to affect the health of its parent, you need to configure the parent's health +check to take the child's health into account. Since only the parent resource's state is available to the health check, +the parent resource's controller needs to make the child resource's health available in the parent resource's status +field. 
+ +``` +App (healthy) +└── CustomResource (healthy) <- This resource's health check needs to be fixed to mark the App as unhealthy + └── CustomChildResource (unhealthy) +``` diff --git a/docs/operator-manual/high_availability.md b/docs/operator-manual/high_availability.md index c0e8e58eff9fa..ac59c333ba7cb 100644 --- a/docs/operator-manual/high_availability.md +++ b/docs/operator-manual/high_availability.md @@ -1,13 +1,11 @@ # High Availability -Argo CD is largely stateless, all data is persisted as Kubernetes objects, which in turn is stored in Kubernetes' etcd. Redis is only used as a throw-away cache and can be lost. When lost, it will be rebuilt without loss of service. +Argo CD is largely stateless. All data is persisted as Kubernetes objects, which in turn is stored in Kubernetes' etcd. Redis is only used as a throw-away cache and can be lost. When lost, it will be rebuilt without loss of service. -A set of HA manifests are provided for users who wish to run Argo CD in a highly available manner. This runs more containers, and runs Redis in HA mode. +A set of [HA manifests](https://github.com/argoproj/argo-cd/tree/master/manifests/ha) are provided for users who wish to run Argo CD in a highly available manner. This runs more containers, and runs Redis in HA mode. -[Manifests ⧉](https://github.com/argoproj/argo-cd/tree/master/manifests) - -!!! note - The HA installation will require at least three different nodes due to pod anti-affinity roles in the specs. +> **NOTE:** The HA installation will require at least three different nodes due to pod anti-affinity roles in the +> specs. Additionally, IPv6 only clusters are not supported. ## Scaling Up @@ -17,58 +15,55 @@ A set of HA manifests are provided for users who wish to run Argo CD in a highly The `argocd-repo-server` is responsible for cloning Git repository, keeping it up to date and generating manifests using the appropriate tool. 
-* `argocd-repo-server` fork/exec config management tool to generate manifests. The fork can fail due to lack of memory and limit on the number of OS threads. -The `--parallelismlimit` flag controls how many manifests generations are running concurrently and allows avoiding OOM kills. +* `argocd-repo-server` fork/exec config management tool to generate manifests. The fork can fail due to lack of memory or limit on the number of OS threads. +The `--parallelismlimit` flag controls how many manifests generations are running concurrently and helps avoid OOM kills. * the `argocd-repo-server` ensures that repository is in the clean state during the manifest generation using config management tools such as Kustomize, Helm -or custom plugin. As a result Git repositories with multiple applications might be affect repository server performance. +or custom plugin. As a result Git repositories with multiple applications might affect repository server performance. Read [Monorepo Scaling Considerations](#monorepo-scaling-considerations) for more information. -* `argocd-repo-server` clones repository into `/tmp` ( of path specified in `TMPDIR` env variable ). Pod might run out of disk space if have too many repository -or repositories has a lot of files. To avoid this problem mount persistent volume. +* `argocd-repo-server` clones the repository into `/tmp` (or the path specified in the `TMPDIR` env variable). The Pod might run out of disk space if it has too many repositories +or if the repositories have a lot of files. To avoid this problem mount a persistent volume. -* `argocd-repo-server` `git ls-remote` to resolve ambiguous revision such as `HEAD`, branch or tag name. This operation is happening pretty frequently -and might fail. To avoid failed syncs use `ARGOCD_GIT_ATTEMPTS_COUNT` environment variable to retry failed requests. +* `argocd-repo-server` uses `git ls-remote` to resolve ambiguous revisions such as `HEAD`, a branch or a tag name. 
This operation happens frequently +and might fail. To avoid failed syncs use the `ARGOCD_GIT_ATTEMPTS_COUNT` environment variable to retry failed requests. -* `argocd-repo-server` Every 3m (by default) Argo CD checks for changes to the app manifests. Argo CD assumes by default that manifests only change when the repo changes, so it caches generated manifests (for 24h by default). With Kustomize remote bases, or Helm patch releases, the manifests can change even though the repo has not changed. By reducing the cache time, you can get the changes without waiting for 24h. Use `--repo-cache-expiration duration`, and we'd suggest in low volume environments you try '1h'. Bear in mind this will negate the benefit of caching if set too low. +* `argocd-repo-server` Every 3m (by default) Argo CD checks for changes to the app manifests. Argo CD assumes by default that manifests only change when the repo changes, so it caches the generated manifests (for 24h by default). With Kustomize remote bases, or in case a Helm chart gets changed without bumping its version number, the expected manifests can change even though the repo has not changed. By reducing the cache time, you can get the changes without waiting for 24h. Use `--repo-cache-expiration duration`, and we'd suggest in low volume environments you try '1h'. Bear in mind that this will negate the benefits of caching if set too low. -* `argocd-repo-server` fork exec config management tools such as `helm` or `kustomize` and enforces 90 seconds timeout. The timeout can be increased using `ARGOCD_EXEC_TIMEOUT` env variable. +* `argocd-repo-server` executes config management tools such as `helm` or `kustomize` and enforces a 90 second timeout. This timeout can be changed by using the `ARGOCD_EXEC_TIMEOUT` env variable. The value should be in the Go time duration string format, for example, `2m30s`. **metrics:** -* `argocd_git_request_total` - Number of git requests. 
The metric provides two tags: `repo` - Git repo URL; `request_type` - `ls-remote` or `fetch`. +* `argocd_git_request_total` - Number of git requests. This metric provides two tags: `repo` - Git repo URL; `request_type` - `ls-remote` or `fetch`. -* `ARGOCD_ENABLE_GRPC_TIME_HISTOGRAM` (v1.8+) - environment variable that enables collecting RPC performance metrics. Enable it if you need to troubleshoot performance issue. Note: metric is expensive to both query and store! +* `ARGOCD_ENABLE_GRPC_TIME_HISTOGRAM` - Is an environment variable that enables collecting RPC performance metrics. Enable it if you need to troubleshoot performance issues. Note: This metric is expensive to both query and store! ### argocd-application-controller **settings:** -The `argocd-application-controller` uses `argocd-repo-server` to get generated manifests and Kubernetes API server to get actual cluster state. +The `argocd-application-controller` uses `argocd-repo-server` to get generated manifests and Kubernetes API server to get the actual cluster state. -* each controller replica uses two separate queues to process application reconciliation (milliseconds) and app syncing (seconds). Number of queue processors for each queue is controlled by -`--status-processors` (20 by default) and `--operation-processors` (10 by default) flags. Increase number of processors if your Argo CD instance manages too many applications. +* each controller replica uses two separate queues to process application reconciliation (milliseconds) and app syncing (seconds). The number of queue processors for each queue is controlled by +`--status-processors` (20 by default) and `--operation-processors` (10 by default) flags. Increase the number of processors if your Argo CD instance manages too many applications. For 1000 application we use 50 for `--status-processors` and 25 for `--operation-processors` -* The manifest generation typically takes the most time during reconciliation. 
The duration of manifest generation is limited to make sure controller refresh queue does not overflow. -The app reconciliation fails with `Context deadline exceeded` error if manifest generating taking too much time. As workaround increase value of `--repo-server-timeout-seconds` and -consider scaling up `argocd-repo-server` deployment. +* The manifest generation typically takes the most time during reconciliation. The duration of manifest generation is limited to make sure the controller refresh queue does not overflow. +The app reconciliation fails with `Context deadline exceeded` error if the manifest generation is taking too much time. As a workaround increase the value of `--repo-server-timeout-seconds` and +consider scaling up the `argocd-repo-server` deployment. -* The controller uses `kubectl` fork/exec to push changes into the cluster and to convert resource from preferred version into user specified version -(e.g. Deployment `apps/v1` into `extensions/v1beta1`). Same as config management tool `kubectl` fork/exec might cause pod OOM kill. Use `--kubectl-parallelism-limit` flag to limit -number of allowed concurrent kubectl fork/execs. +* The controller uses Kubernetes watch APIs to maintain a lightweight Kubernetes cluster cache. This allows avoiding querying Kubernetes during app reconciliation and significantly improves +performance. For performance reasons the controller monitors and caches only the preferred versions of a resource. During reconciliation, the controller might have to convert cached resources from the +preferred version into a version of the resource stored in Git. If `kubectl convert` fails because the conversion is not supported then the controller falls back to Kubernetes API query which slows down +reconciliation. In this case, we advise to use the preferred resource version in Git. -* The controller uses Kubernetes watch APIs to maintain lightweight Kubernetes cluster cache. 
This allows to avoid querying Kubernetes during app reconciliation and significantly improve -performance. For performance reasons controller monitors and caches only preferred the version of a resource. During reconciliation, the controller might have to convert cached resource from -preferred version into a version of the resource stored in Git. If `kubectl convert` fails because conversion is not supported than controller fallback to Kubernetes API query which slows down -reconciliation. In this case advice user-preferred resource version in Git. - -* The controller polls Git every 3m by default. You can increase this duration using `--app-resync seconds` to reduce polling. +* The controller polls Git every 3m by default. You can change this duration using the `timeout.reconciliation` setting in the `argocd-cm` ConfigMap. The value of `timeout.reconciliation` is a duration string e.g `60s`, `1m`, `1h` or `1d`. * If the controller is managing too many clusters and uses too much memory then you can shard clusters across multiple -controller replicas. To enable sharding increase the number of replicas in `argocd-application-controller` `StatefulSet` -and repeat number of replicas in `ARGOCD_CONTROLLER_REPLICAS` environment variable. The strategic merge patch below -demonstrates changes required to configure two controller replicas. +controller replicas. To enable sharding, increase the number of replicas in `argocd-application-controller` `StatefulSet` +and repeat the number of replicas in the `ARGOCD_CONTROLLER_REPLICAS` environment variable. The strategic merge patch below demonstrates changes required to configure two controller replicas. + +* By default, the controller will update the cluster information every 10 seconds. 
If there is a problem with your cluster network environment that is causing the update time to take a long time, you can try modifying the environment variable `ARGO_CD_UPDATE_CLUSTER_INFO_TIMEOUT` to increase the timeout (the unit is seconds). ```yaml apiVersion: apps/v1 @@ -85,23 +80,82 @@ spec: - name: ARGOCD_CONTROLLER_REPLICAS value: "2" ``` +* In order to manually set the cluster's shard number, specify the optional `shard` property when creating a cluster. If not specified, it will be calculated on the fly by the application controller. -* `ARGOCD_ENABLE_GRPC_TIME_HISTOGRAM` (v1.8+)- environment variable that enables collecting RPC performance metrics. Enable it if you need to troubleshoot performance issue. Note: metric is expensive to both query and store! +* The shard distribution algorithm of the `argocd-application-controller` can be set by using the `--sharding-method` parameter. Supported sharding methods are: [legacy (default), round-robin]. `legacy` mode uses an `uid` based distribution (non-uniform). `round-robin` uses an equal distribution across all shards. The `--sharding-method` parameter can also be overridden by setting the key `controller.sharding.algorithm` in the `argocd-cmd-params-cm` `configMap` (preferably) or by setting the `ARGOCD_CONTROLLER_SHARDING_ALGORITHM` environment variable and by specifying the same possible values. + +!!! warning "Alpha Feature" + The `round-robin` shard distribution algorithm is an experimental feature. Reshuffling is known to occur in certain scenarios with cluster removal. If the cluster at rank-0 is removed, reshuffling all clusters across shards will occur and may temporarily have negative performance impacts. + +* A cluster can be manually assigned and forced to a `shard` by patching the `shard` field in the cluster secret to contain the shard number, e.g. 
+```yaml +apiVersion: v1 +kind: Secret +metadata: + name: mycluster-secret + labels: + argocd.argoproj.io/secret-type: cluster +type: Opaque +stringData: + shard: 1 + name: mycluster.com + server: https://mycluster.com + config: | + { + "bearerToken": "", + "tlsClientConfig": { + "insecure": false, + "caData": "" + } + } +``` + +* `ARGOCD_ENABLE_GRPC_TIME_HISTOGRAM` - environment variable that enables collecting RPC performance metrics. Enable it if you need to troubleshoot performance issues. Note: This metric is expensive to both query and store! + +* `ARGOCD_CLUSTER_CACHE_LIST_PAGE_BUFFER_SIZE` - environment variable controlling the number of pages the controller + buffers in memory when performing a list operation against the K8s api server while syncing the cluster cache. This + is useful when the cluster contains a large number of resources and cluster sync times exceed the default etcd + compaction interval timeout. In this scenario, when attempting to sync the cluster cache, the application controller + may throw an error that the `continue parameter is too old to display a consistent list result`. Setting a higher + value for this environment variable configures the controller with a larger buffer in which to store pre-fetched + pages which are processed asynchronously, increasing the likelihood that all pages have been pulled before the etcd + compaction interval timeout expires. In the most extreme case, operators can set this value such that + `ARGOCD_CLUSTER_CACHE_LIST_PAGE_SIZE * ARGOCD_CLUSTER_CACHE_LIST_PAGE_BUFFER_SIZE` exceeds the largest resource + count (grouped by k8s api version, the granule of parallelism for list operations). In this case, all resources will + be buffered in memory -- no api server request will be blocked by processing. **metrics** -* `argocd_app_reconcile` - reports application reconciliation duration. Can be used to build reconciliation duration heat map to get high-level reconciliation performance picture. 
+* `argocd_app_reconcile` - reports application reconciliation duration. Can be used to build reconciliation duration heat map to get a high-level reconciliation performance picture. * `argocd_app_k8s_request_total` - number of k8s requests per application. The number of fallback Kubernetes API queries - useful to identify which application has a resource with non-preferred version and causes performance issues. ### argocd-server -The `argocd-server` is stateless and probably least likely to cause issues. You might consider increasing number of replicas to 3 or more to ensure there is no downtime during upgrades. +The `argocd-server` is stateless and probably the least likely to cause issues. To ensure there is no downtime during upgrades, consider increasing the number of replicas to `3` or more and repeat the number in the `ARGOCD_API_SERVER_REPLICAS` environment variable. The strategic merge patch below +demonstrates this. + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: argocd-server +spec: + replicas: 3 + template: + spec: + containers: + - name: argocd-server + env: + - name: ARGOCD_API_SERVER_REPLICAS + value: "3" +``` **settings:** +* The `ARGOCD_API_SERVER_REPLICAS` environment variable is used to divide [the limit of concurrent login requests (`ARGOCD_MAX_CONCURRENT_LOGIN_REQUESTS_COUNT`)](./user-management/index.md#failed-logins-rate-limiting) between each replica. * The `ARGOCD_GRPC_MAX_SIZE_MB` environment variable allows specifying the max size of the server response message in megabytes. -The default value is 200. You might need to increase for an Argo CD instance that manages 3000+ applications. +The default value is 200. You might need to increase this for an Argo CD instance that manages 3000+ applications. 
### argocd-dex-server, argocd-redis @@ -109,40 +163,34 @@ The `argocd-dex-server` uses an in-memory database, and two or more instances wo ## Monorepo Scaling Considerations -Argo CD repo server maintains one repository clone locally and use it for application manifest generation. If the manifest generation requires to change a file in the local repository clone then only one concurrent manifest generation per server instance is allowed. This limitation might significantly slowdown Argo CD if you have a mono repository with multiple applications (50+). +Argo CD repo server maintains one repository clone locally and uses it for application manifest generation. If the manifest generation requires changing a file in the local repository clone then only one concurrent manifest generation per server instance is allowed. This limitation might significantly slow down Argo CD if you have a mono repository with multiple applications (50+). ### Enable Concurrent Processing -Argo CD determines if manifest generation might change local files in the local repository clone based on config management tool and application settings. -If the manifest generation has no side effects then requests are processed in parallel without the performance penalty. Following are known cases that might cause slowness and workarounds: +Argo CD determines if manifest generation might change local files in the local repository clone based on the config management tool and application settings. +If the manifest generation has no side effects then requests are processed in parallel without a performance penalty. The following are known cases that might cause slowness and their workarounds: - * **Multiple Helm based applications pointing to the same directory in one Git repository:** ensure that your Helm chart don't have conditional -[dependencies](https://helm.sh/docs/chart_best_practices/dependencies/#conditions-and-tags) and create `.argocd-allow-concurrency` file in chart directory. 
+ * **Multiple Helm based applications pointing to the same directory in one Git repository:** ensure that your Helm chart doesn't have conditional +[dependencies](https://helm.sh/docs/chart_best_practices/dependencies/#conditions-and-tags) and create `.argocd-allow-concurrency` file in the chart directory. - * **Multiple Custom plugin based applications:** avoid creating temporal files during manifest generation and and create `.argocd-allow-concurrency` file in app directory. + * **Multiple Custom plugin based applications:** avoid creating temporal files during manifest generation and create `.argocd-allow-concurrency` file in the app directory, or use the sidecar plugin option, which processes each application using a temporary copy of the repository. - * **Multiple Kustomize or Ksonnet applications in same repository with [parameter overrides](../user-guide/parameters.md):** sorry, no workaround for now. + * **Multiple Kustomize applications in same repository with [parameter overrides](../user-guide/parameters.md):** sorry, no workaround for now. ### Webhook and Manifest Paths Annotation -Argo CD aggressively caches generated manifests and uses repository commit SHA as a cache key. A new commit to the Git repository invalidates cache for all applications configured in the repository -that again negatively affect mono repositories with multiple applications. You might use [webhooks ⧉](https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/webhook.md) and `argocd.argoproj.io/manifest-generate-paths` Application -CRD annotation to solve this problem and improve performance. - -The `argocd.argoproj.io/manifest-generate-paths` contains a semicolon-separated list of paths within the Git repository that are used during manifest generation. The webhook compares paths specified in the annotation -with the changed files specified in the webhook payload. 
If non of the changed files are located in the paths then webhook don't trigger application reconciliation and re-uses previously generated manifests cache for a new commit. +Argo CD aggressively caches generated manifests and uses the repository commit SHA as a cache key. A new commit to the Git repository invalidates the cache for all applications configured in the repository. +This can negatively affect repositories with multiple applications. You can use [webhooks](https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/webhook.md) and the `argocd.argoproj.io/manifest-generate-paths` Application CRD annotation to solve this problem and improve performance. -Installations that use a different repo for each app are **not** subject to this behavior and will likely get no benefit from using these annotations. - -!!! note - Installations with a large number of apps should also set the `--app-resync` flag in the `argocd-application-controller` process to a larger value to reduce automatic refreshes based on git polling. The exact value is a trade-off between reduced work and app sync in case of a missed webhook event. For most cases `1800` (30m) or `3600` (1h) is a good trade-off. +The `argocd.argoproj.io/manifest-generate-paths` annotation contains a semicolon-separated list of paths within the Git repository that are used during manifest generation. The webhook compares paths specified in the annotation with the changed files specified in the webhook payload. If no modified files match the paths specified in `argocd.argoproj.io/manifest-generate-paths`, then the webhook will not trigger application reconciliation and the existing cache will be considered valid for the new commit. +Installations that use a different repository for each application are **not** subject to this behavior and will likely get no benefit from using these annotations. !!! note - Application manifest paths annotation support depends on the git provider used for the Application. 
It is currently only supported for GitHub, GitLab, and Gogs based repos + Application manifest paths annotation support depends on the git provider used for the Application. It is currently only supported for GitHub, GitLab, and Gogs based repos. -* **Relative path** The annotation might contains relative path. In this case the path is considered relative to the path specified in the application source: +* **Relative path** The annotation might contain a relative path. In this case the path is considered relative to the path specified in the application source: ```yaml apiVersion: argoproj.io/v1alpha1 @@ -160,7 +208,8 @@ spec: path: guestbook # ... ``` -* **Absolute path** The annotation value might be an absolute path started from '/'. In this case path is considered as an absolute path within the Git repository: + +* **Absolute path** The annotation value might be an absolute path starting with '/'. In this case path is considered as an absolute path within the Git repository: ```yaml apiVersion: argoproj.io/v1alpha1 diff --git a/docs/operator-manual/ingress.md b/docs/operator-manual/ingress.md index a41f929f6c5d3..84b2bcaf34a67 100644 --- a/docs/operator-manual/ingress.md +++ b/docs/operator-manual/ingress.md @@ -1,6 +1,6 @@ # Ingress Configuration -Argo CD runs both a gRPC server (used by the CLI), as well as a HTTP/HTTPS server (used by the UI). +Argo CD API server runs both a gRPC server (used by the CLI), as well as a HTTP/HTTPS server (used by the UI). Both protocols are exposed by the argocd-server service object on the following ports: * 443 - gRPC/HTTPS @@ -12,7 +12,7 @@ There are several ways how Ingress can be configured. The Ambassador Edge Stack can be used as a Kubernetes ingress controller with [automatic TLS termination](https://www.getambassador.io/docs/latest/topics/running/tls/#host) and routing capabilities for both the CLI and the UI. -The API server should be run with TLS disabled. 
Edit the `argocd-server` deployment to add the `--insecure` flag to the argocd-server command. Given the `argocd` CLI includes the port number in the request `host` header, 2 Mappings are required. +The API server should be run with TLS disabled. Edit the `argocd-server` deployment to add the `--insecure` flag to the argocd-server command, or simply set `server.insecure: "true"` in the `argocd-cmd-params-cm` ConfigMap [as described here](server-commands/additional-configuration-method.md). Given the `argocd` CLI includes the port number in the request `host` header, 2 Mappings are required. ### Option 1: Mapping CRD for Host-based Routing ```yaml @@ -32,15 +32,19 @@ metadata: name: argocd-server-cli namespace: argocd spec: + # NOTE: the port must be ignored if you have strip_matching_host_port enabled on envoy host: argocd.example.com:443 prefix: / - service: argocd-server:443 + service: argocd-server:80 + regex_headers: + Content-Type: "^application/grpc.*$" + grpc: true ``` -Login with the `argocd` CLI using the extra `--grpc-web-root-path` flag for gRPC-web. +Login with the `argocd` CLI: ```shell -argocd login : --grpc-web-root-path / +argocd login ``` ### Option 2: Mapping CRD for Path-based Routing @@ -68,9 +72,9 @@ argocd login : --grpc-web-root-path /argo-cd ## [Contour](https://projectcontour.io/) The Contour ingress controller can terminate TLS ingress traffic at the edge. -The Argo CD API server should be run with TLS disabled. Edit the `argocd-server` Deployment to add the `--insecure` flag to the argocd-server container command. +The Argo CD API server should be run with TLS disabled. Edit the `argocd-server` Deployment to add the `--insecure` flag to the argocd-server container command, or simply set `server.insecure: "true"` in the `argocd-cmd-params-cm` ConfigMap [as described here](server-commands/additional-configuration-method.md). 
-It is also possible to provide an internal-only ingress path and an external-only ingress path by deploying two instances of Contour: one behind a private-subnet LoadBalancer service and one behind a public-subnet LoadBalancer service. The private Contour deployment will pick up Ingresses annotated with `kubernetes.io/ingress.class: contour-external` and the public Contour deployment will pick up Ingresses annotated with `kubernetes.io/ingress.class: contour-external`. +It is also possible to provide an internal-only ingress path and an external-only ingress path by deploying two instances of Contour: one behind a private-subnet LoadBalancer service and one behind a public-subnet LoadBalancer service. The private Contour deployment will pick up Ingresses annotated with `kubernetes.io/ingress.class: contour-internal` and the public Contour deployment will pick up Ingresses annotated with `kubernetes.io/ingress.class: contour-external`. This provides the opportunity to deploy the Argo CD UI privately but still allow for SSO callbacks to succeed. 
@@ -79,7 +83,7 @@ Since Contour Ingress supports only a single protocol per Ingress object, define Internal HTTP/HTTPS Ingress: ```yaml -apiVersion: extensions/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: argocd-server-http @@ -91,9 +95,13 @@ spec: - host: internal.path.to.argocd.io http: paths: - - backend: - serviceName: argocd-server - servicePort: http + - path: / + pathType: Prefix + backend: + service: + name: argocd-server + port: + name: http tls: - hosts: - internal.path.to.argocd.io @@ -102,7 +110,7 @@ spec: Internal gRPC Ingress: ```yaml -apiVersion: extensions/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: argocd-server-grpc @@ -113,9 +121,13 @@ spec: - host: grpc-internal.path.to.argocd.io http: paths: - - backend: - serviceName: argocd-server - servicePort: https + - path: / + pathType: Prefix + backend: + service: + name: argocd-server + port: + name: https tls: - hosts: - grpc-internal.path.to.argocd.io @@ -124,7 +136,7 @@ spec: External HTTPS SSO Callback Ingress: ```yaml -apiVersion: extensions/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: argocd-server-external-callback-http @@ -137,9 +149,12 @@ spec: http: paths: - path: /api/dex/callback + pathType: Prefix backend: - serviceName: argocd-server - servicePort: http + service: + name: argocd-server + port: + name: http tls: - hosts: - external.path.to.argocd.io @@ -149,22 +164,7 @@ spec: The argocd-server Service needs to be annotated with `projectcontour.io/upstream-protocol.h2c: "https,443"` to wire up the gRPC protocol proxying. The API server should then be run with TLS disabled. 
Edit the `argocd-server` deployment to add the -`--insecure` flag to the argocd-server command: - -```yaml -spec: - template: - spec: - containers: - - name: argocd-server - command: - - /argocd-server - - --staticassets - - /shared/app - - --repo-server - - argocd-repo-server:8081 - - --insecure -``` +`--insecure` flag to the argocd-server command, or simply set `server.insecure: "true"` in the `argocd-cmd-params-cm` ConfigMap [as described here](server-commands/additional-configuration-method.md). ## [kubernetes/ingress-nginx](https://github.com/kubernetes/ingress-nginx) @@ -180,23 +180,27 @@ In order to expose the Argo CD API server with a single ingress rule and hostnam must be used to passthrough TLS connections and terminate TLS at the Argo CD API server. ```yaml -apiVersion: extensions/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: argocd-server-ingress namespace: argocd annotations: - kubernetes.io/ingress.class: nginx nginx.ingress.kubernetes.io/force-ssl-redirect: "true" nginx.ingress.kubernetes.io/ssl-passthrough: "true" spec: + ingressClassName: nginx rules: - host: argocd.example.com http: paths: - - backend: - serviceName: argocd-server - servicePort: https + - path: / + pathType: Prefix + backend: + service: + name: argocd-server + port: + name: https ``` The above rule terminates TLS at the Argo CD API server, which detects the protocol being used, @@ -207,106 +211,102 @@ requires that the `--enable-ssl-passthrough` flag be added to the command line a #### SSL-Passthrough with cert-manager and Let's Encrypt ```yaml -apiVersion: extensions/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: argocd-server-ingress namespace: argocd annotations: cert-manager.io/cluster-issuer: letsencrypt-prod - kubernetes.io/ingress.class: nginx - kubernetes.io/tls-acme: "true" nginx.ingress.kubernetes.io/ssl-passthrough: "true" - # If you encounter a redirect loop or are getting a 307 response code + # If you encounter a 
redirect loop or are getting a 307 response code # then you need to force the nginx ingress to connect to the backend using HTTPS. # - # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" spec: + ingressClassName: nginx rules: - host: argocd.example.com http: paths: - - backend: - serviceName: argocd-server - servicePort: https - path: / + - path: / + pathType: Prefix + backend: + service: + name: argocd-server + port: + name: https tls: - hosts: - argocd.example.com - secretName: argocd-secret # do not change, this is provided by Argo CD + secretName: argocd-server-tls # as expected by argocd-server ``` -### Option 2: Multiple Ingress Objects And Hosts +### Option 2: SSL Termination at Ingress Controller -Since ingress-nginx Ingress supports only a single protocol per Ingress object, an alternative -way would be to define two Ingress objects. One for HTTP/HTTPS, and the other for gRPC: +An alternative approach is to perform the SSL termination at the Ingress. Since an `ingress-nginx` Ingress supports only a single protocol per Ingress object, two Ingress objects need to be defined using the `nginx.ingress.kubernetes.io/backend-protocol` annotation, one for HTTP/HTTPS and the other for gRPC. + +Each ingress will be for a different domain (`argocd.example.com` and `grpc.argocd.example.com`). This requires that the Ingress resources use different TLS `secretName`s to avoid unexpected behavior. 
HTTP/HTTPS Ingress: ```yaml -apiVersion: extensions/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: argocd-server-http-ingress namespace: argocd annotations: - kubernetes.io/ingress.class: "nginx" nginx.ingress.kubernetes.io/force-ssl-redirect: "true" nginx.ingress.kubernetes.io/backend-protocol: "HTTP" spec: + ingressClassName: nginx rules: - http: paths: - - backend: - serviceName: argocd-server - servicePort: http + - path: / + pathType: Prefix + backend: + service: + name: argocd-server + port: + name: http host: argocd.example.com tls: - hosts: - argocd.example.com - secretName: argocd-secret # do not change, this is provided by Argo CD + secretName: argocd-ingress-http ``` gRPC Ingress: ```yaml -apiVersion: extensions/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: argocd-server-grpc-ingress namespace: argocd annotations: - kubernetes.io/ingress.class: "nginx" nginx.ingress.kubernetes.io/backend-protocol: "GRPC" spec: + ingressClassName: nginx rules: - http: paths: - - backend: - serviceName: argocd-server - servicePort: https + - path: / + pathType: Prefix + backend: + service: + name: argocd-server + port: + name: https host: grpc.argocd.example.com tls: - hosts: - grpc.argocd.example.com - secretName: argocd-secret # do not change, this is provided by Argo CD + secretName: argocd-ingress-grpc ``` The API server should then be run with TLS disabled. Edit the `argocd-server` deployment to add the -`--insecure` flag to the argocd-server command: - -```yaml -spec: - template: - spec: - containers: - - name: argocd-server - command: - - argocd-server - - --staticassets - - /shared/app - - --repo-server - - argocd-repo-server:8081 - - --insecure -``` +`--insecure` flag to the argocd-server command, or simply set `server.insecure: "true"` in the `argocd-cmd-params-cm` ConfigMap [as described here](server-commands/additional-configuration-method.md). 
The obvious disadvantage to this approach is that this technique requires two separate hostnames for the API server -- one for gRPC and the other for HTTP/HTTPS. However it allows TLS termination to @@ -319,7 +319,7 @@ Traefik can be used as an edge router and provide [TLS](https://docs.traefik.io/ It currently has an advantage over NGINX in that it can terminate both TCP and HTTP connections _on the same port_ meaning you do not require multiple hosts or paths. -The API server should be run with TLS disabled. Edit the `argocd-server` deployment to add the `--insecure` flag to the argocd-server command. +The API server should be run with TLS disabled. Edit the `argocd-server` deployment to add the `--insecure` flag to the argocd-server command or set `server.insecure: "true"` in the `argocd-cmd-params-cm` ConfigMap [as described here](server-commands/additional-configuration-method.md). ### IngressRoute CRD ```yaml @@ -347,11 +347,10 @@ spec: scheme: h2c tls: certResolver: default - options: {} ``` ## AWS Application Load Balancers (ALBs) And Classic ELB (HTTP Mode) -AWS ALBs can be used as an L7 Load Balancer for both UI and gRPC traffic, whereas Classic ELBs and NLBs can be used as L4 Load Balancers for both. +AWS ALBs can be used as an L7 Load Balancer for both UI and gRPC traffic, whereas Classic ELBs and NLBs can be used as L4 Load Balancers for both. When using an ALB, you'll want to create a second service for argocd-server. This is necessary because we need to tell the ALB to send the GRPC traffic to a different target group then the UI traffic, since the backend protocol is HTTP2 instead of HTTP1. @@ -374,18 +373,18 @@ spec: selector: app.kubernetes.io/name: argocd-server sessionAffinity: None - type: ClusterIP + type: NodePort ``` -Once we create this service, we can configure the Ingress to conditionally route all `application/grpc` traffic to the new HTTP2 backend, using the `alb.ingress.kubernetes.io/conditions` annotation, as seen below. 
Note: The value after the . in the condition annotation _must_ be the same name as the service that you want traffic to route to - and will be applied on any path with a matching serviceName. +Once we create this service, we can configure the Ingress to conditionally route all `application/grpc` traffic to the new HTTP2 backend, using the `alb.ingress.kubernetes.io/conditions` annotation, as seen below. Note: The value after the . in the condition annotation _must_ be the same name as the service that you want traffic to route to - and will be applied on any path with a matching serviceName. ```yaml - apiVersion: networking.k8s.io/v1 # Use extensions/v1beta1 for Kubernetes 1.18 and older + apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: alb.ingress.kubernetes.io/backend-protocol: HTTPS - # Use this annotation (which must match a service name) to route traffic to HTTP2 backends. + # Use this annotation (which must match a service name) to route traffic to HTTP2 backends. alb.ingress.kubernetes.io/conditions.argogrpc: | [{"field":"http-header","httpHeaderConfig":{"httpHeaderName": "Content-Type", "values":["application/grpc"]}}] alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS":443}]' @@ -396,22 +395,324 @@ Once we create this service, we can configure the Ingress to conditionally route - host: argocd.argoproj.io http: paths: - - backend: - serviceName: argogrpc - servicePort: 443 - pathType: ImplementationSpecific - - backend: - serviceName: argocd-server - servicePort: 443 - pathType: ImplementationSpecific + - path: / + backend: + service: + name: argogrpc + port: + number: 443 + pathType: Prefix + - path: / + backend: + service: + name: argocd-server + port: + number: 443 + pathType: Prefix tls: - hosts: - argocd.argoproj.io ``` +## [Istio](https://www.istio.io) +You can put Argo CD behind Istio using following configurations. 
Here we will achieve both serving Argo CD behind Istio and using subpath on Istio + +First we need to make sure that we can run Argo CD with subpath (i.e. /argocd). For this we have used install.yaml from argocd project as is + +```bash +curl -kLs -o install.yaml https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml +``` + +Save the following file as kustomization.yml + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- ./install.yaml + +patches: +- path: ./patch.yml +``` + +And the following lines as patch.yml + +```yaml +# Use --insecure so Ingress can send traffic with HTTP +# --basehref /argocd is the subpath like https://IP/argocd +# env was added because of https://github.com/argoproj/argo-cd/issues/3572 error +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: argocd-server +spec: + template: + spec: + containers: + - args: + - /usr/local/bin/argocd-server + - --staticassets + - /shared/app + - --redis + - argocd-redis-ha-haproxy:6379 + - --insecure + - --basehref + - /argocd + - --rootpath + - /argocd + name: argocd-server + env: + - name: ARGOCD_MAX_CONCURRENT_LOGIN_REQUESTS_COUNT + value: "0" +``` + +After that install Argo CD (there should be only the 3 yaml files defined above in the current directory) + +```bash +kubectl apply -k ./ -n argocd --wait=true +``` + +Be sure you create the secret for Istio (in our case the secret name is argocd-server-tls on argocd Namespace).
After that we create the Istio resources + +```yaml +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: argocd-gateway + namespace: argocd +spec: + selector: + istio: ingressgateway + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: + - "*" + tls: + httpsRedirect: true + - port: + number: 443 + name: https + protocol: HTTPS + hosts: + - "*" + tls: + credentialName: argocd-server-tls + maxProtocolVersion: TLSV1_3 + minProtocolVersion: TLSV1_2 + mode: SIMPLE + cipherSuites: + - ECDHE-ECDSA-AES128-GCM-SHA256 + - ECDHE-RSA-AES128-GCM-SHA256 + - ECDHE-ECDSA-AES128-SHA + - AES128-GCM-SHA256 + - AES128-SHA + - ECDHE-ECDSA-AES256-GCM-SHA384 + - ECDHE-RSA-AES256-GCM-SHA384 + - ECDHE-ECDSA-AES256-SHA + - AES256-GCM-SHA384 + - AES256-SHA +--- +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: argocd-virtualservice + namespace: argocd +spec: + hosts: + - "*" + gateways: + - argocd-gateway + http: + - match: + - uri: + prefix: /argocd + route: + - destination: + host: argocd-server + port: + number: 80 +``` + +And now we can browse http://{{ IP }}/argocd (it will be rewritten to https://{{ IP }}/argocd) + + +## Google Cloud load balancers with Kubernetes Ingress + +You can make use of the integration of GKE with Google Cloud to deploy Load Balancers using just Kubernetes objects. + +For this we will need these five objects: +- A Service +- A BackendConfig +- A FrontendConfig +- A secret with your SSL certificate +- An Ingress for GKE + +If you need details for all the options available for these Google integrations, you can check the [Google docs on configuring Ingress features](https://cloud.google.com/kubernetes-engine/docs/how-to/ingress-features) + +### Disable internal TLS + +First, to avoid internal redirection loops from HTTP to HTTPS, the API server should be run with TLS disabled.
+ +Add the `--insecure` flag to the `argocd-server` command of the argocd-server deployment, or simply set `server.insecure: "true"` in the `argocd-cmd-params-cm` ConfigMap [as described here](server-commands/additional-configuration-method.md). + +### Creating a service + +Now you need an externally accessible service. This is practically the same as the internal service Argo CD has, but with Google Cloud annotations. Note that this service is annotated to use a [Network Endpoint Group](https://cloud.google.com/load-balancing/docs/negs) (NEG) to allow your load balancer to send traffic directly to your pods without using kube-proxy, so remove the `neg` annotation if that's not what you want. + +The service: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: argocd-server + namespace: argocd + annotations: + cloud.google.com/neg: '{"ingress": true}' + cloud.google.com/backend-config: '{"ports": {"http":"argocd-backend-config"}}' +spec: + type: ClusterIP + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 8080 + selector: + app.kubernetes.io/name: argocd-server +``` + +### Creating a BackendConfig + +See that previous service referencing a backend config called `argocd-backend-config`? So let's deploy it using this yaml: + +```yaml +apiVersion: cloud.google.com/v1 +kind: BackendConfig +metadata: + name: argocd-backend-config + namespace: argocd +spec: + healthCheck: + checkIntervalSec: 30 + timeoutSec: 5 + healthyThreshold: 1 + unhealthyThreshold: 2 + type: HTTP + requestPath: /healthz + port: 8080 +``` + +It uses the same health check as the pods. + +### Creating a FrontendConfig + +Now we can deploy a frontend config with an HTTP to HTTPS redirect: + +```yaml +apiVersion: networking.gke.io/v1beta1 +kind: FrontendConfig +metadata: + name: argocd-frontend-config + namespace: argocd +spec: + redirectToHttps: + enabled: true +``` + +--- +!!!
note + + The next two steps (the certificate secret and the Ingress) are described supposing that you manage the certificate yourself, and you have the certificate and key files for it. In the case that your certificate is Google-managed, fix the next two steps using the [guide to use a Google-managed SSL certificate](https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs#creating_an_ingress_with_a_google-managed_certificate). + +--- + +### Creating a certificate secret + +We need now to create a secret with the SSL certificate we want in our load balancer. It's as easy as executing this command on the path you have your certificate keys stored: + +``` +kubectl -n argocd create secret tls secret-yourdomain-com \ + --cert cert-file.crt --key key-file.key +``` + +### Creating an Ingress + +And finally, to top it all, our Ingress. Note the reference to our frontend config, the service, and to the certificate secret. + +--- +!!! note + + GKE clusters running versions earlier than `1.21.3-gke.1600`, [the only supported value for the pathType field](https://cloud.google.com/kubernetes-engine/docs/how-to/load-balance-ingress#creating_an_ingress) is `ImplementationSpecific`. So you must check your GKE cluster's version. You need to use different YAML depending on the version. + +--- + +If you use the version earlier than `1.21.3-gke.1600`, you should use the following Ingress resource: +```yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: argocd + namespace: argocd + annotations: + networking.gke.io/v1beta1.FrontendConfig: argocd-frontend-config +spec: + tls: + - secretName: secret-yourdomain-com + rules: + - host: argocd.yourdomain.com + http: + paths: + - pathType: ImplementationSpecific + path: "/*" # "*" is needed. 
Without this, the UI Javascript and CSS will not load properly + backend: + service: + name: argocd-server + port: + number: 80 +``` + +If you use the version `1.21.3-gke.1600` or later, you should use the following Ingress resource: +```yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: argocd + namespace: argocd + annotations: + networking.gke.io/v1beta1.FrontendConfig: argocd-frontend-config +spec: + tls: + - secretName: secret-yourdomain-com + rules: + - host: argocd.yourdomain.com + http: + paths: + - pathType: Prefix + path: "/" + backend: + service: + name: argocd-server + port: + number: 80 +``` + +As you may know already, it can take some minutes to deploy the load balancer and become ready to accept connections. Once it's ready, get the public IP address for your Load Balancer, go to your DNS server (Google or third party) and point your domain or subdomain (i.e. argocd.yourdomain.com) to that IP address. + +You can get that IP address describing the Ingress object like this: + +``` +kubectl -n argocd describe ingresses argocd | grep Address +``` + +Once the DNS change is propagated, you're ready to use Argo with your Google Cloud Load Balancer + ## Authenticating through multiple layers of authenticating reverse proxies -ArgoCD endpoints may be protected by one or more reverse proxies layers, in that case, you can provide additional headers through the `argocd` CLI `--header` parameter to authenticate through those layers. +Argo CD endpoints may be protected by one or more reverse proxies layers, in that case, you can provide additional headers through the `argocd` CLI `--header` parameter to authenticate through those layers. 
```shell $ argocd login : --header 'x-token1:foo' --header 'x-token2:bar' # can be repeated multiple times @@ -419,7 +720,7 @@ $ argocd login : --header 'x-token1:foo,x-token2:bar' # headers can ``` ## ArgoCD Server and UI Root Path (v1.5.3) -ArgoCD server and UI can be configured to be available under a non-root path (e.g. `/argo-cd`). +Argo CD server and UI can be configured to be available under a non-root path (e.g. `/argo-cd`). To do this, add the `--rootpath` flag into the `argocd-server` deployment command: ```yaml @@ -430,14 +731,12 @@ spec: containers: - command: - /argocd-server - - --staticassets - - /shared/app - --repo-server - argocd-repo-server:8081 - --rootpath - /argo-cd ``` -NOTE: The flag `--rootpath` changes both API Server and UI base URL. +NOTE: The flag `--rootpath` changes both API Server and UI base URL. Example nginx.conf: ``` @@ -484,8 +783,6 @@ spec: containers: - command: - /argocd-server - - --staticassets - - /shared/app - --repo-server - argocd-repo-server:8081 - --basehref diff --git a/docs/operator-manual/installation.md b/docs/operator-manual/installation.md new file mode 100644 index 0000000000000..5782e5660868f --- /dev/null +++ b/docs/operator-manual/installation.md @@ -0,0 +1,94 @@ +# Installation + +Argo CD has two type of installations: multi-tenant and core. + +## Multi-Tenant + +The multi-tenant installation is the most common way to install Argo CD. This type of installation is typically used to service multiple application developer teams +in the organization and maintained by a platform team. + +The end-users can access Argo CD via the API server using the Web UI or `argocd` CLI. The `argocd` CLI has to be configured using `argocd login ` command +(learn more [here](../user-guide/commands/argocd_login.md)). + +Two types of installation manifests are provided: + +### Non High Availability: + +Not recommended for production use. 
This type of installation is typically used during evaluation period for demonstrations and testing. + +* [install.yaml](https://github.com/argoproj/argo-cd/blob/master/manifests/install.yaml) - Standard Argo CD installation with cluster-admin access. Use this + manifest set if you plan to use Argo CD to deploy applications in the same cluster that Argo CD runs + in (i.e. kubernetes.svc.default). It will still be able to deploy to external clusters with inputted + credentials. + +* [namespace-install.yaml](https://github.com/argoproj/argo-cd/blob/master/manifests/namespace-install.yaml) - Installation of Argo CD which requires only + namespace level privileges (does not need cluster roles). Use this manifest set if you do not + need Argo CD to deploy applications in the same cluster that Argo CD runs in, and will rely solely + on inputted cluster credentials. An example of using this set of manifests is if you run several + Argo CD instances for different teams, where each instance will be deploying applications to + external clusters. It will still be possible to deploy to the same cluster (kubernetes.svc.default) + with inputted credentials (i.e. `argocd cluster add --in-cluster --namespace `). + + > Note: Argo CD CRDs are not included into [namespace-install.yaml](https://github.com/argoproj/argo-cd/blob/master/manifests/namespace-install.yaml). + > and have to be installed separately. The CRD manifests are located in the [manifests/crds](https://github.com/argoproj/argo-cd/blob/master/manifests/crds) directory. + > Use the following command to install them: + > ``` + > kubectl apply -k https://github.com/argoproj/argo-cd/manifests/crds\?ref\=stable + > ``` + +### High Availability: + +High Availability installation is recommended for production use. This bundle includes the same components but tuned for high availability and resiliency. 
+ +* [ha/install.yaml](https://github.com/argoproj/argo-cd/blob/master/manifests/ha/install.yaml) - the same as install.yaml but with multiple replicas for + supported components. + +* [ha/namespace-install.yaml](https://github.com/argoproj/argo-cd/blob/master/manifests/ha/namespace-install.yaml) - the same as namespace-install.yaml but + with multiple replicas for supported components. + +## Core + +The Argo CD Core installation is primarily used to deploy Argo CD in +headless mode. This type of installation is most suitable for cluster +administrators who independently use Argo CD and don't need +multi-tenancy features. This installation includes fewer components +and is easier to setup. The bundle does not include the API server or +UI, and installs the lightweight (non-HA) version of each component. + +Installation manifest is available at [core-install.yaml](https://github.com/argoproj/argo-cd/blob/master/manifests/core-install.yaml). + +For more details about Argo CD Core please refer to the [official +documentation](./core.md) + +## Kustomize + +The Argo CD manifests can also be installed using Kustomize. It is recommended to include the manifest as a remote resource and apply additional customizations +using Kustomize patches. + + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +namespace: argocd +resources: +- https://raw.githubusercontent.com/argoproj/argo-cd/v2.7.2/manifests/install.yaml +``` + +For an example of this, see the [kustomization.yaml](https://github.com/argoproj/argoproj-deployments/blob/master/argocd/kustomization.yaml) +used to deploy the [Argoproj CI/CD infrastructure](https://github.com/argoproj/argoproj-deployments#argoproj-deployments). + +## Helm + +The Argo CD can be installed using [Helm](https://helm.sh/). The Helm chart is currently community maintained and available at +[argo-helm/charts/argo-cd](https://github.com/argoproj/argo-helm/tree/main/charts/argo-cd). 
+ +## Supported versions + +For detailed information regarding Argo CD's version support policy, please refer to the [Release Process and Cadence documentation](https://argo-cd.readthedocs.io/en/stable/developer-guide/release-process-and-cadence/). + +## Tested versions + +The following table shows the versions of Kubernetes that are tested with each version of Argo CD. + +{!docs/operator-manual/tested-kubernetes-versions.md!} diff --git a/docs/operator-manual/metrics.md b/docs/operator-manual/metrics.md index 9cc3075a2e9b6..174b08fd75c2c 100644 --- a/docs/operator-manual/metrics.md +++ b/docs/operator-manual/metrics.md @@ -1,25 +1,88 @@ # Metrics -Argo CD exposes two sets of Prometheus metrics +Argo CD exposes different sets of Prometheus metrics per server. -## Application Metrics +## Application Controller Metrics Metrics about applications. Scraped at the `argocd-metrics:8082/metrics` endpoint. -* Gauge for application health status -* Gauge for application sync status -* Counter for application sync history +| Metric | Type | Description | +|--------|:----:|-------------| +| `argocd_app_info` | gauge | Information about Applications. It contains labels such as `sync_status` and `health_status` that reflect the application state in Argo CD. | +| `argocd_app_k8s_request_total` | counter | Number of kubernetes requests executed during application reconciliation | +| `argocd_app_labels` | gauge | Argo Application labels converted to Prometheus labels. Disabled by default. See section below about how to enable it. | +| `argocd_app_reconcile` | histogram | Application reconciliation performance. | +| `argocd_app_sync_total` | counter | Counter for application sync history | +| `argocd_cluster_api_resource_objects` | gauge | Number of k8s resource objects in the cache. | +| `argocd_cluster_api_resources` | gauge | Number of monitored kubernetes API resources. | +| `argocd_cluster_cache_age_seconds` | gauge | Cluster cache age in seconds. 
| +| `argocd_cluster_connection_status` | gauge | The k8s cluster current connection status. | +| `argocd_cluster_events_total` | counter | Number of processes k8s resource events. | +| `argocd_cluster_info` | gauge | Information about cluster. | +| `argocd_kubectl_exec_pending` | gauge | Number of pending kubectl executions | +| `argocd_kubectl_exec_total` | counter | Number of kubectl executions | +| `argocd_redis_request_duration` | histogram | Redis requests duration. | +| `argocd_redis_request_total` | counter | Number of redis requests executed during application reconciliation | -If you use ArgoCD with many application and project creation and deletion, +If you use Argo CD with many application and project creation and deletion, the metrics page will keep in cache your application and project's history. If you are having issues because of a large number of metrics cardinality due to deleted resources, you can schedule a metrics reset to clean the history with an application controller flag. Example: `--metrics-cache-expiration="24h0m0s"`. +### Exposing Application labels as Prometheus metrics + +There are use-cases where Argo CD Applications contain labels that are desired to be exposed as Prometheus metrics. +Some examples are: + +* Having the team name as a label to allow routing alerts to specific receivers +* Creating dashboards broken down by business units + +As the Application labels are specific to each company, this feature is disabled by default. To enable it, add the +`--metrics-application-labels` flag to the Argo CD application controller. 
+ +The example below will expose the Argo CD Application labels `team-name` and `business-unit` to Prometheus: + + containers: + - command: + - argocd-application-controller + - --metrics-application-labels + - team-name + - --metrics-application-labels + - business-unit + +In this case, the metric would look like: + +``` +# TYPE argocd_app_labels gauge +argocd_app_labels{label_business_unit="bu-id-1",label_team_name="my-team",name="my-app-1",namespace="argocd",project="important-project"} 1 +argocd_app_labels{label_business_unit="bu-id-1",label_team_name="my-team",name="my-app-2",namespace="argocd",project="important-project"} 1 +argocd_app_labels{label_business_unit="bu-id-2",label_team_name="another-team",name="my-app-3",namespace="argocd",project="important-project"} 1 +``` + ## API Server Metrics Metrics about API Server API request and response activity (request totals, response codes, etc...). Scraped at the `argocd-server-metrics:8083/metrics` endpoint. +| Metric | Type | Description | +|--------|:----:|-------------| +| `argocd_redis_request_duration` | histogram | Redis requests duration. | +| `argocd_redis_request_total` | counter | Number of kubernetes requests executed during application reconciliation. | +| `grpc_server_handled_total` | counter | Total number of RPCs completed on the server, regardless of success or failure. | +| `grpc_server_msg_sent_total` | counter | Total number of gRPC stream messages sent by the server. | + +## Repo Server Metrics +Metrics about the Repo Server. +Scraped at the `argocd-repo-server:8084/metrics` endpoint. + +| Metric | Type | Description | +|--------|:----:|-------------| +| `argocd_git_request_duration_seconds` | histogram | Git requests duration seconds. | +| `argocd_git_request_total` | counter | Number of git requests performed by repo server | +| `argocd_redis_request_duration_seconds` | histogram | Redis requests duration seconds. 
| +| `argocd_redis_request_total` | counter | Number of kubernetes requests executed during application reconciliation. | +| `argocd_repo_pending_request_total` | gauge | Number of pending requests requiring repository lock | + ## Prometheus Operator If using Prometheus Operator, the following ServiceMonitor example manifests can be used. @@ -65,7 +128,22 @@ metadata: spec: selector: matchLabels: - app.kubernetes.io/name: argocd-repo-server-metrics + app.kubernetes.io/name: argocd-repo-server + endpoints: + - port: metrics +``` + +```yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: argocd-applicationset-controller-metrics + labels: + release: prometheus-operator +spec: + selector: + matchLabels: + app.kubernetes.io/name: argocd-applicationset-controller endpoints: - port: metrics ``` diff --git a/docs/operator-manual/notifications.md b/docs/operator-manual/notifications.md deleted file mode 100644 index c8c34b24fcbbd..0000000000000 --- a/docs/operator-manual/notifications.md +++ /dev/null @@ -1,14 +0,0 @@ -# Notifications - -The notifications support is not bundled into the Argo CD itself. Instead of reinventing the wheel and implementing opinionated notifications system Argo CD leverages integrations -with the third-party notification system. Following integrations are recommended: - -* To monitor Argo CD performance or health state of managed applications use [Prometheus Metrics](./metrics.md) in combination with [Grafana](https://grafana.com/), -[Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). 
-* To notify the end-users of Argo CD about events like application upgrades, user errors in application definition, etc use one of the following projects: - * [ArgoCD Notifications](https://github.com/argoproj-labs/argocd-notifications) - Argo CD specific notification system that continuously monitors Argo CD applications - and aims to integrate with various notification services such as Slack, SMTP, Telegram, Discord, etc. - * [Argo Kube Notifier](https://github.com/argoproj-labs/argo-kube-notifier) - generic Kubernetes resource controller that allows monitoring any Kubernetes resource and sends a - notification when the configured rule is met. - * [Kube Watch](https://github.com/bitnami-labs/kubewatch) - a Kubernetes watcher that could publishes notification to Slack/hipchat/mattermost/flock channels. It watches the - cluster for resource changes and notifies them through webhooks. diff --git a/docs/operator-manual/notifications/argocd-notifications-cm.yaml b/docs/operator-manual/notifications/argocd-notifications-cm.yaml new file mode 100644 index 0000000000000..5c6b95a43dd58 --- /dev/null +++ b/docs/operator-manual/notifications/argocd-notifications-cm.yaml @@ -0,0 +1,95 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-notifications-cm +data: + # Triggers define the condition when the notification should be sent and list of templates required to generate the message + # Recipients can subscribe to the trigger and specify the required message template and destination notification service. + trigger.on-sync-status-unknown: | + - when: app.status.sync.status == 'Unknown' + send: [my-custom-template] + + # Optional 'oncePer' property ensure that notification is sent only once per specified field value + # E.g. 
following is triggered once per sync revision + trigger.on-deployed: | + - when: app.status.operationState.phase in ['Succeeded'] and app.status.health.status == 'Healthy' + oncePer: app.status.sync.revision + send: [app-sync-succeeded] + + # Templates are used to generate the notification template message + template.my-custom-template: | + message: | + Application details: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}. + + # Templates might have notification service specific fields. E.g. slack message might include annotations + template.my-custom-template-slack-template: | + message: | + Application {{.app.metadata.name}} sync is {{.app.status.sync.status}}. + Application details: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}. + email: + subject: Application {{.app.metadata.name}} sync status is {{.app.status.sync.status}} + slack: + attachments: | + [{ + "title": "{{.app.metadata.name}}", + "title_link": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + "color": "#18be52" + }] + + # Holds list of triggers that are used by default if trigger is not specified explicitly in the subscription + defaultTriggers: | + - on-sync-status-unknown + + # Notification services are used to deliver message. + # Service definition might reference values from argocd-notifications-secret Secret using $my-key format + # Service format key is: service.. 
+ # Slack + service.slack: | + token: $slack-token + username: # optional username + icon: # optional icon for the message (supports both emoij and url notation) + + # Slack based notifier with name mattermost + service.slack.mattermost: | + apiURL: https://my-mattermost-url.com/api + token: $slack-token + username: # optional username + icon: # optional icon for the message (supports both emoij and url notation) + + # Email + service.email: | + host: smtp.gmail.com + port: 587 + from: @gmail.com + username: $email-username + password: $email-password + + # Opsgenie + service.opsgenie: | + apiUrl: api.opsgenie.com + apiKeys: + $opsgenie-team-id: $opsgenie-team-api-key + ... + + # Telegram + service.telegram: | + token: $telegram-token + + # Context holds list of variables that can be referenced in templates + context: | + argocdUrl: https://cd.apps.argoproj.io/ + + # Contains centrally managed global application subscriptions + subscriptions: | + # subscription for on-sync-status-unknown trigger notifications + - recipients: + - slack:test2 + - email:test@gmail.com + triggers: + - on-sync-status-unknown + # subscription restricted to applications with matching labels only + - recipients: + - slack:test3 + selector: test=true + triggers: + - on-sync-status-unknown \ No newline at end of file diff --git a/docs/operator-manual/notifications/argocd-notifications-secret.yaml b/docs/operator-manual/notifications/argocd-notifications-secret.yaml new file mode 100644 index 0000000000000..72ed67c586061 --- /dev/null +++ b/docs/operator-manual/notifications/argocd-notifications-secret.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: argocd-notifications-secret +stringData: + slack-token: + email-username: @gmail.com + email-password: + +type: Opaque diff --git a/docs/operator-manual/notifications/catalog.md b/docs/operator-manual/notifications/catalog.md new file mode 100644 index 0000000000000..8f413ac7eb5b3 --- /dev/null +++ 
b/docs/operator-manual/notifications/catalog.md @@ -0,0 +1,531 @@ +# Triggers and Templates Catalog +## Triggers +| NAME | DESCRIPTION | TEMPLATE | +|------------------------|---------------------------------------------------------------|-----------------------------------------------------| +| on-created | Application is created. | [app-created](#app-created) | +| on-deleted | Application is deleted. | [app-deleted](#app-deleted) | +| on-deployed | Application is synced and healthy. Triggered once per commit. | [app-deployed](#app-deployed) | +| on-health-degraded | Application has degraded | [app-health-degraded](#app-health-degraded) | +| on-sync-failed | Application syncing has failed | [app-sync-failed](#app-sync-failed) | +| on-sync-running | Application is being synced | [app-sync-running](#app-sync-running) | +| on-sync-status-unknown | Application status is 'Unknown' | [app-sync-status-unknown](#app-sync-status-unknown) | +| on-sync-succeeded | Application syncing has succeeded | [app-sync-succeeded](#app-sync-succeeded) | + +## Templates +### app-created +**definition**: +```yaml +email: + subject: Application {{.app.metadata.name}} has been created. +message: Application {{.app.metadata.name}} has been created. +teams: + title: Application {{.app.metadata.name}} has been created. + +``` +### app-deleted +**definition**: +```yaml +email: + subject: Application {{.app.metadata.name}} has been deleted. +message: Application {{.app.metadata.name}} has been deleted. +teams: + title: Application {{.app.metadata.name}} has been deleted. + +``` +### app-deployed +**definition**: +```yaml +email: + subject: New version of an application {{.app.metadata.name}} is up and running. +message: | + {{if eq .serviceType "slack"}}:white_check_mark:{{end}} Application {{.app.metadata.name}} is now running new version of deployments manifests. 
+slack: + attachments: | + [{ + "title": "{{ .app.metadata.name}}", + "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + "color": "#18be52", + "fields": [ + { + "title": "Sync Status", + "value": "{{.app.status.sync.status}}", + "short": true + }, + { + "title": "Repository", + "value": "{{.app.spec.source.repoURL}}", + "short": true + }, + { + "title": "Revision", + "value": "{{.app.status.sync.revision}}", + "short": true + } + {{range $index, $c := .app.status.conditions}} + {{if not $index}},{{end}} + {{if $index}},{{end}} + { + "title": "{{$c.type}}", + "value": "{{$c.message}}", + "short": true + } + {{end}} + ] + }] + deliveryPolicy: Post + groupingKey: "" + notifyBroadcast: false +teams: + facts: | + [{ + "name": "Sync Status", + "value": "{{.app.status.sync.status}}" + }, + { + "name": "Repository", + "value": "{{.app.spec.source.repoURL}}" + }, + { + "name": "Revision", + "value": "{{.app.status.sync.revision}}" + } + {{range $index, $c := .app.status.conditions}} + {{if not $index}},{{end}} + {{if $index}},{{end}} + { + "name": "{{$c.type}}", + "value": "{{$c.message}}" + } + {{end}} + ] + potentialAction: |- + [{ + "@type":"OpenUri", + "name":"Operation Application", + "targets":[{ + "os":"default", + "uri":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}" + }] + }, + { + "@type":"OpenUri", + "name":"Open Repository", + "targets":[{ + "os":"default", + "uri":"{{.app.spec.source.repoURL | call .repo.RepoURLToHTTPS}}" + }] + }] + themeColor: '#000080' + title: New version of an application {{.app.metadata.name}} is up and running. + +``` +### app-health-degraded +**definition**: +```yaml +email: + subject: Application {{.app.metadata.name}} has degraded. +message: | + {{if eq .serviceType "slack"}}:exclamation:{{end}} Application {{.app.metadata.name}} has degraded. + Application details: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}. 
+slack: + attachments: | + [{ + "title": "{{ .app.metadata.name}}", + "title_link": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + "color": "#f4c030", + "fields": [ + { + "title": "Health Status", + "value": "{{.app.status.health.status}}", + "short": true + }, + { + "title": "Repository", + "value": "{{.app.spec.source.repoURL}}", + "short": true + } + {{range $index, $c := .app.status.conditions}} + {{if not $index}},{{end}} + {{if $index}},{{end}} + { + "title": "{{$c.type}}", + "value": "{{$c.message}}", + "short": true + } + {{end}} + ] + }] + deliveryPolicy: Post + groupingKey: "" + notifyBroadcast: false +teams: + facts: | + [{ + "name": "Health Status", + "value": "{{.app.status.health.status}}" + }, + { + "name": "Repository", + "value": "{{.app.spec.source.repoURL}}" + } + {{range $index, $c := .app.status.conditions}} + {{if not $index}},{{end}} + {{if $index}},{{end}} + { + "name": "{{$c.type}}", + "value": "{{$c.message}}" + } + {{end}} + ] + potentialAction: | + [{ + "@type":"OpenUri", + "name":"Open Application", + "targets":[{ + "os":"default", + "uri":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}" + }] + }, + { + "@type":"OpenUri", + "name":"Open Repository", + "targets":[{ + "os":"default", + "uri":"{{.app.spec.source.repoURL | call .repo.RepoURLToHTTPS}}" + }] + }] + themeColor: '#FF0000' + title: Application {{.app.metadata.name}} has degraded. + +``` +### app-sync-failed +**definition**: +```yaml +email: + subject: Failed to sync application {{.app.metadata.name}}. +message: | + {{if eq .serviceType "slack"}}:exclamation:{{end}} The sync operation of application {{.app.metadata.name}} has failed at {{.app.status.operationState.finishedAt}} with the following error: {{.app.status.operationState.message}} + Sync operation details are available at: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true . 
+slack: + attachments: | + [{ + "title": "{{ .app.metadata.name}}", + "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + "color": "#E96D76", + "fields": [ + { + "title": "Sync Status", + "value": "{{.app.status.sync.status}}", + "short": true + }, + { + "title": "Repository", + "value": "{{.app.spec.source.repoURL}}", + "short": true + } + {{range $index, $c := .app.status.conditions}} + {{if not $index}},{{end}} + {{if $index}},{{end}} + { + "title": "{{$c.type}}", + "value": "{{$c.message}}", + "short": true + } + {{end}} + ] + }] + deliveryPolicy: Post + groupingKey: "" + notifyBroadcast: false +teams: + facts: | + [{ + "name": "Sync Status", + "value": "{{.app.status.sync.status}}" + }, + { + "name": "Failed at", + "value": "{{.app.status.operationState.finishedAt}}" + }, + { + "name": "Repository", + "value": "{{.app.spec.source.repoURL}}" + } + {{range $index, $c := .app.status.conditions}} + {{if not $index}},{{end}} + {{if $index}},{{end}} + { + "name": "{{$c.type}}", + "value": "{{$c.message}}" + } + {{end}} + ] + potentialAction: |- + [{ + "@type":"OpenUri", + "name":"Open Operation", + "targets":[{ + "os":"default", + "uri":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true" + }] + }, + { + "@type":"OpenUri", + "name":"Open Repository", + "targets":[{ + "os":"default", + "uri":"{{.app.spec.source.repoURL | call .repo.RepoURLToHTTPS}}" + }] + }] + themeColor: '#FF0000' + title: Failed to sync application {{.app.metadata.name}}. + +``` +### app-sync-running +**definition**: +```yaml +email: + subject: Start syncing application {{.app.metadata.name}}. +message: | + The sync operation of application {{.app.metadata.name}} has started at {{.app.status.operationState.startedAt}}. + Sync operation details are available at: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true . 
+slack: + attachments: | + [{ + "title": "{{ .app.metadata.name}}", + "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + "color": "#0DADEA", + "fields": [ + { + "title": "Sync Status", + "value": "{{.app.status.sync.status}}", + "short": true + }, + { + "title": "Repository", + "value": "{{.app.spec.source.repoURL}}", + "short": true + } + {{range $index, $c := .app.status.conditions}} + {{if not $index}},{{end}} + {{if $index}},{{end}} + { + "title": "{{$c.type}}", + "value": "{{$c.message}}", + "short": true + } + {{end}} + ] + }] + deliveryPolicy: Post + groupingKey: "" + notifyBroadcast: false +teams: + facts: | + [{ + "name": "Sync Status", + "value": "{{.app.status.sync.status}}" + }, + { + "name": "Started at", + "value": "{{.app.status.operationState.startedAt}}" + }, + { + "name": "Repository", + "value": "{{.app.spec.source.repoURL}}" + } + {{range $index, $c := .app.status.conditions}} + {{if not $index}},{{end}} + {{if $index}},{{end}} + { + "name": "{{$c.type}}", + "value": "{{$c.message}}" + } + {{end}} + ] + potentialAction: |- + [{ + "@type":"OpenUri", + "name":"Open Operation", + "targets":[{ + "os":"default", + "uri":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true" + }] + }, + { + "@type":"OpenUri", + "name":"Open Repository", + "targets":[{ + "os":"default", + "uri":"{{.app.spec.source.repoURL | call .repo.RepoURLToHTTPS}}" + }] + }] + title: Start syncing application {{.app.metadata.name}}. + +``` +### app-sync-status-unknown +**definition**: +```yaml +email: + subject: Application {{.app.metadata.name}} sync status is 'Unknown' +message: | + {{if eq .serviceType "slack"}}:exclamation:{{end}} Application {{.app.metadata.name}} sync is 'Unknown'. + Application details: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}. 
+ {{if ne .serviceType "slack"}} + {{range $c := .app.status.conditions}} + * {{$c.message}} + {{end}} + {{end}} +slack: + attachments: | + [{ + "title": "{{ .app.metadata.name}}", + "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + "color": "#E96D76", + "fields": [ + { + "title": "Sync Status", + "value": "{{.app.status.sync.status}}", + "short": true + }, + { + "title": "Repository", + "value": "{{.app.spec.source.repoURL}}", + "short": true + } + {{range $index, $c := .app.status.conditions}} + {{if not $index}},{{end}} + {{if $index}},{{end}} + { + "title": "{{$c.type}}", + "value": "{{$c.message}}", + "short": true + } + {{end}} + ] + }] + deliveryPolicy: Post + groupingKey: "" + notifyBroadcast: false +teams: + facts: | + [{ + "name": "Sync Status", + "value": "{{.app.status.sync.status}}" + }, + { + "name": "Repository", + "value": "{{.app.spec.source.repoURL}}" + } + {{range $index, $c := .app.status.conditions}} + {{if not $index}},{{end}} + {{if $index}},{{end}} + { + "name": "{{$c.type}}", + "value": "{{$c.message}}" + } + {{end}} + ] + potentialAction: |- + [{ + "@type":"OpenUri", + "name":"Open Application", + "targets":[{ + "os":"default", + "uri":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}" + }] + }, + { + "@type":"OpenUri", + "name":"Open Repository", + "targets":[{ + "os":"default", + "uri":"{{.app.spec.source.repoURL | call .repo.RepoURLToHTTPS}}" + }] + }] + title: Application {{.app.metadata.name}} sync status is 'Unknown' + +``` +### app-sync-succeeded +**definition**: +```yaml +email: + subject: Application {{.app.metadata.name}} has been successfully synced. +message: | + {{if eq .serviceType "slack"}}:white_check_mark:{{end}} Application {{.app.metadata.name}} has been successfully synced at {{.app.status.operationState.finishedAt}}. + Sync operation details are available at: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true . 
+slack: + attachments: | + [{ + "title": "{{ .app.metadata.name}}", + "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + "color": "#18be52", + "fields": [ + { + "title": "Sync Status", + "value": "{{.app.status.sync.status}}", + "short": true + }, + { + "title": "Repository", + "value": "{{.app.spec.source.repoURL}}", + "short": true + } + {{range $index, $c := .app.status.conditions}} + {{if not $index}},{{end}} + {{if $index}},{{end}} + { + "title": "{{$c.type}}", + "value": "{{$c.message}}", + "short": true + } + {{end}} + ] + }] + deliveryPolicy: Post + groupingKey: "" + notifyBroadcast: false +teams: + facts: | + [{ + "name": "Sync Status", + "value": "{{.app.status.sync.status}}" + }, + { + "name": "Synced at", + "value": "{{.app.status.operationState.finishedAt}}" + }, + { + "name": "Repository", + "value": "{{.app.spec.source.repoURL}}" + } + {{range $index, $c := .app.status.conditions}} + {{if not $index}},{{end}} + {{if $index}},{{end}} + { + "name": "{{$c.type}}", + "value": "{{$c.message}}" + } + {{end}} + ] + potentialAction: |- + [{ + "@type":"OpenUri", + "name":"Operation Details", + "targets":[{ + "os":"default", + "uri":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true" + }] + }, + { + "@type":"OpenUri", + "name":"Open Repository", + "targets":[{ + "os":"default", + "uri":"{{.app.spec.source.repoURL | call .repo.RepoURLToHTTPS}}" + }] + }] + themeColor: '#000080' + title: Application {{.app.metadata.name}} has been successfully synced + +``` diff --git a/docs/operator-manual/notifications/functions.md b/docs/operator-manual/notifications/functions.md new file mode 100644 index 0000000000000..3d614e4e53a55 --- /dev/null +++ b/docs/operator-manual/notifications/functions.md @@ -0,0 +1,79 @@ +### **time** +Time related functions. + +
+**`time.Now() Time`**
+
+Executes the built-in Golang [time.Now](https://golang.org/pkg/time/#Now) function. Returns an instance of
+Golang [Time](https://golang.org/pkg/time/#Time).
+
+
+**`time.Parse(val string) Time`**
+
+Parses the specified string using the RFC3339 layout. Returns an instance of Golang [Time](https://golang.org/pkg/time/#Time).
+
+### **strings**
+String related functions.
+
+
+**`strings.ReplaceAll() string`**
+
+Executes the built-in Golang [strings.ReplaceAll](https://pkg.go.dev/strings#ReplaceAll) function.
+
+
+**`strings.ToUpper() string`**
+
+Executes the built-in Golang [strings.ToUpper](https://pkg.go.dev/strings#ToUpper) function.
+
+
+**`strings.ToLower() string`**
+
+Executes the built-in Golang [strings.ToLower](https://pkg.go.dev/strings#ToLower) function.
+
+### **sync**
+
+**`sync.GetInfoItem(app map, name string) string`**
+Returns the `info` item value for the given name stored in the Argo CD App sync operation.
+
+### **repo**
+Functions that provide additional information about the Application source repository.
+
+**`repo.RepoURLToHTTPS(url string) string`**
+
+Transforms the given Git URL into HTTPS format.
+
+
+**`repo.FullNameByRepoURL(url string) string`**
+
+Returns the repository URL full name (`<owner>/<repoName>`). Currently supports only GitHub, GitLab and Bitbucket.
+
+
+**`repo.GetCommitMetadata(sha string) CommitMetadata`**
+
+Returns commit metadata. The commit must belong to the application source repository. `CommitMetadata` fields:
+
+* `Message string` - commit message
+* `Author string` - commit author
+* `Date time.Time` - commit creation date
+* `Tags []string` - associated tags
+
+
+**`repo.GetAppDetails() AppDetail`** + +Returns application details. `AppDetail` fields: + +* `Type string` - AppDetail type +* `Helm HelmAppSpec` - Helm details + * Fields : + * `Name string` + * `ValueFiles []string` + * `Parameters []*v1alpha1.HelmParameter` + * `Values string` + * `FileParameters []*v1alpha1.HelmFileParameter` + * Methods : + * `GetParameterValueByName(Name string)` Retrieve value by name in Parameters field + * `GetFileParameterPathByName(Name string)` Retrieve path by name in FileParameters field +* +* `Kustomize *apiclient.KustomizeAppSpec` - Kustomize details +* `Directory *apiclient.DirectoryAppSpec` - Directory details diff --git a/docs/operator-manual/notifications/grafana-dashboard.json b/docs/operator-manual/notifications/grafana-dashboard.json new file mode 100644 index 0000000000000..5d04f9116aa16 --- /dev/null +++ b/docs/operator-manual/notifications/grafana-dashboard.json @@ -0,0 +1,305 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 4, + "iteration": 1589141097815, + "links": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": 
"sum(increase(argocd_notifications_trigger_eval_total[$interval])) by (notifier)", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Trigger Evaluations", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(increase(argocd_notifications_deliveries_total[$interval])) by (notifier)", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Notification deliveries", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + 
"logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "schemaVersion": 21, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "text": "Prometheus", + "value": "Prometheus" + }, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "auto": true, + "auto_count": 30, + "auto_min": "10s", + "current": { + "selected": false, + "text": "1m", + "value": "1m" + }, + "hide": 0, + "label": null, + "name": "interval", + "options": [ + { + "selected": false, + "text": "auto", + "value": "$__auto_interval_interval" + }, + { + "selected": true, + "text": "1m", + "value": "1m" + }, + { + "selected": false, + "text": "5m", + "value": "5m" + }, + { + "selected": false, + "text": "10m", + "value": "10m" + }, + { + "selected": false, + "text": "30m", + "value": "30m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": "2h", + "value": "2h" + }, + { + "selected": false, + "text": "4h", + "value": "4h" + }, + { + "selected": false, + "text": "8h", + "value": "8h" + } + ], + "query": "1m,5m,10m,30m,1h,2h,4h,8h", + "refresh": 2, + "skipUrlSync": false, + "type": "interval" + } + ] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Argo CD Notifications", + "uid": "3qXvXigMz", + "version": 1 +} \ No newline at end of file diff --git a/docs/operator-manual/notifications/index.md b/docs/operator-manual/notifications/index.md new file mode 100644 index 0000000000000..c719d10e7611c --- /dev/null +++ b/docs/operator-manual/notifications/index.md @@ -0,0 +1,47 @@ +# Notifications Overview + +Argo 
CD Notifications continuously monitors Argo CD applications and provides a flexible way to notify +users about important changes in the application state. Using a flexible mechanism of +[triggers](triggers.md) and [templates](templates.md) you can configure when the notification should be sent as +well as notification content. Argo CD Notifications includes the [catalog](catalog.md) of useful triggers and templates. +So you can just use them instead of reinventing new ones. + +## Getting Started + +* Install Triggers and Templates from the catalog + + ```bash + kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/notifications_catalog/install.yaml + ``` + +* Add Email username and password token to `argocd-notifications-secret` secret + + ```bash + EMAIL_USER= + PASSWORD= + + kubectl apply -n argocd -f - << EOF + apiVersion: v1 + kind: Secret + metadata: + name: argocd-notifications-secret + stringData: + email-username: $EMAIL_USER + email-password: $PASSWORD + type: Opaque + EOF + ``` + +* Register Email notification service + + ```bash + kubectl patch cm argocd-notifications-cm -n argocd --type merge -p '{"data": {"service.email.gmail": "{ username: $email-username, password: $email-password, host: smtp.gmail.com, port: 465, from: $email-username }" }}' + ``` + +* Subscribe to notifications by adding the `notifications.argoproj.io/subscribe.on-sync-succeeded.slack` annotation to the Argo CD application or project: + + ```bash + kubectl patch app -n argocd -p '{"metadata": {"annotations": {"notifications.argoproj.io/subscribe.on-sync-succeeded.slack":""}}}' --type merge + ``` + +Try syncing an application to get notified when the sync is completed. 
diff --git a/docs/operator-manual/notifications/monitoring.md b/docs/operator-manual/notifications/monitoring.md new file mode 100644 index 0000000000000..a0aabbaae1f09 --- /dev/null +++ b/docs/operator-manual/notifications/monitoring.md @@ -0,0 +1,30 @@ +# Monitoring + +The Argo CD Notification controller serves Prometheus metrics on port 9001. + +!!! note + The metrics port can be changed using the `--metrics-port` flag in `argocd-notifications-controller` deployment. + +## Metrics +The following metrics are available: + +### `argocd_notifications_deliveries_total` + + Number of delivered notifications. + Labels: + +* `template` - notification template name +* `notifier` - notification service name +* `succeeded` - flag that indicates if notification was successfully sent or failed + +### `argocd_notifications_trigger_eval_total` + + Number of trigger evaluations. + Labels: + +* `name` - trigger name +* `triggered` - flag that indicates if trigger condition returned true of false + +## Examples + +* Grafana Dashboard: [grafana-dashboard.json](grafana-dashboard.json) diff --git a/docs/operator-manual/notifications/services/alertmanager.md b/docs/operator-manual/notifications/services/alertmanager.md new file mode 100755 index 0000000000000..e0f9d7e4e7889 --- /dev/null +++ b/docs/operator-manual/notifications/services/alertmanager.md @@ -0,0 +1,164 @@ +# Alertmanager + +## Parameters + +The notification service is used to push events to [Alertmanager](https://github.com/prometheus/alertmanager), and the following settings need to be specified: + +* `targets` - the alertmanager service address, array type +* `scheme` - optional, default is "http", e.g. 
http or https +* `apiPath` - optional, default is "/api/v2/alerts" +* `insecureSkipVerify` - optional, default is "false", when scheme is https whether to skip the verification of ca +* `basicAuth` - optional, server auth +* `bearerToken` - optional, server auth +* `timeout` - optional, the timeout in seconds used when sending alerts, default is "3 seconds" + +`basicAuth` or `bearerToken` is used for authentication, you can choose one. If the two are set at the same time, `basicAuth` takes precedence over `bearerToken`. + +## Example + +### Prometheus Alertmanager config + +```yaml +global: + resolve_timeout: 5m + +route: + group_by: ['alertname'] + group_wait: 10s + group_interval: 10s + repeat_interval: 1h + receiver: 'default' +receivers: +- name: 'default' + webhook_configs: + - send_resolved: false + url: 'http://10.5.39.39:10080/api/alerts/webhook' +``` + +You should turn off "send_resolved" or you will receive unnecessary recovery notifications after "resolve_timeout". + +### Send one alertmanager without auth + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + service.alertmanager: | + targets: + - 10.5.39.39:9093 +``` + +### Send alertmanager cluster with custom api path + +If your alertmanager has changed the default api, you can customize "apiPath". + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + service.alertmanager: | + targets: + - 10.5.39.39:443 + scheme: https + apiPath: /api/events + insecureSkipVerify: true +``` + +### Send high availability alertmanager with auth + +Store auth token in `argocd-notifications-secret` Secret and use configure in `argocd-notifications-cm` ConfigMap. 
+ +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: +stringData: + alertmanager-username: + alertmanager-password: + alertmanager-bearer-token: +``` + +- with basicAuth + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + service.alertmanager: | + targets: + - 10.5.39.39:19093 + - 10.5.39.39:29093 + - 10.5.39.39:39093 + scheme: https + apiPath: /api/v2/alerts + insecureSkipVerify: true + basicAuth: + username: $alertmanager-username + password: $alertmanager-password +``` + +- with bearerToken + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + service.alertmanager: | + targets: + - 10.5.39.39:19093 + - 10.5.39.39:29093 + - 10.5.39.39:39093 + scheme: https + apiPath: /api/v2/alerts + insecureSkipVerify: true + bearerToken: $alertmanager-bearer-token +``` + +## Templates + +* `labels` - at least one label pair required, implement different notification strategies according to alertmanager routing +* `annotations` - optional, specifies a set of information labels, which can be used to store longer additional information, but only for display +* `generatorURL` - optional, default is '{{.app.spec.source.repoURL}}', backlink used to identify the entity that caused this alert in the client + +the `label` or `annotations` or `generatorURL` values can be templated. + +```yaml +context: | + argocdUrl: https://example.com/argocd + +template.app-deployed: | + message: Application {{.app.metadata.name}} has been healthy. + alertmanager: + labels: + fault_priority: "P5" + event_bucket: "deploy" + event_status: "succeed" + recipient: "{{.recipient}}" + annotations: + application: '{{.app.metadata.name}}' + author: "{{(call .repo.GetCommitMetadata .app.status.sync.revision).Author}}" + message: "{{(call .repo.GetCommitMetadata .app.status.sync.revision).Message}}" +``` + +You can do targeted push on [Alertmanager](https://github.com/prometheus/alertmanager) according to labels. 
+ +```yaml +template.app-deployed: | + message: Application {{.app.metadata.name}} has been healthy. + alertmanager: + labels: + alertname: app-deployed + fault_priority: "P5" + event_bucket: "deploy" +``` + +There is a special label `alertname`. If you don’t set its value, it will be equal to the template name by default. \ No newline at end of file diff --git a/docs/operator-manual/notifications/services/awssqs.md b/docs/operator-manual/notifications/services/awssqs.md new file mode 100755 index 0000000000000..6bbc47cbbc0b5 --- /dev/null +++ b/docs/operator-manual/notifications/services/awssqs.md @@ -0,0 +1,106 @@ +# AWS SQS + +## Parameters + +This notification service is capable of sending simple messages to AWS SQS queue. + +* `queue` - name of the queue you are intending to send messages to. Can be overwritten with target destination annotation. +* `region` - region of the sqs queue can be provided via env variable AWS_DEFAULT_REGION +* `key` - optional, aws access key must be either referenced from a secret via variable or via env variable AWS_ACCESS_KEY_ID +* `secret` - optional, aws access secret must be either referenced from a secret via variable or via env variable AWS_SECRET_ACCESS_KEY +* `account` optional, external accountId of the queue +* `endpointUrl` optional, useful for development with localstack + +## Example + +### Using Secret for credential retrieval: + +Resource Annotation: +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + annotations: + notifications.argoproj.io/subscribe.on-deployment-ready.awssqs: "overwrite-myqueue" +``` + +* ConfigMap +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + service.awssqs: | + region: "us-east-2" + queue: "myqueue" + account: "1234567" + key: "$awsaccess_key" + secret: "$awsaccess_secret" + + template.deployment-ready: | + message: | + Deployment {{.obj.metadata.name}} is ready! 
+ + trigger.on-deployment-ready: | + - when: any(obj.status.conditions, {.type == 'Available' && .status == 'True'}) + send: [deployment-ready] + - oncePer: obj.metadata.annotations["generation"] + +``` + Secret +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: +stringData: + awsaccess_key: test + awsaccess_secret: test +``` + + +### Minimal configuration using AWS Env variables + +Ensure the following list of environment variables is injected via OIDC, or other method. And assuming SQS is local to the account. +You may skip usage of secret for sensitive data and omit other parameters. (Setting parameters via ConfigMap takes precedence.) + +Variables: + +```bash +export AWS_ACCESS_KEY_ID="test" +export AWS_SECRET_ACCESS_KEY="test" +export AWS_DEFAULT_REGION="us-east-1" +``` + +Resource Annotation: +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + annotations: + notifications.argoproj.io/subscribe.on-deployment-ready.awssqs: "" +``` + +* ConfigMap +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + service.awssqs: | + queue: "myqueue" + + template.deployment-ready: | + message: | + Deployment {{.obj.metadata.name}} is ready! 
+ + trigger.on-deployment-ready: | + - when: any(obj.status.conditions, {.type == 'Available' && .status == 'True'}) + send: [deployment-ready] + - oncePer: obj.metadata.annotations["generation"] + +``` diff --git a/docs/operator-manual/notifications/services/email.md b/docs/operator-manual/notifications/services/email.md new file mode 100755 index 0000000000000..b81ab6cde8b4c --- /dev/null +++ b/docs/operator-manual/notifications/services/email.md @@ -0,0 +1,63 @@ +# Email + +## Parameters + +The Email notification service sends email notifications using SMTP protocol and requires specifying the following settings: + +* `host` - the SMTP server host name +* `port` - the SMTP server port +* `username` - username +* `password` - password +* `from` - from email address +* `html` - optional bool, true or false +* `insecure_skip_verify` - optional bool, true or false + +## Example + +The following snippet contains sample Gmail service configuration: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + service.email.gmail: | + username: $email-username + password: $email-password + host: smtp.gmail.com + port: 465 + from: $email-username +``` + +Without authentication: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + service.email.example: | + host: smtp.example.com + port: 587 + from: $email-username +``` + +## Template + +[Notification templates](../templates.md) support specifying subject for email notifications: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + template.app-sync-succeeded: | + email: + subject: Application {{.app.metadata.name}} has been successfully synced. + message: | + {{if eq .serviceType "slack"}}:white_check_mark:{{end}} Application {{.app.metadata.name}} has been successfully synced at {{.app.status.operationState.finishedAt}}. + Sync operation details are available at: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true . 
+``` diff --git a/docs/operator-manual/notifications/services/github.md b/docs/operator-manual/notifications/services/github.md new file mode 100755 index 0000000000000..a3f89f8c87ef0 --- /dev/null +++ b/docs/operator-manual/notifications/services/github.md @@ -0,0 +1,86 @@ +# GitHub + +## Parameters + +The GitHub notification service changes commit status using [GitHub Apps](https://docs.github.com/en/developers/apps) and requires specifying the following settings: + +* `appID` - the app id +* `installationID` - the app installation id +* `privateKey` - the app private key +* `enterpriseBaseURL` - optional URL, e.g. https://git.example.com/ + +## Configuration + +1. Create a GitHub Apps using https://github.com/settings/apps/new +2. Change repository permissions to enable write commit statuses and/or deployments +![2](https://user-images.githubusercontent.com/18019529/108397381-3ca57980-725b-11eb-8d17-5b8992dc009e.png) +3. Generate a private key, and download it automatically +![3](https://user-images.githubusercontent.com/18019529/108397926-d4a36300-725b-11eb-83fe-74795c8c3e03.png) +4. Install app to account +5. Store privateKey in `argocd-notifications-secret` Secret and configure GitHub integration +in `argocd-notifications-cm` ConfigMap + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + service.github: | + appID: + installationID: + privateKey: $github-privateKey +``` + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: +stringData: + github-privateKey: | + -----BEGIN RSA PRIVATE KEY----- + (snip) + -----END RSA PRIVATE KEY----- +``` + +6. 
Create subscription for your GitHub integration + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + annotations: + notifications.argoproj.io/subscribe..github: "" +``` + +## Templates + +![](https://user-images.githubusercontent.com/18019529/108520497-168ce180-730e-11eb-93cb-b0b91f99bdc5.png) + +```yaml +template.app-deployed: | + message: | + Application {{.app.metadata.name}} is now running new version of deployments manifests. + github: + repoURLPath: "{{.app.spec.source.repoURL}}" + revisionPath: "{{.app.status.operationState.syncResult.revision}}" + status: + state: success + label: "continuous-delivery/{{.app.metadata.name}}" + targetURL: "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true" + deployment: + state: success + environment: production + environmentURL: "https://{{.app.metadata.name}}.example.com" + logURL: "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true" + requiredContexts: [] + autoMerge: true +``` + +**Notes**: +- If the message is set to 140 characters or more, it will be truncated. +- If `github.repoURLPath` and `github.revisionPath` are same as above, they can be omitted. +- Automerge is optional and `true` by default for github deployments to ensure the requested ref is up to date with the default branch. + Setting this option to `false` is required if you would like to deploy older refs in your default branch. + For more information see the [Github Deployment API Docs](https://docs.github.com/en/rest/deployments/deployments?apiVersion=2022-11-28#create-a-deployment). diff --git a/docs/operator-manual/notifications/services/googlechat.md b/docs/operator-manual/notifications/services/googlechat.md new file mode 100755 index 0000000000000..041ea6e022ef5 --- /dev/null +++ b/docs/operator-manual/notifications/services/googlechat.md @@ -0,0 +1,92 @@ +# Google Chat + +## Parameters + +The Google Chat notification service send message notifications to a google chat webhook. 
This service uses the following settings: + +* `webhooks` - a map of the form `webhookName: webhookUrl` + +## Configuration + +1. Open `Google chat` and go to the space to which you want to send messages +2. From the menu at the top of the page, select **Configure Webhooks** +3. Under **Incoming Webhooks**, click **Add Webhook** +4. Give a name to the webhook, optionally add an image and click **Save** +5. Copy the URL next to your webhook +6. Store the URL in `argocd-notification-secret` and declare it in `argocd-notifications-cm` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + service.googlechat: | + webhooks: + spaceName: $space-webhook-url +``` + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: +stringData: + space-webhook-url: https://chat.googleapis.com/v1/spaces//messages?key=&token= +``` + +6. Create a subscription for your space + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + annotations: + notifications.argoproj.io/subscribe.on-sync-succeeded.googlechat: spaceName +``` + +## Templates + +You can send [simple text](https://developers.google.com/chat/reference/message-formats/basic) or [card messages](https://developers.google.com/chat/reference/message-formats/cards) to a Google Chat space. A simple text message template can be defined as follows: + +```yaml +template.app-sync-succeeded: | + message: The app {{ .app.metadata.name }} has successfully synced! +``` + +A card message can be defined as follows: + +```yaml +template.app-sync-succeeded: | + googlechat: + cards: | + - header: + title: ArgoCD Bot Notification + sections: + - widgets: + - textParagraph: + text: The app {{ .app.metadata.name }} has successfully synced! 
+ + - widgets: + - keyValue: + topLabel: Repository + content: {{ call .repo.RepoURLToHTTPS .app.spec.source.repoURL }} + - keyValue: + topLabel: Revision + content: {{ .app.spec.source.targetRevision }} + - keyValue: + topLabel: Author + content: {{ (call .repo.GetCommitMetadata .app.status.sync.revision).Author }} +``` + +The card message can be written in JSON too. + +## Chat Threads + +It is possible to send both simple text and card messages in a chat thread by specifying a unique key for the thread. The thread key can be defined as follows: + +```yaml +template.app-sync-succeeded: | + message: The app {{ .app.metadata.name }} has successfully synced! + googlechat: + threadKey: {{ .app.metadata.name }} +``` diff --git a/docs/operator-manual/notifications/services/grafana.md b/docs/operator-manual/notifications/services/grafana.md new file mode 100755 index 0000000000000..a36672d0fa423 --- /dev/null +++ b/docs/operator-manual/notifications/services/grafana.md @@ -0,0 +1,51 @@ +# Grafana + +To be able to create Grafana annotation with argocd-notifications you have to create an [API Key](https://grafana.com/docs/grafana/latest/http_api/auth/#create-api-key) inside your [Grafana](https://grafana.com). + +![sample](https://user-images.githubusercontent.com/18019529/112024976-0f106080-8b78-11eb-9658-7663305899be.png) + +Available parameters : + +* `apiURL` - the server url, e.g. https://grafana.example.com +* `apiKey` - the API key for the serviceaccount +* `insecureSkipVerify` - optional bool, true or false + +1. Login to your Grafana instance as `admin` +2. On the left menu, go to Configuration / API Keys +3. Click "Add API Key" +4. Fill the Key with name `ArgoCD Notification`, role `Editor` and Time to Live `10y` (for example) +5. Click on Add button +6. 
Store apiKey in `argocd-notifications-secret` Secret and Copy your API Key and define it in `argocd-notifications-cm` ConfigMap + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + service.grafana: | + apiUrl: https://grafana.example.com/api + apiKey: $grafana-api-key +``` + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: +stringData: + grafana-api-key: api-key +``` + +7. Create subscription for your Grafana integration + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + annotations: + notifications.argoproj.io/subscribe..grafana: tag1|tag2 # list of tags separated with | +``` + +8. Change the annotations settings +![8](https://user-images.githubusercontent.com/18019529/112022083-47fb0600-8b75-11eb-849b-d25d41925909.png) diff --git a/docs/operator-manual/notifications/services/mattermost.md b/docs/operator-manual/notifications/services/mattermost.md new file mode 100755 index 0000000000000..98e0d0fd7b82f --- /dev/null +++ b/docs/operator-manual/notifications/services/mattermost.md @@ -0,0 +1,78 @@ +# Mattermost + +## Parameters + +* `apiURL` - the server url, e.g. https://mattermost.example.com +* `token` - the bot token +* `insecureSkipVerify` - optional bool, true or false + +## Configuration + +1. Create a bot account and copy token after creating it +![1](https://user-images.githubusercontent.com/18019529/111499520-62ed0500-8786-11eb-88b0-d0aade61fed4.png) +2. Invite team +![2](https://user-images.githubusercontent.com/18019529/111500197-1229dc00-8787-11eb-98e5-587ee36c94a9.png) +3. Store token in `argocd-notifications-secret` Secret and configure Mattermost integration +in `argocd-notifications-cm` ConfigMap + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + service.mattermost: | + apiURL: + token: $mattermost-token +``` + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: +stringData: + mattermost-token: token +``` + +4. 
Copy channel id +![4](https://user-images.githubusercontent.com/18019529/111501289-333efc80-8788-11eb-9731-8353170cd73a.png) + +5. Create subscription for your Mattermost integration + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + annotations: + notifications.argoproj.io/subscribe..mattermost: +``` + +## Templates + +![](https://user-images.githubusercontent.com/18019529/111502636-5fa74880-8789-11eb-97c5-5eac22c00a37.png) + +You can reuse the template of slack. +Mattermost is compatible with attachments of Slack. See [Mattermost Integration Guide](https://docs.mattermost.com/developer/message-attachments.html). + +```yaml +template.app-deployed: | + message: | + Application {{.app.metadata.name}} is now running new version of deployments manifests. + mattermost: + attachments: | + [{ + "title": "{{.app.metadata.name}}", + "title_link": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + "color": "#18be52", + "fields": [{ + "title": "Sync Status", + "value": "{{.app.status.sync.status}}", + "short": true + }, { + "title": "Repository", + "value": "{{.app.spec.source.repoURL}}", + "short": true + }] + }] +``` diff --git a/docs/operator-manual/notifications/services/newrelic.md b/docs/operator-manual/notifications/services/newrelic.md new file mode 100755 index 0000000000000..d98288a846422 --- /dev/null +++ b/docs/operator-manual/notifications/services/newrelic.md @@ -0,0 +1,61 @@ +# NewRelic + +## Parameters + +* `apiURL` - the api server url, e.g. https://api.newrelic.com +* `apiKey` - a [NewRelic ApiKey](https://docs.newrelic.com/docs/apis/rest-api-v2/get-started/introduction-new-relic-rest-api-v2/#api_key) + +## Configuration + +1. Create a NewRelic [Api Key](https://docs.newrelic.com/docs/apis/intro-apis/new-relic-api-keys/#user-api-key) +2. 
Store apiKey in `argocd-notifications-secret` Secret and configure NewRelic integration in `argocd-notifications-cm` ConfigMap + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + service.newrelic: | + apiURL: + apiKey: $newrelic-apiKey +``` + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: +stringData: + newrelic-apiKey: apiKey +``` + +3. Copy [Application ID](https://docs.newrelic.com/docs/apis/rest-api-v2/get-started/get-app-other-ids-new-relic-one/#apm) +4. Create subscription for your NewRelic integration + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + annotations: + notifications.argoproj.io/subscribe..newrelic: +``` + +## Templates + +* `description` - __optional__, high-level description of this deployment, visible in the [Summary](https://docs.newrelic.com/docs/apm/applications-menu/monitoring/apm-overview-page) page and on the [Deployments](https://docs.newrelic.com/docs/apm/applications-menu/events/deployments-page) page when you select an individual deployment. + * Defaults to `message` +* `changelog` - __optional__, A summary of what changed in this deployment, visible in the [Deployments](https://docs.newrelic.com/docs/apm/applications-menu/events/deployments-page) page when you select (selected deployment) > Change log. + * Defaults to `{{(call .repo.GetCommitMetadata .app.status.sync.revision).Message}}` +* `user` - __optional__, A username to associate with the deployment, visible in the [Summary](https://docs.newrelic.com/docs/apm/applications-menu/events/deployments-page) and on the [Deployments](https://docs.newrelic.com/docs/apm/applications-menu/events/deployments-page). + * Defaults to `{{(call .repo.GetCommitMetadata .app.status.sync.revision).Author}}` + +```yaml +context: | + argocdUrl: https://example.com/argocd + +template.app-deployed: | + message: Application {{.app.metadata.name}} has successfully deployed. 
+ + newrelic: + description: Application {{.app.metadata.name}} has successfully deployed +``` diff --git a/docs/operator-manual/notifications/services/opsgenie.md b/docs/operator-manual/notifications/services/opsgenie.md new file mode 100755 index 0000000000000..665d0081e7c73 --- /dev/null +++ b/docs/operator-manual/notifications/services/opsgenie.md @@ -0,0 +1,28 @@ +# Opsgenie + +To be able to send notifications with argocd-notifications you have to create an [API Integration](https://docs.opsgenie.com/docs/integrations-overview) inside your [Opsgenie Team](https://docs.opsgenie.com/docs/teams). + +1. Login to Opsgenie at https://app.opsgenie.com or https://app.eu.opsgenie.com (if you have an account in the European Union) +2. Make sure you already have a team, if not follow this guide https://docs.opsgenie.com/docs/teams +3. Click "Teams" in the Menu on the left +4. Select the team that you want to notify +5. In the teams configuration menu select "Integrations" +6. Click "Add Integration" in the top right corner +7. Select "API" integration +8. Give your integration a name, copy the "API key" and save it somewhere for later +9. Make sure the checkboxes for "Create and Update Access" and "enable" are selected, disable the other checkboxes to remove unnecessary permissions +10. Click "Save Integration" at the bottom +11. Check your browser for the correct server apiURL. If it is "app.opsgenie.com" then use the us/international api url `api.opsgenie.com` in the next step, otherwise use `api.eu.opsgenie.com` (European API). +12. You are finished with configuring opsgenie. Now you need to configure argocd-notifications. Use the apiUrl, the team name and the apiKey to configure the opsgenie integration in the `argocd-notifications-secret` secret. 
+ +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + service.opsgenie: | + apiUrl: + apiKeys: + : +``` \ No newline at end of file diff --git a/docs/operator-manual/notifications/services/overview.md b/docs/operator-manual/notifications/services/overview.md new file mode 100755 index 0000000000000..265e575755088 --- /dev/null +++ b/docs/operator-manual/notifications/services/overview.md @@ -0,0 +1,54 @@ +The notification services represent integration with services such as slack, email or custom webhook. Services are configured in `argocd-notifications-cm` ConfigMap +using `service..()` keys and might reference sensitive data from `argocd-notifications-secret` Secret. Following example demonstrates slack +service configuration: + +```yaml + service.slack: | + token: $slack-token +``` + + +The `slack` indicates that service sends slack notification; name is missing and defaults to `slack`. + +## Sensitive Data + +Sensitive data like authentication tokens should be stored in `` Secret and can be referenced in +service configuration using `$` format. For example `$slack-token` referencing value of key `slack-token` in +`` Secret. + +## Custom Names + +Service custom names allow configuring two instances of the same service type. 
+ +```yaml + service.slack.workspace1: | + token: $slack-token-workspace1 + service.slack.workspace2: | + token: $slack-token-workspace2 +``` + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + annotations: + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: my-channel + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace2: my-channel +``` + +## Service Types + +* [AwsSqs](./awssqs.md) +* [Email](./email.md) +* [GitHub](./github.md) +* [Slack](./slack.md) +* [Mattermost](./mattermost.md) +* [Opsgenie](./opsgenie.md) +* [Grafana](./grafana.md) +* [Webhook](./webhook.md) +* [Telegram](./telegram.md) +* [Teams](./teams.md) +* [Google Chat](./googlechat.md) +* [Rocket.Chat](./rocketchat.md) +* [Pushover](./pushover.md) +* [Alertmanager](./alertmanager.md) \ No newline at end of file diff --git a/docs/operator-manual/notifications/services/pagerduty.md b/docs/operator-manual/notifications/services/pagerduty.md new file mode 100755 index 0000000000000..0e1ab965332e1 --- /dev/null +++ b/docs/operator-manual/notifications/services/pagerduty.md @@ -0,0 +1,66 @@ +# Pagerduty + +## Parameters + +The Pagerduty notification service is used to create pagerduty incidents and requires specifying the following settings: + +* `pagerdutyToken` - the pagerduty auth token +* `from` - email address of a valid user associated with the account making the request. +* `serviceID` - The ID of the resource. 
+ + +## Example + +The following snippet contains sample Pagerduty service configuration: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: +stringData: + pagerdutyToken: +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + service.pagerduty: | + token: $pagerdutyToken + from: +``` + +## Template + +[Notification templates](../templates.md) support specifying subject for pagerduty notifications: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + template.rollout-aborted: | + message: Rollout {{.rollout.metadata.name}} is aborted. + pagerduty: + title: "Rollout {{.rollout.metadata.name}}" + urgency: "high" + body: "Rollout {{.rollout.metadata.name}} aborted " + priorityID: "" +``` + +NOTE: A Priority is a label representing the importance and impact of an incident. This is only available on Standard and Enterprise plans of pagerduty. + +## Annotation + +Annotation sample for pagerduty notifications: +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + notifications.argoproj.io/subscribe.on-rollout-aborted.pagerduty: "" +``` diff --git a/docs/operator-manual/notifications/services/pagerduty_v2.md b/docs/operator-manual/notifications/services/pagerduty_v2.md new file mode 100755 index 0000000000000..21e8d942e4e93 --- /dev/null +++ b/docs/operator-manual/notifications/services/pagerduty_v2.md @@ -0,0 +1,78 @@ +# PagerDuty V2 + +## Parameters + +The PagerDuty notification service is used to trigger PagerDuty events and requires specifying the following settings: + +* `serviceKeys` - a dictionary with the following structure: + * `service-name: $pagerduty-key-service-name` where `service-name` is the name you want to use for the service to make events for, and `$pagerduty-key-service-name` is a reference to the secret that contains the actual PagerDuty integration key (Events API v2 integration) + +If you want multiple Argo apps to trigger events to their respective PagerDuty 
services, create an integration key in each service you want to setup alerts for. + +To create a PagerDuty integration key, [follow these instructions](https://support.pagerduty.com/docs/services-and-integrations#create-a-generic-events-api-integration) to add an Events API v2 integration to the service of your choice. + +## Configuration + +The following snippet contains sample PagerDuty service configuration. It assumes the service you want to alert on is called `my-service`. + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: +stringData: + pagerduty-key-my-service: +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + service.pagerdutyv2: | + serviceKeys: + my-service: $pagerduty-key-my-service +``` + +## Template + +[Notification templates](../templates.md) support specifying subject for PagerDuty notifications: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + template.rollout-aborted: | + message: Rollout {{.rollout.metadata.name}} is aborted. + pagerdutyv2: + summary: "Rollout {{.rollout.metadata.name}} is aborted." + severity: "critical" + source: "{{.rollout.metadata.name}}" +``` + +The parameters for the PagerDuty configuration in the template generally match with the payload for the Events API v2 endpoint. All parameters are strings. + +* `summary` - (required) A brief text summary of the event, used to generate the summaries/titles of any associated alerts. +* `severity` - (required) The perceived severity of the status the event is describing with respect to the affected system. Allowed values: `critical`, `warning`, `error`, `info` +* `source` - (required) The unique location of the affected system, preferably a hostname or FQDN. +* `component` - Component of the source machine that is responsible for the event. +* `group` - Logical grouping of components of a service. +* `class` - The class/type of the event. +* `url` - The URL that should be used for the link "View in ArgoCD" in PagerDuty. 
+ +The `timestamp` and `custom_details` parameters are not currently supported. + +## Annotation + +Annotation sample for PagerDuty notifications: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + notifications.argoproj.io/subscribe.on-rollout-aborted.pagerdutyv2: "" +``` diff --git a/docs/operator-manual/notifications/services/pushover.md b/docs/operator-manual/notifications/services/pushover.md new file mode 100755 index 0000000000000..37cb20b277dcc --- /dev/null +++ b/docs/operator-manual/notifications/services/pushover.md @@ -0,0 +1,33 @@ +# Pushover + +1. Create an app at [pushover.net](https://pushover.net/apps/build). +2. Store the API key in `` Secret and define the secret name in `` ConfigMap: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + service.pushover: | + token: $pushover-token +``` + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: +stringData: + pushover-token: avtc41pn13asmra6zaiyf7dh6cgx97 +``` + +3. Add your user key to your Application resource: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + annotations: + notifications.argoproj.io/subscribe.on-sync-succeeded.pushover: uumy8u4owy7bgkapp6mc5mvhfsvpcd +``` \ No newline at end of file diff --git a/docs/operator-manual/notifications/services/rocketchat.md b/docs/operator-manual/notifications/services/rocketchat.md new file mode 100755 index 0000000000000..f1157050139d0 --- /dev/null +++ b/docs/operator-manual/notifications/services/rocketchat.md @@ -0,0 +1,96 @@ +# Rocket.Chat + +## Parameters + +The Rocket.Chat notification service configuration includes the following settings: + +* `email` - the Rocket.Chat user's email +* `password` - the Rocket.Chat user's password +* `alias` - optional alias that should be used to post message +* `icon` - optional message icon +* `avatar` - optional message avatar +* `serverUrl` - optional Rocket.Chat server url + +## Configuration + +1. 
Login to your RocketChat instance +2. Go to user management + +![2](https://user-images.githubusercontent.com/15252187/115824993-7ccad900-a411-11eb-89de-6a0c4438ffdf.png) + +3. Add new user with `bot` role. Also note that the `Require password change` checkbox must not be checked + +![3](https://user-images.githubusercontent.com/15252187/115825174-b4d21c00-a411-11eb-8f20-cda48cea9fad.png) + +4. Copy the username and password that you created for the bot user +5. Create a public or private channel, or a team, for this example `my_channel` +6. Add your bot to this channel **otherwise it won't work** +7. Store email and password in argocd-notifications-secret Secret + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: +stringData: + rocketchat-email: + rocketchat-password: +``` + +8. Finally, use these credentials to configure the RocketChat integration in the `argocd-configmap` config map: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + service.rocketchat: | + email: $rocketchat-email + password: $rocketchat-password +``` + +9. Create a subscription for your Rocket.Chat integration: + +*Note: channel, team or user must be prefixed with # or @ otherwise the destination will be interpreted as a room ID* + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + annotations: + notifications.argoproj.io/subscribe.on-sync-succeeded.rocketchat: #my_channel +``` + +## Templates + +[Notification templates](../templates.md) can be customized with RocketChat [attachments](https://developer.rocket.chat/api/rest-api/methods/chat/postmessage#attachments-detail). + +*Note: The attachments structure in Rocket.Chat is the same as the Slack attachments [feature](https://api.slack.com/messaging/composing/layouts).* + + + +The message attachments can be specified in `attachments` string fields under `rocketchat` field: + +```yaml +template.app-sync-status: | + message: | + Application {{.app.metadata.name}} sync is {{.app.status.sync.status}}. 
+ Application details: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}. + rocketchat: + attachments: | + [{ + "title": "{{.app.metadata.name}}", + "title_link": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + "color": "#18be52", + "fields": [{ + "title": "Sync Status", + "value": "{{.app.status.sync.status}}", + "short": true + }, { + "title": "Repository", + "value": "{{.app.spec.source.repoURL}}", + "short": true + }] + }] +``` diff --git a/docs/operator-manual/notifications/services/slack.md b/docs/operator-manual/notifications/services/slack.md new file mode 100755 index 0000000000000..15937597c19f2 --- /dev/null +++ b/docs/operator-manual/notifications/services/slack.md @@ -0,0 +1,169 @@ +# Slack + +If you want to send message using incoming webhook, you can use [webhook](./webhook.md#send-slack). + +## Parameters + +The Slack notification service configuration includes following settings: + +* `token` - the app token +* `apiURL` - optional, the server url, e.g. https://example.com/api +* `username` - optional, the app username +* `icon` - optional, the app icon, e.g. :robot_face: or https://example.com/image.png +* `insecureSkipVerify` - optional bool, true or false + +## Configuration + +1. Create Slack Application using https://api.slack.com/apps?new_app=1 +![1](https://user-images.githubusercontent.com/426437/73604308-4cb0c500-4543-11ea-9092-6ca6bae21cbb.png) +1. Once application is created navigate to `Enter OAuth & Permissions` +![2](https://user-images.githubusercontent.com/426437/73604309-4d495b80-4543-11ea-9908-4dea403d3399.png) +1. Click `Permissions` under `Add features and functionality` section and add `chat:write` scope. To use the optional username and icon overrides in the Slack notification service also add the `chat:write.customize` scope. +![3](https://user-images.githubusercontent.com/426437/73604310-4d495b80-4543-11ea-8576-09cd91aea0e5.png) +1. 
Scroll back to the top, click 'Install App to Workspace' button and confirm the installation. +![4](https://user-images.githubusercontent.com/426437/73604311-4d495b80-4543-11ea-9155-9d216b20ec86.png) +1. Once installation is completed copy the OAuth token. +![5](https://user-images.githubusercontent.com/426437/73604312-4d495b80-4543-11ea-832b-a9d9d5e4bc29.png) + +1. Create a public or private channel, for this example `my_channel` +1. Invite your slack bot to this channel **otherwise slack bot won't be able to deliver notifications to this channel** +1. Store Oauth access token in `argocd-notifications-secret` secret + + ```yaml + apiVersion: v1 + kind: Secret + metadata: + name: + stringData: + slack-token: + ``` + +1. Define service type slack in data section of `argocd-notifications-cm` configmap: + + ```yaml + apiVersion: v1 + kind: ConfigMap + metadata: + name: + data: + service.slack: | + token: $slack-token + ``` + +1. Add annotation in application yaml file to enable notifications for specific argocd app. The following example uses the [on-sync-succeeded trigger](../catalog.md#triggers): + + ```yaml + apiVersion: argoproj.io/v1alpha1 + kind: Application + metadata: + annotations: + notifications.argoproj.io/subscribe.on-sync-succeeded.slack: my_channel + ``` + +1. 
Annotation with more than one [trigger](../catalog.md#triggers), with multiple destinations and recipients + + ```yaml + apiVersion: argoproj.io/v1alpha1 + kind: Application + metadata: + annotations: + notifications.argoproj.io/subscriptions: | + - trigger: [on-scaling-replica-set, on-rollout-updated, on-rollout-step-completed] + destinations: + - service: slack + recipients: [my-channel-1, my-channel-2] + - service: email + recipients: [recipient-1, recipient-2, recipient-3 ] + - trigger: [on-rollout-aborted, on-analysis-run-failed, on-analysis-run-error] + destinations: + - service: slack + recipients: [my-channel-21, my-channel-22] + ``` + +## Templates + +[Notification templates](../templates.md) can be customized to leverage slack message blocks and attachments +[feature](https://api.slack.com/messaging/composing/layouts). + +![](https://user-images.githubusercontent.com/426437/72776856-6dcef880-3bc8-11ea-8e3b-c72df16ee8e6.png) + +The message blocks and attachments can be specified in `blocks` and `attachments` string fields under `slack` field: + +```yaml +template.app-sync-status: | + message: | + Application {{.app.metadata.name}} sync is {{.app.status.sync.status}}. + Application details: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}. + slack: + attachments: | + [{ + "title": "{{.app.metadata.name}}", + "title_link": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + "color": "#18be52", + "fields": [{ + "title": "Sync Status", + "value": "{{.app.status.sync.status}}", + "short": true + }, { + "title": "Repository", + "value": "{{.app.spec.source.repoURL}}", + "short": true + }] + }] +``` + +The messages can be aggregated to the slack threads by grouping key which can be specified in a `groupingKey` string field under `slack` field. +`groupingKey` is used across each template and works independently on each slack channel. 
+When multiple applications will be updated at the same time or frequently, the messages in slack channel can be easily read by aggregating with git commit hash, application name, etc. +Furthermore, the messages can be broadcast to the channel at the specific template by `notifyBroadcast` field. + +```yaml +template.app-sync-status: | + message: | + Application {{.app.metadata.name}} sync is {{.app.status.sync.status}}. + Application details: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}. + slack: + attachments: | + [{ + "title": "{{.app.metadata.name}}", + "title_link": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + "color": "#18be52", + "fields": [{ + "title": "Sync Status", + "value": "{{.app.status.sync.status}}", + "short": true + }, { + "title": "Repository", + "value": "{{.app.spec.source.repoURL}}", + "short": true + }] + }] + # Aggregate the messages to the thread by git commit hash + groupingKey: "{{.app.status.sync.revision}}" + notifyBroadcast: false +template.app-sync-failed: | + message: | + Application {{.app.metadata.name}} sync is {{.app.status.sync.status}}. + Application details: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}. + slack: + attachments: | + [{ + "title": "{{.app.metadata.name}}", + "title_link": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + "color": "#ff0000", + "fields": [{ + "title": "Sync Status", + "value": "{{.app.status.sync.status}}", + "short": true + }, { + "title": "Repository", + "value": "{{.app.spec.source.repoURL}}", + "short": true + }] + }] + # Aggregate the messages to the thread by git commit hash + groupingKey: "{{.app.status.sync.revision}}" + notifyBroadcast: true +``` + +The message is sent according to the `deliveryPolicy` string field under the `slack` field. The available modes are `Post` (default), `PostAndUpdate`, and `Update`. The `PostAndUpdate` and `Update` settings require `groupingKey` to be set. 
diff --git a/docs/operator-manual/notifications/services/teams.md b/docs/operator-manual/notifications/services/teams.md new file mode 100755 index 0000000000000..b5b9a228c43eb --- /dev/null +++ b/docs/operator-manual/notifications/services/teams.md @@ -0,0 +1,126 @@ +# Teams + +## Parameters + +The Teams notification service sends message notifications using a Teams bot and requires specifying the following settings: + +* `recipientUrls` - the webhook url map, e.g. `channelName: https://example.com` + +## Configuration + +1. Open `Teams` and go to `Apps` +2. Find the `Incoming Webhook` Microsoft app and click on it +3. Press `Add to a team` -> select team and channel -> press `Set up a connector` +4. Enter webhook name and upload image (optional) +5. Press `Create` then copy webhook url and store it in `argocd-notifications-secret` and define it in `argocd-notifications-cm` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + service.teams: | + recipientUrls: + channelName: $channel-teams-url +``` + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: +stringData: + channel-teams-url: https://example.com +``` + +6. Create a subscription for your Teams integration: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + annotations: + notifications.argoproj.io/subscribe.on-sync-succeeded.teams: channelName +``` + +## Templates + +![](https://user-images.githubusercontent.com/18019529/114271500-9d2b8880-9a4c-11eb-85c1-f6935f0431d5.png) + +[Notification templates](../templates.md) can be customized to leverage Teams message sections, facts, themeColor, summary and potentialAction [feature](https://docs.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/connectors-using). 
+ +```yaml +template.app-sync-succeeded: | + teams: + themeColor: "#000080" + sections: | + [{ + "facts": [ + { + "name": "Sync Status", + "value": "{{.app.status.sync.status}}" + }, + { + "name": "Repository", + "value": "{{.app.spec.source.repoURL}}" + } + ] + }] + potentialAction: |- + [{ + "@type":"OpenUri", + "name":"Operation Details", + "targets":[{ + "os":"default", + "uri":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true" + }] + }] + title: Application {{.app.metadata.name}} has been successfully synced + text: Application {{.app.metadata.name}} has been successfully synced at {{.app.status.operationState.finishedAt}}. + summary: "{{.app.metadata.name}} sync succeeded" +``` + +### facts field + +You can use `facts` field instead of `sections` field. + +```yaml +template.app-sync-succeeded: | + teams: + facts: | + [{ + "name": "Sync Status", + "value": "{{.app.status.sync.status}}" + }, + { + "name": "Repository", + "value": "{{.app.spec.source.repoURL}}" + }] +``` + +### theme color field + +You can set theme color as hex string for the message. + +![](https://user-images.githubusercontent.com/1164159/114864810-0718a900-9e24-11eb-8127-8d95da9544c1.png) + +```yaml +template.app-sync-succeeded: | + teams: + themeColor: "#000080" +``` + +### summary field + +You can set a summary of the message that will be shown on Notifcation & Activity Feed + +![](https://user-images.githubusercontent.com/6957724/116587921-84c4d480-a94d-11eb-9da4-f365151a12e7.jpg) + +![](https://user-images.githubusercontent.com/6957724/116588002-99a16800-a94d-11eb-807f-8626eb53b980.jpg) + +```yaml +template.app-sync-succeeded: | + teams: + summary: "Sync Succeeded" +``` diff --git a/docs/operator-manual/notifications/services/telegram.md b/docs/operator-manual/notifications/services/telegram.md new file mode 100755 index 0000000000000..953c2a9fca0bf --- /dev/null +++ b/docs/operator-manual/notifications/services/telegram.md @@ -0,0 +1,35 @@ +# Telegram + +1. 
Get an API token using [@Botfather](https://t.me/Botfather). +2. Store token in `` Secret and configure telegram integration +in `` ConfigMap: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + service.telegram: | + token: $telegram-token +``` + +3. Create new Telegram [channel](https://telegram.org/blog/channels). +4. Add your bot as an administrator. +5. Use this channel `username` (public channel) or `chatID` (private channel) in the subscription for your Telegram integration: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + annotations: + notifications.argoproj.io/subscribe.on-sync-succeeded.telegram: username +``` + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + annotations: + notifications.argoproj.io/subscribe.on-sync-succeeded.telegram: -1000000000000 +``` diff --git a/docs/operator-manual/notifications/services/webex.md b/docs/operator-manual/notifications/services/webex.md new file mode 100755 index 0000000000000..440ed1ddc738f --- /dev/null +++ b/docs/operator-manual/notifications/services/webex.md @@ -0,0 +1,41 @@ +# Webex Teams + +## Parameters + +The Webex Teams notification service configuration includes following settings: + +* `token` - the app token + +## Configuration + +1. Create a Webex [Bot](https://developer.webex.com/docs/bots) +1. Copy the bot access [token](https://developer.webex.com/my-apps) and store it in the `argocd-notifications-secret` Secret and configure Webex Teams integration in `argocd-notifications-cm` ConfigMap + + ``` yaml + apiVersion: v1 + kind: Secret + metadata: + name: + stringData: + webex-token: + ``` + + ``` yaml + apiVersion: v1 + kind: ConfigMap + metadata: + name: + data: + service.webex: | + token: $webex-token + ``` + +1. 
Create subscription for your Webex Teams integration + + ``` yaml + apiVersion: argoproj.io/v1alpha1 + kind: Application + metadata: + annotations: + notifications.argoproj.io/subscribe..webex: + ``` diff --git a/docs/operator-manual/notifications/services/webhook.md b/docs/operator-manual/notifications/services/webhook.md new file mode 100755 index 0000000000000..bd45b1f69e40b --- /dev/null +++ b/docs/operator-manual/notifications/services/webhook.md @@ -0,0 +1,189 @@ +# Webhook + +The webhook notification service allows sending a generic HTTP request using the templatized request body and URL. +Using Webhook you might trigger a Jenkins job, update Github commit status. + +## Parameters + +The Webhook notification service configuration includes following settings: + +- `url` - the url to send the webhook to +- `headers` - optional, the headers to pass along with the webhook +- `basicAuth` - optional, the basic authentication to pass along with the webook +- `insecureSkipVerify` - optional bool, true or false + +## Configuration + +Use the following steps to configure webhook: + +1 Register webhook in `argocd-notifications-cm` ConfigMap: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + service.webhook.: | + url: https:/// + headers: #optional headers + - name: + value: + basicAuth: #optional username password + username: + password: + insecureSkipVerify: true #optional bool +``` + +2 Define template that customizes webhook request method, path and body: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + template.github-commit-status: | + webhook: + : + method: POST # one of: GET, POST, PUT, PATCH. 
Default value: GET + path: + body: | + + trigger.: | + - when: app.status.operationState.phase in ['Succeeded'] + send: [github-commit-status] +``` + +3 Create subscription for webhook integration: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + annotations: + notifications.argoproj.io/subscribe..: "" +``` + +## Examples + +### Set Github commit status + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + service.webhook.github: | + url: https://api.github.com + headers: #optional headers + - name: Authorization + value: token $github-token +``` + +2 Define template that customizes webhook request method, path and body: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + service.webhook.github: | + url: https://api.github.com + headers: #optional headers + - name: Authorization + value: token $github-token + + template.github-commit-status: | + webhook: + github: + method: POST + path: /repos/{{call .repo.FullNameByRepoURL .app.spec.source.repoURL}}/statuses/{{.app.status.operationState.operation.sync.revision}} + body: | + { + {{if eq .app.status.operationState.phase "Running"}} "state": "pending"{{end}} + {{if eq .app.status.operationState.phase "Succeeded"}} "state": "success"{{end}} + {{if eq .app.status.operationState.phase "Error"}} "state": "error"{{end}} + {{if eq .app.status.operationState.phase "Failed"}} "state": "error"{{end}}, + "description": "ArgoCD", + "target_url": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + "context": "continuous-delivery/{{.app.metadata.name}}" + } +``` + +### Start Jenkins Job + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + service.webhook.jenkins: | + url: http:///job//build?token= + basicAuth: + username: + password: + +type: Opaque +``` + +### Send form-data + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + service.webhook.form: | + url: https://form.example.com + headers: + - name: Content-Type 
+ value: application/x-www-form-urlencoded + + template.form-data: | + webhook: + form: + method: POST + body: key1=value1&key2=value2 +``` + +### Send Slack + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: +data: + service.webhook.slack_webhook: | + url: https://hooks.slack.com/services/xxxxx + headers: + - name: Content-Type + value: application/json + + template.send-slack: | + webhook: + slack_webhook: + method: POST + body: | + { + "attachments": [{ + "title": "{{.app.metadata.name}}", + "title_link": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + "color": "#18be52", + "fields": [{ + "title": "Sync Status", + "value": "{{.app.status.sync.status}}", + "short": true + }, { + "title": "Repository", + "value": "{{.app.spec.source.repoURL}}", + "short": true + }] + }] + } +``` diff --git a/docs/operator-manual/notifications/subscriptions.md b/docs/operator-manual/notifications/subscriptions.md new file mode 100644 index 0000000000000..e9f985280f8ad --- /dev/null +++ b/docs/operator-manual/notifications/subscriptions.md @@ -0,0 +1,71 @@ +The subscription to Argo CD application events can be defined using `notifications.argoproj.io/subscribe..: ` annotation. 
+For example, the following annotation subscribes two Slack channels to notifications about every successful synchronization of the Argo CD application: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + annotations: + notifications.argoproj.io/subscribe.on-sync-succeeded.slack: my-channel1;my-channel2 +``` + +The annotation key consists of following parts: + +* `on-sync-succeeded` - trigger name +* `slack` - notification service name +* `my-channel1;my-channel2` - a semicolon separated list of recipients + +You can create subscriptions for all applications of an Argo CD project by adding the same annotation to the AppProject resource: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: AppProject +metadata: + annotations: + notifications.argoproj.io/subscribe.on-sync-succeeded.slack: my-channel1;my-channel2 +``` + +## Default Subscriptions + +The subscriptions might be configured globally in the `argocd-notifications-cm` ConfigMap using the `subscriptions` field. The default subscriptions +are applied to all applications. The trigger and applications might be configured using the +`triggers` and `selector` fields: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-notifications-cm +data: + # Contains centrally managed global application subscriptions + subscriptions: | + # subscription for on-sync-status-unknown trigger notifications + - recipients: + - slack:test2 + - email:test@gmail.com + triggers: + - on-sync-status-unknown + # subscription restricted to applications with matching labels only + - recipients: + - slack:test3 + selector: test=true + triggers: + - on-sync-status-unknown +``` + +If you want to use webhook in subscriptions, you need to store the custom webhook name in the subscription's `recipients` field. 
+ +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-notifications-cm +data: + service.webhook.: | + (snip) + subscriptions: | + - recipients: + - + triggers: + - on-sync-status-unknown +``` diff --git a/docs/operator-manual/notifications/templates.md b/docs/operator-manual/notifications/templates.md new file mode 100644 index 0000000000000..f865229e12835 --- /dev/null +++ b/docs/operator-manual/notifications/templates.md @@ -0,0 +1,93 @@ +The notification template is used to generate the notification content and is configured in the `argocd-notifications-cm` ConfigMap. The template is leveraging +the [html/template](https://golang.org/pkg/html/template/) Go package and allows customization of the notification message. +Templates are meant to be reusable and can be referenced by multiple triggers. + +The following template is used to notify the user about application sync status. + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-notifications-cm +data: + template.my-custom-template-slack-template: | + message: | + Application {{.app.metadata.name}} sync is {{.app.status.sync.status}}. + Application details: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}. +``` + +Each template has access to the following fields: + +- `app` holds the application object. +- `context` is a user-defined string map and might include any string keys and values. +- `serviceType` holds the notification service type name (such as "slack" or "email"). The field can be used to conditionally +render service-specific fields. +- `recipient` holds the recipient name. 
+ +## Defining user-defined `context` + +It is possible to define some shared context between all notification templates by setting a top-level +YAML document of key-value pairs, which can then be used within templates, like so: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-notifications-cm +data: + context: | + region: east + environmentName: staging + + template.a-slack-template-with-context: | + message: "Something happened in {{ .context.environmentName }} in the {{ .context.region }} data center!" +``` + +## Notification Service Specific Fields + +The `message` field of the template definition allows creating a basic notification for any notification service. You can leverage notification service-specific +fields to create complex notifications. For example using service-specific you can add blocks and attachments for Slack, subject for Email or URL path, and body for Webhook. +See corresponding service [documentation](services/overview.md) for more information. + +## Change the timezone + +You can change the timezone to show in notifications as follows. + +1. Call time functions. + + ``` + {{ (call .time.Parse .app.status.operationState.startedAt).Local.Format "2006-01-02T15:04:05Z07:00" }} + ``` + +2. Set the `TZ` environment variable on the argocd-notifications-controller container. 
+ + ```yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: argocd-notifications-controller + spec: + template: + spec: + containers: + - name: argocd-notifications-controller + env: + - name: TZ + value: Asia/Tokyo + ``` + +## Functions + +Templates have access to the set of built-in functions: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-notifications-cm +data: + template.my-custom-template-slack-template: | + message: "Author: {{(call .repo.GetCommitMetadata .app.status.sync.revision).Author}}" +``` + +{!docs/operator-manual/notifications/functions.md!} diff --git a/docs/operator-manual/notifications/triggers.md b/docs/operator-manual/notifications/triggers.md new file mode 100644 index 0000000000000..c3e2dc601296b --- /dev/null +++ b/docs/operator-manual/notifications/triggers.md @@ -0,0 +1,128 @@ +The trigger defines the condition when the notification should be sent. The definition includes name, condition +and notification templates reference. The condition is a predicate expression that returns true if the notification +should be sent. The trigger condition evaluation is powered by [antonmedv/expr](https://github.com/antonmedv/expr). +The condition language syntax is described at [Language-Definition.md](https://github.com/antonmedv/expr/blob/master/docs/Language-Definition.md). + +The trigger is configured in the `argocd-notifications-cm` ConfigMap. For example the following trigger sends a notification +when application sync status changes to `Unknown` using the `app-sync-status` template: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-notifications-cm +data: + trigger.on-sync-status-unknown: | + - when: app.status.sync.status == 'Unknown' # trigger condition + send: [app-sync-status, github-commit-status] # template names +``` + +Each condition might use several templates. Typically, each template is responsible for generating a service-specific notification part. 
+In the example above, the `app-sync-status` template "knows" how to create email and Slack notification, and `github-commit-status` knows how to +generate the payload for GitHub webhook. + +## Conditions Bundles + +Triggers are typically managed by administrators and encapsulate information about when and which notification should be sent. +The end users just need to subscribe to the trigger and specify the notification destination. In order to improve user experience +triggers might include multiple conditions with a different set of templates for each condition. For example, the following trigger +covers all stages of sync status operation and use a different template for different cases: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-notifications-cm +data: + trigger.sync-operation-change: | + - when: app.status.operationState.phase in ['Succeeded'] + send: [github-commit-status] + - when: app.status.operationState.phase in ['Running'] + send: [github-commit-status] + - when: app.status.operationState.phase in ['Error', 'Failed'] + send: [app-sync-failed, github-commit-status] +``` + +## Avoid Sending Same Notification Too Often + +In some cases, the trigger condition might be "flapping". The example below illustrates the problem. +The trigger is supposed to generate a notification once when Argo CD application is successfully synchronized and healthy. +However, the application health status might intermittently switch to `Progressing` and then back to `Healthy` so the trigger might unnecessarily generate +multiple notifications. The `oncePer` field configures triggers to generate the notification only when the corresponding application field changes. +The `on-deployed` trigger from the example below sends the notification only once per observed Git revision of the deployment repository. 
+ +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-notifications-cm +data: + # Optional 'oncePer' property ensures that notification is sent only once per specified field value + # E.g. following is triggered once per sync revision + trigger.on-deployed: | + when: app.status.operationState.phase in ['Succeeded'] and app.status.health.status == 'Healthy' + oncePer: app.status.sync.revision + send: [app-sync-succeeded] +``` + +**Mono Repo Usage** + +When one repo is used to sync multiple applications, the `oncePer: app.status.sync.revision` field will trigger a notification for each commit. For mono repos, the better approach is to use the `oncePer: app.status.operationState.syncResult.revision` statement. This way a notification will be sent only for a particular Application's revision. + +### oncePer + +The `oncePer` field is supported as follows. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + annotations: + example.com/version: v0.1 +``` + +```yaml +oncePer: app.metadata.annotations["example.com/version"] +``` + +## Default Triggers + +You can use the `defaultTriggers` field instead of specifying individual triggers in the annotations. + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-notifications-cm +data: + # Holds list of triggers that are used by default if trigger is not specified explicitly in the subscription + defaultTriggers: | + - on-sync-status-unknown + + defaultTriggers.mattermost: | + - on-sync-running + - on-sync-succeeded +``` + +Specify the annotations as follows to use `defaultTriggers`. In this example, `slack` sends when `on-sync-status-unknown`, and `mattermost` sends when `on-sync-running` and `on-sync-succeeded`. 
+ +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + annotations: + notifications.argoproj.io/subscribe.slack: my-channel + notifications.argoproj.io/subscribe.mattermost: my-mattermost-channel +``` + +## Functions + +Triggers have access to the set of built-in functions. + +Example: + +```yaml +when: time.Now().Sub(time.Parse(app.status.operationState.startedAt)).Minutes() >= 5 +``` + +{!docs/operator-manual/notifications/functions.md!} diff --git a/docs/operator-manual/notifications/troubleshooting-commands.md b/docs/operator-manual/notifications/troubleshooting-commands.md new file mode 100644 index 0000000000000..633eb47d71690 --- /dev/null +++ b/docs/operator-manual/notifications/troubleshooting-commands.md @@ -0,0 +1,221 @@ +## argocd admin notifications template get + +Prints information about configured templates + +``` +argocd admin notifications template get [flags] +``` + +### Examples + +``` + +# prints all templates +argocd admin notifications template get +# print YAML formatted app-sync-succeeded template definition +argocd admin notifications template get app-sync-succeeded -o=yaml + +``` + +### Options + +``` + -h, --help help for get + -o, --output string Output format. One of:json|yaml|wide|name (default "wide") +``` + +### Options inherited from parent commands + +``` + --argocd-repo-server string Argo CD repo server address (default "argocd-repo-server:8081") + --argocd-repo-server-plaintext Use a plaintext client (non-TLS) to connect to repository server + --argocd-repo-server-strict-tls Perform strict validation of TLS certificates when connecting to repo server + --as string Username to impersonate for the operation + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. 
+ --as-uid string UID to impersonate for the operation + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --config-map string argocd-notifications-cm.yaml file path + --context string The name of the kubeconfig context to use + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to a kube config. Only required if out-of-cluster + -n, --namespace string If present, the namespace scope for this CLI request + --password string Password for basic authentication to the API server + --proxy-url string If provided, this URL will be used to connect via proxy + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + --secret string argocd-notifications-secret.yaml file path. Use empty secret if provided value is ':empty' + --server string The address and port of the Kubernetes API server + --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. 
+ --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use + --username string Username for basic authentication to the API server +``` + +## argocd admin notifications template notify + +Generates notification using the specified template and send it to specified recipients + +``` +argocd admin notifications template notify NAME RESOURCE_NAME [flags] +``` + +### Examples + +``` + +# Trigger notification using in-cluster config map and secret +argocd admin notifications template notify app-sync-succeeded guestbook --recipient slack:my-slack-channel + +# Render notification render generated notification in console +argocd admin notifications template notify app-sync-succeeded guestbook + +``` + +### Options + +``` + -h, --help help for notify + --recipient stringArray List of recipients (default [console:stdout]) +``` + +### Options inherited from parent commands + +``` + --argocd-repo-server string Argo CD repo server address (default "argocd-repo-server:8081") + --argocd-repo-server-plaintext Use a plaintext client (non-TLS) to connect to repository server + --argocd-repo-server-strict-tls Perform strict validation of TLS certificates when connecting to repo server + --as string Username to impersonate for the operation + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --config-map string argocd-notifications-cm.yaml file path + --context string The name of the kubeconfig context to use + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure + --kubeconfig string Path to a kube config. Only required if out-of-cluster + -n, --namespace string If present, the namespace scope for this CLI request + --password string Password for basic authentication to the API server + --proxy-url string If provided, this URL will be used to connect via proxy + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + --secret string argocd-notifications-secret.yaml file path. Use empty secret if provided value is ':empty' + --server string The address and port of the Kubernetes API server + --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use + --username string Username for basic authentication to the API server +``` + +## argocd admin notifications trigger get + +Prints information about configured triggers + +``` +argocd admin notifications trigger get [flags] +``` + +### Examples + +``` + +# prints all triggers +argocd admin notifications trigger get +# print YAML formatted on-sync-failed trigger definition +argocd admin notifications trigger get on-sync-failed -o=yaml + +``` + +### Options + +``` + -h, --help help for get + -o, --output string Output format. 
One of:json|yaml|wide|name (default "wide") +``` + +### Options inherited from parent commands + +``` + --argocd-repo-server string Argo CD repo server address (default "argocd-repo-server:8081") + --argocd-repo-server-plaintext Use a plaintext client (non-TLS) to connect to repository server + --argocd-repo-server-strict-tls Perform strict validation of TLS certificates when connecting to repo server + --as string Username to impersonate for the operation + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --config-map string argocd-notifications-cm.yaml file path + --context string The name of the kubeconfig context to use + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to a kube config. Only required if out-of-cluster + -n, --namespace string If present, the namespace scope for this CLI request + --password string Password for basic authentication to the API server + --proxy-url string If provided, this URL will be used to connect via proxy + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + --secret string argocd-notifications-secret.yaml file path. Use empty secret if provided value is ':empty' + --server string The address and port of the Kubernetes API server + --tls-server-name string If provided, this name will be used to validate server certificate. 
If this is not provided, hostname used to contact the server is used. + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use + --username string Username for basic authentication to the API server +``` + +## argocd admin notifications trigger run + +Evaluates specified trigger condition and prints the result + +``` +argocd admin notifications trigger run NAME RESOURCE_NAME [flags] +``` + +### Examples + +``` + +# Execute trigger configured in 'argocd-notification-cm' ConfigMap +argocd admin notifications trigger run on-sync-status-unknown ./sample-app.yaml + +# Execute trigger using my-config-map.yaml instead of 'argocd-notifications-cm' ConfigMap +argocd admin notifications trigger run on-sync-status-unknown ./sample-app.yaml \ + --config-map ./my-config-map.yaml +``` + +### Options + +``` + -h, --help help for run +``` + +### Options inherited from parent commands + +``` + --argocd-repo-server string Argo CD repo server address (default "argocd-repo-server:8081") + --argocd-repo-server-plaintext Use a plaintext client (non-TLS) to connect to repository server + --argocd-repo-server-strict-tls Perform strict validation of TLS certificates when connecting to repo server + --as string Username to impersonate for the operation + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --config-map string argocd-notifications-cm.yaml file path + --context string The name of the kubeconfig context to use + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure + --kubeconfig string Path to a kube config. Only required if out-of-cluster + -n, --namespace string If present, the namespace scope for this CLI request + --password string Password for basic authentication to the API server + --proxy-url string If provided, this URL will be used to connect via proxy + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + --secret string argocd-notifications-secret.yaml file path. Use empty secret if provided value is ':empty' + --server string The address and port of the Kubernetes API server + --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use + --username string Username for basic authentication to the API server +``` + diff --git a/docs/operator-manual/notifications/troubleshooting-errors.md b/docs/operator-manual/notifications/troubleshooting-errors.md new file mode 100644 index 0000000000000..f76bb7a2b0d3f --- /dev/null +++ b/docs/operator-manual/notifications/troubleshooting-errors.md @@ -0,0 +1,41 @@ +## Failed to parse new settings + +### error converting YAML to JSON + +YAML syntax is incorrect. + +**incorrect:** + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-notifications-cm +data: + service.slack: | + token: $slack-token + icon: :rocket: +``` + +**correct:** + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-notifications-cm +data: + service.slack: | + token: $slack-token + icon: ":rocket:" +``` + +### service type 'xxxx' is not supported + +You need to check your argocd-notifications controller version. 
For instance, the teams integration is to support `v1.1.0` and more. + +## Failed to notify recipient + +### notification service 'xxxx' is not supported + +You have not defined `xxxx` in `argocd-notifications-cm` or to fail to parse settings. diff --git a/docs/operator-manual/notifications/troubleshooting.md b/docs/operator-manual/notifications/troubleshooting.md new file mode 100644 index 0000000000000..6e144bb0c9985 --- /dev/null +++ b/docs/operator-manual/notifications/troubleshooting.md @@ -0,0 +1,82 @@ +`argocd admin notifications` is a CLI command group that helps to configure the controller +settings and troubleshoot issues. Full command details are available in the [command reference](../../user-guide/commands/argocd_admin_notifications.md). + +## Global flags +The following global flags are available for all sub-commands: + +* `--config-map` - path to the file containing `argocd-notifications-cm` ConfigMap. If not specified +then the command loads `argocd-notification-cm` ConfigMap using the local Kubernetes config file. +* `--secret` - path to the file containing `argocd-notifications-secret` ConfigMap. If not +specified then the command loads `argocd-notification-secret` Secret using the local Kubernetes config file. +Additionally, you can specify `:empty` to use empty secret with no notification service settings. 
+ +**Examples:** + +* Get a list of triggers configured in the local config map: + + ```bash + argocd admin notifications trigger get \ + --config-map ./argocd admin notifications-cm.yaml --secret :empty + ``` + +* Trigger notification using in-cluster config map and secret: + + ```bash + argocd admin notifications template notify \ + app-sync-succeeded guestbook --recipient slack:argocd admin notifications + ``` + +## Kustomize + +If you are managing `argocd-notifications` config using Kustomize you can pipe whole `kustomize build` output +into stdin using `--config-map -` flag: + +```bash +kustomize build ./argocd-notifications | \ + argocd-notifications \ + template notify app-sync-succeeded guestbook --recipient grafana:argocd \ + --config-map - +``` + +## How to get it + +### On your laptop + +You can download the `argocd` CLI from the GitHub [release](https://github.com/argoproj/argo-cd/releases) +attachments. + +The binary is available in the `quay.io/argoproj/argocd` image. Use the `docker run` and volume mount +to execute binary on any platform. + +**Example:** + +```bash +docker run --rm -it -w /src -v $(pwd):/src \ + quay.io/argoproj/argocd: \ + /app/argocd admin notifications trigger get \ + --config-map ./argocd-notifications-cm.yaml --secret :empty +``` + +### In your cluster + +SSH into the running `argocd-notifications-controller` pod and use `kubectl exec` command to validate in-cluster +configuration. 
+ +**Example** +```bash +kubectl exec -it argocd-notifications-controller- \ + /app/argocd admin notifications trigger get +``` + +## Commands + +The following commands may help debug issues with notifications: + +* [`argocd admin notifications template get`](../../user-guide/commands/argocd_admin_notifications_template_get.md) +* [`argocd admin notifications template notify`](../../user-guide/commands/argocd_admin_notifications_template_notify.md) +* [`argocd admin notifications trigger get`](../../user-guide/commands/argocd_admin_notifications_trigger_get.md) +* [`argocd admin notifications trigger run`](../../user-guide/commands/argocd_admin_notifications_trigger_run.md) + +## Errors + +{!docs/operator-manual/notifications/troubleshooting-errors.md!} diff --git a/docs/operator-manual/project-specification.md b/docs/operator-manual/project-specification.md new file mode 100644 index 0000000000000..4d18eb1a9dd1b --- /dev/null +++ b/docs/operator-manual/project-specification.md @@ -0,0 +1,7 @@ +# Project Specification + +The following describes all the available fields of a Project: + +```yaml +{!docs/operator-manual/project.yaml!} +``` diff --git a/docs/operator-manual/project.yaml b/docs/operator-manual/project.yaml index f28e706e61f7d..c4d93f536239f 100644 --- a/docs/operator-manual/project.yaml +++ b/docs/operator-manual/project.yaml @@ -15,9 +15,11 @@ spec: - '*' # Only permit applications to deploy to the guestbook namespace in the same cluster + # Destination clusters can be identified by 'server', 'name', or both. destinations: - namespace: guestbook server: https://kubernetes.default.svc + name: in-cluster # Deny all cluster-scoped resources from being created, except for Namespace clusterResourceWhitelist: @@ -64,3 +66,33 @@ spec: # anywhere by Argo CD. It can be prematurely revoked by removing the entry from this list. jwtTokens: - iat: 1535390316 + + # Sync windows restrict when Applications may be synced. 
https://argo-cd.readthedocs.io/en/stable/user-guide/sync_windows/ + syncWindows: + - kind: allow + schedule: '10 1 * * *' + duration: 1h + applications: + - '*-prod' + manualSync: true + - kind: deny + schedule: '0 22 * * *' + duration: 1h + namespaces: + - default + - kind: allow + schedule: '0 23 * * *' + duration: 1h + clusters: + - in-cluster + - cluster1 + + # By default, apps may sync to any cluster specified under the `destinations` field, even if they are not + # scoped to this project. Set the following field to `true` to restrict apps in this cluster to only clusters + # scoped to this project. + permitOnlyProjectScopedClusters: false + + # When using Applications-in-any-namespace, this field determines which namespaces this AppProject permits + # Applications to reside in. Details: https://argo-cd.readthedocs.io/en/stable/operator-manual/app-any-namespace/ + sourceNamespaces: + - "argocd-apps-*" diff --git a/docs/operator-manual/rbac.md b/docs/operator-manual/rbac.md index 9285740b73dbf..0f15a18be1973 100644 --- a/docs/operator-manual/rbac.md +++ b/docs/operator-manual/rbac.md @@ -2,8 +2,8 @@ The RBAC feature enables restriction of access to Argo CD resources. Argo CD does not have its own user management system and has only one built-in user `admin`. The `admin` user is a superuser and -it has unrestricted access to the system. RBAC requires [SSO configuration](user-management/index.md) or [one or more local users setup](user-management/index.md). -Once SSO or local users are configured, additional RBAC roles can be defined, and SSO groups or local users can man be mapped to roles. +it has unrestricted access to the system. RBAC requires [SSO configuration](user-management/index.md) or [one or more local users setup](user-management/index.md). +Once SSO or local users are configured, additional RBAC roles can be defined, and SSO groups or local users can then be mapped to roles. 
## Basic Built-in Roles @@ -18,19 +18,109 @@ These default built-in role definitions can be seen in [builtin-policy.csv](http Breaking down the permissions definition differs slightly between applications and every other resource type in Argo CD. -* All resources *except* applications permissions (see next bullet): +* All resources *except* application-specific permissions (see next bullet): `p, , , , ` -* Applications (which belong to an AppProject): +* Applications, applicationsets, logs, and exec (which belong to an `AppProject`): `p, , , , /` ### RBAC Resources and Actions -Resources: `clusters`, `projects`, `applications`, `repositories`, `certificates`, `accounts`, `gpgkeys` +Resources: `clusters`, `projects`, `applications`, `applicationsets`, +`repositories`, `certificates`, `accounts`, `gpgkeys`, `logs`, `exec`, +`extensions` -Actions: `get`, `create`, `update`, `delete`, `sync`, `override`, `action` +Actions: `get`, `create`, `update`, `delete`, `sync`, `override`,`action/` + +Note that `sync`, `override`, and `action/` only have meaning for the `applications` resource. + +#### Application resources + +The resource path for application objects is of the form +`/`. + +Delete access to sub-resources of a project, such as a rollout or a pod, cannot +be managed granularly. `/` grants access to all +subresources of an application. + +#### The `action` action + +The `action` action corresponds to either built-in resource customizations defined +[in the Argo CD repository](https://github.com/argoproj/argo-cd/tree/master/resource_customizations), +or to [custom resource actions](resource_actions.md#custom-resource-actions) defined by you. +The `action` path is of the form `action///`. For +example, a resource customization path +`resource_customizations/extensions/DaemonSet/actions/restart/action.lua` +corresponds to the `action` path `action/extensions/DaemonSet/restart`. 
You can +also use glob patterns in the action path: `action/*` (or regex patterns if you have +[enabled the `regex` match mode](https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/argocd-rbac-cm.yaml)). + +If the resource is not under a group (for examples, Pods or ConfigMaps), then omit the group name from your RBAC +configuration: + +```csv +p, example-user, applications, action//Pod/maintenance-off, default/*, allow +``` + +#### The `exec` resource + +`exec` is a special resource. When enabled with the `create` action, this privilege allows a user to `exec` into Pods via +the Argo CD UI. The functionality is similar to `kubectl exec`. + +See [Web-based Terminal](web_based_terminal.md) for more info. + +#### The `applicationsets` resource + +[ApplicationSets](applicationset/index.md) provide a declarative way to automatically create/update/delete Applications. + +Granting `applicationsets, create` effectively grants the ability to create Applications. While it doesn't allow the +user to create Applications directly, they can create Applications via an ApplicationSet. + +In v2.5, it is not possible to create an ApplicationSet with a templated Project field (e.g. `project: {{path.basename}}`) +via the API (or, by extension, the CLI). Disallowing templated projects makes project restrictions via RBAC safe: + +```csv +p, dev-group, applicationsets, *, dev-project/*, allow +``` + +With this rule in place, a `dev-group` user will be unable to create an ApplicationSet capable of creating Applications +outside the `dev-project` project. + +#### The `extensions` resource + +With the `extensions` resource it is possible configure permissions to +invoke [proxy +extensions](../developer-guide/extensions/proxy-extensions.md). The +`extensions` RBAC validation works in conjunction with the +`applications` resource. 
A user logged in to Argo CD (UI or CLI) needs
+ *ArgoCD ConfigMap `argocd-rbac-cm` Example:* ```yaml @@ -56,6 +150,12 @@ data: p, role:org-admin, repositories, create, *, allow p, role:org-admin, repositories, update, *, allow p, role:org-admin, repositories, delete, *, allow + p, role:org-admin, projects, get, *, allow + p, role:org-admin, projects, create, *, allow + p, role:org-admin, projects, update, *, allow + p, role:org-admin, projects, delete, *, allow + p, role:org-admin, logs, get, *, allow + p, role:org-admin, exec, create, */*, allow g, your-github-org:your-team, role:org-admin ``` @@ -64,17 +164,81 @@ data: Another `policy.csv` example might look as follows: ```csv -p, role:staging-db-admins, applications, create, staging-db-admins/*, allow -p, role:staging-db-admins, applications, delete, staging-db-admins/*, allow -p, role:staging-db-admins, applications, get, staging-db-admins/*, allow -p, role:staging-db-admins, applications, override, staging-db-admins/*, allow -p, role:staging-db-admins, applications, sync, staging-db-admins/*, allow -p, role:staging-db-admins, applications, update, staging-db-admins/*, allow -p, role:staging-db-admins, projects, get, staging-db-admins, allow -g, db-admins, role:staging-db-admins +p, role:staging-db-admin, applications, create, staging-db-project/*, allow +p, role:staging-db-admin, applications, delete, staging-db-project/*, allow +p, role:staging-db-admin, applications, get, staging-db-project/*, allow +p, role:staging-db-admin, applications, override, staging-db-project/*, allow +p, role:staging-db-admin, applications, sync, staging-db-project/*, allow +p, role:staging-db-admin, applications, update, staging-db-project/*, allow +p, role:staging-db-admin, logs, get, staging-db-project/*, allow +p, role:staging-db-admin, exec, create, staging-db-project/*, allow +p, role:staging-db-admin, projects, get, staging-db-project, allow +g, db-admins, role:staging-db-admin +``` + +This example defines a *role* called `staging-db-admin` with nine *permissions* that 
allow users with that role to perform the following *actions*: + +* `create`, `delete`, `get`, `override`, `sync` and `update` for applications in the `staging-db-project` project, +* `get` logs for objects in the `staging-db-project` project, +* `create` exec for objects in the `staging-db-project` project, and +* `get` for the project named `staging-db-project`. + +!!! note + The `scopes` field controls which OIDC scopes to examine during rbac + enforcement (in addition to `sub` scope). If omitted, defaults to: + `'[groups]'`. The scope value can be a string, or a list of strings. + +Following example shows targeting `email` as well as `groups` from your OIDC provider. + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-rbac-cm + namespace: argocd + labels: + app.kubernetes.io/name: argocd-rbac-cm + app.kubernetes.io/part-of: argocd +data: + policy.csv: | + p, my-org:team-alpha, applications, sync, my-project/*, allow + g, my-org:team-beta, role:admin + g, user@example.org, role:admin + policy.default: role:readonly + scopes: '[groups, email]' ``` -This example defines a *role* called `staging-db-admins` with *seven permissions* that allow that role to perform the *actions* (`create`/`delete`/`get`/`override`/`sync`/`update` applications, and `get` appprojects) against `*` (all) objects in the `staging-db-admins` Argo CD AppProject. +For more information on `scopes` please review the [User Management Documentation](user-management/index.md). + +## Policy CSV Composition + +It is possible to provide additional entries in the `argocd-rbac-cm` +configmap to compose the final policy csv. In this case the key must +follow the pattern `policy..csv`. Argo CD will concatenate +all additional policies it finds with this pattern below the main one +('policy.csv'). The order of additional provided policies are +determined by the key string. 
Example: if two additional policies are +provided with keys `policy.A.csv` and `policy.B.csv`, it will first +concatenate `policy.A.csv` and then `policy.B.csv`. + +This is useful to allow composing policies in config management tools +like Kustomize, Helm, etc. + +The example below shows how a Kustomize patch can be provided in an +overlay to add additional configuration to an existing RBAC policy. + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-rbac-cm + namespace: argocd +data: + policy.tester-overlay.csv: | + p, role:tester, applications, *, */*, allow + p, role:tester, projects, *, *, allow + g, my-org:team-qa, role:tester +``` ## Anonymous Access @@ -84,27 +248,27 @@ The anonymous users get default role permissions specified by `policy.default` i ## Validating and testing your RBAC policies If you want to ensure that your RBAC policies are working as expected, you can -use the `argocd-util settings rbac` command to validate them. This tool allows you to +use the `argocd admin settings rbac` command to validate them. This tool allows you to test whether a certain role or subject can perform the requested action with a policy that's not live yet in the system, i.e. from a local file or config map. Additionally, it can be used against the live policy in the cluster your Argo CD is running in. To check whether your new policy is valid and understood by Argo CD's RBAC -implementation, you can use the `argocd-util settings rbac validate` command. +implementation, you can use the `argocd admin settings rbac validate` command. 
### Validating a policy To validate a policy stored in a local text file: ```shell -argocd-util settings rbac validate --policy-file somepolicy.csv +argocd admin settings rbac validate --policy-file somepolicy.csv ``` To validate a policy stored in a local K8s ConfigMap definition in a YAML file: ```shell -argocd-util settings rbac validate --policy-file argocd-rbac-cm.yaml +argocd admin settings rbac validate --policy-file argocd-rbac-cm.yaml ``` To validate a policy stored in K8s, used by Argo CD in namespace `argocd`, @@ -112,55 +276,60 @@ ensure that your current context in `~/.kube/config` is pointing to your Argo CD cluster and give appropriate namespace: ```shell -argocd-util settings rbac validate --namespace argocd +argocd admin settings rbac validate --namespace argocd ``` ### Testing a policy To test whether a role or subject (group or local user) has sufficient permissions to execute certain actions on certain resources, you can -use the `argocd-util settings rbac can` command. Its general syntax is +use the `argocd admin settings rbac can` command. 
Its general syntax is ```shell -argocd-util settings rbac can SOMEROLE ACTION RESOURCE SUBRESOURCE [flags] +argocd admin settings rbac can SOMEROLE ACTION RESOURCE SUBRESOURCE [flags] ``` Given the example from the above ConfigMap, which defines the role `role:org-admin`, and is stored on your local system as `argocd-rbac-cm-yaml`, you can test whether that role can do something like follows: -```shell -$ argocd-util settings rbac can role:org-admin get applications --policy-file argocd-rbac-cm.yaml +```console +$ argocd admin settings rbac can role:org-admin get applications --policy-file argocd-rbac-cm.yaml Yes -$ argocd-util settings rbac can role:org-admin get clusters --policy-file argocd-rbac-cm.yaml + +$ argocd admin settings rbac can role:org-admin get clusters --policy-file argocd-rbac-cm.yaml Yes -$ argocd-util settings rbac can role:org-admin create clusters 'somecluster' --policy-file argocd-rbac-cm.yaml + +$ argocd admin settings rbac can role:org-admin create clusters 'somecluster' --policy-file argocd-rbac-cm.yaml No -$ argocd-util settings rbac can role:org-admin create applications 'someproj/someapp' --policy-file argocd-rbac-cm.yaml + +$ argocd admin settings rbac can role:org-admin create applications 'someproj/someapp' --policy-file argocd-rbac-cm.yaml Yes ``` Another example, given the policy above from `policy.csv`, which defines the -role `role:staging-db-admins` and associates the group `db-admins` with it. +role `role:staging-db-admin` and associates the group `db-admins` with it. 
Policy is stored locally as `policy.csv`: You can test against the role: -```shell -# Plain policy, without a default role defined -$ argocd-util settings rbac can role:stagin-db-admins get applications --policy-file policy.csv +```console +$ # Plain policy, without a default role defined +$ argocd admin settings rbac can role:staging-db-admin get applications --policy-file policy.csv No -$ argocd-util settings rbac can role:staging-db-admins get applications 'staging-db-admins/*' --policy-file policy.csv + +$ argocd admin settings rbac can role:staging-db-admin get applications 'staging-db-project/*' --policy-file policy.csv Yes -# Argo CD augments a builtin policy with two roles defined, the default role -# being 'role:readonly' - You can include a named default role to use: -$ argocd-util settings rbac can role:stagin-db-admins get applications --policy-file policy.csv --default-role role:readonly + +$ # Argo CD augments a builtin policy with two roles defined, the default role +$ # being 'role:readonly' - You can include a named default role to use: +$ argocd admin settings rbac can role:staging-db-admin get applications --policy-file policy.csv --default-role role:readonly Yes ``` Or against the group defined: -```shell -$ argocd-util settings rbac can db-admins get applications 'staging-db-admins/*' --policy-file policy.csv +```console +$ argocd admin settings rbac can db-admins get applications 'staging-db-project/*' --policy-file policy.csv Yes ``` diff --git a/docs/operator-manual/reconcile.md b/docs/operator-manual/reconcile.md new file mode 100644 index 0000000000000..a956cd9cf7b28 --- /dev/null +++ b/docs/operator-manual/reconcile.md @@ -0,0 +1,113 @@ +# Reconcile Optimization + +By default, an Argo CD Application is refreshed every time a resource that belongs to it changes. 
+ +Kubernetes controllers often update the resources they watch periodically, causing continuous reconcile operation on the Application +and a high CPU usage on the `argocd-application-controller`. Argo CD allows you to optionally ignore resource updates on specific fields +for [tracked resources](../user-guide/resource_tracking.md). + +When a resource update is ignored, if the resource's [health status](./health.md) does not change, the Application that this resource belongs to will not be reconciled. + +## System-Level Configuration + +Argo CD allows ignoring resource updates at a specific JSON path, using [RFC6902 JSON patches](https://tools.ietf.org/html/rfc6902) and [JQ path expressions](https://stedolan.github.io/jq/manual/#path(path_expression)). It can be configured for a specified group and kind +in `resource.customizations` key of the `argocd-cm` ConfigMap. + +!!!important "Enabling the feature" + The feature is behind a flag. To enable it, set `resource.ignoreResourceUpdatesEnabled` to `"true"` in the `argocd-cm` ConfigMap. + +Following is an example of a customization which ignores the `refreshTime` status field of an [`ExternalSecret`](https://external-secrets.io/main/api/externalsecret/) resource: + +```yaml +data: + resource.customizations.ignoreResourceUpdates.external-secrets.io_ExternalSecret: | + jsonPointers: + - /status/refreshTime + # JQ equivalent of the above: + # jqPathExpressions: + # - .status.refreshTime +``` + +It is possible to configure `ignoreResourceUpdates` to be applied to all tracked resources in every Application managed by an Argo CD instance. In order to do so, resource customizations can be configured like in the example below: + +```yaml +data: + resource.customizations.ignoreResourceUpdates.all: | + jsonPointers: + - /status +``` + +### Using ignoreDifferences to ignore reconcile + +It is possible to use existing system-level `ignoreDifferences` customizations to ignore resource updates as well. 
Instead of copying all configurations, +the `ignoreDifferencesOnResourceUpdates` setting can be used to add all ignored differences as ignored resource updates: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-cm +data: + resource.compareoptions: | + ignoreDifferencesOnResourceUpdates: true +``` + +## Default Configuration + +By default, the metadata fields `generation`, `resourceVersion` and `managedFields` are always ignored for all resources. + +## Finding Resources to Ignore + +The application controller logs when a resource change triggers a refresh. You can use these logs to find +high-churn resource kinds and then inspect those resources to find which fields to ignore. + +To find these logs, search for `"Requesting app refresh caused by object update"`. The logs include structured +fields for `api-version` and `kind`. Counting the number of refreshes triggered, by api-version/kind should +reveal the high-churn resource kinds. + +!!!note + These logs are at the `debug` level. Configure the application-controller's log level to `debug`. + +Once you have identified some resources which change often, you can try to determine which fields are changing. Here is +one approach: + +```shell +kubectl get -o yaml > /tmp/before.yaml +# Wait a minute or two. +kubectl get -o yaml > /tmp/after.yaml +diff /tmp/before.yaml /tmp/after +``` + +The diff can give you a sense for which fields are changing and should perhaps be ignored. + +## Checking Whether Resource Updates are Ignored + +Whenever Argo CD skips a refresh due to an ignored resource update, the controller logs the following line: +"Ignoring change of object because none of the watched resource fields have changed". + +Search the application-controller logs for this line to confirm that your resource ignore rules are being applied. + +!!!note + These logs are at the `debug` level. Configure the application-controller's log level to `debug`. 
+ +## Examples + +### argoproj.io/Application + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-cm +data: + resource.customizations.ignoreResourceUpdates.argoproj.io_Application: | + jsonPointers: + # Ignore when ownerReferences change, for example when a parent ApplicationSet changes often. + - /metadata/ownerReferences + # Ignore reconciledAt, since by itself it doesn't indicate any important change. + - /status/reconciledAt + jqPathExpressions: + # Ignore lastTransitionTime for conditions; helpful when SharedResourceWarnings are being regularly updated but not + # actually changing in content. + - .status.conditions[].lastTransitionTime +``` diff --git a/docs/operator-manual/resource_actions.md b/docs/operator-manual/resource_actions.md index 3c219843ab62a..b720f589ae8d0 100644 --- a/docs/operator-manual/resource_actions.md +++ b/docs/operator-manual/resource_actions.md @@ -9,51 +9,175 @@ Operators can add actions to custom resources in form of a Lua script and expand Argo CD supports custom resource actions written in [Lua](https://www.lua.org/). This is useful if you: - * Have a custom resource for which Argo CD does not provide any built-in actions. - * Have a commonly performed manual task that might be error prone if executed by users via `kubectl` +* Have a custom resource for which Argo CD does not provide any built-in actions. +* Have a commonly performed manual task that might be error prone if executed by users via `kubectl` +The resource actions act on a single object. You can define your own custom resource actions in the `argocd-cm` ConfigMap. +### Custom Resource Action Types + +#### An action that modifies the source resource + +This action modifies and returns the source resource. +This kind of action was the only one available till 2.8, and it is still supported. 
+ +#### An action that produces a list of new or modified resources + +**An alpha feature, introduced in 2.8.** + +This action returns a list of impacted resources, each impacted resource has a K8S resource and an operation to perform on. +Currently supported operations are "create" and "patch", "patch" is only supported for the source resource. +Creating new resources is possible, by specifying a "create" operation for each such resource in the returned list. +One of the returned resources can be the modified source object, with a "patch" operation, if needed. +See the definition examples below. + ### Define a Custom Resource Action in `argocd-cm` ConfigMap -Custom resource actions can be defined in `resource.customizations` field of `argocd-cm`. Following example demonstrates a set of custom actions for `CronJob` resources. The customizations key is in the format of `apiGroup/Kind`. +Custom resource actions can be defined in `resource.customizations.actions.` field of `argocd-cm`. Following example demonstrates a set of custom actions for `CronJob` resources, each such action returns the modified CronJob. +The customizations key is in the format of `resource.customizations.actions.`. 
```yaml -resource.customizations: | - batch/CronJob: - actions: | - discovery.lua: | - actions = {} - actions["suspend"] = {["disabled"] = true} - actions["resume"] = {["disabled"] = true} - - local suspend = false - if obj.spec.suspend ~= nil then - suspend = obj.spec.suspend - end - if suspend then - actions["resume"]["disabled"] = false - else - actions["suspend"]["disabled"] = false - end - - return actions - definitions: - - name: suspend - action.lua: | - obj.spec.suspend = true - - return obj - - name: resume - action.lua: | - if obj.spec.suspend ~= nil and obj.spec.suspend then - obj.spec.suspend = false - end - - return obj +resource.customizations.actions.batch_CronJob: | + discovery.lua: | + actions = {} + actions["suspend"] = {["disabled"] = true} + actions["resume"] = {["disabled"] = true} + + local suspend = false + if obj.spec.suspend ~= nil then + suspend = obj.spec.suspend + end + if suspend then + actions["resume"]["disabled"] = false + else + actions["suspend"]["disabled"] = false + end + return actions + definitions: + - name: suspend + action.lua: | + obj.spec.suspend = true + return obj + - name: resume + action.lua: | + if obj.spec.suspend ~= nil and obj.spec.suspend then + obj.spec.suspend = false + end + return obj ``` The `discovery.lua` script must return a table where the key name represents the action name. You can optionally include logic to enable or disable certain actions based on the current object state. -Each action name must be represented in the list of `definitions` with an accompanying `action.lua` script to control the resource modifications. The `obj` is a global variable which contains the resource. Each action script must return an optionally modified version of the resource. In this example, we are simply setting `.spec.suspend` to either `true` or `false`. +Each action name must be represented in the list of `definitions` with an accompanying `action.lua` script to control the resource modifications. 
The `obj` is a global variable which contains the resource. Each action script returns an optionally modified version of the resource. In this example, we are simply setting `.spec.suspend` to either `true` or `false`.
+
+#### Creating new resources with a custom action
+
+!!! important
+    Creating resources via the Argo CD UI is an intentional, strategic departure from GitOps principles. We recommend
+    that you use this feature sparingly and only for resources that are not part of the desired state of the
+    application.
+
+The resource the action is invoked on would be referred to as the `source resource`.
+The new resource and all the resources implicitly created as a result must be permitted on the AppProject level, otherwise the creation will fail.
+
+##### Creating a source resource's child resources with a custom action
+
+If the new resource represents a k8s child of the source resource, the source resource ownerReference must be set on the new resource.
+Here is an example Lua snippet that takes care of constructing a Job resource that is a child of a source CronJob resource - the `obj` is a global variable, which contains the source resource:
+
+```lua
+-- ...
+ownerRef = {}
+ownerRef.apiVersion = obj.apiVersion
+ownerRef.kind = obj.kind
+ownerRef.name = obj.metadata.name
+ownerRef.uid = obj.metadata.uid
+job = {}
+job.metadata = {}
+job.metadata.ownerReferences = {}
+job.metadata.ownerReferences[1] = ownerRef
+-- ...
+```
+
+##### Creating independent child resources with a custom action
+
+If the new resource is independent of the source resource, the default behavior of such new resource is that it is not known by the App of the source resource (as it is not part of the desired state and does not have an `ownerReference`).
+To make the App aware of the new resource, the `app.kubernetes.io/instance` label (or other ArgoCD tracking label, if configured) must be set on the resource.
+It can be copied from the source resource, like this:
+
+```lua
+-- ... 
+newObj = {} +newObj.metadata = {} +newObj.metadata.labels = {} +newObj.metadata.labels["app.kubernetes.io/instance"] = obj.metadata.labels["app.kubernetes.io/instance"] +-- ... +``` + +While the new resource will be part of the App with the tracking label in place, it will be immediately deleted if auto prune is set on the App. +To keep the resource, set `Prune=false` annotation on the resource, with this Lua snippet: + +```lua +-- ... +newObj.metadata.annotations = {} +newObj.metadata.annotations["argocd.argoproj.io/sync-options"] = "Prune=false" +-- ... +``` + +(If setting `Prune=false` behavior, the resource will not be deleted upon the deletion of the App, and will require a manual cleanup). + +The resource and the App will now appear out of sync - which is the expected ArgoCD behavior upon creating a resource that is not part of the desired state. + +If you wish to treat such an App as a synced one, add the following resource annotation in Lua code: + +```lua +-- ... +newObj.metadata.annotations["argocd.argoproj.io/compare-options"] = "IgnoreExtraneous" +-- ... 
+``` + +#### An action that produces a list of resources - a complete example: + +```yaml +resource.customizations.actions.ConfigMap: | + discovery.lua: | + actions = {} + actions["do-things"] = {} + return actions + definitions: + - name: do-things + action.lua: | + -- Create a new ConfigMap + cm1 = {} + cm1.apiVersion = "v1" + cm1.kind = "ConfigMap" + cm1.metadata = {} + cm1.metadata.name = "cm1" + cm1.metadata.namespace = obj.metadata.namespace + cm1.metadata.labels = {} + -- Copy ArgoCD tracking label so that the resource is recognized by the App + cm1.metadata.labels["app.kubernetes.io/instance"] = obj.metadata.labels["app.kubernetes.io/instance"] + cm1.metadata.annotations = {} + -- For Apps with auto-prune, set the prune false on the resource, so it does not get deleted + cm1.metadata.annotations["argocd.argoproj.io/sync-options"] = "Prune=false" + -- Keep the App synced even though it has a resource that is not in Git + cm1.metadata.annotations["argocd.argoproj.io/compare-options"] = "IgnoreExtraneous" + cm1.data = {} + cm1.data.myKey1 = "myValue1" + impactedResource1 = {} + impactedResource1.operation = "create" + impactedResource1.resource = cm1 + + -- Patch the original cm + obj.metadata.labels["aKey"] = "aValue" + impactedResource2 = {} + impactedResource2.operation = "patch" + impactedResource2.resource = obj + + result = {} + result[1] = impactedResource1 + result[2] = impactedResource2 + return result +``` \ No newline at end of file diff --git a/docs/operator-manual/secret-management.md b/docs/operator-manual/secret-management.md index 42e608c601f02..ab06a46014b20 100644 --- a/docs/operator-manual/secret-management.md +++ b/docs/operator-manual/secret-management.md @@ -1,16 +1,37 @@ # Secret Management -Argo CD is un-opinionated about how secrets are managed. There's many ways to do it and there's no one-size-fits-all solution. Here's some ways people are doing GitOps secrets: +Argo CD is un-opinionated about how secrets are managed. 
There are many ways to do it, and there's no one-size-fits-all solution.
+
+Many solutions use plugins to inject secrets into the application manifests. See [Mitigating Risks of Secret-Injection Plugins](#mitigating-risks-of-secret-injection-plugins)
+below to make sure you use those plugins securely.
+
+Here are some ways people are doing GitOps secrets:
 
 * [Bitnami Sealed Secrets](https://github.com/bitnami-labs/sealed-secrets)
-* [GoDaddy Kubernetes External Secrets](https://github.com/godaddy/kubernetes-external-secrets)
-* [External Secrets Operator](https://github.com/ContainerSolutions/externalsecret-operator)
+* [External Secrets Operator](https://github.com/external-secrets/external-secrets)
 * [Hashicorp Vault](https://www.vaultproject.io)
-* [Banzai Cloud Bank-Vaults](https://github.com/banzaicloud/bank-vaults)
-* [Helm Secrets](https://github.com/futuresimple/helm-secrets)
+* [Bank-Vaults](https://bank-vaults.dev/)
+* [Helm Secrets](https://github.com/jkroepke/helm-secrets)
 * [Kustomize secret generator plugins](https://github.com/kubernetes-sigs/kustomize/blob/fd7a353df6cece4629b8e8ad56b71e30636f38fc/examples/kvSourceGoPlugin.md#secret-values-from-anywhere)
 * [aws-secret-operator](https://github.com/mumoshu/aws-secret-operator)
 * [KSOPS](https://github.com/viaduct-ai/kustomize-sops#argo-cd-integration)
-
+* [argocd-vault-plugin](https://github.com/argoproj-labs/argocd-vault-plugin)
+* [argocd-vault-replacer](https://github.com/crumbhole/argocd-vault-replacer)
+* [Kubernetes Secrets Store CSI Driver](https://github.com/kubernetes-sigs/secrets-store-csi-driver)
+* [Vals-Operator](https://github.com/digitalis-io/vals-operator)
 
 For discussion, see [#1364](https://github.com/argoproj/argo-cd/issues/1364)
+
+## Mitigating Risks of Secret-Injection Plugins
+
+Argo CD caches the manifests generated by plugins, along with the injected secrets, in its Redis instance. Those
+manifests are also available via the repo-server API (a gRPC service). 
This means that the secrets are available to +anyone who has access to the Redis instance or to the repo-server. + +Consider these steps to mitigate the risks of secret-injection plugins: + +1. Set up network policies to prevent direct access to Argo CD components (Redis and the repo-server). Make sure your + cluster supports those network policies and can actually enforce them. +2. Consider running Argo CD on its own cluster, with no other applications running on it. +3. [Enable password authentication on the Redis instance](https://github.com/argoproj/argo-cd/issues/3130) (currently + only supported for non-HA Argo CD installations). diff --git a/docs/operator-manual/security.md b/docs/operator-manual/security.md index cdea22223e713..3ba9fdfe39363 100644 --- a/docs/operator-manual/security.md +++ b/docs/operator-manual/security.md @@ -11,7 +11,8 @@ Authentication to Argo CD API server is performed exclusively using [JSON Web To in one of the following ways: 1. For the local `admin` user, a username/password is exchanged for a JWT using the `/api/v1/session` - endpoint. This token is signed & issued by the Argo CD API server itself, and has no expiration. + endpoint. This token is signed & issued by the Argo CD API server itself and it expires after 24 hours + (this token used not to expire, see [CVE-2021-26921](https://github.com/argoproj/argo-cd/security/advisories/GHSA-9h6w-j7w4-jr52)). When the admin password is updated, all existing admin JWT tokens are immediately revoked. The password is stored as a bcrypt hash in the [`argocd-secret`](https://github.com/argoproj/argo-cd/blob/master/manifests/base/config/argocd-secret.yaml) Secret. @@ -37,6 +38,54 @@ permits access to the API request. All network communication is performed over TLS including service-to-service communication between the three components (argocd-server, argocd-repo-server, argocd-application-controller). 
The Argo CD API server can enforce the use of TLS 1.2 using the flag: `--tlsminversion 1.2`.
+Communication with Redis is performed over plain HTTP by default. TLS can be set up with command line arguments.
+
+## Git & Helm Repositories
+
+Git and helm repositories are managed by a stand-alone service, called the repo-server. The
+repo-server does not carry any Kubernetes privileges and does not store credentials to any services
+(including git). The repo-server is responsible for cloning repositories which have been permitted
+and trusted by Argo CD operators, and generating kubernetes manifests at a given path in the
+repository. For performance and bandwidth efficiency, the repo-server maintains local clones of
+these repositories so that subsequent commits to the repository are efficiently downloaded.
+
+There are security considerations when configuring git repositories that Argo CD is permitted to
+deploy from. In short, gaining unauthorized write access to a git repository trusted by Argo CD
+will have serious security implications outlined below.
+
+### Unauthorized Deployments
+
+Since Argo CD deploys the Kubernetes resources defined in git, an attacker with access to a trusted
+git repo would be able to affect the Kubernetes resources which are deployed. For example, an
+attacker could update the deployment manifest to deploy malicious container images to the environment,
+or delete resources in git causing them to be pruned in the live environment.
+
+### Tool command invocation
+
+In addition to raw YAML, Argo CD natively supports two popular Kubernetes config management tools,
+helm and kustomize. When rendering manifests, Argo CD executes these config management tools
+(i.e. `helm template`, `kustomize build`) to generate the manifests. It is possible that an attacker
+with write access to a trusted git repository may construct malicious helm charts or kustomizations
+that attempt to read files out-of-tree. 
This includes adjacent git repos, as well as files on the +repo-server itself. Whether or not this is a risk to your organization depends on if the contents +in the git repos are sensitive in nature. By default, the repo-server itself does not contain +sensitive information, but might be configured with Config Management Plugins which do +(e.g. decryption keys). If such plugins are used, extreme care must be taken to ensure the +repository contents can be trusted at all times. + +Optionally the built-in config management tools might be individually disabled. +If you know that your users will not need a certain config management tool, it's advisable +to disable that tool. +See [Tool Detection](../user-guide/tool_detection.md) for more information. + +### Remote bases and helm chart dependencies + +Argo CD's repository allow-list only restricts the initial repository which is cloned. However, both +kustomize and helm contain features to reference and follow *additional* repositories +(e.g. kustomize remote bases, helm chart dependencies), of which might not be in the repository +allow-list. Argo CD operators must understand that users with write access to trusted git +repositories could reference other remote git repositories containing Kubernetes resources not +easily searchable or auditable in the configured git repositories. ## Sensitive Information @@ -70,6 +119,13 @@ kubectl delete secret argocd-manager-token-XXXXXX -n kube-system argocd cluster add CONTEXTNAME ``` +!!! note + Kubernetes 1.24 [stopped automatically creating tokens for Service Accounts](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#no-really-you-must-read-this-before-you-upgrade). + [Starting in Argo CD 2.4](https://github.com/argoproj/argo-cd/pull/9546), `argocd cluster add` creates a + ServiceAccount _and_ a non-expiring Service Account token Secret when adding 1.24 clusters. 
In the future, Argo CD + will [add support for the Kubernetes TokenRequest API](https://github.com/argoproj/argo-cd/issues/9610) to avoid + using long-lived tokens. + To revoke Argo CD's access to a managed cluster, delete the RBAC artifacts against the *_managed_* cluster, and remove the cluster entry from Argo CD: @@ -117,7 +173,7 @@ kubectl edit clusterrole argocd-application-controller ``` !!! tip - If you want to deny ArgoCD access to a kind of resource then add it as an [excluded resource](declarative-setup.md#resource-exclusion). + If you want to deny Argo CD access to a kind of resource then add it as an [excluded resource](declarative-setup.md#resource-exclusion). ## Auditing @@ -153,3 +209,77 @@ Payloads from webhook events are considered untrusted. Argo CD only examines the the involved applications of the webhook event (e.g. which repo was modified), then refreshes the related application for reconciliation. This refresh is the same refresh which occurs regularly at three minute intervals, just fast-tracked by the webhook event. + +## Logging + +### Security field + +Security-related logs are tagged with a `security` field to make them easier to find, analyze, and report on. 
+ +| Level | Friendly Level | Description | Example | +|-------|----------------|---------------------------------------------------------------------------------------------------|---------------------------------------------| +| 1 | Low | Unexceptional, non-malicious events | Successful access | +| 2 | Medium | Could indicate malicious events, but has a high likelihood of being user/system error | Access denied | +| 3 | High | Likely malicious events but one that had no side effects or was blocked | Out of bounds symlinks in repo | +| 4 | Critical | Any malicious or exploitable event that had a side effect | Secrets being left behind on the filesystem | +| 5 | Emergency | Unmistakably malicious events that should NEVER occur accidentally and indicates an active attack | Brute forcing of accounts | + +Where applicable, a `CWE` field is also added specifying the [Common Weakness Enumeration](https://cwe.mitre.org/index.html) number. + +!!! warning + Please be aware that not all security logs are comprehensively tagged yet and these examples are not necessarily implemented. + +### API Logs + +Argo CD logs payloads of most API requests except request that are considered sensitive, such as +`/cluster.ClusterService/Create`, `/session.SessionService/Create` etc. The full list of method +can be found in [server/server.go](https://github.com/argoproj/argo-cd/blob/abba8dddce8cd897ba23320e3715690f465b4a95/server/server.go#L516). + +Argo CD does not log IP addresses of clients requesting API endpoints, since the API server is typically behind a proxy. Instead, it is recommended +to configure IP addresses logging in the proxy server that sits in front of the API server. + +## ApplicationSets + +Argo CD's ApplicationSets feature has its own [security considerations](./applicationset/Security.md). Be aware of those +issues before using ApplicationSets. 
+
+## Limiting Directory App Memory Usage
+
+> >2.2.10, >2.1.16, >2.3.5
+
+Directory-type Applications (those whose source is raw JSON or YAML files) can consume significant
+[repo-server](architecture.md#repository-server) memory, depending on the size and structure of the YAML files.
+
+To avoid over-using memory in the repo-server (potentially causing a crash and denial of service), set the
+`reposerver.max.combined.directory.manifests.size` config option in [argocd-cmd-params-cm](argocd-cmd-params-cm.yaml).
+
+This option limits the combined size of all JSON or YAML files in an individual app. Note that the in-memory
+representation of a manifest may be as much as 300x the size of the manifest on disk. Also note that the limit is per
+Application. If manifests are generated for multiple applications at once, memory usage will be higher.
+
+**Example:**
+
+Suppose your repo-server has a 10G memory limit, and you have ten Applications which use raw JSON or YAML files. To
+calculate the max safe combined file size per Application, divide 10G by 300 * 10 Apps (300 being the worst-case memory
+growth factor for the manifests).
+
+```
+10G / (300 * 10) = ~3M
+```
+
+So a reasonably safe configuration for this setup would be a 3M limit per app.
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: argocd-cmd-params-cm
+data:
+  reposerver.max.combined.directory.manifests.size: '3M'
+```
+
+The 300x ratio assumes a maliciously-crafted manifest file. If you only want to protect against accidental excessive
+memory use, it is probably safe to use a smaller ratio.
+
+Keep in mind that if a malicious user can create additional Applications, they can increase the total memory usage.
+Grant [App creation privileges](rbac.md) carefully. 
diff --git a/docs/operator-manual/server-commands/additional-configuration-method.md b/docs/operator-manual/server-commands/additional-configuration-method.md new file mode 100644 index 0000000000000..cc80ed3aeb9c8 --- /dev/null +++ b/docs/operator-manual/server-commands/additional-configuration-method.md @@ -0,0 +1,28 @@ +## Additional configuration methods + +Additional configuration methods for configuring commands `argocd-server`, `argocd-repo-server` and `argocd-application-controller`. + + +### Synopsis + +The commands can also be configured by setting the respective flag of the available options in `argocd-cmd-params-cm.yaml`. Each component has a specific prefix associated with it. + +``` +argocd-server --> server +argocd-repo-server --> reposerver +argocd-application-controller --> controller +``` + +The flags that do not have a prefix are shared across multiple components. One such flag is `repo.server` +The list of flags that are available can be found in [argocd-cmd-params-cm.yaml](../argocd-cmd-params-cm.yaml) + + +### Example + +To set `logformat` of `argocd-application-controller`, add below entry to the config map `argocd-cmd-params-cm.yaml`. + +``` +data: + controller.log.format: "text" +``` + diff --git a/docs/operator-manual/server-commands/argocd-application-controller.md b/docs/operator-manual/server-commands/argocd-application-controller.md index c9bb6b2d05809..21d26b29c572e 100644 --- a/docs/operator-manual/server-commands/argocd-application-controller.md +++ b/docs/operator-manual/server-commands/argocd-application-controller.md @@ -1,3 +1,5 @@ +# `argocd-application-controller` Command Reference + ## argocd-application-controller Run ArgoCD Application Controller @@ -13,43 +15,59 @@ argocd-application-controller [flags] ### Options ``` - --app-resync int Time period in seconds for application resync. 
(default 180) - --app-state-cache-expiration duration Cache expiration for app state (default 1h0m0s) - --as string Username to impersonate for the operation - --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. - --certificate-authority string Path to a cert file for the certificate authority - --client-certificate string Path to a client certificate file for TLS - --client-key string Path to a client key file for TLS - --cluster string The name of the kubeconfig cluster to use - --context string The name of the kubeconfig context to use - --default-cache-expiration duration Cache expiration default (default 24h0m0s) - --gloglevel int Set the glog logging level - -h, --help help for argocd-application-controller - --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - --kubeconfig string Path to a kube config. Only required if out-of-cluster - --kubectl-parallelism-limit int Number of allowed concurrent kubectl fork/execs. Any value less the 1 means no limit. (default 20) - --logformat string Set the logging format. One of: text|json (default "text") - --loglevel string Set the logging level. One of: debug|info|warn|error (default "info") - --metrics-cache-expiration duration Prometheus metrics cache expiration (disabled by default. e.g. 24h0m0s) - --metrics-port int Start metrics server on given port (default 8082) - -n, --namespace string If present, the namespace scope for this CLI request - --operation-processors int Number of application operation processors (default 1) - --password string Password for basic authentication to the API server - --redis string Redis server hostname and port (e.g. argocd-redis:6379). - --redisdb int Redis database. - --repo-server string Repo server address. 
(default "argocd-repo-server:8081") - --repo-server-plaintext Disable TLS on connections to repo server - --repo-server-strict-tls Whether to use strict validation of the TLS cert presented by the repo server - --repo-server-timeout-seconds int Repo server RPC call timeout seconds. (default 60) - --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") - --self-heal-timeout-seconds int Specifies timeout between application self heal attempts (default 5) - --sentinel stringArray Redis sentinel hostname and port (e.g. argocd-redis-ha-announce-0:6379). - --sentinelmaster string Redis sentinel master group name. (default "master") - --server string The address and port of the Kubernetes API server - --status-processors int Number of application status processors (default 1) - --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. - --token string Bearer token for authentication to the API server - --user string The name of the kubeconfig user to use - --username string Username for basic authentication to the API server + --app-hard-resync int Time period in seconds for application hard resync. + --app-resync int Time period in seconds for application resync. (default 180) + --app-state-cache-expiration duration Cache expiration for app state (default 1h0m0s) + --application-namespaces strings List of additional namespaces that applications are allowed to be reconciled from + --as string Username to impersonate for the operation + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. 
+ --as-uid string UID to impersonate for the operation + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --default-cache-expiration duration Cache expiration default (default 24h0m0s) + --dynamic-cluster-distribution-enabled Enables dynamic cluster distribution. + --gloglevel int Set the glog logging level + -h, --help help for argocd-application-controller + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to a kube config. Only required if out-of-cluster + --kubectl-parallelism-limit int Number of allowed concurrent kubectl fork/execs. Any value less than 1 means no limit. (default 20) + --logformat string Set the logging format. One of: text|json (default "text") + --loglevel string Set the logging level. One of: debug|info|warn|error (default "info") + --metrics-application-labels strings List of Application labels that will be added to the argocd_application_labels metric + --metrics-cache-expiration duration Prometheus metrics cache expiration (disabled by default. e.g. 24h0m0s) + --metrics-port int Start metrics server on given port (default 8082) + -n, --namespace string If present, the namespace scope for this CLI request + --operation-processors int Number of application operation processors (default 10) + --otlp-address string OpenTelemetry collector address to send traces to + --otlp-attrs strings List of OpenTelemetry collector extra attrs when send traces, each attribute is separated by a colon(e.g. 
key:value) + --password string Password for basic authentication to the API server + --persist-resource-health Enables storing the managed resources health in the Application CRD (default true) + --proxy-url string If provided, this URL will be used to connect via proxy + --redis string Redis server hostname and port (e.g. argocd-redis:6379). + --redis-ca-certificate string Path to Redis server CA certificate (e.g. /etc/certs/redis/ca.crt). If not specified, system trusted CAs will be used for server certificate validation. + --redis-client-certificate string Path to Redis client certificate (e.g. /etc/certs/redis/client.crt). + --redis-client-key string Path to Redis client key (e.g. /etc/certs/redis/client.crt). + --redis-compress string Enable compression for data sent to Redis with the required compression algorithm. (possible values: gzip, none) (default "gzip") + --redis-insecure-skip-tls-verify Skip Redis server certificate validation. + --redis-use-tls Use TLS when connecting to Redis. + --redisdb int Redis database. + --repo-server string Repo server address. (default "argocd-repo-server:8081") + --repo-server-plaintext Disable TLS on connections to repo server + --repo-server-strict-tls Whether to use strict validation of the TLS cert presented by the repo server + --repo-server-timeout-seconds int Repo server RPC call timeout seconds. (default 60) + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + --self-heal-timeout-seconds int Specifies timeout between application self heal attempts (default 5) + --sentinel stringArray Redis sentinel hostname and port (e.g. argocd-redis-ha-announce-0:6379). + --sentinelmaster string Redis sentinel master group name. 
(default "master") + --server string The address and port of the Kubernetes API server + --sharding-method string Enables choice of sharding method. Supported sharding methods are : [legacy, round-robin] (default "legacy") + --status-processors int Number of application status processors (default 20) + --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use + --username string Username for basic authentication to the API server ``` diff --git a/docs/operator-manual/server-commands/argocd-dex.md b/docs/operator-manual/server-commands/argocd-dex.md index 334fa49a5971f..af0eeae4a7604 100644 --- a/docs/operator-manual/server-commands/argocd-dex.md +++ b/docs/operator-manual/server-commands/argocd-dex.md @@ -1,3 +1,5 @@ +# `argocd-dex` Command Reference + ## argocd-dex argocd-dex tools used by Argo CD @@ -13,9 +15,7 @@ argocd-dex [flags] ### Options ``` - -h, --help help for argocd-dex - --logformat string Set the logging format. One of: text|json (default "text") - --loglevel string Set the logging level. 
One of: debug|info|warn|error (default "info") + -h, --help help for argocd-dex ``` ### SEE ALSO diff --git a/docs/operator-manual/server-commands/argocd-dex_gendexcfg.md b/docs/operator-manual/server-commands/argocd-dex_gendexcfg.md index 0fcfcb9b0b84d..1e784e94a2620 100644 --- a/docs/operator-manual/server-commands/argocd-dex_gendexcfg.md +++ b/docs/operator-manual/server-commands/argocd-dex_gendexcfg.md @@ -1,3 +1,5 @@ +# `argocd-dex gendexcfg` Command Reference + ## argocd-dex gendexcfg Generates a dex config from Argo CD settings @@ -11,17 +13,22 @@ argocd-dex gendexcfg [flags] ``` --as string Username to impersonate for the operation --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation --certificate-authority string Path to a cert file for the certificate authority --client-certificate string Path to a client certificate file for TLS --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-tls Disable TLS on the HTTP endpoint -h, --help help for gendexcfg --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure --kubeconfig string Path to a kube config. Only required if out-of-cluster + --logformat string Set the logging format. One of: text|json (default "text") + --loglevel string Set the logging level. One of: debug|info|warn|error (default "info") -n, --namespace string If present, the namespace scope for this CLI request -o, --out string Output to the specified file instead of stdout --password string Password for basic authentication to the API server + --proxy-url string If provided, this URL will be used to connect via proxy --request-timeout string The length of time to wait before giving up on a single server request. 
Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") --server string The address and port of the Kubernetes API server --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. diff --git a/docs/operator-manual/server-commands/argocd-dex_rundex.md b/docs/operator-manual/server-commands/argocd-dex_rundex.md index 19942ff13cf40..16e2b15abbece 100644 --- a/docs/operator-manual/server-commands/argocd-dex_rundex.md +++ b/docs/operator-manual/server-commands/argocd-dex_rundex.md @@ -1,3 +1,5 @@ +# `argocd-dex rundex` Command Reference + ## argocd-dex rundex Runs dex generating a config using settings from the Argo CD configmap and secret @@ -11,16 +13,21 @@ argocd-dex rundex [flags] ``` --as string Username to impersonate for the operation --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation --certificate-authority string Path to a cert file for the certificate authority --client-certificate string Path to a client certificate file for TLS --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-tls Disable TLS on the HTTP endpoint -h, --help help for rundex --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure --kubeconfig string Path to a kube config. Only required if out-of-cluster + --logformat string Set the logging format. One of: text|json (default "text") + --loglevel string Set the logging level. 
One of: debug|info|warn|error (default "info") -n, --namespace string If present, the namespace scope for this CLI request --password string Password for basic authentication to the API server + --proxy-url string If provided, this URL will be used to connect via proxy --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") --server string The address and port of the Kubernetes API server --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. diff --git a/docs/operator-manual/server-commands/argocd-repo-server.md b/docs/operator-manual/server-commands/argocd-repo-server.md index 4294eb4c83c62..33ecaf7c76dd4 100644 --- a/docs/operator-manual/server-commands/argocd-repo-server.md +++ b/docs/operator-manual/server-commands/argocd-repo-server.md @@ -1,3 +1,5 @@ +# `argocd-repo-server` Command Reference + ## argocd-repo-server Run ArgoCD Repository Server @@ -13,21 +15,39 @@ argocd-repo-server [flags] ### Options ``` - --default-cache-expiration duration Cache expiration default (default 24h0m0s) - --disable-tls Disable TLS on the gRPC endpoint - -h, --help help for argocd-repo-server - --logformat string Set the logging format. One of: text|json (default "text") - --loglevel string Set the logging level. One of: debug|info|warn|error (default "info") - --metrics-port int Start metrics server on given port (default 8084) - --parallelismlimit int Limit on number of concurrent manifests generate requests. Any value less the 1 means no limit. - --port int Listen on given port for incoming connections (default 8081) - --redis string Redis server hostname and port (e.g. argocd-redis:6379). - --redisdb int Redis database. - --repo-cache-expiration duration Cache expiration for repo state, incl. 
app lists, app details, manifest generation, revision meta-data (default 24h0m0s) - --sentinel stringArray Redis sentinel hostname and port (e.g. argocd-redis-ha-announce-0:6379). - --sentinelmaster string Redis sentinel master group name. (default "master") - --tlsciphers string The list of acceptable ciphers to be used when establishing TLS connections. Use 'list' to list available ciphers. (default "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:TLS_RSA_WITH_AES_256_GCM_SHA384") - --tlsmaxversion string The maximum SSL/TLS version that is acceptable (one of: 1.0|1.1|1.2|1.3) (default "1.3") - --tlsminversion string The minimum SSL/TLS version that is acceptable (one of: 1.0|1.1|1.2|1.3) (default "1.2") + --address string Listen on given address for incoming connections (default "0.0.0.0") + --allow-oob-symlinks Allow out-of-bounds symlinks in repositories (not recommended) + --default-cache-expiration duration Cache expiration default (default 24h0m0s) + --disable-helm-manifest-max-extracted-size Disable maximum size of helm manifest archives when extracted + --disable-tls Disable TLS on the gRPC endpoint + --helm-manifest-max-extracted-size string Maximum size of helm manifest archives when extracted (default "1G") + -h, --help help for argocd-repo-server + --logformat string Set the logging format. One of: text|json (default "text") + --loglevel string Set the logging level. One of: debug|info|warn|error (default "info") + --max-combined-directory-manifests-size string Max combined size of manifest files in a directory-type Application (default "10M") + --metrics-address string Listen on given address for metrics (default "0.0.0.0") + --metrics-port int Start metrics server on given port (default 8084) + --otlp-address string OpenTelemetry collector address to send traces to + --otlp-attrs strings List of OpenTelemetry collector extra attrs when send traces, each attribute is separated by a colon(e.g. 
key:value) + --parallelismlimit int Limit on number of concurrent manifests generate requests. Any value less the 1 means no limit. + --plugin-tar-exclude stringArray Globs to filter when sending tarballs to plugins. + --port int Listen on given port for incoming connections (default 8081) + --redis string Redis server hostname and port (e.g. argocd-redis:6379). + --redis-ca-certificate string Path to Redis server CA certificate (e.g. /etc/certs/redis/ca.crt). If not specified, system trusted CAs will be used for server certificate validation. + --redis-client-certificate string Path to Redis client certificate (e.g. /etc/certs/redis/client.crt). + --redis-client-key string Path to Redis client key (e.g. /etc/certs/redis/client.crt). + --redis-compress string Enable compression for data sent to Redis with the required compression algorithm. (possible values: gzip, none) (default "gzip") + --redis-insecure-skip-tls-verify Skip Redis server certificate validation. + --redis-use-tls Use TLS when connecting to Redis. + --redisdb int Redis database. + --repo-cache-expiration duration Cache expiration for repo state, incl. app lists, app details, manifest generation, revision meta-data (default 24h0m0s) + --revision-cache-expiration duration Cache expiration for cached revision (default 3m0s) + --sentinel stringArray Redis sentinel hostname and port (e.g. argocd-redis-ha-announce-0:6379). + --sentinelmaster string Redis sentinel master group name. (default "master") + --streamed-manifest-max-extracted-size string Maximum size of streamed manifest archives when extracted (default "1G") + --streamed-manifest-max-tar-size string Maximum size of streamed manifest archives (default "100M") + --tlsciphers string The list of acceptable ciphers to be used when establishing TLS connections. Use 'list' to list available ciphers. 
(default "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:TLS_RSA_WITH_AES_256_GCM_SHA384") + --tlsmaxversion string The maximum SSL/TLS version that is acceptable (one of: 1.0|1.1|1.2|1.3) (default "1.3") + --tlsminversion string The minimum SSL/TLS version that is acceptable (one of: 1.0|1.1|1.2|1.3) (default "1.2") ``` diff --git a/docs/operator-manual/server-commands/argocd-server.md b/docs/operator-manual/server-commands/argocd-server.md index ac294d1705bbf..d39459ad181d6 100644 --- a/docs/operator-manual/server-commands/argocd-server.md +++ b/docs/operator-manual/server-commands/argocd-server.md @@ -1,3 +1,5 @@ +# `argocd-server` Command Reference + ## argocd-server Run the ArgoCD API server @@ -13,20 +15,27 @@ argocd-server [flags] ### Options ``` + --address string Listen on given address (default "0.0.0.0") --app-state-cache-expiration duration Cache expiration for app state (default 1h0m0s) + --application-namespaces strings List of additional namespaces where application resources can be managed in --as string Username to impersonate for the operation --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation --basehref string Value for base href in index.html. Used if Argo CD is running behind reverse proxy under subpath different from / (default "/") --certificate-authority string Path to a cert file for the certificate authority --client-certificate string Path to a client certificate file for TLS --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --connection-status-cache-expiration duration Cache expiration for cluster/repo connection status (default 1h0m0s) + --content-security-policy value Set Content-Security-Policy header in HTTP responses to value. To disable, set to "". 
(default "frame-ancestors 'self';") --context string The name of the kubeconfig context to use --default-cache-expiration duration Cache expiration default (default 24h0m0s) - --dex-server string Dex server address (default "http://argocd-dex-server:5556") + --dex-server string Dex server address (default "argocd-dex-server:5556") + --dex-server-plaintext Use a plaintext client (non-TLS) to connect to dex server + --dex-server-strict-tls Perform strict validation of TLS certificates when connecting to dex server --disable-auth Disable client authentication - --enable-gzip Enable GZIP compression + --enable-gzip Enable GZIP compression (default true) + --enable-proxy-extension Enable Proxy Extension feature --gloglevel int Set the glog logging level -h, --help help for argocd-server --insecure Run server without TLS @@ -35,12 +44,22 @@ argocd-server [flags] --logformat string Set the logging format. One of: text|json (default "text") --login-attempts-expiration duration Cache expiration for failed login attempts (default 24h0m0s) --loglevel string Set the logging level. One of: debug|info|warn|error (default "info") + --metrics-address string Listen for metrics on given address (default "0.0.0.0") --metrics-port int Start metrics on given port (default 8083) -n, --namespace string If present, the namespace scope for this CLI request --oidc-cache-expiration duration Cache expiration for OIDC state (default 3m0s) + --otlp-address string OpenTelemetry collector address to send traces to + --otlp-attrs strings List of OpenTelemetry collector extra attrs when send traces, each attribute is separated by a colon(e.g. key:value) --password string Password for basic authentication to the API server --port int Listen on given port (default 8080) + --proxy-url string If provided, this URL will be used to connect via proxy --redis string Redis server hostname and port (e.g. argocd-redis:6379). + --redis-ca-certificate string Path to Redis server CA certificate (e.g. 
/etc/certs/redis/ca.crt). If not specified, system trusted CAs will be used for server certificate validation. + --redis-client-certificate string Path to Redis client certificate (e.g. /etc/certs/redis/client.crt). + --redis-client-key string Path to Redis client key (e.g. /etc/certs/redis/client.crt). + --redis-compress string Enable compression for data sent to Redis with the required compression algorithm. (possible values: gzip, none) (default "gzip") + --redis-insecure-skip-tls-verify Skip Redis server certificate validation. + --redis-use-tls Use TLS when connecting to Redis. --redisdb int Redis database. --repo-server string Repo server address (default "argocd-repo-server:8081") --repo-server-plaintext Use a plaintext client (non-TLS) to connect to repository server @@ -51,7 +70,7 @@ argocd-server [flags] --sentinel stringArray Redis sentinel hostname and port (e.g. argocd-redis-ha-announce-0:6379). --sentinelmaster string Redis sentinel master group name. (default "master") --server string The address and port of the Kubernetes API server - --staticassets string Static assets directory path + --staticassets string Directory path that contains additional static assets (default "/shared/app") --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. --tlsciphers string The list of acceptable ciphers to be used when establishing TLS connections. Use 'list' to list available ciphers. 
(default "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:TLS_RSA_WITH_AES_256_GCM_SHA384") --tlsmaxversion string The maximum SSL/TLS version that is acceptable (one of: 1.0|1.1|1.2|1.3) (default "1.3") diff --git a/docs/operator-manual/server-commands/argocd-server_version.md b/docs/operator-manual/server-commands/argocd-server_version.md index 28e96fd9e76da..2d7d9d1151e8a 100644 --- a/docs/operator-manual/server-commands/argocd-server_version.md +++ b/docs/operator-manual/server-commands/argocd-server_version.md @@ -1,3 +1,5 @@ +# `argocd-server version` Command Reference + ## argocd-server version Print version information @@ -18,6 +20,7 @@ argocd-server version [flags] ``` --as string Username to impersonate for the operation --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation --certificate-authority string Path to a cert file for the certificate authority --client-certificate string Path to a client certificate file for TLS --client-key string Path to a client key file for TLS @@ -27,6 +30,7 @@ argocd-server version [flags] --kubeconfig string Path to a kube config. Only required if out-of-cluster -n, --namespace string If present, the namespace scope for this CLI request --password string Password for basic authentication to the API server + --proxy-url string If provided, this URL will be used to connect via proxy --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") --server string The address and port of the Kubernetes API server --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. 
diff --git a/docs/operator-manual/server-commands/argocd-util.md b/docs/operator-manual/server-commands/argocd-util.md deleted file mode 100644 index 9a4e9d8e0e732..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util.md +++ /dev/null @@ -1,31 +0,0 @@ -## argocd-util - -argocd-util tools used by Argo CD - -### Synopsis - -argocd-util has internal utility tools used by Argo CD - -``` -argocd-util [flags] -``` - -### Options - -``` - -h, --help help for argocd-util - --logformat string Set the logging format. One of: text|json (default "text") - --loglevel string Set the logging level. One of: debug|info|warn|error (default "info") -``` - -### SEE ALSO - -* [argocd-util app](argocd-util_app.md) - Manage applications configuration -* [argocd-util cluster](argocd-util_cluster.md) - Manage clusters configuration -* [argocd-util export](argocd-util_export.md) - Export all Argo CD data to stdout (default) or a file -* [argocd-util import](argocd-util_import.md) - Import Argo CD data from stdin (specify `-') or a file -* [argocd-util proj](argocd-util_proj.md) - Manage projects configuration -* [argocd-util repo](argocd-util_repo.md) - Manage repositories configuration -* [argocd-util settings](argocd-util_settings.md) - Provides set of commands for settings validation and troubleshooting -* [argocd-util version](argocd-util_version.md) - Print version information - diff --git a/docs/operator-manual/server-commands/argocd-util_app.md b/docs/operator-manual/server-commands/argocd-util_app.md deleted file mode 100644 index a26c078cd7aa7..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_app.md +++ /dev/null @@ -1,21 +0,0 @@ -## argocd-util app - -Manage applications configuration - -``` -argocd-util app [flags] -``` - -### Options - -``` - -h, --help help for app -``` - -### SEE ALSO - -* [argocd-util](argocd-util.md) - argocd-util tools used by Argo CD -* [argocd-util app diff-reconcile-results](argocd-util_app_diff-reconcile-results.md) - 
Compare results of two reconciliations and print diff. -* [argocd-util app generate-spec](argocd-util_app_generate-spec.md) - Generate declarative config for an application -* [argocd-util app get-reconcile-results](argocd-util_app_get-reconcile-results.md) - Reconcile all applications and stores reconciliation summary in the specified file. - diff --git a/docs/operator-manual/server-commands/argocd-util_app_diff-reconcile-results.md b/docs/operator-manual/server-commands/argocd-util_app_diff-reconcile-results.md deleted file mode 100644 index 105fe09fe5693..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_app_diff-reconcile-results.md +++ /dev/null @@ -1,18 +0,0 @@ -## argocd-util app diff-reconcile-results - -Compare results of two reconciliations and print diff. - -``` -argocd-util app diff-reconcile-results PATH1 PATH2 [flags] -``` - -### Options - -``` - -h, --help help for diff-reconcile-results -``` - -### SEE ALSO - -* [argocd-util app](argocd-util_app.md) - Manage applications configuration - diff --git a/docs/operator-manual/server-commands/argocd-util_app_generate-spec.md b/docs/operator-manual/server-commands/argocd-util_app_generate-spec.md deleted file mode 100644 index 5d9c2b03fe29d..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_app_generate-spec.md +++ /dev/null @@ -1,90 +0,0 @@ -## argocd-util app generate-spec - -Generate declarative config for an application - -``` -argocd-util app generate-spec APPNAME [flags] -``` - -### Examples - -``` - - # Generate declarative config for a directory app - argocd-util app generate-spec guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --directory-recurse - - # Generate declarative config for a Jsonnet app - argocd-util app generate-spec jsonnet-guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path jsonnet-guestbook --dest-namespace default 
--dest-server https://kubernetes.default.svc --jsonnet-ext-str replicas=2 - - # Generate declarative config for a Helm app - argocd-util app generate-spec helm-guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path helm-guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --helm-set replicaCount=2 - - # Generate declarative config for a Helm app from a Helm repo - argocd-util app generate-spec nginx-ingress --repo https://charts.helm.sh/stable --helm-chart nginx-ingress --revision 1.24.3 --dest-namespace default --dest-server https://kubernetes.default.svc - - # Generate declarative config for a Kustomize app - argocd-util app generate-spec kustomize-guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path kustomize-guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --kustomize-image gcr.io/heptio-images/ks-guestbook-demo:0.1 - - # Generate declarative config for a app using a custom tool: - argocd-util app generate-spec ksane --repo https://github.com/argoproj/argocd-example-apps.git --path plugins/kasane --dest-namespace default --dest-server https://kubernetes.default.svc --config-management-plugin kasane - -``` - -### Options - -``` - --allow-empty Set allow zero live resources when sync is automated - --auto-prune Set automatic pruning when sync is automated - --config-management-plugin string Config management plugin name - --dest-name string K8s cluster Name (e.g. minikube) - --dest-namespace string K8s target namespace (overrides the namespace specified in the ksonnet app.yaml) - --dest-server string K8s cluster URL (e.g. 
https://kubernetes.default.svc) - --directory-exclude string Set glob expression used to exclude files from application source path - --directory-include string Set glob expression used to include files from application source path - --directory-recurse Recurse directory - --env string Application environment to monitor - -f, --file string Filename or URL to Kubernetes manifests for the app - --helm-chart string Helm Chart name - --helm-set stringArray Helm set values on the command line (can be repeated to set several values: --helm-set key1=val1 --helm-set key2=val2) - --helm-set-file stringArray Helm set values from respective files specified via the command line (can be repeated to set several values: --helm-set-file key1=path1 --helm-set-file key2=path2) - --helm-set-string stringArray Helm set STRING values on the command line (can be repeated to set several values: --helm-set-string key1=val1 --helm-set-string key2=val2) - --helm-version string Helm version - -h, --help help for generate-spec - --jsonnet-ext-var-code stringArray Jsonnet ext var - --jsonnet-ext-var-str stringArray Jsonnet string ext var - --jsonnet-libs stringArray Additional jsonnet libs (prefixed by repoRoot) - --jsonnet-tla-code stringArray Jsonnet top level code arguments - --jsonnet-tla-str stringArray Jsonnet top level string arguments - --kustomize-common-annotation stringArray Set common labels in Kustomize - --kustomize-common-label stringArray Set common labels in Kustomize - --kustomize-image stringArray Kustomize images (e.g. --kustomize-image node:8.15.0 --kustomize-image mysql=mariadb,alpine@sha256:24a0c4b4a4c0eb97a1aabb8e29f18e917d05abfe1b7a7c07857230879ce7d3d) - --kustomize-version string Kustomize version - -l, --label stringArray Labels to apply to the app - --name string A name for the app, ignored if a file is set (DEPRECATED) - --nameprefix string Kustomize nameprefix - --namesuffix string Kustomize namesuffix - -o, --output string Output format. 
One of: json|yaml (default "yaml") - -p, --parameter stringArray set a parameter override (e.g. -p guestbook=image=example/guestbook:latest) - --path string Path in repository to the app directory, ignored if a file is set - --plugin-env stringArray Additional plugin envs - --project string Application project name - --release-name string Helm release-name - --repo string Repository URL, ignored if a file is set - --revision string The tracking source branch, tag, commit or Helm chart version the application will sync to - --revision-history-limit int How many items to keep in revision history (default 10) - --self-heal Set self healing when sync is automated - --sync-option Prune=false Add or remove a sync option, e.g add Prune=false. Remove using `!` prefix, e.g. `!Prune=false` - --sync-policy string Set the sync policy (one of: none, automated (aliases of automated: auto, automatic)) - --sync-retry-backoff-duration duration Sync retry backoff base duration. Input needs to be a duration (e.g. 2m, 1h) (default 5s) - --sync-retry-backoff-factor int Factor multiplies the base duration after each failed sync retry (default 2) - --sync-retry-backoff-max-duration duration Max sync retry backoff duration. Input needs to be a duration (e.g. 
2m, 1h) (default 3m0s) - --sync-retry-limit int Max number of allowed sync retries - --validate Validation of repo and cluster (default true) - --values stringArray Helm values file(s) to use - --values-literal-file string Filename or URL to import as a literal Helm values block -``` - -### SEE ALSO - -* [argocd-util app](argocd-util_app.md) - Manage applications configuration - diff --git a/docs/operator-manual/server-commands/argocd-util_app_get-reconcile-results.md b/docs/operator-manual/server-commands/argocd-util_app_get-reconcile-results.md deleted file mode 100644 index 286b2a7adda8b..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_app_get-reconcile-results.md +++ /dev/null @@ -1,39 +0,0 @@ -## argocd-util app get-reconcile-results - -Reconcile all applications and stores reconciliation summary in the specified file. - -``` -argocd-util app get-reconcile-results PATH [flags] -``` - -### Options - -``` - --as string Username to impersonate for the operation - --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. - --certificate-authority string Path to a cert file for the certificate authority - --client-certificate string Path to a client certificate file for TLS - --client-key string Path to a client key file for TLS - --cluster string The name of the kubeconfig cluster to use - --context string The name of the kubeconfig context to use - -h, --help help for get-reconcile-results - --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - --kubeconfig string Path to a kube config. 
Only required if out-of-cluster - --l string Label selector - -n, --namespace string If present, the namespace scope for this CLI request - --o string Output format (yaml|json) (default "yaml") - --password string Password for basic authentication to the API server - --refresh If set to true then recalculates apps reconciliation - --repo-server string Repo server address. - --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") - --server string The address and port of the Kubernetes API server - --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. - --token string Bearer token for authentication to the API server - --user string The name of the kubeconfig user to use - --username string Username for basic authentication to the API server -``` - -### SEE ALSO - -* [argocd-util app](argocd-util_app.md) - Manage applications configuration - diff --git a/docs/operator-manual/server-commands/argocd-util_apps.md b/docs/operator-manual/server-commands/argocd-util_apps.md deleted file mode 100644 index 92a27b27860be..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_apps.md +++ /dev/null @@ -1,20 +0,0 @@ -## argocd-util apps - -Utility commands operate on ArgoCD applications - -``` -argocd-util apps [flags] -``` - -### Options - -``` - -h, --help help for apps -``` - -### SEE ALSO - -* [argocd-util](argocd-util.md) - argocd-util tools used by Argo CD -* [argocd-util apps diff-reconcile-results](argocd-util_apps_diff-reconcile-results.md) - Compare results of two reconciliations and print diff. -* [argocd-util apps get-reconcile-results](argocd-util_apps_get-reconcile-results.md) - Reconcile all applications and stores reconciliation summary in the specified file. 
- diff --git a/docs/operator-manual/server-commands/argocd-util_apps_diff-reconcile-results.md b/docs/operator-manual/server-commands/argocd-util_apps_diff-reconcile-results.md deleted file mode 100644 index 290a25190b7a4..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_apps_diff-reconcile-results.md +++ /dev/null @@ -1,18 +0,0 @@ -## argocd-util apps diff-reconcile-results - -Compare results of two reconciliations and print diff. - -``` -argocd-util apps diff-reconcile-results PATH1 PATH2 [flags] -``` - -### Options - -``` - -h, --help help for diff-reconcile-results -``` - -### SEE ALSO - -* [argocd-util apps](argocd-util_apps.md) - Utility commands operate on ArgoCD applications - diff --git a/docs/operator-manual/server-commands/argocd-util_apps_get-reconcile-results.md b/docs/operator-manual/server-commands/argocd-util_apps_get-reconcile-results.md deleted file mode 100644 index f8aaac48a5fd5..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_apps_get-reconcile-results.md +++ /dev/null @@ -1,39 +0,0 @@ -## argocd-util apps get-reconcile-results - -Reconcile all applications and stores reconciliation summary in the specified file. - -``` -argocd-util apps get-reconcile-results PATH [flags] -``` - -### Options - -``` - --as string Username to impersonate for the operation - --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. - --certificate-authority string Path to a cert file for the certificate authority - --client-certificate string Path to a client certificate file for TLS - --client-key string Path to a client key file for TLS - --cluster string The name of the kubeconfig cluster to use - --context string The name of the kubeconfig context to use - -h, --help help for get-reconcile-results - --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure - --kubeconfig string Path to a kube config. Only required if out-of-cluster - --l string Label selector - -n, --namespace string If present, the namespace scope for this CLI request - --o string Output format (yaml|json) (default "yaml") - --password string Password for basic authentication to the API server - --refresh If set to true then recalculates apps reconciliation - --repo-server string Repo server address. - --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") - --server string The address and port of the Kubernetes API server - --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. - --token string Bearer token for authentication to the API server - --user string The name of the kubeconfig user to use - --username string Username for basic authentication to the API server -``` - -### SEE ALSO - -* [argocd-util apps](argocd-util_apps.md) - Utility commands operate on ArgoCD applications - diff --git a/docs/operator-manual/server-commands/argocd-util_cluster.md b/docs/operator-manual/server-commands/argocd-util_cluster.md deleted file mode 100644 index d64d60f272dde..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_cluster.md +++ /dev/null @@ -1,21 +0,0 @@ -## argocd-util cluster - -Manage clusters configuration - -``` -argocd-util cluster [flags] -``` - -### Options - -``` - -h, --help help for cluster -``` - -### SEE ALSO - -* [argocd-util](argocd-util.md) - argocd-util tools used by Argo CD -* [argocd-util cluster generate-spec](argocd-util_cluster_generate-spec.md) - Generate declarative config for a cluster -* [argocd-util cluster kubeconfig](argocd-util_cluster_kubeconfig.md) - Generates kubeconfig for 
the specified cluster -* [argocd-util cluster stats](argocd-util_cluster_stats.md) - Prints information cluster statistics and inferred shard number - diff --git a/docs/operator-manual/server-commands/argocd-util_cluster_generate-spec.md b/docs/operator-manual/server-commands/argocd-util_cluster_generate-spec.md deleted file mode 100644 index 780a8783f2914..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_cluster_generate-spec.md +++ /dev/null @@ -1,32 +0,0 @@ -## argocd-util cluster generate-spec - -Generate declarative config for a cluster - -``` -argocd-util cluster generate-spec CONTEXT [flags] -``` - -### Options - -``` - --aws-cluster-name string AWS Cluster name if set then aws cli eks token command will be used to access cluster - --aws-role-arn string Optional AWS role arn. If set then AWS IAM Authenticator assumes a role to perform cluster operations instead of the default AWS credential provider chain. - --bearer-token string Authentication token that should be used to access K8S API server - --exec-command string Command to run to provide client credentials to the cluster. You may need to build a custom ArgoCD image to ensure the command is available at runtime. 
- --exec-command-api-version string Preferred input version of the ExecInfo for the --exec-command executable - --exec-command-args stringArray Arguments to supply to the --exec-command executable - --exec-command-env stringToString Environment vars to set when running the --exec-command executable (default []) - --exec-command-install-hint string Text shown to the user when the --exec-command executable doesn't seem to be present - -h, --help help for generate-spec - --in-cluster Indicates Argo CD resides inside this cluster and should connect using the internal k8s hostname (kubernetes.default.svc) - --kubeconfig string use a particular kubeconfig file - --name string Overwrite the cluster name - --namespace stringArray List of namespaces which are allowed to manage - -o, --output string Output format. One of: json|yaml (default "yaml") - --shard int Cluster shard number; inferred from hostname if not set (default -1) -``` - -### SEE ALSO - -* [argocd-util cluster](argocd-util_cluster.md) - Manage clusters configuration - diff --git a/docs/operator-manual/server-commands/argocd-util_cluster_kubeconfig.md b/docs/operator-manual/server-commands/argocd-util_cluster_kubeconfig.md deleted file mode 100644 index 302be65813470..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_cluster_kubeconfig.md +++ /dev/null @@ -1,35 +0,0 @@ -## argocd-util cluster kubeconfig - -Generates kubeconfig for the specified cluster - -``` -argocd-util cluster kubeconfig CLUSTER_URL OUTPUT_PATH [flags] -``` - -### Options - -``` - --as string Username to impersonate for the operation - --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. 
- --certificate-authority string Path to a cert file for the certificate authority - --client-certificate string Path to a client certificate file for TLS - --client-key string Path to a client key file for TLS - --cluster string The name of the kubeconfig cluster to use - --context string The name of the kubeconfig context to use - -h, --help help for kubeconfig - --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - --kubeconfig string Path to a kube config. Only required if out-of-cluster - -n, --namespace string If present, the namespace scope for this CLI request - --password string Password for basic authentication to the API server - --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") - --server string The address and port of the Kubernetes API server - --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. 
- --token string Bearer token for authentication to the API server - --user string The name of the kubeconfig user to use - --username string Username for basic authentication to the API server -``` - -### SEE ALSO - -* [argocd-util cluster](argocd-util_cluster.md) - Manage clusters configuration - diff --git a/docs/operator-manual/server-commands/argocd-util_cluster_stats.md b/docs/operator-manual/server-commands/argocd-util_cluster_stats.md deleted file mode 100644 index 2271a7576a420..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_cluster_stats.md +++ /dev/null @@ -1,44 +0,0 @@ -## argocd-util cluster stats - -Prints information cluster statistics and inferred shard number - -``` -argocd-util cluster stats [flags] -``` - -### Options - -``` - --app-state-cache-expiration duration Cache expiration for app state (default 1h0m0s) - --as string Username to impersonate for the operation - --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. - --certificate-authority string Path to a cert file for the certificate authority - --client-certificate string Path to a client certificate file for TLS - --client-key string Path to a client key file for TLS - --cluster string The name of the kubeconfig cluster to use - --context string The name of the kubeconfig context to use - --default-cache-expiration duration Cache expiration default (default 24h0m0s) - -h, --help help for stats - --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - --kubeconfig string Path to a kube config. Only required if out-of-cluster - -n, --namespace string If present, the namespace scope for this CLI request - --password string Password for basic authentication to the API server - --port-forward-redis Automatically port-forward ha proxy redis from current namespace? 
(default true) - --redis string Redis server hostname and port (e.g. argocd-redis:6379). - --redisdb int Redis database. - --replicas int Application controller replicas count. Inferred from number of running controller pods if not specified - --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") - --sentinel stringArray Redis sentinel hostname and port (e.g. argocd-redis-ha-announce-0:6379). - --sentinelmaster string Redis sentinel master group name. (default "master") - --server string The address and port of the Kubernetes API server - --shard int Cluster shard filter (default -1) - --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. - --token string Bearer token for authentication to the API server - --user string The name of the kubeconfig user to use - --username string Username for basic authentication to the API server -``` - -### SEE ALSO - -* [argocd-util cluster](argocd-util_cluster.md) - Manage clusters configuration - diff --git a/docs/operator-manual/server-commands/argocd-util_config.md b/docs/operator-manual/server-commands/argocd-util_config.md deleted file mode 100644 index b7e711501dcaa..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_config.md +++ /dev/null @@ -1,22 +0,0 @@ -## argocd-util config - -Generate declarative configuration files - -``` -argocd-util config [flags] -``` - -### Options - -``` - -h, --help help for config -``` - -### SEE ALSO - -* [argocd-util](argocd-util.md) - argocd-util tools used by Argo CD -* [argocd-util config app](argocd-util_config_app.md) - Generate declarative config for an application -* [argocd-util config cluster](argocd-util_config_cluster.md) - Generate declarative config for a cluster -* 
[argocd-util config proj](argocd-util_config_proj.md) - Generate declarative config for a project -* [argocd-util config repo](argocd-util_config_repo.md) - Generate declarative config for a repo - diff --git a/docs/operator-manual/server-commands/argocd-util_config_app.md b/docs/operator-manual/server-commands/argocd-util_config_app.md deleted file mode 100644 index 1908198c9a612..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_config_app.md +++ /dev/null @@ -1,89 +0,0 @@ -## argocd-util config app - -Generate declarative config for an application - -``` -argocd-util config app APPNAME [flags] -``` - -### Examples - -``` - - # Generate declarative config for a directory app - argocd-util config app guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --directory-recurse - - # Generate declarative config for a Jsonnet app - argocd-util config app jsonnet-guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path jsonnet-guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --jsonnet-ext-str replicas=2 - - # Generate declarative config for a Helm app - argocd-util config app helm-guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path helm-guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --helm-set replicaCount=2 - - # Generate declarative config for a Helm app from a Helm repo - argocd-util config app nginx-ingress --repo https://kubernetes-charts.storage.googleapis.com --helm-chart nginx-ingress --revision 1.24.3 --dest-namespace default --dest-server https://kubernetes.default.svc - - # Generate declarative config for a Kustomize app - argocd-util config app kustomize-guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path kustomize-guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --kustomize-image 
gcr.io/heptio-images/ks-guestbook-demo:0.1 - - # Generate declarative config for a app using a custom tool: - argocd-util config app ksane --repo https://github.com/argoproj/argocd-example-apps.git --path plugins/kasane --dest-namespace default --dest-server https://kubernetes.default.svc --config-management-plugin kasane - -``` - -### Options - -``` - --allow-empty Set allow zero live resources when sync is automated - --auto-prune Set automatic pruning when sync is automated - --config-management-plugin string Config management plugin name - --dest-name string K8s cluster Name (e.g. minikube) - --dest-namespace string K8s target namespace (overrides the namespace specified in the ksonnet app.yaml) - --dest-server string K8s cluster URL (e.g. https://kubernetes.default.svc) - --directory-exclude string Set glob expression used to exclude files from application source path - --directory-include string Set glob expression used to include files from application source path - --directory-recurse Recurse directory - --env string Application environment to monitor - --helm-chart string Helm Chart name - --helm-set stringArray Helm set values on the command line (can be repeated to set several values: --helm-set key1=val1 --helm-set key2=val2) - --helm-set-file stringArray Helm set values from respective files specified via the command line (can be repeated to set several values: --helm-set-file key1=path1 --helm-set-file key2=path2) - --helm-set-string stringArray Helm set STRING values on the command line (can be repeated to set several values: --helm-set-string key1=val1 --helm-set-string key2=val2) - --helm-version string Helm version - -h, --help help for app - --jsonnet-ext-var-code stringArray Jsonnet ext var - --jsonnet-ext-var-str stringArray Jsonnet string ext var - --jsonnet-libs stringArray Additional jsonnet libs (prefixed by repoRoot) - --jsonnet-tla-code stringArray Jsonnet top level code arguments - --jsonnet-tla-str stringArray Jsonnet top level string 
arguments - --kustomize-common-annotation stringArray Set common labels in Kustomize - --kustomize-common-label stringArray Set common labels in Kustomize - --kustomize-image stringArray Kustomize images (e.g. --kustomize-image node:8.15.0 --kustomize-image mysql=mariadb,alpine@sha256:24a0c4b4a4c0eb97a1aabb8e29f18e917d05abfe1b7a7c07857230879ce7d3d) - --kustomize-version string Kustomize version - -l, --label stringArray Labels to apply to the app - --name string A name for the app, ignored if a file is set (DEPRECATED) - --nameprefix string Kustomize nameprefix - --namesuffix string Kustomize namesuffix - -o, --output string Output format. One of: json|yaml (default "yaml") - -p, --parameter stringArray set a parameter override (e.g. -p guestbook=image=example/guestbook:latest) - --path string Path in repository to the app directory, ignored if a file is set - --plugin-env stringArray Additional plugin envs - --project string Application project name - --release-name string Helm release-name - --repo string Repository URL, ignored if a file is set - --retry-backoff-duration duration Retry backoff base duration. Input needs to be a duration (e.g. 2m, 1h) (default 5s) - --retry-backoff-factor int Factor multiplies the base duration after each failed retry (default 2) - --retry-backoff-max-duration duration Max retry backoff duration. Input needs to be a duration (e.g. 2m, 1h) (default 3m0s) - --retry-limit int Max number of allowed sync retries - --revision string The tracking source branch, tag, commit or Helm chart version the application will sync to - --revision-history-limit int How many items to keep in revision history (default 10) - --self-heal Set self healing when sync is automated - --sync-option Prune=false Add or remove a sync option, e.g add Prune=false. Remove using `!` prefix, e.g. 
`!Prune=false` - --sync-policy string Set the sync policy (one of: none, automated (aliases of automated: auto, automatic)) - --validate Validation of repo and cluster (default true) - --values stringArray Helm values file(s) to use - --values-literal-file string Filename or URL to import as a literal Helm values block -``` - -### SEE ALSO - -* [argocd-util config](argocd-util_config.md) - Generate declarative configuration files - diff --git a/docs/operator-manual/server-commands/argocd-util_config_cluster.md b/docs/operator-manual/server-commands/argocd-util_config_cluster.md deleted file mode 100644 index 8bd02356d6402..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_config_cluster.md +++ /dev/null @@ -1,32 +0,0 @@ -## argocd-util config cluster - -Generate declarative config for a cluster - -``` -argocd-util config cluster CONTEXT [flags] -``` - -### Options - -``` - --aws-cluster-name string AWS Cluster name if set then aws cli eks token command will be used to access cluster - --aws-role-arn string Optional AWS role arn. If set then AWS IAM Authenticator assumes a role to perform cluster operations instead of the default AWS credential provider chain. - --bearer-token string Authentication token that should be used to access K8S API server - --exec-command string Command to run to provide client credentials to the cluster. You may need to build a custom ArgoCD image to ensure the command is available at runtime. 
- --exec-command-api-version string Preferred input version of the ExecInfo for the --exec-command executable - --exec-command-args stringArray Arguments to supply to the --exec-command executable - --exec-command-env stringToString Environment vars to set when running the --exec-command executable (default []) - --exec-command-install-hint string Text shown to the user when the --exec-command executable doesn't seem to be present - -h, --help help for cluster - --in-cluster Indicates Argo CD resides inside this cluster and should connect using the internal k8s hostname (kubernetes.default.svc) - --kubeconfig string use a particular kubeconfig file - --name string Overwrite the cluster name - --namespace stringArray List of namespaces which are allowed to manage - -o, --output string Output format. One of: json|yaml (default "yaml") - --shard int Cluster shard number; inferred from hostname if not set (default -1) -``` - -### SEE ALSO - -* [argocd-util config](argocd-util_config.md) - Generate declarative configuration files - diff --git a/docs/operator-manual/server-commands/argocd-util_config_proj.md b/docs/operator-manual/server-commands/argocd-util_config_proj.md deleted file mode 100644 index b43963cc877d2..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_config_proj.md +++ /dev/null @@ -1,25 +0,0 @@ -## argocd-util config proj - -Generate declarative config for a project - -``` -argocd-util config proj PROJECT [flags] -``` - -### Options - -``` - --description string Project description - -d, --dest stringArray Permitted destination server and namespace (e.g. https://192.168.99.100:8443,default) - -h, --help help for proj - --orphaned-resources Enables orphaned resources monitoring - --orphaned-resources-warn Specifies if applications should have a warning condition when orphaned resources detected - -o, --output string Output format. 
One of: json|yaml (default "yaml") - --signature-keys strings GnuPG public key IDs for commit signature verification - -s, --src stringArray Permitted source repository URL -``` - -### SEE ALSO - -* [argocd-util config](argocd-util_config.md) - Generate declarative configuration files - diff --git a/docs/operator-manual/server-commands/argocd-util_config_repo.md b/docs/operator-manual/server-commands/argocd-util_config_repo.md deleted file mode 100644 index 80fec43bf7a7d..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_config_repo.md +++ /dev/null @@ -1,61 +0,0 @@ -## argocd-util config repo - -Generate declarative config for a repo - -``` -argocd-util config repo REPOURL [flags] -``` - -### Examples - -``` - - # Add a Git repository via SSH using a private key for authentication, ignoring the server's host key: - argocd-util config repo git@git.example.com:repos/repo --insecure-ignore-host-key --ssh-private-key-path ~/id_rsa - - # Add a Git repository via SSH on a non-default port - need to use ssh:// style URLs here - argocd-util config repo ssh://git@git.example.com:2222/repos/repo --ssh-private-key-path ~/id_rsa - - # Add a private Git repository via HTTPS using username/password and TLS client certificates: - argocd-util config repo https://git.example.com/repos/repo --username git --password secret --tls-client-cert-path ~/mycert.crt --tls-client-cert-key-path ~/mycert.key - - # Add a private Git repository via HTTPS using username/password without verifying the server's TLS certificate - argocd-util config repo https://git.example.com/repos/repo --username git --password secret --insecure-skip-server-verification - - # Add a public Helm repository named 'stable' via HTTPS - argocd-util config repo https://kubernetes-charts.storage.googleapis.com --type helm --name stable - - # Add a private Helm repository named 'stable' via HTTPS - argocd-util config repo https://kubernetes-charts.storage.googleapis.com --type helm --name stable 
--username test --password test - - # Add a private Helm OCI-based repository named 'stable' via HTTPS - argocd-util config repo helm-oci-registry.cn-zhangjiakou.cr.aliyuncs.com --type helm --name stable --enable-oci --username test --password test - -``` - -### Options - -``` - --enable-lfs enable git-lfs (Large File Support) on this repository - --enable-oci enable helm-oci (Helm OCI-Based Repository) - --github-app-enterprise-base-url string base url to use when using GitHub Enterprise (e.g. https://ghe.example.com/api/v3 - --github-app-id int id of the GitHub Application - --github-app-installation-id int installation id of the GitHub Application - --github-app-private-key-path string private key of the GitHub Application - -h, --help help for repo - --insecure-ignore-host-key disables SSH strict host key checking (deprecated, use --insecure-skip-server-verification instead) - --insecure-skip-server-verification disables server certificate and host key checks - --name string name of the repository, mandatory for repositories of type helm - -o, --output string Output format. One of: json|yaml (default "yaml") - --password string password to the repository - --ssh-private-key-path string path to the private ssh key (e.g. 
~/.ssh/id_rsa) - --tls-client-cert-key-path string path to the TLS client cert's key path (must be PEM format) - --tls-client-cert-path string path to the TLS client cert (must be PEM format) - --type string type of the repository, "git" or "helm" (default "git") - --username string username to the repository -``` - -### SEE ALSO - -* [argocd-util config](argocd-util_config.md) - Generate declarative configuration files - diff --git a/docs/operator-manual/server-commands/argocd-util_export.md b/docs/operator-manual/server-commands/argocd-util_export.md deleted file mode 100644 index b52a1fc048a76..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_export.md +++ /dev/null @@ -1,36 +0,0 @@ -## argocd-util export - -Export all Argo CD data to stdout (default) or a file - -``` -argocd-util export [flags] -``` - -### Options - -``` - --as string Username to impersonate for the operation - --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. - --certificate-authority string Path to a cert file for the certificate authority - --client-certificate string Path to a client certificate file for TLS - --client-key string Path to a client key file for TLS - --cluster string The name of the kubeconfig cluster to use - --context string The name of the kubeconfig context to use - -h, --help help for export - --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - --kubeconfig string Path to a kube config. Only required if out-of-cluster - -n, --namespace string If present, the namespace scope for this CLI request - -o, --out string Output to the specified file instead of stdout (default "-") - --password string Password for basic authentication to the API server - --request-timeout string The length of time to wait before giving up on a single server request. 
Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") - --server string The address and port of the Kubernetes API server - --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. - --token string Bearer token for authentication to the API server - --user string The name of the kubeconfig user to use - --username string Username for basic authentication to the API server -``` - -### SEE ALSO - -* [argocd-util](argocd-util.md) - argocd-util tools used by Argo CD - diff --git a/docs/operator-manual/server-commands/argocd-util_gendexcfg.md b/docs/operator-manual/server-commands/argocd-util_gendexcfg.md deleted file mode 100644 index 4f3f0c5a78082..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_gendexcfg.md +++ /dev/null @@ -1,36 +0,0 @@ -## argocd-util gendexcfg - -Generates a dex config from Argo CD settings - -``` -argocd-util gendexcfg [flags] -``` - -### Options - -``` - --as string Username to impersonate for the operation - --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. - --certificate-authority string Path to a cert file for the certificate authority - --client-certificate string Path to a client certificate file for TLS - --client-key string Path to a client key file for TLS - --cluster string The name of the kubeconfig cluster to use - --context string The name of the kubeconfig context to use - -h, --help help for gendexcfg - --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - --kubeconfig string Path to a kube config. 
Only required if out-of-cluster - -n, --namespace string If present, the namespace scope for this CLI request - -o, --out string Output to the specified file instead of stdout - --password string Password for basic authentication to the API server - --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") - --server string The address and port of the Kubernetes API server - --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. - --token string Bearer token for authentication to the API server - --user string The name of the kubeconfig user to use - --username string Username for basic authentication to the API server -``` - -### SEE ALSO - -* [argocd-util](argocd-util.md) - argocd-util tools used by Argo CD - diff --git a/docs/operator-manual/server-commands/argocd-util_import.md b/docs/operator-manual/server-commands/argocd-util_import.md deleted file mode 100644 index 7174769b491e4..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_import.md +++ /dev/null @@ -1,38 +0,0 @@ -## argocd-util import - -Import Argo CD data from stdin (specify `-') or a file - -``` -argocd-util import SOURCE [flags] -``` - -### Options - -``` - --as string Username to impersonate for the operation - --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. 
- --certificate-authority string Path to a cert file for the certificate authority - --client-certificate string Path to a client certificate file for TLS - --client-key string Path to a client key file for TLS - --cluster string The name of the kubeconfig cluster to use - --context string The name of the kubeconfig context to use - --dry-run Print what will be performed - -h, --help help for import - --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - --kubeconfig string Path to a kube config. Only required if out-of-cluster - -n, --namespace string If present, the namespace scope for this CLI request - --password string Password for basic authentication to the API server - --prune Prune secrets, applications and projects which do not appear in the backup - --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") - --server string The address and port of the Kubernetes API server - --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. 
- --token string Bearer token for authentication to the API server - --user string The name of the kubeconfig user to use - --username string Username for basic authentication to the API server - --verbose Verbose output (versus only changed output) -``` - -### SEE ALSO - -* [argocd-util](argocd-util.md) - argocd-util tools used by Argo CD - diff --git a/docs/operator-manual/server-commands/argocd-util_kubeconfig.md b/docs/operator-manual/server-commands/argocd-util_kubeconfig.md deleted file mode 100644 index 70426e22876cb..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_kubeconfig.md +++ /dev/null @@ -1,35 +0,0 @@ -## argocd-util kubeconfig - -Generates kubeconfig for the specified cluster - -``` -argocd-util kubeconfig CLUSTER_URL OUTPUT_PATH [flags] -``` - -### Options - -``` - --as string Username to impersonate for the operation - --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. - --certificate-authority string Path to a cert file for the certificate authority - --client-certificate string Path to a client certificate file for TLS - --client-key string Path to a client key file for TLS - --cluster string The name of the kubeconfig cluster to use - --context string The name of the kubeconfig context to use - -h, --help help for kubeconfig - --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - --kubeconfig string Path to a kube config. Only required if out-of-cluster - -n, --namespace string If present, the namespace scope for this CLI request - --password string Password for basic authentication to the API server - --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. 
(default "0") - --server string The address and port of the Kubernetes API server - --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. - --token string Bearer token for authentication to the API server - --user string The name of the kubeconfig user to use - --username string Username for basic authentication to the API server -``` - -### SEE ALSO - -* [argocd-util](argocd-util.md) - argocd-util tools used by Argo CD - diff --git a/docs/operator-manual/server-commands/argocd-util_proj.md b/docs/operator-manual/server-commands/argocd-util_proj.md deleted file mode 100644 index 0cef263287e10..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_proj.md +++ /dev/null @@ -1,21 +0,0 @@ -## argocd-util proj - -Manage projects configuration - -``` -argocd-util proj [flags] -``` - -### Options - -``` - -h, --help help for proj -``` - -### SEE ALSO - -* [argocd-util](argocd-util.md) - argocd-util tools used by Argo CD -* [argocd-util proj generate-allow-list](argocd-util_proj_generate-allow-list.md) - Generates project allow list from the specified clusterRole file -* [argocd-util proj generate-spec](argocd-util_proj_generate-spec.md) - Generate declarative config for a project -* [argocd-util proj update-role-policy](argocd-util_proj_update-role-policy.md) - Implement bulk project role update. Useful to back-fill existing project policies or remove obsolete actions. 
- diff --git a/docs/operator-manual/server-commands/argocd-util_proj_generate-allow-list.md b/docs/operator-manual/server-commands/argocd-util_proj_generate-allow-list.md deleted file mode 100644 index 75e77b2740eac..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_proj_generate-allow-list.md +++ /dev/null @@ -1,36 +0,0 @@ -## argocd-util proj generate-allow-list - -Generates project allow list from the specified clusterRole file - -``` -argocd-util proj generate-allow-list CLUSTERROLE_PATH PROJ_NAME [flags] -``` - -### Options - -``` - --as string Username to impersonate for the operation - --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. - --certificate-authority string Path to a cert file for the certificate authority - --client-certificate string Path to a client certificate file for TLS - --client-key string Path to a client key file for TLS - --cluster string The name of the kubeconfig cluster to use - --context string The name of the kubeconfig context to use - -h, --help help for generate-allow-list - --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - --kubeconfig string Path to a kube config. Only required if out-of-cluster - -n, --namespace string If present, the namespace scope for this CLI request - -o, --out string Output to the specified file instead of stdout (default "-") - --password string Password for basic authentication to the API server - --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") - --server string The address and port of the Kubernetes API server - --tls-server-name string If provided, this name will be used to validate server certificate. 
If this is not provided, hostname used to contact the server is used. - --token string Bearer token for authentication to the API server - --user string The name of the kubeconfig user to use - --username string Username for basic authentication to the API server -``` - -### SEE ALSO - -* [argocd-util proj](argocd-util_proj.md) - Manage projects configuration - diff --git a/docs/operator-manual/server-commands/argocd-util_proj_generate-spec.md b/docs/operator-manual/server-commands/argocd-util_proj_generate-spec.md deleted file mode 100644 index 74407a69d8ab8..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_proj_generate-spec.md +++ /dev/null @@ -1,30 +0,0 @@ -## argocd-util proj generate-spec - -Generate declarative config for a project - -``` -argocd-util proj generate-spec PROJECT [flags] -``` - -### Options - -``` - --allow-cluster-resource stringArray List of allowed cluster level resources - --allow-namespaced-resource stringArray List of allowed namespaced resources - --deny-cluster-resource stringArray List of denied cluster level resources - --deny-namespaced-resource stringArray List of denied namespaced resources - --description string Project description - -d, --dest stringArray Permitted destination server and namespace (e.g. https://192.168.99.100:8443,default) - -f, --file string Filename or URL to Kubernetes manifests for the project - -h, --help help for generate-spec - --orphaned-resources Enables orphaned resources monitoring - --orphaned-resources-warn Specifies if applications should have a warning condition when orphaned resources detected - -o, --output string Output format. 
One of: json|yaml (default "yaml") - --signature-keys strings GnuPG public key IDs for commit signature verification - -s, --src stringArray Permitted source repository URL -``` - -### SEE ALSO - -* [argocd-util proj](argocd-util_proj.md) - Manage projects configuration - diff --git a/docs/operator-manual/server-commands/argocd-util_proj_update-role-policy.md b/docs/operator-manual/server-commands/argocd-util_proj_update-role-policy.md deleted file mode 100644 index 56506b7927f3f..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_proj_update-role-policy.md +++ /dev/null @@ -1,51 +0,0 @@ -## argocd-util proj update-role-policy - -Implement bulk project role update. Useful to back-fill existing project policies or remove obsolete actions. - -``` -argocd-util proj update-role-policy PROJECT_GLOB MODIFICATION ACTION [flags] -``` - -### Examples - -``` - # Add policy that allows executing any action (action/*) to roles which name matches to *deployer* in all projects - argocd-util projects update-role-policy '*' set 'action/*' --role '*deployer*' --resource applications --scope '*' --permission allow - - # Remove policy that which manages running (action/*) from all roles which name matches *deployer* in all projects - argocd-util projects update-role-policy '*' remove override --role '*deployer*' - -``` - -### Options - -``` - --as string Username to impersonate for the operation - --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. 
- --certificate-authority string Path to a cert file for the certificate authority - --client-certificate string Path to a client certificate file for TLS - --client-key string Path to a client key file for TLS - --cluster string The name of the kubeconfig cluster to use - --context string The name of the kubeconfig context to use - --dry-run Dry run (default true) - -h, --help help for update-role-policy - --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - --kubeconfig string Path to a kube config. Only required if out-of-cluster - -n, --namespace string If present, the namespace scope for this CLI request - --password string Password for basic authentication to the API server - --permission string Action permission - --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") - --resource string Resource e.g. 'applications' - --role string Role name pattern e.g. '*deployer*' (default "*") - --scope string Resource scope e.g. '*' - --server string The address and port of the Kubernetes API server - --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. 
- --token string Bearer token for authentication to the API server - --user string The name of the kubeconfig user to use - --username string Username for basic authentication to the API server -``` - -### SEE ALSO - -* [argocd-util proj](argocd-util_proj.md) - Manage projects configuration - diff --git a/docs/operator-manual/server-commands/argocd-util_projects.md b/docs/operator-manual/server-commands/argocd-util_projects.md deleted file mode 100644 index 5aa2d214f2894..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_projects.md +++ /dev/null @@ -1,20 +0,0 @@ -## argocd-util projects - -Utility commands operate on ArgoCD Projects - -``` -argocd-util projects [flags] -``` - -### Options - -``` - -h, --help help for projects -``` - -### SEE ALSO - -* [argocd-util](argocd-util.md) - argocd-util tools used by Argo CD -* [argocd-util projects generate-allow-list](argocd-util_projects_generate-allow-list.md) - Generates project allow list from the specified clusterRole file -* [argocd-util projects update-role-policy](argocd-util_projects_update-role-policy.md) - Implement bulk project role update. Useful to back-fill existing project policies or remove obsolete actions. - diff --git a/docs/operator-manual/server-commands/argocd-util_projects_generate-allow-list.md b/docs/operator-manual/server-commands/argocd-util_projects_generate-allow-list.md deleted file mode 100644 index f78512bd7a71f..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_projects_generate-allow-list.md +++ /dev/null @@ -1,36 +0,0 @@ -## argocd-util projects generate-allow-list - -Generates project allow list from the specified clusterRole file - -``` -argocd-util projects generate-allow-list CLUSTERROLE_PATH PROJ_NAME [flags] -``` - -### Options - -``` - --as string Username to impersonate for the operation - --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. 
- --certificate-authority string Path to a cert file for the certificate authority - --client-certificate string Path to a client certificate file for TLS - --client-key string Path to a client key file for TLS - --cluster string The name of the kubeconfig cluster to use - --context string The name of the kubeconfig context to use - -h, --help help for generate-allow-list - --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - --kubeconfig string Path to a kube config. Only required if out-of-cluster - -n, --namespace string If present, the namespace scope for this CLI request - -o, --out string Output to the specified file instead of stdout (default "-") - --password string Password for basic authentication to the API server - --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") - --server string The address and port of the Kubernetes API server - --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. 
- --token string Bearer token for authentication to the API server - --user string The name of the kubeconfig user to use - --username string Username for basic authentication to the API server -``` - -### SEE ALSO - -* [argocd-util projects](argocd-util_projects.md) - Utility commands operate on ArgoCD Projects - diff --git a/docs/operator-manual/server-commands/argocd-util_projects_update-role-policy.md b/docs/operator-manual/server-commands/argocd-util_projects_update-role-policy.md deleted file mode 100644 index 346034bc541de..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_projects_update-role-policy.md +++ /dev/null @@ -1,51 +0,0 @@ -## argocd-util projects update-role-policy - -Implement bulk project role update. Useful to back-fill existing project policies or remove obsolete actions. - -``` -argocd-util projects update-role-policy PROJECT_GLOB MODIFICATION ACTION [flags] -``` - -### Examples - -``` - # Add policy that allows executing any action (action/*) to roles which name matches to *deployer* in all projects - argocd-util projects update-role-policy '*' set 'action/*' --role '*deployer*' --resource applications --scope '*' --permission allow - - # Remove policy that which manages running (action/*) from all roles which name matches *deployer* in all projects - argocd-util projects update-role-policy '*' remove override --role '*deployer*' - -``` - -### Options - -``` - --as string Username to impersonate for the operation - --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. 
- --certificate-authority string Path to a cert file for the certificate authority - --client-certificate string Path to a client certificate file for TLS - --client-key string Path to a client key file for TLS - --cluster string The name of the kubeconfig cluster to use - --context string The name of the kubeconfig context to use - --dry-run Dry run (default true) - -h, --help help for update-role-policy - --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - --kubeconfig string Path to a kube config. Only required if out-of-cluster - -n, --namespace string If present, the namespace scope for this CLI request - --password string Password for basic authentication to the API server - --permission string Action permission - --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") - --resource string Resource e.g. 'applications' - --role string Role name pattern e.g. '*deployer*' (default "*") - --scope string Resource scope e.g. '*' - --server string The address and port of the Kubernetes API server - --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. 
- --token string Bearer token for authentication to the API server - --user string The name of the kubeconfig user to use - --username string Username for basic authentication to the API server -``` - -### SEE ALSO - -* [argocd-util projects](argocd-util_projects.md) - Utility commands operate on ArgoCD Projects - diff --git a/docs/operator-manual/server-commands/argocd-util_repo.md b/docs/operator-manual/server-commands/argocd-util_repo.md deleted file mode 100644 index 5010cf024a18d..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_repo.md +++ /dev/null @@ -1,19 +0,0 @@ -## argocd-util repo - -Manage repositories configuration - -``` -argocd-util repo [flags] -``` - -### Options - -``` - -h, --help help for repo -``` - -### SEE ALSO - -* [argocd-util](argocd-util.md) - argocd-util tools used by Argo CD -* [argocd-util repo generate-spec](argocd-util_repo_generate-spec.md) - Generate declarative config for a repo - diff --git a/docs/operator-manual/server-commands/argocd-util_repo_generate-spec.md b/docs/operator-manual/server-commands/argocd-util_repo_generate-spec.md deleted file mode 100644 index 0f4b7adccbb53..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_repo_generate-spec.md +++ /dev/null @@ -1,61 +0,0 @@ -## argocd-util repo generate-spec - -Generate declarative config for a repo - -``` -argocd-util repo generate-spec REPOURL [flags] -``` - -### Examples - -``` - - # Add a Git repository via SSH using a private key for authentication, ignoring the server's host key: - argocd-util repo generate-spec git@git.example.com:repos/repo --insecure-ignore-host-key --ssh-private-key-path ~/id_rsa - - # Add a Git repository via SSH on a non-default port - need to use ssh:// style URLs here - argocd-util repo generate-spec ssh://git@git.example.com:2222/repos/repo --ssh-private-key-path ~/id_rsa - - # Add a private Git repository via HTTPS using username/password and TLS client certificates: - argocd-util repo generate-spec 
https://git.example.com/repos/repo --username git --password secret --tls-client-cert-path ~/mycert.crt --tls-client-cert-key-path ~/mycert.key - - # Add a private Git repository via HTTPS using username/password without verifying the server's TLS certificate - argocd-util repo generate-spec https://git.example.com/repos/repo --username git --password secret --insecure-skip-server-verification - - # Add a public Helm repository named 'stable' via HTTPS - argocd-util repo generate-spec https://charts.helm.sh/stable --type helm --name stable - - # Add a private Helm repository named 'stable' via HTTPS - argocd-util repo generate-spec https://charts.helm.sh/stable --type helm --name stable --username test --password test - - # Add a private Helm OCI-based repository named 'stable' via HTTPS - argocd-util repo generate-spec helm-oci-registry.cn-zhangjiakou.cr.aliyuncs.com --type helm --name stable --enable-oci --username test --password test - -``` - -### Options - -``` - --enable-lfs enable git-lfs (Large File Support) on this repository - --enable-oci enable helm-oci (Helm OCI-Based Repository) - --github-app-enterprise-base-url string base url to use when using GitHub Enterprise (e.g. https://ghe.example.com/api/v3 - --github-app-id int id of the GitHub Application - --github-app-installation-id int installation id of the GitHub Application - --github-app-private-key-path string private key of the GitHub Application - -h, --help help for generate-spec - --insecure-ignore-host-key disables SSH strict host key checking (deprecated, use --insecure-skip-server-verification instead) - --insecure-skip-server-verification disables server certificate and host key checks - --name string name of the repository, mandatory for repositories of type helm - -o, --output string Output format. One of: json|yaml (default "yaml") - --password string password to the repository - --ssh-private-key-path string path to the private ssh key (e.g. 
~/.ssh/id_rsa) - --tls-client-cert-key-path string path to the TLS client cert's key path (must be PEM format) - --tls-client-cert-path string path to the TLS client cert (must be PEM format) - --type string type of the repository, "git" or "helm" (default "git") - --username string username to the repository -``` - -### SEE ALSO - -* [argocd-util repo](argocd-util_repo.md) - Manage repositories configuration - diff --git a/docs/operator-manual/server-commands/argocd-util_rundex.md b/docs/operator-manual/server-commands/argocd-util_rundex.md deleted file mode 100644 index 069cf329e23f8..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_rundex.md +++ /dev/null @@ -1,35 +0,0 @@ -## argocd-util rundex - -Runs dex generating a config using settings from the Argo CD configmap and secret - -``` -argocd-util rundex [flags] -``` - -### Options - -``` - --as string Username to impersonate for the operation - --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. - --certificate-authority string Path to a cert file for the certificate authority - --client-certificate string Path to a client certificate file for TLS - --client-key string Path to a client key file for TLS - --cluster string The name of the kubeconfig cluster to use - --context string The name of the kubeconfig context to use - -h, --help help for rundex - --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - --kubeconfig string Path to a kube config. Only required if out-of-cluster - -n, --namespace string If present, the namespace scope for this CLI request - --password string Password for basic authentication to the API server - --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). 
A value of zero means don't timeout requests. (default "0") - --server string The address and port of the Kubernetes API server - --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. - --token string Bearer token for authentication to the API server - --user string The name of the kubeconfig user to use - --username string Username for basic authentication to the API server -``` - -### SEE ALSO - -* [argocd-util](argocd-util.md) - argocd-util tools used by Argo CD - diff --git a/docs/operator-manual/server-commands/argocd-util_settings.md b/docs/operator-manual/server-commands/argocd-util_settings.md deleted file mode 100644 index 728f52ccd3527..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_settings.md +++ /dev/null @@ -1,41 +0,0 @@ -## argocd-util settings - -Provides set of commands for settings validation and troubleshooting - -``` -argocd-util settings [flags] -``` - -### Options - -``` - --argocd-cm-path string Path to local argocd-cm.yaml file - --argocd-secret-path string Path to local argocd-secret.yaml file - --as string Username to impersonate for the operation - --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. - --certificate-authority string Path to a cert file for the certificate authority - --client-certificate string Path to a client certificate file for TLS - --client-key string Path to a client key file for TLS - --cluster string The name of the kubeconfig cluster to use - --context string The name of the kubeconfig context to use - -h, --help help for settings - --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - --kubeconfig string Path to a kube config. 
Only required if out-of-cluster - --load-cluster-settings Indicates that config map and secret should be loaded from cluster unless local file path is provided - -n, --namespace string If present, the namespace scope for this CLI request - --password string Password for basic authentication to the API server - --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") - --server string The address and port of the Kubernetes API server - --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. - --token string Bearer token for authentication to the API server - --user string The name of the kubeconfig user to use - --username string Username for basic authentication to the API server -``` - -### SEE ALSO - -* [argocd-util](argocd-util.md) - argocd-util tools used by Argo CD -* [argocd-util settings rbac](argocd-util_settings_rbac.md) - Validate and test RBAC configuration -* [argocd-util settings resource-overrides](argocd-util_settings_resource-overrides.md) - Troubleshoot resource overrides -* [argocd-util settings validate](argocd-util_settings_validate.md) - Validate settings - diff --git a/docs/operator-manual/server-commands/argocd-util_settings_rbac.md b/docs/operator-manual/server-commands/argocd-util_settings_rbac.md deleted file mode 100644 index 768f75dd39364..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_settings_rbac.md +++ /dev/null @@ -1,45 +0,0 @@ -## argocd-util settings rbac - -Validate and test RBAC configuration - -``` -argocd-util settings rbac [flags] -``` - -### Options - -``` - -h, --help help for rbac -``` - -### Options inherited from parent commands - -``` - --argocd-cm-path string Path to local argocd-cm.yaml file - --argocd-secret-path 
string Path to local argocd-secret.yaml file - --as string Username to impersonate for the operation - --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. - --certificate-authority string Path to a cert file for the certificate authority - --client-certificate string Path to a client certificate file for TLS - --client-key string Path to a client key file for TLS - --cluster string The name of the kubeconfig cluster to use - --context string The name of the kubeconfig context to use - --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - --kubeconfig string Path to a kube config. Only required if out-of-cluster - --load-cluster-settings Indicates that config map and secret should be loaded from cluster unless local file path is provided - -n, --namespace string If present, the namespace scope for this CLI request - --password string Password for basic authentication to the API server - --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") - --server string The address and port of the Kubernetes API server - --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. 
- --token string Bearer token for authentication to the API server - --user string The name of the kubeconfig user to use - --username string Username for basic authentication to the API server -``` - -### SEE ALSO - -* [argocd-util settings](argocd-util_settings.md) - Provides set of commands for settings validation and troubleshooting -* [argocd-util settings rbac can](argocd-util_settings_rbac_can.md) - Check RBAC permissions for a role or subject -* [argocd-util settings rbac validate](argocd-util_settings_rbac_validate.md) - Validate RBAC policy - diff --git a/docs/operator-manual/server-commands/argocd-util_settings_rbac_can.md b/docs/operator-manual/server-commands/argocd-util_settings_rbac_can.md deleted file mode 100644 index 24a5c2d2277f7..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_settings_rbac_can.md +++ /dev/null @@ -1,78 +0,0 @@ -## argocd-util settings rbac can - -Check RBAC permissions for a role or subject - -### Synopsis - - -Check whether a given role or subject has appropriate RBAC permissions to do -something. - - -``` -argocd-util settings rbac can ROLE/SUBJECT ACTION RESOURCE [SUB-RESOURCE] [flags] -``` - -### Examples - -``` - -# Check whether role some:role has permissions to create an application in the -# 'default' project, using a local policy.csv file -argocd-util settings rbac can some:role create application 'default/app' --policy-file policy.csv - -# Policy file can also be K8s config map with data keys like argocd-rbac-cm, -# i.e. 'policy.csv' and (optionally) 'policy.default' -argocd-util settings rbac can some:role create application 'default/app' --policy-file argocd-rbac-cm.yaml - -# If --policy-file is not given, the ConfigMap 'argocd-rbac-cm' from K8s is -# used. 
You need to specify the argocd namespace, and make sure that your -# current Kubernetes context is pointing to the cluster Argo CD is running in -argocd-util settings rbac can some:role create application 'default/app' --namespace argocd - -# You can override a possibly configured default role -argocd-util settings rbac can someuser create application 'default/app' --default-role role:readonly - - -``` - -### Options - -``` - --default-role string name of the default role to use - -h, --help help for can - --policy-file string path to the policy file to use - -q, --quiet quiet mode - do not print results to stdout - --strict whether to perform strict check on action and resource names (default true) - --use-builtin-policy whether to also use builtin-policy (default true) -``` - -### Options inherited from parent commands - -``` - --argocd-cm-path string Path to local argocd-cm.yaml file - --argocd-secret-path string Path to local argocd-secret.yaml file - --as string Username to impersonate for the operation - --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. - --certificate-authority string Path to a cert file for the certificate authority - --client-certificate string Path to a client certificate file for TLS - --client-key string Path to a client key file for TLS - --cluster string The name of the kubeconfig cluster to use - --context string The name of the kubeconfig context to use - --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - --kubeconfig string Path to a kube config. 
Only required if out-of-cluster - --load-cluster-settings Indicates that config map and secret should be loaded from cluster unless local file path is provided - -n, --namespace string If present, the namespace scope for this CLI request - --password string Password for basic authentication to the API server - --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") - --server string The address and port of the Kubernetes API server - --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. - --token string Bearer token for authentication to the API server - --user string The name of the kubeconfig user to use - --username string Username for basic authentication to the API server -``` - -### SEE ALSO - -* [argocd-util settings rbac](argocd-util_settings_rbac.md) - Validate and test RBAC configuration - diff --git a/docs/operator-manual/server-commands/argocd-util_settings_rbac_validate.md b/docs/operator-manual/server-commands/argocd-util_settings_rbac_validate.md deleted file mode 100644 index 81ae11d44face..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_settings_rbac_validate.md +++ /dev/null @@ -1,51 +0,0 @@ -## argocd-util settings rbac validate - -Validate RBAC policy - -### Synopsis - - -Validates an RBAC policy for being syntactically correct. The policy must be -a local file, and in either CSV or K8s ConfigMap format. 
- - -``` -argocd-util settings rbac validate --policy-file=POLICYFILE [flags] -``` - -### Options - -``` - -h, --help help for validate - --policy-file string path to the policy file to use -``` - -### Options inherited from parent commands - -``` - --argocd-cm-path string Path to local argocd-cm.yaml file - --argocd-secret-path string Path to local argocd-secret.yaml file - --as string Username to impersonate for the operation - --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. - --certificate-authority string Path to a cert file for the certificate authority - --client-certificate string Path to a client certificate file for TLS - --client-key string Path to a client key file for TLS - --cluster string The name of the kubeconfig cluster to use - --context string The name of the kubeconfig context to use - --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - --kubeconfig string Path to a kube config. Only required if out-of-cluster - --load-cluster-settings Indicates that config map and secret should be loaded from cluster unless local file path is provided - -n, --namespace string If present, the namespace scope for this CLI request - --password string Password for basic authentication to the API server - --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") - --server string The address and port of the Kubernetes API server - --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. 
- --token string Bearer token for authentication to the API server - --user string The name of the kubeconfig user to use - --username string Username for basic authentication to the API server -``` - -### SEE ALSO - -* [argocd-util settings rbac](argocd-util_settings_rbac.md) - Validate and test RBAC configuration - diff --git a/docs/operator-manual/server-commands/argocd-util_settings_resource-overrides.md b/docs/operator-manual/server-commands/argocd-util_settings_resource-overrides.md deleted file mode 100644 index 9baf7d5e88a26..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_settings_resource-overrides.md +++ /dev/null @@ -1,47 +0,0 @@ -## argocd-util settings resource-overrides - -Troubleshoot resource overrides - -``` -argocd-util settings resource-overrides [flags] -``` - -### Options - -``` - -h, --help help for resource-overrides -``` - -### Options inherited from parent commands - -``` - --argocd-cm-path string Path to local argocd-cm.yaml file - --argocd-secret-path string Path to local argocd-secret.yaml file - --as string Username to impersonate for the operation - --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. - --certificate-authority string Path to a cert file for the certificate authority - --client-certificate string Path to a client certificate file for TLS - --client-key string Path to a client key file for TLS - --cluster string The name of the kubeconfig cluster to use - --context string The name of the kubeconfig context to use - --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - --kubeconfig string Path to a kube config. 
Only required if out-of-cluster - --load-cluster-settings Indicates that config map and secret should be loaded from cluster unless local file path is provided - -n, --namespace string If present, the namespace scope for this CLI request - --password string Password for basic authentication to the API server - --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") - --server string The address and port of the Kubernetes API server - --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. - --token string Bearer token for authentication to the API server - --user string The name of the kubeconfig user to use - --username string Username for basic authentication to the API server -``` - -### SEE ALSO - -* [argocd-util settings](argocd-util_settings.md) - Provides set of commands for settings validation and troubleshooting -* [argocd-util settings resource-overrides health](argocd-util_settings_resource-overrides_health.md) - Assess resource health -* [argocd-util settings resource-overrides ignore-differences](argocd-util_settings_resource-overrides_ignore-differences.md) - Renders fields excluded from diffing -* [argocd-util settings resource-overrides list-actions](argocd-util_settings_resource-overrides_list-actions.md) - List available resource actions -* [argocd-util settings resource-overrides run-action](argocd-util_settings_resource-overrides_run-action.md) - Executes resource action - diff --git a/docs/operator-manual/server-commands/argocd-util_settings_resource-overrides_health.md b/docs/operator-manual/server-commands/argocd-util_settings_resource-overrides_health.md deleted file mode 100644 index 815bf6d406cc3..0000000000000 --- 
a/docs/operator-manual/server-commands/argocd-util_settings_resource-overrides_health.md +++ /dev/null @@ -1,54 +0,0 @@ -## argocd-util settings resource-overrides health - -Assess resource health - -### Synopsis - -Assess resource health using the lua script configured in the 'resource.customizations' field of 'argocd-cm' ConfigMap - -``` -argocd-util settings resource-overrides health RESOURCE_YAML_PATH [flags] -``` - -### Examples - -``` - -argocd-util settings resource-overrides health ./deploy.yaml --argocd-cm-path ./argocd-cm.yaml -``` - -### Options - -``` - -h, --help help for health -``` - -### Options inherited from parent commands - -``` - --argocd-cm-path string Path to local argocd-cm.yaml file - --argocd-secret-path string Path to local argocd-secret.yaml file - --as string Username to impersonate for the operation - --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. - --certificate-authority string Path to a cert file for the certificate authority - --client-certificate string Path to a client certificate file for TLS - --client-key string Path to a client key file for TLS - --cluster string The name of the kubeconfig cluster to use - --context string The name of the kubeconfig context to use - --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - --kubeconfig string Path to a kube config. Only required if out-of-cluster - --load-cluster-settings Indicates that config map and secret should be loaded from cluster unless local file path is provided - -n, --namespace string If present, the namespace scope for this CLI request - --password string Password for basic authentication to the API server - --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). 
A value of zero means don't timeout requests. (default "0") - --server string The address and port of the Kubernetes API server - --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. - --token string Bearer token for authentication to the API server - --user string The name of the kubeconfig user to use - --username string Username for basic authentication to the API server -``` - -### SEE ALSO - -* [argocd-util settings resource-overrides](argocd-util_settings_resource-overrides.md) - Troubleshoot resource overrides - diff --git a/docs/operator-manual/server-commands/argocd-util_settings_resource-overrides_ignore-differences.md b/docs/operator-manual/server-commands/argocd-util_settings_resource-overrides_ignore-differences.md deleted file mode 100644 index c66be6208dca4..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_settings_resource-overrides_ignore-differences.md +++ /dev/null @@ -1,54 +0,0 @@ -## argocd-util settings resource-overrides ignore-differences - -Renders fields excluded from diffing - -### Synopsis - -Renders ignored fields using the 'ignoreDifferences' setting specified in the 'resource.customizations' field of 'argocd-cm' ConfigMap - -``` -argocd-util settings resource-overrides ignore-differences RESOURCE_YAML_PATH [flags] -``` - -### Examples - -``` - -argocd-util settings resource-overrides ignore-differences ./deploy.yaml --argocd-cm-path ./argocd-cm.yaml -``` - -### Options - -``` - -h, --help help for ignore-differences -``` - -### Options inherited from parent commands - -``` - --argocd-cm-path string Path to local argocd-cm.yaml file - --argocd-secret-path string Path to local argocd-secret.yaml file - --as string Username to impersonate for the operation - --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. 
- --certificate-authority string Path to a cert file for the certificate authority - --client-certificate string Path to a client certificate file for TLS - --client-key string Path to a client key file for TLS - --cluster string The name of the kubeconfig cluster to use - --context string The name of the kubeconfig context to use - --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - --kubeconfig string Path to a kube config. Only required if out-of-cluster - --load-cluster-settings Indicates that config map and secret should be loaded from cluster unless local file path is provided - -n, --namespace string If present, the namespace scope for this CLI request - --password string Password for basic authentication to the API server - --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") - --server string The address and port of the Kubernetes API server - --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. 
- --token string Bearer token for authentication to the API server - --user string The name of the kubeconfig user to use - --username string Username for basic authentication to the API server -``` - -### SEE ALSO - -* [argocd-util settings resource-overrides](argocd-util_settings_resource-overrides.md) - Troubleshoot resource overrides - diff --git a/docs/operator-manual/server-commands/argocd-util_settings_resource-overrides_list-actions.md b/docs/operator-manual/server-commands/argocd-util_settings_resource-overrides_list-actions.md deleted file mode 100644 index 804a74b788368..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_settings_resource-overrides_list-actions.md +++ /dev/null @@ -1,54 +0,0 @@ -## argocd-util settings resource-overrides list-actions - -List available resource actions - -### Synopsis - -List actions available for given resource action using the lua scripts configured in the 'resource.customizations' field of 'argocd-cm' ConfigMap and outputs updated fields - -``` -argocd-util settings resource-overrides list-actions RESOURCE_YAML_PATH [flags] -``` - -### Examples - -``` - -argocd-util settings resource-overrides action list /tmp/deploy.yaml --argocd-cm-path ./argocd-cm.yaml -``` - -### Options - -``` - -h, --help help for list-actions -``` - -### Options inherited from parent commands - -``` - --argocd-cm-path string Path to local argocd-cm.yaml file - --argocd-secret-path string Path to local argocd-secret.yaml file - --as string Username to impersonate for the operation - --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. 
- --certificate-authority string Path to a cert file for the certificate authority - --client-certificate string Path to a client certificate file for TLS - --client-key string Path to a client key file for TLS - --cluster string The name of the kubeconfig cluster to use - --context string The name of the kubeconfig context to use - --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - --kubeconfig string Path to a kube config. Only required if out-of-cluster - --load-cluster-settings Indicates that config map and secret should be loaded from cluster unless local file path is provided - -n, --namespace string If present, the namespace scope for this CLI request - --password string Password for basic authentication to the API server - --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") - --server string The address and port of the Kubernetes API server - --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. 
- --token string Bearer token for authentication to the API server - --user string The name of the kubeconfig user to use - --username string Username for basic authentication to the API server -``` - -### SEE ALSO - -* [argocd-util settings resource-overrides](argocd-util_settings_resource-overrides.md) - Troubleshoot resource overrides - diff --git a/docs/operator-manual/server-commands/argocd-util_settings_resource-overrides_run-action.md b/docs/operator-manual/server-commands/argocd-util_settings_resource-overrides_run-action.md deleted file mode 100644 index f4992e79dfd9e..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_settings_resource-overrides_run-action.md +++ /dev/null @@ -1,54 +0,0 @@ -## argocd-util settings resource-overrides run-action - -Executes resource action - -### Synopsis - -Executes resource action using the lua script configured in the 'resource.customizations' field of 'argocd-cm' ConfigMap and outputs updated fields - -``` -argocd-util settings resource-overrides run-action RESOURCE_YAML_PATH ACTION [flags] -``` - -### Examples - -``` - -argocd-util settings resource-overrides action run /tmp/deploy.yaml restart --argocd-cm-path ./argocd-cm.yaml -``` - -### Options - -``` - -h, --help help for run-action -``` - -### Options inherited from parent commands - -``` - --argocd-cm-path string Path to local argocd-cm.yaml file - --argocd-secret-path string Path to local argocd-secret.yaml file - --as string Username to impersonate for the operation - --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. 
- --certificate-authority string Path to a cert file for the certificate authority - --client-certificate string Path to a client certificate file for TLS - --client-key string Path to a client key file for TLS - --cluster string The name of the kubeconfig cluster to use - --context string The name of the kubeconfig context to use - --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - --kubeconfig string Path to a kube config. Only required if out-of-cluster - --load-cluster-settings Indicates that config map and secret should be loaded from cluster unless local file path is provided - -n, --namespace string If present, the namespace scope for this CLI request - --password string Password for basic authentication to the API server - --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") - --server string The address and port of the Kubernetes API server - --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. 
- --token string Bearer token for authentication to the API server - --user string The name of the kubeconfig user to use - --username string Username for basic authentication to the API server -``` - -### SEE ALSO - -* [argocd-util settings resource-overrides](argocd-util_settings_resource-overrides.md) - Troubleshoot resource overrides - diff --git a/docs/operator-manual/server-commands/argocd-util_settings_validate.md b/docs/operator-manual/server-commands/argocd-util_settings_validate.md deleted file mode 100644 index 855c3098b2887..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_settings_validate.md +++ /dev/null @@ -1,59 +0,0 @@ -## argocd-util settings validate - -Validate settings - -### Synopsis - -Validates settings specified in 'argocd-cm' ConfigMap and 'argocd-secret' Secret - -``` -argocd-util settings validate [flags] -``` - -### Examples - -``` - -#Validates all settings in the specified YAML file -argocd-util settings validate --argocd-cm-path ./argocd-cm.yaml - -#Validates accounts and plugins settings in Kubernetes cluster of current kubeconfig context -argocd-util settings validate --group accounts --group plugins --load-cluster-settings -``` - -### Options - -``` - --group stringArray Optional list of setting groups that have to be validated ( one of: accounts, general, kustomize, plugins, repositories, resource-overrides) - -h, --help help for validate -``` - -### Options inherited from parent commands - -``` - --argocd-cm-path string Path to local argocd-cm.yaml file - --argocd-secret-path string Path to local argocd-secret.yaml file - --as string Username to impersonate for the operation - --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. 
- --certificate-authority string Path to a cert file for the certificate authority - --client-certificate string Path to a client certificate file for TLS - --client-key string Path to a client key file for TLS - --cluster string The name of the kubeconfig cluster to use - --context string The name of the kubeconfig context to use - --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - --kubeconfig string Path to a kube config. Only required if out-of-cluster - --load-cluster-settings Indicates that config map and secret should be loaded from cluster unless local file path is provided - -n, --namespace string If present, the namespace scope for this CLI request - --password string Password for basic authentication to the API server - --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") - --server string The address and port of the Kubernetes API server - --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. 
- --token string Bearer token for authentication to the API server - --user string The name of the kubeconfig user to use - --username string Username for basic authentication to the API server -``` - -### SEE ALSO - -* [argocd-util settings](argocd-util_settings.md) - Provides set of commands for settings validation and troubleshooting - diff --git a/docs/operator-manual/server-commands/argocd-util_version.md b/docs/operator-manual/server-commands/argocd-util_version.md deleted file mode 100644 index cf2325ad03c25..0000000000000 --- a/docs/operator-manual/server-commands/argocd-util_version.md +++ /dev/null @@ -1,19 +0,0 @@ -## argocd-util version - -Print version information - -``` -argocd-util version [flags] -``` - -### Options - -``` - -h, --help help for version - --short print just the version number -``` - -### SEE ALSO - -* [argocd-util](argocd-util.md) - argocd-util tools used by Argo CD - diff --git a/docs/operator-manual/signed-release-assets.md b/docs/operator-manual/signed-release-assets.md new file mode 100644 index 0000000000000..9aec6bb071047 --- /dev/null +++ b/docs/operator-manual/signed-release-assets.md @@ -0,0 +1,155 @@ +# Verification of Argo CD Artifacts + +## Prerequisites +- cosign `v2.0.0` or higher [installation instructions](https://docs.sigstore.dev/cosign/installation) +- slsa-verifier [installation instructions](https://github.com/slsa-framework/slsa-verifier#installation) +- crane [installation instructions](https://github.com/google/go-containerregistry/blob/main/cmd/crane/README.md) (for container verification only) + +*** +## Release Assets +| Asset | Description | +|-------------------------|-------------------------------| +| argocd-darwin-amd64 | CLI Binary | +| argocd-darwin-arm64 | CLI Binary | +| argocd-linux_amd64 | CLI Binary | +| argocd-linux_arm64 | CLI Binary | +| argocd-linux_ppc64le | CLI Binary | +| argocd-linux_s390x | CLI Binary | +| argocd-windows_amd64 | CLI Binary | +| argocd-cli.intoto.jsonl | Attestation of 
CLI binaries | +| cli_checksums.txt | Checksums of binaries | +| sbom.tar.gz | Sbom | +| sbom.tar.gz.pem | Certificate used to sign sbom | +| sbom.tar.gz.sig | Signature of sbom | + +*** +## Verification of container images + +Argo CD container images are signed by [cosign](https://github.com/sigstore/cosign) using identity-based ("keyless") signing and transparency. Executing the following command can be used to verify the signature of a container image: + +```bash +cosign verify \ +--certificate-identity-regexp https://github.com/argoproj/argo-cd/.github/workflows/image-reuse.yaml@refs/tags/v \ +--certificate-oidc-issuer https://token.actions.githubusercontent.com \ +quay.io/argoproj/argocd:v2.7.0 | jq +``` +The command should output the following if the container image was correctly verified: +```bash +The following checks were performed on each of these signatures: + - The cosign claims were validated + - Existence of the claims in the transparency log was verified offline + - Any certificates were verified against the Fulcio roots. +[ + { + "critical": { + "identity": { + "docker-reference": "quay.io/argoproj/argo-cd" + }, + "image": { + "docker-manifest-digest": "sha256:63dc60481b1b2abf271e1f2b866be8a92962b0e53aaa728902caa8ac8d235277" + }, + "type": "cosign container image signature" + }, + "optional": { + "1.3.6.1.4.1.57264.1.1": "https://token.actions.githubusercontent.com", + "1.3.6.1.4.1.57264.1.2": "push", + "1.3.6.1.4.1.57264.1.3": "a6ec84da0eaa519cbd91a8f016cf4050c03323b2", + "1.3.6.1.4.1.57264.1.4": "Publish ArgoCD Release", + "1.3.6.1.4.1.57264.1.5": "argoproj/argo-cd", + "1.3.6.1.4.1.57264.1.6": "refs/tags/", + ... +``` + +*** +## Verification of container image with SLSA attestations + +A [SLSA](https://slsa.dev/) Level 3 provenance is generated using [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator). + +The following command will verify the signature of an attestation and how it was issued. 
It will contain the payloadType, payload, and signature. + +Run the following command as per the [slsa-verifier documentation](https://github.com/slsa-framework/slsa-verifier/tree/main#containers): + +```bash +# Get the immutable container image to prevent TOCTOU attacks https://github.com/slsa-framework/slsa-verifier#toctou-attacks +IMAGE=quay.io/argoproj/argocd:v2.7.0 +IMAGE="${IMAGE}@"$(crane digest "${IMAGE}") +# Verify provenance, including the tag to prevent rollback attacks. +slsa-verifier verify-image "$IMAGE" \ + --source-uri github.com/argoproj/argo-cd \ + --source-tag v2.7.0 +``` + +If you only want to verify up to the major or minor version of the source repository tag (instead of the full tag), use the `--source-versioned-tag` which performs semantic versioning verification: + +```shell +slsa-verifier verify-image "$IMAGE" \ + --source-uri github.com/argoproj/argo-cd \ + --source-versioned-tag v2 # Note: May use v2.7 for minor version verification. +``` + +The attestation payload contains a non-forgeable provenance which is base64 encoded and can be viewed by passing the `--print-provenance` option to the commands above: + +```bash +slsa-verifier verify-image "$IMAGE" \ + --source-uri github.com/argoproj/argo-cd \ + --source-tag v2.7.0 \ + --print-provenance | jq +``` + +If you prefer using cosign, follow these [instructions](https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#cosign). + +!!! tip + `cosign` or `slsa-verifier` can both be used to verify image attestations. + Check the documentation of each binary for detailed instructions. + +*** + +## Verification of CLI artifacts with SLSA attestations + +A single attestation (`argocd-cli.intoto.jsonl`) from each release is provided. 
This can be used with [slsa-verifier](https://github.com/slsa-framework/slsa-verifier#verification-for-github-builders) to verify that a CLI binary was generated using Argo CD workflows on GitHub and ensures it was cryptographically signed. + +```bash +slsa-verifier verify-artifact argocd-linux-amd64 \ + --provenance-path argocd-cli.intoto.jsonl \ + --source-uri github.com/argoproj/argo-cd \ + --source-tag v2.7.0 +``` + +If you only want to verify up to the major or minor version of the source repository tag (instead of the full tag), use the `--source-versioned-tag` which performs semantic versioning verification: + +```shell +slsa-verifier verify-artifact argocd-linux-amd64 \ + --provenance-path argocd-cli.intoto.jsonl \ + --source-uri github.com/argoproj/argo-cd \ + --source-versioned-tag v2 # Note: May use v2.7 for minor version verification. +``` + +The payload is a non-forgeable provenance which is base64 encoded and can be viewed by passing the `--print-provenance` option to the commands above: + +```bash +slsa-verifier verify-artifact argocd-linux-amd64 \ + --provenance-path argocd-cli.intoto.jsonl \ + --source-uri github.com/argoproj/argo-cd \ + --source-tag v2.7.0 \ + --print-provenance | jq +``` + +## Verification of Sbom + +A single attestation (`argocd-sbom.intoto.jsonl`) from each release is provided along with the sbom (`sbom.tar.gz`). This can be used with [slsa-verifier](https://github.com/slsa-framework/slsa-verifier#verification-for-github-builders) to verify that the SBOM was generated using Argo CD workflows on GitHub and ensures it was cryptographically signed. + +```bash +slsa-verifier verify-artifact sbom.tar.gz \ + --provenance-path argocd-sbom.intoto.jsonl \ + --source-uri github.com/argoproj/argo-cd \ + --source-tag v2.7.0 +``` + +*** +## Verification on Kubernetes + +### Policy controllers +!!! note + We encourage all users to verify signatures and provenances with your admission/policy controller of choice. 
Doing so will verify that an image was built by us before it's deployed on your Kubernetes cluster. + +Cosign signatures and SLSA provenances are compatible with several types of admission controllers. Please see the [cosign documentation](https://docs.sigstore.dev/cosign/overview/#kubernetes-integrations) and [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#verification) for supported controllers. diff --git a/docs/operator-manual/tested-kubernetes-versions.md b/docs/operator-manual/tested-kubernetes-versions.md new file mode 100644 index 0000000000000..897620296a515 --- /dev/null +++ b/docs/operator-manual/tested-kubernetes-versions.md @@ -0,0 +1,6 @@ +| Argo CD version | Kubernetes versions | +|-----------------|---------------------| +| 2.7 | v1.26, v1.25, v1.24, v1.23 | +| 2.6 | v1.24, v1.23, v1.22 | +| 2.5 | v1.24, v1.23, v1.22 | + diff --git a/docs/operator-manual/tls.md b/docs/operator-manual/tls.md new file mode 100644 index 0000000000000..43409fc568f43 --- /dev/null +++ b/docs/operator-manual/tls.md @@ -0,0 +1,261 @@ +# TLS configuration + +Argo CD provides three inbound TLS endpoints that can be configured: + +* The user-facing endpoint of the `argocd-server` workload which serves the UI + and the API +* The endpoint of the `argocd-repo-server`, which is accessed by `argocd-server` + and `argocd-application-controller` workloads to request repository + operations. +* The endpoint of the `argocd-dex-server`, which is accessed by `argocd-server` + to handle OIDC authentication. + +By default, and without further configuration, these endpoints will be +set-up to use an automatically generated, self-signed certificate. However, +most users will want to explicitly configure the certificates for these TLS +endpoints, possibly using automated means such as `cert-manager` or using +their own dedicated Certificate Authority. 
+ +## Configuring TLS for argocd-server + +### Inbound TLS options for argocd-server + +You can configure certain TLS options for the `argocd-server` workload by +setting command line parameters. The following parameters are available: + +|Parameter|Default|Description| +|---------|-------|-----------| +|`--insecure`|`false`|Disables TLS completely| +|`--tlsminversion`|`1.2`|The minimum TLS version to be offered to clients| +|`--tlsmaxversion`|`1.3`|The maximum TLS version to be offered to clients| +|`--tlsciphers`|`TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:TLS_RSA_WITH_AES_256_GCM_SHA384`|A colon separated list of TLS cipher suites to be offered to clients| + +### TLS certificates used by argocd-server + +There are two ways to configure the TLS certificates used by `argocd-server`: + +* Setting the `tls.crt` and `tls.key` keys in the `argocd-server-tls` secret + to hold PEM data of the certificate and the corresponding private key. The + `argocd-server-tls` secret may be of type `tls`, but does not have to be. +* Setting the `tls.crt` and `tls.key` keys in the `argocd-secret` secret to + hold PEM data of the certificate and the corresponding private key. This + method is considered deprecated, and only exists for purposes of backwards + compatibility. Changing `argocd-secret` should not be used to override the + TLS certificate anymore. + +Argo CD decides which TLS certificate to use for the endpoint of +`argocd-server` as follows: + +* If the `argocd-server-tls` secret exists and contains a valid key pair in the + `tls.crt` and `tls.key` keys, this will be used for the certificate of the + endpoint of `argocd-server`. +* Otherwise, if the `argocd-secret` secret contains a valid key pair in the + `tls.crt` and `tls.key` keys, this will be used as certificate for the + endpoint of `argocd-server`. 
+* If no `tls.crt` and `tls.key` keys are found in either of the two mentioned + secrets, Argo CD will generate a self-signed certificate and persist it in + the `argocd-secret` secret. + +The `argocd-server-tls` secret contains only information for TLS configuration +to be used by `argocd-server` and is safe to be managed via third-party tools +such as `cert-manager` or `SealedSecrets`. + +To create this secret manually from an existing key pair, you can use `kubectl`: + +```shell +kubectl create -n argocd secret tls argocd-server-tls \ + --cert=/path/to/cert.pem \ + --key=/path/to/key.pem +``` + +Argo CD will pick up changes to the `argocd-server-tls` secret automatically +and will not require restart of the pods to use a renewed certificate. + +## Configuring inbound TLS for argocd-repo-server + +### Inbound TLS options for argocd-repo-server + +You can configure certain TLS options for the `argocd-repo-server` workload by +setting command line parameters. The following parameters are available: + +|Parameter|Default|Description| +|---------|-------|-----------| +|`--disable-tls`|`false`|Disables TLS completely| +|`--tlsminversion`|`1.2`|The minimum TLS version to be offered to clients| +|`--tlsmaxversion`|`1.3`|The maximum TLS version to be offered to clients| +|`--tlsciphers`|`TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:TLS_RSA_WITH_AES_256_GCM_SHA384`|A colon separated list of TLS cipher suites to be offered to clients| + +### Inbound TLS certificates used by argocd-repo-server + +To configure the TLS certificate used by the `argocd-repo-server` workload, +create a secret named `argocd-repo-server-tls` in the namespace where Argo CD +is running in with the certificate's key pair stored in `tls.crt` and +`tls.key` keys. If this secret does not exist, `argocd-repo-server` will +generate and use a self-signed certificate. 
+ +To create this secret, you can use `kubectl`: + +```shell +kubectl create -n argocd secret tls argocd-repo-server-tls \ + --cert=/path/to/cert.pem \ + --key=/path/to/key.pem +``` + +If the certificate is self-signed, you will also need to add `ca.crt` to the secret +with the contents of your CA certificate. + +Please note, that as opposed to `argocd-server`, the `argocd-repo-server` is +not able to pick up changes to this secret automatically. If you create (or +update) this secret, the `argocd-repo-server` pods need to be restarted. + +Also note, that the certificate should be issued with the correct SAN entries +for the `argocd-repo-server`, containing at least the entries for +`DNS:argocd-repo-server` and `DNS:argocd-repo-server.argo-cd.svc` depending +on how your workloads connect to the repository server. + +## Configuring inbound TLS for argocd-dex-server + +### Inbound TLS options for argocd-dex-server + +You can configure certain TLS options for the `argocd-dex-server` workload by +setting command line parameters. The following parameters are available: + +|Parameter|Default|Description| +|---------|-------|-----------| +|`--disable-tls`|`false`|Disables TLS completely| + +### Inbound TLS certificates used by argocd-dex-server + +To configure the TLS certificate used by the `argocd-dex-server` workload, +create a secret named `argocd-dex-server-tls` in the namespace where Argo CD +is running in with the certificate's key pair stored in `tls.crt` and +`tls.key` keys. If this secret does not exist, `argocd-dex-server` will +generate and use a self-signed certificate. + +To create this secret, you can use `kubectl`: + +```shell +kubectl create -n argocd secret tls argocd-dex-server-tls \ + --cert=/path/to/cert.pem \ + --key=/path/to/key.pem +``` + +If the certificate is self-signed, you will also need to add `ca.crt` to the secret +with the contents of your CA certificate. 
+ +Please note, that as opposed to `argocd-server`, the `argocd-dex-server` is +not able to pick up changes to this secret automatically. If you create (or +update) this secret, the `argocd-dex-server` pods need to be restarted. + +Also note, that the certificate should be issued with the correct SAN entries +for the `argocd-dex-server`, containing at least the entries for +`DNS:argocd-dex-server` and `DNS:argocd-dex-server.argo-cd.svc` depending +on how your workloads connect to the dex server. + +## Configuring TLS between Argo CD components + +### Configuring TLS to argocd-repo-server + +Both `argocd-server` and `argocd-application-controller` communicate with the +`argocd-repo-server` using a gRPC API over TLS. By default, +`argocd-repo-server` generates a non-persistent, self signed certificate +to use for its gRPC endpoint on startup. Because the `argocd-repo-server` has +no means to connect to the K8s control plane API, this certificate is not +available to outside consumers for verification. Both the +`argocd-server` and `argocd-application-controller` will use a non-validating +connection to the `argocd-repo-server` for this reason. + +To change this behavior to be more secure by having the `argocd-server` and +`argocd-application-controller` validate the TLS certificate of the +`argocd-repo-server` endpoint, the following steps need to be performed: + +* Create a persistent TLS certificate to be used by `argocd-repo-server`, as + shown above +* Restart the `argocd-repo-server` pod(s) +* Modify the pod startup parameters for `argocd-server` and + `argocd-application-controller` to include the `--repo-server-strict-tls` + parameter. + +The `argocd-server` and `argocd-application-controller` workloads will now +validate the TLS certificate of the `argocd-repo-server` by using the +certificate stored in the `argocd-repo-server-tls` secret. + +!!!note "Certificate expiry" + Please make sure that the certificate has a proper life time. 
Keep in + mind that when you have to replace the certificate, all workloads have + to be restarted in order to properly work again. + +### Configuring TLS to argocd-dex-server + +`argocd-server` communicates with the `argocd-dex-server` using an HTTPS API +over TLS. By default, `argocd-dex-server` generates a non-persistent, self +signed certificate to use for its HTTPS endpoint on startup. Because the +`argocd-dex-server` has no means to connect to the K8s control plane API, +this certificate is not available to outside consumers for verification. +The `argocd-server` will use a non-validating connection to the `argocd-dex-server` +for this reason. + +To change this behavior to be more secure by having the `argocd-server` validate +the TLS certificate of the `argocd-dex-server` endpoint, the following steps need +to be performed: + +* Create a persistent TLS certificate to be used by `argocd-dex-server`, as + shown above +* Restart the `argocd-dex-server` pod(s) +* Modify the pod startup parameters for `argocd-server` to include the +`--dex-server-strict-tls` parameter. + +The `argocd-server` workload will now validate the TLS certificate of the +`argocd-dex-server` by using the certificate stored in the `argocd-dex-server-tls` +secret. + +!!!note "Certificate expiry" + Please make sure that the certificate has a proper life time. Keep in + mind that when you have to replace the certificate, all workloads have + to be restarted in order to properly work again. + +### Disabling TLS to argocd-repo-server + +In some scenarios where mTLS through side-car proxies is involved (e.g. +in a service mesh), you may want to configure the connections between the +`argocd-server` and `argocd-application-controller` to `argocd-repo-server` +to not use TLS at all. + +In this case, you will need to: + +* Configure `argocd-repo-server` with TLS on the gRPC API disabled by specifying + the `--disable-tls` parameter to the pod container's startup arguments. 
+ Also, consider restricting listening addresses to the loopback interface by specifying + `--listen 127.0.0.1` parameter, so that insecure endpoint is not exposed on + the pod's network interfaces, but still available to the side-car container. +* Configure `argocd-server` and `argocd-application-controller` to not use TLS + for connections to the `argocd-repo-server` by specifying the parameter + `--repo-server-plaintext` to the pod container's startup arguments +* Configure `argocd-server` and `argocd-application-controller` to connect to + the side-car instead of directly to the `argocd-repo-server` service by + specifying its address via the `--repo-server
` parameter
+
+After this change, the `argocd-server` and `argocd-application-controller` will
+use a plain text connection to the side-car proxy, which will handle all aspects
+of TLS to the `argocd-repo-server`'s TLS side-car proxy.
+
+### Disabling TLS to argocd-dex-server
+
+In some scenarios where mTLS through side-car proxies is involved (e.g.
+in a service mesh), you may want to configure the connections from
+`argocd-server` to `argocd-dex-server` to not use TLS at all.
+
+In this case, you will need to:
+
+* Configure `argocd-dex-server` with TLS on the HTTPS API disabled by specifying
+  the `--disable-tls` parameter to the pod container's startup arguments
+* Configure `argocd-server` to not use TLS for connections to the `argocd-dex-server`
+  by specifying the parameter `--dex-server-plaintext` to the pod container's startup
+  arguments
+* Configure `argocd-server` to connect to the side-car instead of directly to the
+  `argocd-dex-server` service by specifying its address via the `--dex-server
`
+  parameter
+
+After this change, the `argocd-server` will use a plain text connection to the side-car
+proxy, which will handle all aspects of TLS to the `argocd-dex-server`'s TLS side-car proxy.
+
diff --git a/docs/operator-manual/troubleshooting.md b/docs/operator-manual/troubleshooting.md
index 6057cb4d366cc..884045410b0b8 100644
--- a/docs/operator-manual/troubleshooting.md
+++ b/docs/operator-manual/troubleshooting.md
@@ -1,29 +1,15 @@
 # Troubleshooting Tools
 
-The document describes how to use `argocd-tool` binary to simplify Argo CD settings customizations and troubleshot
+The document describes how to use `argocd admin` subcommands to simplify Argo CD settings customizations and troubleshoot
 connectivity issues.
 
 ## Settings
 
 Argo CD provides multiple ways to customize system behavior and has a lot of settings. It might be dangerous to modify
-settings on Argo CD used in production by multiple users. Before applying settings you can use `argocd-util` binary to
-make sure that settings are valid and Argo CD is working as expected. The `argocd-util` binary is available in `argocd`
-image and might be used using docker.
-You can download the latest `argocd-util` binary from [the latest release page of this repository](https://github.com/argoproj/argo-cd/releases/latest), which will include the `argocd-util` CLI.
-Example:
+settings on Argo CD used in production by multiple users. Before applying settings you can use `argocd admin` subcommands to
+make sure that settings are valid and Argo CD is working as expected.
-```bash -docker run --rm -it -w /src -v $(pwd):/src argoproj/argocd: \ - argocd-util settings validate --argocd-cm-path ./argocd-cm.yaml -``` - -If you are using Linux you can extract `argocd-util` binary from docker image: - -```bash -docker run --rm -it -w /src -v $(pwd):/src argocd cp /usr/local/bin/argocd-util ./argocd-util -``` - -The `argocd-util settings validate` command performs basic settings validation and print short summary +The `argocd admin settings validate` command performs basic settings validation and print short summary of each settings group. **Diffing Customization** @@ -31,11 +17,10 @@ of each settings group. [Diffing customization](../user-guide/diffing.md) allows excluding some resource fields from diffing process. The diffing customizations are configured in `resource.customizations` field of `argocd-cm` ConfigMap. -The following `argocd-util` command prints information about fields excluded from diffing in the specified ConfigMap. +The following `argocd admin` command prints information about fields excluded from diffing in the specified ConfigMap. ```bash -docker run --rm -it -w /src -v $(pwd):/src argoproj/argocd: \ - argocd-util settings resource-overrides ignore-differences ./deploy.yaml --argocd-cm-path ./argocd-cm.yaml +argocd admin settings resource-overrides ignore-differences ./deploy.yaml --argocd-cm-path ./argocd-cm.yaml ``` **Health Assessment** @@ -44,35 +29,32 @@ Argo CD provides built-in [health assessment](./health.md) for several kubernete customized by writing your own health checks in [Lua](https://www.lua.org/). The health checks are configured in the `resource.customizations` field of `argocd-cm` ConfigMap. -The following `argocd-util` command assess resource health using Lua script configured in the specified ConfigMap. +The following `argocd admin` command assess resource health using Lua script configured in the specified ConfigMap. 
```bash -docker run --rm -it -w /src -v $(pwd):/src argoproj/argocd: \ - argocd-util settings resource-overrides health ./deploy.yaml --argocd-cm-path ./argocd-cm.yaml +argocd admin settings resource-overrides health ./deploy.yaml --argocd-cm-path ./argocd-cm.yaml ``` **Resource Actions** Resource actions allows configuring named Lua script which performs resource modification. -The following `argocd-util` command executes action using Lua script configured in the specified ConfigMap and prints +The following `argocd admin` command executes action using Lua script configured in the specified ConfigMap and prints applied modifications. ```bash -docker run --rm -it -w /src -v $(pwd):/src argoproj/argocd: \ - argocd-util settings resource-overrides run-action /tmp/deploy.yaml restart --argocd-cm-path /private/tmp/argocd-cm.yaml +argocd admin settings resource-overrides run-action /tmp/deploy.yaml restart --argocd-cm-path /private/tmp/argocd-cm.yaml ``` -The following `argocd-util` command lists actions available for a given resource using Lua script configured in the specified ConfigMap. +The following `argocd admin` command lists actions available for a given resource using Lua script configured in the specified ConfigMap. ```bash -docker run --rm -it -w /src -v $(pwd):/src argoproj/argocd: \ - argocd-util settings resource-overrides list-actions /tmp/deploy.yaml --argocd-cm-path /private/tmp/argocd-cm.yaml +argocd admin settings resource-overrides list-actions /tmp/deploy.yaml --argocd-cm-path /private/tmp/argocd-cm.yaml ``` ## Cluster credentials -The `argocd-util cluster kubeconfig` is useful if you manually created Secret with cluster credentials and trying need to +The `argocd admin cluster kubeconfig` is useful if you manually created Secret with cluster credentials and trying need to troubleshoot connectivity issues. In this case, it is suggested to use the following steps: 1 SSH into [argocd-application-controller] pod. 
@@ -82,10 +64,10 @@ kubectl exec -n argocd -it \ $(kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-application-controller -o jsonpath='{.items[0].metadata.name}') bash ``` -2 Use `argocd-util cluster kubeconfig` command to export kubeconfig file from the configured Secret: +2 Use `argocd admin cluster kubeconfig` command to export kubeconfig file from the configured Secret: ``` -argocd-util cluster kubeconfig https:// /tmp/kubeconfig --namespace argocd +argocd admin cluster kubeconfig https:// /tmp/kubeconfig --namespace argocd ``` 3 Use `kubectl` to get more details about connection issues, fix them and apply changes back to secret: diff --git a/docs/operator-manual/ui-customization.md b/docs/operator-manual/ui-customization.md new file mode 100644 index 0000000000000..c6b463e577f50 --- /dev/null +++ b/docs/operator-manual/ui-customization.md @@ -0,0 +1,9 @@ +# UI Customization + +## Default Application Details View + +By default, the Application Details will show the `Tree` view. + +This can be configured on an Application basis, by setting the `pref.argocd.argoproj.io/default-view` annotation, accepting one of: `tree`, `pods`, `network`, `list` as values. + +For the Pods view, the default grouping mechanism can be configured using the `pref.argocd.argoproj.io/default-pod-sort` annotation, accepting one of: `node`, `parentResource`, `topLevelResource` as values. \ No newline at end of file diff --git a/docs/operator-manual/upgrading/1.3-1.4.md b/docs/operator-manual/upgrading/1.3-1.4.md index 17caec22483b1..0daea5b628b2e 100644 --- a/docs/operator-manual/upgrading/1.3-1.4.md +++ b/docs/operator-manual/upgrading/1.3-1.4.md @@ -5,7 +5,7 @@ The Argo CD deletes all **in-flight** hooks if you terminate running sync operation. The hook state assessment change implemented in this release the Argo CD enables detection of an in-flight state for all Kubernetes resources including `Deployment`, `PVC`, `StatefulSet`, `ReplicaSet` etc. 
So if you terminate the sync operation that has, for example, `StatefulSet` hook that is `Progressing` it will be deleted. The long-running jobs are not supposed to be used as a sync hook and you should consider using -[Sync Waves](https://argoproj.github.io/argo-cd/user-guide/sync-waves/) instead. +[Sync Waves](../../user-guide/sync-waves.md) instead. From here on you can follow the [regular upgrade process](./overview.md). diff --git a/docs/operator-manual/upgrading/1.7-1.8.md b/docs/operator-manual/upgrading/1.7-1.8.md index 95cce7803bcdd..b235320c5aee9 100644 --- a/docs/operator-manual/upgrading/1.7-1.8.md +++ b/docs/operator-manual/upgrading/1.7-1.8.md @@ -6,10 +6,10 @@ The `argocd-application-controller` has been converted to StatefulSet. That mean Similarly if you decided to rollback to v1.7 don't forget to delete `argocd-application-controller` StatefulSet. -## Health assessement of argoproj.io/Application CRD has been removed +## Health assessment of argoproj.io/Application CRD has been removed -The health assessement of `argoproj.io/Application` CRD has been removed (see [#3781](https://github.com/argoproj/argo-cd/issues/3781) for more information). -You might need to restore it if you are using app-of-apps pattern and orchestrating syncronization using sync waves. Add the following resource customization in +The health assessment of `argoproj.io/Application` CRD has been removed (see [#3781](https://github.com/argoproj/argo-cd/issues/3781) for more information). +You might need to restore it if you are using app-of-apps pattern and orchestrating synchronization using sync waves. 
Add the following resource customization in `argocd-cm` ConfigMap: ```yaml @@ -27,16 +27,19 @@ data: argoproj.io/Application: health.lua: | hs = {} - hs.status = "Healthy" + hs.status = "Progressing" hs.message = "" if obj.status ~= nil then if obj.status.health ~= nil then hs.status = obj.status.health.status - hs.message = obj.status.health.message + if obj.status.health.message ~= nil then + hs.message = obj.status.health.message + end end end return hs ``` +> To modify an existing installation with no existing `resources.customizations`, you can save the `data:` stanza to file and patch the configmap with e.g.: `kubectl -n argocd patch configmaps argocd-cm --patch-file argocd-cm-patch.yaml` ## gRPC metrics are disabled by default @@ -44,4 +47,4 @@ The gRPC metrics are not exposed by default by `argocd-server` and `argocd-repo- to be too expensive so we've decided to disable them by default. Metrics can be enabled using `ARGOCD_ENABLE_GRPC_TIME_HISTOGRAM=true` environment variable. -From here on you can follow the [regular upgrade process](./overview.md). \ No newline at end of file +From here on you can follow the [regular upgrade process](./overview.md). diff --git a/docs/operator-manual/upgrading/1.8-2.0.md b/docs/operator-manual/upgrading/1.8-2.0.md index d2d6afe17236e..fa74d15420d3e 100644 --- a/docs/operator-manual/upgrading/1.8-2.0.md +++ b/docs/operator-manual/upgrading/1.8-2.0.md @@ -1,4 +1,4 @@ -# v1.8 to v2.0 +# v1.8 to 2.0 ## Redis Upgraded to v6.2.1 @@ -118,7 +118,7 @@ Helm adds). You can fix this by syncing the Application. If you have existing Charts that require to be rendered using Helm v2, you will need to explicitly configure your Application to use Helm v2 for rendering the chart, as described -[here](../../user-guide/helm.md#helm-version) +[here](../../user-guide/helm.md#helm-version). Please also note that Helm v2 is now being considered deprecated in Argo CD, as it will not receive any updates from the upstream Helm project anymore. 
We will
diff --git a/docs/operator-manual/upgrading/2.0-2.1.md b/docs/operator-manual/upgrading/2.0-2.1.md
new file mode 100644
index 0000000000000..8aa3f95166017
--- /dev/null
+++ b/docs/operator-manual/upgrading/2.0-2.1.md
@@ -0,0 +1,45 @@
+# v2.0 to 2.1
+
+## Upgraded Kustomize Version
+
+Note that bundled Kustomize has been upgraded to v4.2.0. Some of the flags are changed in Kustomize V4.
+For example flag name `load_restrictor` is changed in Kustomize v4+. It is changed from `--load_restrictor=none` to `--load-restrictor LoadRestrictionsNone`.
+
+## Replacing `--app-resync` flag with `timeout.reconciliation` setting
+
+The `--app-resync` flag allows controlling how frequently the Argo CD application controller resolves the target
+application revision of each application. In order to allow caching resolved revision per repository as opposed to per
+application, the `--app-resync` flag has been deprecated. Please use `timeout.reconciliation` setting in `argocd-cm` ConfigMap instead. The value of `timeout.reconciliation` is a duration string e.g. `60s`, `1m`, `1h` or `1d`.
+See example in [argocd-cm.yaml](../argocd-cm.yaml).
+
+From here on you can follow the [regular upgrade process](./overview.md).
+
+## Replacing `repositories` and `repository.credentials` with Secrets
+
+The configuration of repositories and repository credential templates via the `argocd-cm` has been deprecated.
+Repositories and repository credentials are now discovered via Secrets that are labeled with `argocd.argoproj.io/secret-type=repository`
+or `argocd.argoproj.io/secret-type=repo-creds` respectively. See the examples in [argocd-repositories.yaml](../argocd-repositories.yaml)
+and [argocd-repo-creds.yaml](../argocd-repo-creds.yaml).
+
+## The `argocd-util` CLI commands merged into `argocd admin`
+
+The `argocd-util` CLI commands are available under `argocd admin` and the `argocd-util` binary is no longer available.
+
+## Replace runtime system user while [BYOI](../custom_tools.md#byoi-build-your-own-image)
+
+Runtime system user should be changed from `argocd` to `999`, as shown below.
+
+```dockerfile
+FROM argoproj/argocd:latest
+
+# Switch to root for the ability to perform install
+USER root
+
+# Something custom here
+RUN apt-get update
+
+# Switch back to non-root user
+
+# deprecated: USER argocd
+USER 999
+```
diff --git a/docs/operator-manual/upgrading/2.1-2.2.md b/docs/operator-manual/upgrading/2.1-2.2.md
new file mode 100644
index 0000000000000..e2ca3dad17ad5
--- /dev/null
+++ b/docs/operator-manual/upgrading/2.1-2.2.md
@@ -0,0 +1,89 @@
+# v2.1 to 2.2
+
+## Upgraded Helm Version
+
+Note that bundled Helm has been upgraded from 3.6.0 to v3.7+. This includes the following breaking changes:
+
+- Repository credentials are no longer passed to download charts that
+  are being served from a different domain than the repository.
+
+  You can still force older behavior with `--helm-pass-credentials` option to `argocd app create`.
+
+  More information in the [Helm v3.6.1 release notes](https://github.com/helm/helm/releases/tag/v3.6.1).
+
+- Experimental OCI support has been rewritten.
+
+  More information in the [Helm v3.7.0 release notes](https://github.com/helm/helm/releases/tag/v3.7.0).
+
+## Support for private repo SSH keys using the SHA-1 signature hash algorithm is removed in 2.2.12
+
+Argo CD 2.2.12 upgraded its base image from Ubuntu 21.10 to Ubuntu 22.04, which upgraded OpenSSH to 8.9. OpenSSH starting
+with 8.8 [dropped support for the `ssh-rsa` SHA-1 key signature algorithm](https://www.openssh.com/txt/release-8.8).
+
+The signature algorithm is _not_ the same as the algorithm used when generating the key. There is no need to update
+keys.
+
+The signature algorithm is negotiated with the SSH server when the connection is being set up. The client offers its
+list of accepted signature algorithms, and if the server has a match, the connection proceeds.
For most SSH servers on +up-to-date git providers, acceptable algorithms other than `ssh-rsa` should be available. + +Before upgrading to Argo CD 2.2.12, check whether your git provider(s) using SSH authentication support algorithms newer +than `rsa-ssh`. + +1. Make sure your version of SSH >= 8.9 (the version used by Argo CD). If not, upgrade it before proceeding. + + ```shell + ssh -V + ``` + + Example output: `OpenSSH_8.9p1 Ubuntu-3, OpenSSL 3.0.2 15 Mar 2022` + +2. Once you have a recent version of OpenSSH, follow the directions from the [OpenSSH 8.8 release notes](https://www.openssh.com/txt/release-8.7): + + > To check whether a server is using the weak ssh-rsa public key + > algorithm, for host authentication, try to connect to it after + > removing the ssh-rsa algorithm from ssh(1)'s allowed list: + > + > ```shell + > ssh -oHostKeyAlgorithms=-ssh-rsa user@host + > ``` + > + > If the host key verification fails and no other supported host key + > types are available, the server software on that host should be + > upgraded. + + If the server does not support an acceptable version, you will get an error similar to this; + + ``` + $ ssh -oHostKeyAlgorithms=-ssh-rsa vs-ssh.visualstudio.com + Unable to negotiate with 20.42.134.1 port 22: no matching host key type found. Their offer: ssh-rsa + ``` + + This indicates that the server needs to update its supported key signature algorithms, and Argo CD will not connect + to it. + +### Workaround + +The [OpenSSH 8.8 release notes](https://www.openssh.com/txt/release-8.8) describe a workaround if you cannot change the +server's key signature algorithms configuration. + +> Incompatibility is more likely when connecting to older SSH +> implementations that have not been upgraded or have not closely tracked +> improvements in the SSH protocol. 
For these cases, it may be necessary +> to selectively re-enable RSA/SHA1 to allow connection and/or user +> authentication via the HostkeyAlgorithms and PubkeyAcceptedAlgorithms +> options. For example, the following stanza in ~/.ssh/config will enable +> RSA/SHA1 for host and user authentication for a single destination host: +> +> ``` +> Host old-host +> HostkeyAlgorithms +ssh-rsa +> PubkeyAcceptedAlgorithms +ssh-rsa +> ``` +> +> We recommend enabling RSA/SHA1 only as a stopgap measure until legacy +> implementations can be upgraded or reconfigured with another key type +> (such as ECDSA or Ed25519). + +To apply this to Argo CD, you could create a ConfigMap with the desired ssh config file and then mount it at +`/home/argocd/.ssh/config`. diff --git a/docs/operator-manual/upgrading/2.2-2.3.md b/docs/operator-manual/upgrading/2.2-2.3.md new file mode 100644 index 0000000000000..4d7fec93be0c2 --- /dev/null +++ b/docs/operator-manual/upgrading/2.2-2.3.md @@ -0,0 +1,122 @@ +# v2.2 to 2.3 + +## Argo CD Notifications and ApplicationSet Are Bundled into Argo CD + +The Argo CD Notifications and ApplicationSet are part of Argo CD now. You no longer need to install them separately. +The Notifications and ApplicationSet components are bundled into default Argo CD installation manifests. + +The bundled manifests are drop-in replacements for the previous versions. If you are using Kustomize to bundle the manifests together then just +remove references to https://github.com/argoproj-labs/argocd-notifications and https://github.com/argoproj-labs/applicationset. 
+ +If you are using [the argocd-notifications helm chart](https://github.com/argoproj/argo-helm/tree/argocd-notifications-1.8.1/charts/argocd-notifications), you can move the chart [values](https://github.com/argoproj/argo-helm/blob/argocd-notifications-1.8.1/charts/argocd-notifications/values.yaml) to the `notifications` section of the argo-cd chart [values](https://github.com/argoproj/argo-helm/blob/main/charts/argo-cd/values.yaml#L2152). Although most values remain as is, for details please look up the values that are relevant to you. + +No action is required if you are using `kubectl apply`. + +## Configure Additional Argo CD Binaries + +We have removed non-Linux Argo CD binaries (Darwin amd64 and Windows amd64) from the image ([#7668](https://github.com/argoproj/argo-cd/pull/7668)) and the associated download buttons in the help page in the UI. + +Those removed binaries will still be included in the release assets and we made those configurable in [#7755](https://github.com/argoproj/argo-cd/pull/7755). You can add download buttons for other OS architectures by adding the following to your `argocd-cm` ConfigMap: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-cm + namespace: argocd + labels: + app.kubernetes.io/name: argocd-cm + app.kubernetes.io/part-of: argocd +data: + help.download.linux-arm64: "path-or-url-to-download" + help.download.darwin-amd64: "path-or-url-to-download" + help.download.darwin-arm64: "path-or-url-to-download" + help.download.windows-amd64: "path-or-url-to-download" +``` + +## Removed Python from the base image + +If you are using a [Config Management Plugin](../config-management-plugins.md) that relies on Python, you +will need to build a custom image on the Argo CD base to install Python. + +## Upgraded Kustomize Version + +Note that bundled Kustomize version has been upgraded from 4.2.0 to 4.4.1. + +## Upgraded Helm Version + +Note that bundled Helm version has been upgraded from 3.7.1 to 3.8.0. 
+ +## Support for private repo SSH keys using the SHA-1 signature hash algorithm is removed in 2.3.7 + +Argo CD 2.3.7 upgraded its base image from Ubuntu 21.04 to Ubuntu 22.04, which upgraded OpenSSH to 8.9. OpenSSH starting +with 8.8 [dropped support for the `ssh-rsa` SHA-1 key signature algorithm](https://www.openssh.com/txt/release-8.8). + +The signature algorithm is _not_ the same as the algorithm used when generating the key. There is no need to update +keys. + +The signature algorithm is negotiated with the SSH server when the connection is being set up. The client offers its +list of accepted signature algorithms, and if the server has a match, the connection proceeds. For most SSH servers on +up-to-date git providers, acceptable algorithms other than `ssh-rsa` should be available. + +Before upgrading to Argo CD 2.3.7, check whether your git provider(s) using SSH authentication support algorithms newer +than `rsa-ssh`. + +1. Make sure your version of SSH >= 8.9 (the version used by Argo CD). If not, upgrade it before proceeding. + + ```shell + ssh -V + ``` + + Example output: `OpenSSH_8.9p1 Ubuntu-3, OpenSSL 3.0.2 15 Mar 2022` + +2. Once you have a recent version of OpenSSH, follow the directions from the [OpenSSH 8.8 release notes](https://www.openssh.com/txt/release-8.7): + + > To check whether a server is using the weak ssh-rsa public key + > algorithm, for host authentication, try to connect to it after + > removing the ssh-rsa algorithm from ssh(1)'s allowed list: + > + > ```shell + > ssh -oHostKeyAlgorithms=-ssh-rsa user@host + > ``` + > + > If the host key verification fails and no other supported host key + > types are available, the server software on that host should be + > upgraded. + + If the server does not support an acceptable version, you will get an error similar to this; + + ``` + $ ssh -oHostKeyAlgorithms=-ssh-rsa vs-ssh.visualstudio.com + Unable to negotiate with 20.42.134.1 port 22: no matching host key type found. 
Their offer: ssh-rsa + ``` + + This indicates that the server needs to update its supported key signature algorithms, and Argo CD will not connect + to it. + +### Workaround + +The [OpenSSH 8.8 release notes](https://www.openssh.com/txt/release-8.8) describe a workaround if you cannot change the +server's key signature algorithms configuration. + +> Incompatibility is more likely when connecting to older SSH +> implementations that have not been upgraded or have not closely tracked +> improvements in the SSH protocol. For these cases, it may be necessary +> to selectively re-enable RSA/SHA1 to allow connection and/or user +> authentication via the HostkeyAlgorithms and PubkeyAcceptedAlgorithms +> options. For example, the following stanza in ~/.ssh/config will enable +> RSA/SHA1 for host and user authentication for a single destination host: +> +> ``` +> Host old-host +> HostkeyAlgorithms +ssh-rsa +> PubkeyAcceptedAlgorithms +ssh-rsa +> ``` +> +> We recommend enabling RSA/SHA1 only as a stopgap measure until legacy +> implementations can be upgraded or reconfigured with another key type +> (such as ECDSA or Ed25519). + +To apply this to Argo CD, you could create a ConfigMap with the desired ssh config file and then mount it at +`/home/argocd/.ssh/config`. + diff --git a/docs/operator-manual/upgrading/2.3-2.4.md b/docs/operator-manual/upgrading/2.3-2.4.md new file mode 100644 index 0000000000000..6e8c70ff8d9ab --- /dev/null +++ b/docs/operator-manual/upgrading/2.3-2.4.md @@ -0,0 +1,260 @@ +# v2.3 to 2.4 + +## Known Issues + +### Broken `project` filter before 2.4.27 + +Argo CD 2.4.0 introduced a breaking API change, renaming the `project` filter to `projects`. + +#### Impact to API clients + +A similar issue applies to other API clients which communicate with the Argo CD API server via its REST API. If the +client uses the `project` field to filter projects, the filter will not be applied. 
**The failing project filter could
+have detrimental consequences if, for example, you rely on it to list Applications to be deleted.**
+
+#### Impact to CLI clients
+
+CLI clients older than v2.4.0 rely on client-side filtering and are not impacted by this bug.
+
+#### How to fix the problem
+
+Upgrade to Argo CD >=2.4.27, >=2.5.15, or >=2.6.6. This version of Argo CD will accept both `project` and `projects` as
+valid filters.
+
+## KSonnet support is removed
+
+Ksonnet was deprecated in [2019](https://github.com/ksonnet/ksonnet/pull/914/files) and is no longer maintained.
+The time has come to remove it from the Argo CD.
+
+## Helm 2 support is removed
+
+Helm 2 has not been officially supported since [Nov 2020](https://helm.sh/blog/helm-2-becomes-unsupported/). In order to ensure a smooth transition,
+Helm 2 support was preserved in the Argo CD. We feel that Helm 3 is stable, and it is time to drop Helm 2 support.
+
+## Support for private repo SSH keys using the SHA-1 signature hash algorithm is removed
+
+Note: this change was back-ported to 2.3.7 and 2.2.12.
+
+Argo CD 2.4 upgraded its base image from Ubuntu 20.04 to Ubuntu 22.04, which upgraded OpenSSH to 8.9. OpenSSH starting
+with 8.8 [dropped support for the `ssh-rsa` SHA-1 key signature algorithm](https://www.openssh.com/txt/release-8.8).
+
+The signature algorithm is _not_ the same as the algorithm used when generating the key. There is no need to update
+keys.
+
+The signature algorithm is negotiated with the SSH server when the connection is being set up. The client offers its
+list of accepted signature algorithms, and if the server has a match, the connection proceeds. For most SSH servers on
+up-to-date git providers, acceptable algorithms other than `ssh-rsa` should be available.
+
+Before upgrading to Argo CD 2.4, check whether your git provider(s) using SSH authentication support algorithms newer
+than `ssh-rsa`.
+
+1. Make sure your version of SSH >= 8.9 (the version used by Argo CD).
If not, upgrade it before proceeding. + + ```shell + ssh -V + ``` + + Example output: `OpenSSH_8.9p1 Ubuntu-3, OpenSSL 3.0.2 15 Mar 2022` + +2. Once you have a recent version of OpenSSH, follow the directions from the [OpenSSH 8.8 release notes](https://www.openssh.com/txt/release-8.7): + + > To check whether a server is using the weak ssh-rsa public key + > algorithm, for host authentication, try to connect to it after + > removing the ssh-rsa algorithm from ssh(1)'s allowed list: + > + > ```shell + > ssh -oHostKeyAlgorithms=-ssh-rsa user@host + > ``` + > + > If the host key verification fails and no other supported host key + > types are available, the server software on that host should be + > upgraded. + + If the server does not support an acceptable version, you will get an error similar to this; + + ``` + $ ssh -oHostKeyAlgorithms=-ssh-rsa vs-ssh.visualstudio.com + Unable to negotiate with 20.42.134.1 port 22: no matching host key type found. Their offer: ssh-rsa + ``` + + This indicates that the server needs to update its supported key signature algorithms, and Argo CD will not connect + to it. + +### Workaround + +The [OpenSSH 8.8 release notes](https://www.openssh.com/txt/release-8.8) describe a workaround if you cannot change the +server's key signature algorithms configuration. + +> Incompatibility is more likely when connecting to older SSH +> implementations that have not been upgraded or have not closely tracked +> improvements in the SSH protocol. For these cases, it may be necessary +> to selectively re-enable RSA/SHA1 to allow connection and/or user +> authentication via the HostkeyAlgorithms and PubkeyAcceptedAlgorithms +> options. 
For example, the following stanza in ~/.ssh/config will enable +> RSA/SHA1 for host and user authentication for a single destination host: +> +> ``` +> Host old-host +> HostkeyAlgorithms +ssh-rsa +> PubkeyAcceptedAlgorithms +ssh-rsa +> ``` +> +> We recommend enabling RSA/SHA1 only as a stopgap measure until legacy +> implementations can be upgraded or reconfigured with another key type +> (such as ECDSA or Ed25519). + +To apply this to Argo CD, you could create a ConfigMap with the desired ssh config file and then mount it at +`/home/argocd/.ssh/config`. + +## Configure RBAC to account for new `exec` resource + +2.4 introduces a new `exec` [RBAC resource](https://argo-cd.readthedocs.io/en/stable/operator-manual/rbac/#rbac-resources-and-actions). + +When you upgrade to 2.4, RBAC policies with `*` in the resource field and `create` or `*` in the action field will automatically grant the `exec` privilege. + +To avoid granting the new privilege, replace the existing policy with a list of new policies explicitly listing the old resources. + +The exec feature is [disabled by default](https://argo-cd.readthedocs.io/en/stable/operator-manual/rbac/#exec-resource), +but it is still a good idea to double-check your RBAC configuration to enforce least necessary privileges. + +### Example + +Old: + +```csv +p, role:org-admin, *, create, my-proj/*, allow +``` + +New: + +```csv +p, role:org-admin, clusters, create, my-proj/*, allow +p, role:org-admin, projects, create, my-proj/*, allow +p, role:org-admin, applications, create, my-proj/*, allow +p, role:org-admin, repositories, create, my-proj/*, allow +p, role:org-admin, certificates, create, my-proj/*, allow +p, role:org-admin, accounts, create, my-proj/*, allow +p, role:org-admin, gpgkeys, create, my-proj/*, allow +``` + +## Enable logs RBAC enforcement + +2.4 introduced `logs` as a new RBAC resource. In 2.3, users with `applications, get` access automatically get logs +access. 
In 2.5, you will have to explicitly grant `logs, get` access. Logs RBAC enforcement can be enabled with a flag
+in 2.4. We recommend enabling the flag now for an easier upgrade experience in 2.5.
+
+!!! important
+    Logs RBAC enforcement **will not** be enabled by default in 2.5. This decision
+    [was made](https://github.com/argoproj/argo-cd/issues/10551#issuecomment-1242303457) to avoid breaking logs access
+    under [Project Roles](../../user-guide/projects.md#project-roles), which do not provide a mechanism to grant `logs`
+    resource access.
+
+To enable logs RBAC enforcement, add this to your argocd-cm ConfigMap:
+
+```yaml
+server.rbac.log.enforce.enable: "true"
+```
+
+If you want to allow the same users to continue to have logs access, just find every line that grants
+`applications, get` access and also grant `logs, get`.
+
+### Example
+
+Old:
+
+```csv
+p, role:staging-db-admins, applications, get, staging-db-admins/*, allow
+
+p, role:test-db-admins, applications, *, staging-db-admins/*, allow
+```
+
+New:
+
+```csv
+p, role:staging-db-admins, applications, get, staging-db-admins/*, allow
+p, role:staging-db-admins, logs, get, staging-db-admins/*, allow
+
+p, role:test-db-admins, applications, *, staging-db-admins/*, allow
+p, role:test-db-admins, logs, get, staging-db-admins/*, allow
+```
+
+### Pod Logs UI
+
+Since 2.4.9, the LOGS tab in pod view is visible in the UI only for users with explicit allow get logs policy.
+
+### Known pod logs UI issue prior to 2.4.9
+
+Upon pressing the "LOGS" tab in pod view by users who don't have an explicit allow get logs policy, the red "unable to load data: Internal error" is received in the bottom of the screen, and "Failed to load data, please try again" is displayed.
+
+## Test repo-server with its new dedicated Service Account
+
+As a security enhancement, the argocd-repo-server Deployment uses its own Service Account instead of `default`.
+ +If you have a custom environment that might depend on repo-server using the `default` Service Account (such as a plugin +that uses the Service Account for auth), be sure to test before deploying the 2.4 upgrade to production. + +## Plugins + +### Remove the shared volume from any sidecar plugins + +As a security enhancement, [sidecar plugins](../config-management-plugins.md#option-2-configure-plugin-via-sidecar) +no longer share the /tmp directory with the repo-server. + +If you have one or more sidecar plugins enabled, replace the /tmp volume mount for each sidecar to use a volume specific +to each plugin. + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: argocd-repo-server +spec: + template: + spec: + containers: + - name: your-plugin-name + volumeMounts: + - mountPath: /tmp + name: your-plugin-name-tmp + volumes: + # Add this volume. + - name: your-plugin-name-tmp + emptyDir: {} +``` + +### Update plugins to use newly-prefixed environment variables + +If you use plugins that depend on user-supplied environment variables, then they must be updated to be compatible with +Argo CD 2.4. Here is an example of user-supplied environment variables in the `plugin` section of an Application spec: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +spec: + source: + plugin: + env: + - name: FOO + value: bar +``` + +Going forward, all user-supplied environment variables will be prefixed with `ARGOCD_ENV_` before being sent to the +plugin's `init`, `generate`, or `discover` commands. This prevents users from setting potentially-sensitive environment +variables. + +If you have written a custom plugin which handles user-provided environment variables, update it to handle the new +prefix. + +If you use a third-party plugin which does not explicitly advertise Argo CD 2.4 support, it might not handle the +prefixed environment variables. Open an issue with the plugin's authors and confirm support before upgrading to Argo CD +2.4. 
+ +### Confirm sidecar plugins have all necessary environment variables + +A bug in < 2.4 caused `init` and `generate` commands to receive environment variables from the main repo-server +container, taking precedence over environment variables from the plugin's sidecar. + +Starting in 2.4, sidecar plugins will not receive environment variables from the main repo-server container. Make sure +that any environment variables necessary for the sidecar plugin to function are set on the sidecar plugin. + +argocd-cm plugins will continue to receive environment variables from the main repo-server container. diff --git a/docs/operator-manual/upgrading/2.4-2.5.md b/docs/operator-manual/upgrading/2.4-2.5.md new file mode 100644 index 0000000000000..8971c7cd8e3a4 --- /dev/null +++ b/docs/operator-manual/upgrading/2.4-2.5.md @@ -0,0 +1,204 @@ +# v2.4 to 2.5 + +## Known Issues + +### Broken `project` filter before 2.5.15 + +Argo CD 2.4.0 introduced a breaking API change, renaming the `project` filter to `projects`. + +#### Impact to API clients + +A similar issue applies to other API clients which communicate with the Argo CD API server via its REST API. If the +client uses the `project` field to filter projects, the filter will not be applied. **The failing project filter could +have detrimental consequences if, for example, you rely on it to list Applications to be deleted.** + +#### Impact to CLI clients + +CLI clients older that v2.4.0 rely on client-side filtering and are not impacted by this bug. + +#### How to fix the problem + +Upgrade to Argo CD >=2.4.27, >=2.5.15, or >=2.6.6. This version of Argo CD will accept both `project` and `projects` as +valid filters. + +### Broken matrix-nested git files generator in 2.5.14 + +Argo CD 2.5.14 introduced a bug in the matrix-nested git files generator. The bug only applies when the git files +generator is the second generator nested under a matrix. 
For example: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + generators: + - matrix: + generators: + - clusters: {} + - git: + repoURL: https://git.example.com/org/repo.git + revision: HEAD + files: + - path: "defaults/*.yaml" + template: + # ... +``` + +The nested git files generator will produce no parameters, causing the matrix generator to also produce no parameters. +This will cause the ApplicationSet to produce no Applications. If the ApplicationSet controller is +[configured with the ability to delete applications](https://argo-cd.readthedocs.io/en/latest/operator-manual/applicationset/Controlling-Resource-Modification/), +it will delete all Applications which were previously created by the ApplicationSet. + +To avoid this issue, upgrade directly to >=2.5.15 or >= 2.6.6. + +## Configure RBAC to account for new `applicationsets` resource + +2.5 introduces a new `applicationsets` [RBAC resource](https://argo-cd.readthedocs.io/en/stable/operator-manual/rbac/#rbac-resources-and-actions). + +When you upgrade to 2.5, RBAC policies with `*` in the resource field and `create`, `update`, `delete`, `get`, or `*` in the action field will automatically grant the `applicationsets` privilege. + +To avoid granting the new privilege, replace the existing policy with a list of new policies explicitly listing the old resources. + +### Example + +Old: + +```csv +p, role:org-admin, *, create, *, allow +``` + +New: + +```csv +p, role:org-admin, clusters, create, *, allow +p, role:org-admin, projects, create, *, allow +p, role:org-admin, applications, create, *, allow +p, role:org-admin, repositories, create, *, allow +p, role:org-admin, certificates, create, *, allow +p, role:org-admin, accounts, create, *, allow +p, role:org-admin, gpgkeys, create, *, allow +p, role:org-admin, exec, create, *, allow +``` + +(Note that `applicationsets` is missing from the list, to preserve pre-2.5 permissions.) 
+ +## argocd-cm plugins (CMPs) are deprecated + +Starting with Argo CD v2.5, installing config management plugins (CMPs) via the `argocd-cm` ConfigMap is deprecated. +~~Support will be removed in v2.6.~~ Support will be removed in v2.7. + +You can continue to use the plugins by [installing them as sidecars](https://argo-cd.readthedocs.io/en/stable/user-guide/config-management-plugins/) +on the repo-server Deployment. + +Sidecar plugins are significantly more secure. Plugin code runs in its own container with an almost completely-isolated +filesystem. If an attacker compromises a plugin, the attacker's ability to cause harm is significantly mitigated. + +To determine whether argocd-cm plugins are still in use, scan your argocd-repo-server and argocd-server logs for the +following message: + +> argocd-cm plugins are deprecated, and support will be removed in v2.6. Upgrade your plugin to be installed via sidecar. https://argo-cd.readthedocs.io/en/stable/user-guide/config-management-plugins/ + +**NOTE:** removal of argocd-cm plugin support was delayed to v2.7. Update your logs scan to use `v2.7` instead of `v2.6`. + +If you run `argocd app list` as admin, the list of Applications using deprecated plugins will be logged as a warning. + +## Dex server TLS configuration + +In order to secure the communications between the dex server and the Argo CD API server, TLS is now enabled by default on the dex server. + +By default, without configuration, the dex server will generate a self-signed certificate upon startup. However, we recommend that users +configure their own TLS certificate using the `argocd-dex-server-tls` secret. Please refer to the [TLS configuration guide](../tls.md#configuring-tls-to-argocd-dex-server) for more information. + +## Invalid users.session.duration values now fall back to 24h + +Before v2.5, an invalid `users.session.duration` value in argocd-cm would 1) log a warning and 2) result in user sessions having no duration limit. 
+ +Starting with v2.5, invalid duration values will fall back to the default value of 24 hours with a warning. + +## Out-of-bounds symlinks now blocked at fetch + +There have been several path traversal and identification vulnerabilities disclosed in the past related to symlinks. To help prevent any further vulnerabilities, we now scan all repositories and Helm charts for **out of bounds symlinks** at the time they are fetched and block further processing if they are found. + +An out-of-bounds symlink is defined as any symlink that leaves the root of the Git repository or Helm chart, even if the final target is within the root. + +If an out of bounds symlink is found, a warning will be printed to the repo server console and an error will be shown in the UI or CLI. + +Below is an example directory structure showing valid symlinks and invalid symlinks. + +``` +chart +├── Chart.yaml +├── values +│ └── values.yaml +├── bad-link.yaml -> ../out-of-bounds.yaml # Blocked +├── bad-link-2.yaml -> ../chart/values/values.yaml # Blocked because it leaves the root +├── bad-link-3.yaml -> /absolute/link.yaml # Blocked +└── good-link.yaml -> values/values.yaml # OK +``` + +If you rely on out of bounds symlinks, this check can be disabled one of three ways: + +1. The `--allow-oob-symlinks` argument on the repo server. +2. The `reposerver.allow.oob.symlinks` key if you are using `argocd-cmd-params-cm` +3. Directly setting `ARGOCD_REPO_SERVER_ALLOW_OOB_SYMLINKS` environment variable on the repo server. + +It is **strongly recommended** to leave this check enabled. Disabling the check will not allow _all_ out-of-bounds symlinks. Those will still be blocked for things like values files in Helm charts, but symlinks which are not explicitly blocked by other checks will be allowed. 
+ +## Deprecated client-side manifest diffs + +When using `argocd app diff --local`, code from the repo server is run on the user's machine in order to locally generate manifests for comparing against the live manifests of an app. However, this requires that the necessary tools (Helm, Kustomize, etc) are installed with the correct versions. Even worse, it does not support Config Management Plugins (CMPs) whatsoever. + +In order to support CMPs and reduce local requirements, we have implemented *server-side generation* of local manifests via the `--server-side-generate` argument. For example, `argocd app diff --local repoDir --server-side-generate` will upload the contents of `repoDir` to the repo server and run your manifest generation pipeline against it, the same as it would for a Git repo. + +In ~~v2.6~~ v2.7, the `--server-side-generate` argument will become the default, ~~and client-side generation will be removed~~ and client-side generation will be supported as an alternative. + +!!! warning + The semantics of *where* Argo will start generating manifests within a repo has changed between client-side and server-side generation. With client-side generation, the application's path (`spec.source.path`) was ignored and the value of `--local-repo-root` was effectively used (by default `/` relative to `--local`). + + For example, given an application that has an application path of `/manifests`, you would have had to run `argocd app diff --local yourRepo/manifests`. This behavior did not match the repo server's process of downloading the full repo/chart and then beginning generation in the path specified in the application manifest. + + When switching to server-side generation, `--local` should point to the root of your repo *without* including your `spec.source.path`. This is especially important to keep in mind when `--server-side-generate` becomes the default in v2.7. Existing scripts utilizing `diff --local` may break in v2.7 if `spec.source.path` was not `/`. 
+ +## Upgraded Kustomize Version + +The bundled Kustomize version has been upgraded from 4.4.1 to 4.5.7. + +## Upgraded Helm Version + +Note that bundled Helm version has been upgraded from 3.9.0 to 3.10.1. + +## Upgraded HAProxy version + +The HAProxy version in the HA manifests has been upgraded from 2.0.25 to 2.6.2. To read about the changes/improvements, +see the HAProxy major release announcements ([2.1.0](https://www.mail-archive.com/haproxy@formilux.org/msg35491.html), +[2.2.0](https://www.mail-archive.com/haproxy@formilux.org/msg37852.html), +[2.3.0](https://www.mail-archive.com/haproxy@formilux.org/msg38812.html), +[2.4.0](https://www.mail-archive.com/haproxy@formilux.org/msg40499.html), +[2.5.0](https://www.mail-archive.com/haproxy@formilux.org/msg41508.html), and +[2.6.0](https://www.mail-archive.com/haproxy@formilux.org/msg42371.html). + +## Logs RBAC enforcement will remain opt-in + +This note is just for clarity. No action is required. + +We [expected](../upgrading/2.3-2.4.md#enable-logs-rbac-enforcement) to enable logs RBAC enforcement by default in 2.5. +We have decided not to do that in the 2.x series due to disruption for users of [Project Roles](../../user-guide/projects.md#project-roles). + +## `argocd app create` for old CLI versions fails with API version >=2.5.16 + +Starting with Argo CD 2.5.16, the API returns `PermissionDenied` instead of `NotFound` for Application `GET` requests if +the Application does not exist. + +The Argo CD CLI before versions starting with version 2.5.0-rc1 and before versions 2.5.16 and 2.6.7 does a `GET` +request before the `POST` request in `argocd app create`. The command does not gracefully handle the `PermissionDenied` +response and will therefore fail to create/update the Application. + +To solve the issue, upgrade the CLI to at least 2.5.16, or 2.6.7. + +CLIs older than 2.5.0-rc1 are unaffected. + +## Golang upgrade in 2.5.20 + +In 2.5.20, we upgrade the Golang version used to build Argo CD from 1.18 to 1.19. 
If you use Argo CD as a library, you +may need to upgrade your Go version. diff --git a/docs/operator-manual/upgrading/2.5-2.6.md b/docs/operator-manual/upgrading/2.5-2.6.md new file mode 100644 index 0000000000000..57f1373721445 --- /dev/null +++ b/docs/operator-manual/upgrading/2.5-2.6.md @@ -0,0 +1,97 @@ +# v2.5 to 2.6 + +## Known Issues + +### Broken `project` filter before 2.6.6 + +Argo CD 2.4.0 introduced a breaking API change, renaming the `project` filter to `projects`. + +#### Impact to API clients + +A similar issue applies to other API clients which communicate with the Argo CD API server via its REST API. If the +client uses the `project` field to filter projects, the filter will not be applied. **The failing project filter could +have detrimental consequences if, for example, you rely on it to list Applications to be deleted.** + +#### Impact to CLI clients + +CLI clients older that v2.4.0 rely on client-side filtering and are not impacted by this bug. + +#### How to fix the problem + +Upgrade to Argo CD >=2.4.27, >=2.5.15, or >=2.6.6. This version of Argo CD will accept both `project` and `projects` as +valid filters. + +### Broken matrix-nested git files generator in 2.6.5 + +Argo CD 2.6.5 introduced a bug in the matrix-nested git files generator. The bug only applies when the git files +generator is the second generator nested under a matrix. For example: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + generators: + - matrix: + generators: + - clusters: {} + - git: + repoURL: https://git.example.com/org/repo.git + revision: HEAD + files: + - path: "defaults/*.yaml" + template: + # ... +``` + +The nested git files generator will produce no parameters, causing the matrix generator to also produce no parameters. +This will cause the ApplicationSet to produce no Applications. 
If the ApplicationSet controller is +[configured with the ability to delete applications](https://argo-cd.readthedocs.io/en/latest/operator-manual/applicationset/Controlling-Resource-Modification/), +it will delete all Applications which were previously created by the ApplicationSet. + +To avoid this issue, upgrade directly to >=2.5.15 or >= 2.6.6. + +## ApplicationSets: `^` behavior change in Sprig's semver functions +Argo CD 2.5 introduced [Go templating in ApplicationSets](https://argo-cd.readthedocs.io/en/stable/operator-manual/applicationset/GoTemplate/). Go templates have access to the Sprig function library. + +Argo CD 2.6 upgrades Sprig to v3. That upgrade includes an upgrade of [Masterminds/semver](https://github.com/Masterminds/semver/releases) to v3. + +Masterminds/semver v3 changed the behavior of the `^` prefix in semantic version constraints. If you are using Go-templated ApplicationSets which include references to [Sprig's semver functions](https://masterminds.github.io/sprig/semver.html) and use the `^` prefix, read the [Masterminds/semver changelog](https://github.com/Masterminds/semver/releases/tag/v3.0.0) to understand how your ApplicationSets' behavior may change. + +## Applications with suspended jobs now marked "Suspended" instead of "Progressing" +Prior to Argo CD v2.6, an Application managing a suspended Job would be marked as "Progressing". This was confusing/unexpected behavior for many. Starting with v2.6, Argo CD will mark such Applications as "Suspended". + +If you have processes which rely on the previous behavior (for example, a CI job with an argocd app wait call), update those before upgrading to v2.6. + +## The API Server now requires tokens to include the `aud` claim by default + +Argo CD v2.6 now requires that the `aud` claim be present in the token used to authenticate to the API Server. This is a +security improvement, as it prevents tokens from being used against the API Server which were not intended for it. 
+ +If you rely on an OIDC provider which does not provide a `aud` claim, you can disable this requirement by setting the +`skipAudienceCheckWhenTokenHasNoAudience` flag to `true` in your Argo CD OIDC configuration. (See the +[OIDC configuration documentation](https://argo-cd.readthedocs.io/en/stable/operator-manual/user-management/#existing-oidc-provider) +for an example.) + +## Removal of argocd-cm plugin support delayed until 2.7 + +Support for argocd-cm plugins was previously scheduled for 2.6. At the time, sidecar plugins could not be specified by +name. Argo CD v2.6 introduces support for specifying sidecar plugins by name. + +Removal of argocd-cm plugin support has been delayed until 2.7 to provide a transition time for users who need to +specify plugins by name. + +## `argocd app create` for old CLI versions fails with API version >=2.6.7 + +Starting with Argo CD 2.6.7, the API returns `PermissionDenied` instead of `NotFound` for Application `GET` requests if +the Application does not exist. + +The Argo CD CLI before versions starting with version 2.5.0-rc1 and before versions 2.5.16 and 2.6.7 does a `GET` +request before the `POST` request in `argocd app create`. The command does not gracefully handle the `PermissionDenied` +response and will therefore fail to create/update the Application. + +To solve the issue, upgrade the CLI to at least 2.5.16, or 2.6.7. + +CLIs older than 2.5.0-rc1 are unaffected. + diff --git a/docs/operator-manual/upgrading/2.6-2.7.md b/docs/operator-manual/upgrading/2.6-2.7.md new file mode 100644 index 0000000000000..fa7fba02bf1b7 --- /dev/null +++ b/docs/operator-manual/upgrading/2.6-2.7.md @@ -0,0 +1,108 @@ +# v2.6 to 2.7 + +## Configure RBAC to account for new `extensions` resource + +2.7 introduces the new [Proxy Extensions][1] feature with a new `extensions` +[RBAC resource][2]. 
+ +When you upgrade to 2.7, RBAC policies with `*` in the *resource* +field and `*` in the action field, it will automatically grant the +`extensions` privilege. + +The Proxy Extension feature is disabled by default, however it is +recommended to check your RBAC configurations to enforce the least +necessary privileges. + +Example +Old: + +```csv +p, role:org-admin, *, *, *, allow +``` + +New: + +```csv +p, role:org-admin, clusters, create, my-proj/*, allow +p, role:org-admin, projects, create, my-proj/*, allow +p, role:org-admin, applications, create, my-proj/*, allow +p, role:org-admin, repositories, create, my-proj/*, allow +p, role:org-admin, certificates, create, my-proj/*, allow +p, role:org-admin, accounts, create, my-proj/*, allow +p, role:org-admin, gpgkeys, create, my-proj/*, allow +# If you don't want to grant the new permission, don't include the following line +p, role:org-admin, extensions, invoke, my-proj/*, allow +``` + +## Upgraded Helm Version + +Note that bundled Helm version has been upgraded from 3.10.3 to 3.11.2. + +## Upgraded Kustomize Version + +Note that bundled Kustomize version has been upgraded from 4.5.7 to 5.0.1. + +## Notifications: `^` behavior change in Sprig's semver functions +Argo CD 2.7 upgrades Sprig templating specifically within Argo CD notifications to v3. That upgrade includes an upgrade of [Masterminds/semver](https://github.com/Masterminds/semver/releases) to v3. + +Masterminds/semver v3 changed the behavior of the `^` prefix in semantic version constraints. If you are using sprig template functions in your notifications templates which include references to [Sprig's semver functions](https://masterminds.github.io/sprig/semver.html) and use the `^` prefix, read the [Masterminds/semver changelog](https://github.com/Masterminds/semver/releases/tag/v3.0.0) to understand how your notifications' behavior may change. + +## Tini as entrypoint + +The manifests are now using [`tini` as entrypoint][3], instead of `entrypoint.sh`. 
Until 2.8, `entrypoint.sh` is retained for upgrade compatibility. This means that the deployment manifests have to be updated after upgrading to 2.7, and before upgrading to 2.8 later. In case the manifests are updated before moving to 2.8, the containers will not be able to start. + +[1]: ../../developer-guide/extensions/proxy-extensions.md +[2]: https://argo-cd.readthedocs.io/en/stable/operator-manual/rbac/#the-extensions-resource +[3]: https://github.com/argoproj/argo-cd/pull/12707 + + +## Deep Links template updates + +Deep Links now allow you to access other values like `cluster`, `project`, `application` and `resource` in the url and condition templates for specific categories of links. +The templating syntax has also been updated to be prefixed with the type of resource you want to access for example previously if you had a `resource.links` config like : +```yaml + resource.links: | + - url: https://mycompany.splunk.com?search={{.metadata.name}} + title: Splunk + if: kind == "Pod" || kind == "Deployment" +``` +This would become : +```yaml + resource.links: | + - url: https://mycompany.splunk.com?search={{.resource.metadata.name}}&env={{.project.metadata.label.env}} + title: Splunk + if: resource.kind == "Pod" || resource.kind == "Deployment" +``` + +Read the full [documentation](../deep_links.md) to see all possible combinations of values accessible fo each category of links. + +## Support of `helm.sh/resource-policy` annotation + +Argo CD now supports the `helm.sh/resource-policy` annotation to control the deletion of resources. The behavior is the same as the behavior of +`argocd.argoproj.io/sync-options: Delete=false` annotation: if the annotation is present and set to `keep`, the resource will not be deleted +when the application is deleted. + +## Check your Kustomize patches for `--redis` changes + +Starting in Argo CD 2.7, the install manifests no longer pass the Redis server name via `--redis`. 
+ +If your environment uses Kustomize JSON patches to modify the Redis server name, the patch might break when you upgrade +to the 2.7 manifests. If it does, you can remove the patch and instead set the Redis server name via the `redis.server` +field in the argocd-cmd-params-cm ConfigMap. That value will be passed to the necessary components via `valueFrom` +environment variables. + +## `argocd applicationset` CLI incompatibilities for ApplicationSets with list generators + +If you are running Argo CD v2.7.0-2.7.2 server-side, then CLI versions outside that range will incorrectly handle list +generators. That is because the gRPC interface for those versions used the `elements` field number for the new +`elementsYaml` field. + +If you are running the Argo CD CLI versions v2.7.0-2.7.2 with a server-side version of v2.7.3 or later, then the CLI +will send the contents of the `elements` field to the server, which will interpret it as the `elementsYaml` field. This +will cause the ApplicationSet to fail at runtime with an error similar to this: + +``` +error unmarshling decoded ElementsYaml error converting YAML to JSON: yaml: control characters are not allowed +``` + +Be sure to use CLI version v2.7.3 or later with server-side version v2.7.3 or later. diff --git a/docs/operator-manual/upgrading/2.7-2.8.md b/docs/operator-manual/upgrading/2.7-2.8.md new file mode 100644 index 0000000000000..1e403bf981ab4 --- /dev/null +++ b/docs/operator-manual/upgrading/2.7-2.8.md @@ -0,0 +1,73 @@ +# v2.7 to 2.8 + +## Support dropped for argocd-cm plugins + +Config Management Plugins installed via the argocd-cm ConfigMap will not work starting with v2.8. + +See the [migration guide](https://argo-cd.readthedocs.io/en/stable/operator-manual/config-management-plugins/#migrating-from-argocd-cm-plugins) +to upgrade your plugin. 
+ +## Tini as entrypoint + +With the 2.8 release `entrypoint.sh` will be removed from the containers, +because starting with 2.7, the implicit entrypoint is set to `tini` in the +`Dockerfile` explicitly, and the kubernetes manifests has been updated to use +it. Simply updating the containers without updating the deployment manifests +will result in pod startup failures, as the old manifests are relying on +`entrypoint.sh` instead of `tini`. Please make sure the manifests are updated +properly before moving to 2.8. + +## Filtering applied to cluster `List` API endpoint + +Prior to `v2.8`, the `List` endpoint on the `ClusterService` did **not** filter +clusters when responding, despite accepting query parameters. This bug has +been addressed, and query parameters are now taken into account to filter the +resulting list of clusters. + +## Configure RBAC to account for new actions + +2.8 introduces three new actions: + +* Create a Job from a CronJob +* Create a Workflow from a CronWorkflow +* Create a Workflow from a WorkflowTemplate + +When you upgrade to 2.8, RBAC policies with `applications` in the *resource* +field and `*` or `action/*` in the action field, it will automatically grant the +ability to use these new actions. + +If you would like to avoid granting these new permissions, you can update your RBAC policies to be more specific. 
+ +### Example + +Old: + +```csv +p, role:action-runner, applications, actions/, *, allow +``` + +New: + +```csv +p, role:action-runner, applications, action/argoproj.io/Rollout/abort, *, allow +p, role:action-runner, applications, action/argoproj.io/Rollout/promote-full, *, allow +p, role:action-runner, applications, action/argoproj.io/Rollout/retry, *, allow +p, role:action-runner, applications, action/argoproj.io/Rollout/resume, *, allow +p, role:action-runner, applications, action/argoproj.io/Rollout/restart, *, allow +p, role:action-runner, applications, action/argoproj.io/AnalysisRun/terminate, *, allow +p, role:action-runner, applications, action/apps/DaemonSet/restart, *, allow +p, role:action-runner, applications, action/apps/StatefulSet/restart, *, allow +p, role:action-runner, applications, action/apps/Deployment/pause, *, allow +p, role:action-runner, applications, action/apps/Deployment/resume, *, allow +p, role:action-runner, applications, action/apps/Deployment/restart, *, allow + +# If you don't want to grant the new permissions, don't include the following lines +p, role:action-runner, applications, action/argoproj.io/WorkflowTemplate/create-workflow, *, allow +p, role:action-runner, applications, action/argoproj.io/CronWorkflow/create-workflow, *, allow +p, role:action-runner, applications, action/batch/CronJob/create-job, *, allow +``` + +## Change default file open mode + +In version 2.7, the CMP plugin was changed to open Git/Helm files with all executable bits set (unless `preserveFileMode` was specified). +Version 2.8 removes the executable bits in cases where they are not necessary. diff --git a/docs/operator-manual/upgrading/overview.md b/docs/operator-manual/upgrading/overview.md index 2e4fd464b36c0..419fc7bbb1353 100644 --- a/docs/operator-manual/upgrading/overview.md +++ b/docs/operator-manual/upgrading/overview.md @@ -3,7 +3,7 @@ !!!note This section contains information on upgrading Argo CD. 
Before upgrading please make sure to read details about - the breaking changes between Argo CD versions. + the breaking changes between Argo CD versions. Argo CD uses the semver versioning and ensures that following rules: @@ -16,7 +16,7 @@ please make sure to check upgrading details in both [v1.3 to v1.4](./1.3-1.4.md Argo CD settings using disaster recovery [guide](../disaster_recovery.md). After reading the relevant notes about possible breaking changes introduced in Argo CD version use the following -command to upgrade Argo CD. Make sure to replace `` with the required version number: +command to upgrade Argo CD. Make sure to replace `` with the required version number: **Non-HA**: @@ -33,15 +33,24 @@ kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/ -* [v1.7 to v1.8](./1.7-1.8.md) -* [v1.6 to v1.7](./1.6-1.7.md) -* [v1.5 to v1.6](./1.5-1.6.md) -* [v1.4 to v1.5](./1.4-1.5.md) -* [v1.3 to v1.4](./1.3-1.4.md) +* [v2.7 to v2.8](./2.7-2.8.md) +* [v2.6 to v2.7](./2.6-2.7.md) +* [v2.5 to v2.6](./2.5-2.6.md) +* [v2.4 to v2.5](./2.4-2.5.md) +* [v2.3 to v2.4](./2.3-2.4.md) +* [v2.2 to v2.3](./2.2-2.3.md) +* [v2.1 to v2.2](./2.1-2.2.md) +* [v2.0 to v2.1](./2.0-2.1.md) +* [v1.8 to v2.0](./1.8-2.0.md) +* [v1.7 to v1.8](./1.7-1.8.md) +* [v1.6 to v1.7](./1.6-1.7.md) +* [v1.5 to v1.6](./1.5-1.6.md) +* [v1.4 to v1.5](./1.4-1.5.md) +* [v1.3 to v1.4](./1.3-1.4.md) * [v1.2 to v1.3](./1.2-1.3.md) -* [v1.1 to v1.2](./1.1-1.2.md) -* [v1.0 to v1.1](./1.0-1.1.md) +* [v1.1 to v1.2](./1.1-1.2.md) +* [v1.0 to v1.1](./1.0-1.1.md) diff --git a/docs/operator-manual/user-management/auth0.md b/docs/operator-manual/user-management/auth0.md index b9d397b1214cb..411517df05e06 100644 --- a/docs/operator-manual/user-management/auth0.md +++ b/docs/operator-manual/user-management/auth0.md @@ -12,7 +12,7 @@ Follow the [register app](https://auth0.com/docs/dashboard/guides/applications/r * Take note of the _clientId_ and _clientSecret_ values. 
* Register login url as https://your.argoingress.address/login * Set allowed callback url to https://your.argoingress.address/auth/callback -* Under connections, select the user-registries you want to use with argo +* Under connections, select the user-registries you want to use with argo. Any other settings are non-essential for the authentication to work. @@ -70,4 +70,4 @@ data:
!!! note "Storing Client Secrets" - Details on storing your clientSecret securely and correctly can be found on the [User Management Overview page](../../user-management/#sensitive-data-and-sso-client-secrets). \ No newline at end of file + Details on storing your clientSecret securely and correctly can be found on the [User Management Overview page](index.md#sensitive-data-and-sso-client-secrets). diff --git a/docs/operator-manual/user-management/google.md b/docs/operator-manual/user-management/google.md index 354f93d4fac64..7113e51018ca2 100644 --- a/docs/operator-manual/user-management/google.md +++ b/docs/operator-manual/user-management/google.md @@ -1,6 +1,13 @@ # Google -* [G Suite SAML App Auth using Dex](#g-suite-saml-app-auth-using-dex) +There are three different ways to integrate Argo CD login with your Google Workspace users. Generally the OpenID Connect (_oidc_) method would be the recommended way of doing this integration (and easier, as well...), but depending on your needs, you may choose a different option. + +* [OpenID Connect using Dex](#openid-connect-using-dex) + This is the recommended login method if you don't need information about the groups the user's belongs to. Google doesn't expose the `groups` claim via _oidc_, so you won't be able to use Google Groups membership information for RBAC. +* [SAML App Auth using Dex](#saml-app-auth-using-dex) + Dex [recommends avoiding this method](https://dexidp.io/docs/connectors/saml/#warning). Also, you won't get Google Groups membership information through this method. +* [OpenID Connect plus Google Groups using Dex](#openid-connect-plus-google-groups-using-dex) + This is the recommended method if you need to use Google Groups membership in your RBAC configuration. Once you've set up one of the above integrations, be sure to edit `argo-rbac-cm` to configure permissions (as in the example below). See [RBAC Configurations](../rbac.md) for more detailed scenarios. 
@@ -14,10 +21,66 @@ data: policy.default: role:readonly ``` -## G Suite SAML App Auth using Dex +## OpenID Connect using Dex + +### Configure your OAuth consent screen + +If you've never configured this, you'll be redirected straight to this if you try to create an OAuth Client ID + +1. Go to your [OAuth Consent](https://console.cloud.google.com/apis/credentials/consent) configuration. If you still haven't created one, select `Internal` or `External` and click `Create` +2. Go and [edit your OAuth consent screen](https://console.cloud.google.com/apis/credentials/consent/edit) Verify you're in the correct project! +3. Configure a name for your login app and a user support email address +4. The app logo and filling the information links is not mandatory, but it's a nice touch for the login page +5. In "Authorized domains" add the domains who are allowed to log in to ArgoCD (e.g. if you add `example.com`, all Google Workspace users with an `@example.com` address will be able to log in) +6. Save to continue to the "Scopes" section +7. Click on "Add or remove scopes" and add the `.../auth/userinfo.profile` and the `openid` scopes +8. Save, review the summary of your changes and finish + +### Configure a new OAuth Client ID + +1. Go to your [Google API Credentials](https://console.cloud.google.com/apis/credentials) console, and make sure you're in the correct project. +2. Click on "+Create Credentials"/"OAuth Client ID" +3. Select "Web Application" in the Application Type drop down menu, and enter an identifying name for your app (e.g. `Argo CD`) +4. Fill "Authorized JavaScript origins" with your Argo CD URL, e.g. `https://argocd.example.com` +5. Fill "Authorized redirect URIs" with your Argo CD URL plus `/api/dex/callback`, e.g. `https://argocd.example.com/api/dex/callback` + + ![](../../assets/google-admin-oidc-uris.png) + +6. 
Click "Create" and save your "Client ID" and your "Client Secret" for later + +### Configure Argo to use OpenID Connect + +Edit `argocd-cm` and add the following `dex.config` to the data section, replacing `clientID` and `clientSecret` with the values you saved before: + +```yaml +data: + url: https://argocd.example.com + dex.config: | + connectors: + - config: + issuer: https://accounts.google.com + clientID: XXXXXXXXXXXXX.apps.googleusercontent.com + clientSecret: XXXXXXXXXXXXX + type: oidc + id: google + name: Google +``` + +### References + +- [Dex oidc connector docs](https://dexidp.io/docs/connectors/oidc/) + +## SAML App Auth using Dex ### Configure a new SAML App +--- +!!! warning "Deprecation Warning" + + Note that, according to [Dex documentation](https://dexidp.io/docs/connectors/saml/#warning), SAML is considered unsafe and they are planning to deprecate that module. + +--- + 1. In the [Google admin console](https://admin.google.com), open the left-side menu and select `Apps` > `SAML Apps` ![Google Admin Apps Menu](../../assets/google-admin-saml-apps-menu.png "Google Admin menu with the Apps / SAML Apps path selected") @@ -26,7 +89,7 @@ data: ![Google Admin Add Custom SAML App](../../assets/google-admin-saml-add-app-menu.png "Add apps menu with add custom SAML app highlighted") -3. Enter a `Name` for the application (eg. `Argo CD`), then choose `Continue` +3. Enter a `Name` for the application (e.g. `Argo CD`), then choose `Continue` ![Google Admin Apps Menu](../../assets/google-admin-saml-app-details.png "Add apps menu with add custom SAML app highlighted") @@ -44,13 +107,13 @@ data: 6. Add SAML Attribute Mapping, Map `Primary email` to `name` and `Primary Email` to `email`. and click `ADD MAPPING` button. 
- ![Google Admin SAML Attribute Mapping Details](../../assets/google-admin-saml-attribute-mapping-details.png "A screenshot of the Google Admin SAML Attribut Mapping Details") + ![Google Admin SAML Attribute Mapping Details](../../assets/google-admin-saml-attribute-mapping-details.png "A screenshot of the Google Admin SAML Attribute Mapping Details") 7. Finish creating the application. ### Configure Argo to use the new Google SAML App -Edit `argo-cm` and add the following `dex.config` to the data section, replacing the `caData`, `argocd.example.com`, `sso-url`, and optionally `google-entity-id` with your values from the Google SAML App: +Edit `argocd-cm` and add the following `dex.config` to the data section, replacing the `caData`, `argocd.example.com`, `sso-url`, and optionally `google-entity-id` with your values from the Google SAML App: ```yaml data: @@ -61,7 +124,7 @@ data: id: saml name: saml config: - ssoURL: https://sso-url (eg. https://accounts.google.com/o/saml2/idp?idpid=Abcde0) + ssoURL: https://sso-url (e.g. https://accounts.google.com/o/saml2/idp?idpid=Abcde0) entityIssuer: https://argocd.example.com/api/dex/callback caData: | BASE64-ENCODED-CERTIFICATE-DATA @@ -76,3 +139,97 @@ data: - [Dex SAML connector docs](https://dexidp.io/docs/connectors/saml/) - [Google's SAML error messages](https://support.google.com/a/answer/6301076?hl=en) + +## OpenID Connect plus Google Groups using Dex + +--- +!!! warning "Limited group information" + + When using this feature you'll only receive the list of groups the user is a direct member. 
+ + So, let's say you have this hierarchy of groups and subgroups: + `all@example.com --> tech@example.com --> devs@example.com --> you@example.com` + The only group you would receive through Dex would be `devs@example.com` + +--- + +We're going to use Dex's `google` connector to get additional Google Groups information from your users, allowing you to use group membership in your RBAC, e.g., giving `admin` role to the whole `sysadmins@yourcompany.com` group. + +This connector uses two different credentials: + +- An oidc client ID and secret + Same as when you're configuring an [OpenID connection](#openid-connect-using-dex), this authenticates your users +- A Google service account + This is used to connect to the Google Directory API and pull information about your user's group membership + +Also, you'll need the email address for an admin user on this domain. Dex will impersonate that user identity to fetch user information from the API. + +### Configure OpenID Connect + +Go through the same steps as in [OpenID Connect using Dex](#openid-connect-using-dex), except for configuring `argocd-cm`. We'll do that later. + +### Set up Directory API access + +1. Follow [Google instructions to create a service account with Domain-Wide Delegation](https://developers.google.com/admin-sdk/directory/v1/guides/delegation) + - When assigning API scopes to the service account assign **only** the `https://www.googleapis.com/auth/admin.directory.group.readonly` scope and nothing else. If you assign any other scopes, you won't be able to fetch information from the API + - Create the credentials in JSON format and store them in a safe place, we'll need them later +2. Enable the [Admin SDK](https://console.developers.google.com/apis/library/admin.googleapis.com/) + +### Configure Dex + +1. 
Create a secret with the contents of the previous json file encoded in base64, like this: + + apiVersion: v1 + kind: Secret + metadata: + name: argocd-google-groups-json + namespace: argocd + data: + googleAuth.json: JSON_FILE_BASE64_ENCODED + +2. Edit your `argocd-dex-server` deployment to mount that secret as a file + - Add a volume mount in `/spec/template/spec/containers/0/volumeMounts/` like this. Be aware of editing the running container and not the init container! + + volumeMounts: + - mountPath: /shared + name: static-files + - mountPath: /tmp + name: dexconfig + - mountPath: /tmp/oidc + name: google-json + readOnly: true + + - Add a volume in `/spec/template/spec/volumes/` like this: + + volumes: + - emptyDir: {} + name: static-files + - emptyDir: {} + name: dexconfig + - name: google-json + secret: + defaultMode: 420 + secretName: argocd-google-groups-json + +3. Edit `argocd-cm` and add the following `dex.config` to the data section, replacing `clientID` and `clientSecret` with the values you saved before, `adminEmail` with the address for the admin user you're going to impersonate, and editing `redirectURI` with your Argo CD domain: + + dex.config: | + connectors: + - config: + redirectURI: https://argocd.example.com/api/dex/callback + clientID: XXXXXXXXXXXXX.apps.googleusercontent.com + clientSecret: XXXXXXXXXXXXX + serviceAccountFilePath: /tmp/oidc/googleAuth.json + adminEmail: admin-email@example.com + type: google + id: google + name: Google + +4. Restart your `argocd-dex-server` deployment to be sure it's using the latest configuration +5. Login to Argo CD and go to the "User info" section, where you should see the groups you're a member of + ![User info](../../assets/google-groups-membership.png) +6. 
Now you can use groups email addresses to give RBAC permissions + +### References + +- [Dex Google connector docs](https://dexidp.io/docs/connectors/google/) diff --git a/docs/operator-manual/user-management/index.md b/docs/operator-manual/user-management/index.md index 8d976f96c18bf..8c3f2e169597c 100644 --- a/docs/operator-manual/user-management/index.md +++ b/docs/operator-manual/user-management/index.md @@ -3,7 +3,7 @@ Once installed Argo CD has one built-in `admin` user that has full access to the system. It is recommended to use `admin` user only for initial configuration and then switch to local users or configure SSO integration. -## Local users/accounts (v1.5) +## Local users/accounts The local users/accounts feature serves two main use-cases: @@ -44,6 +44,24 @@ Each user might have two capabilities: * apiKey - allows generating authentication tokens for API access * login - allows to login using UI +### Delete user + +In order to delete a user, you must remove the corresponding entry defined in the `argocd-cm` ConfigMap: + +Example: + +```bash +kubectl patch -n argocd cm argocd-cm --type='json' -p='[{"op": "remove", "path": "/data/accounts.alice"}]' +``` + +It is recommended to also remove the password entry in the `argocd-secret` Secret: + +Example: + +```bash +kubectl patch -n argocd secrets argocd-secret --type='json' -p='[{"op": "remove", "path": "/data/accounts.alice.password"}]' +``` + ### Disable admin user As soon as additional users are created it is recommended to disable `admin` user: @@ -72,14 +90,15 @@ argocd account list * Get specific user details ```bash -argocd account get +argocd account get --account ``` * Set user password ```bash +# if you are managing users as the admin user, should be the current admin password. 
argocd account update-password \ --account \ - --current-password \ + --current-password \ --new-password ``` @@ -94,7 +113,7 @@ argocd account generate-token --account Argo CD rejects login attempts after too many failed in order to prevent password brute-forcing. The following environments variables are available to control throttling settings: -* `ARGOCD_SESSION_MAX_FAIL_COUNT`: Maximum number of failed logins before Argo CD starts +* `ARGOCD_SESSION_FAILURE_MAX_FAIL_COUNT`: Maximum number of failed logins before Argo CD starts rejecting login attempts. Default: 5. * `ARGOCD_SESSION_FAILURE_WINDOW_SECONDS`: Number of seconds for the failure window. @@ -114,7 +133,8 @@ There are two ways that SSO can be configured: * [Bundled Dex OIDC provider](#dex) - use this option if your current provider does not support OIDC (e.g. SAML, LDAP) or if you wish to leverage any of Dex's connector features (e.g. the ability to map GitHub - organizations and teams to OIDC groups claims). + organizations and teams to OIDC groups claims). Dex also supports OIDC directly and can fetch user + information from the identity provider when the groups cannot be included in the IDToken. * [Existing OIDC provider](#existing-oidc-provider) - use this if you already have an OIDC provider which you are using (e.g. 
[Okta](okta.md), [OneLogin](onelogin.md), [Auth0](auth0.md), [Microsoft](microsoft.md), [Keycloak](keycloak.md), @@ -172,7 +192,7 @@ data: name: GitHub config: clientID: aabbccddeeff00112233 - clientSecret: $dex.github.clientSecret + clientSecret: $dex.github.clientSecret # Alternatively $:dex.github.clientSecret orgs: - name: your-github-org @@ -183,7 +203,7 @@ data: config: hostName: github.acme.com clientID: abcdefghijklmnopqrst - clientSecret: $dex.acme.clientSecret + clientSecret: $dex.acme.clientSecret # Alternatively $:dex.acme.clientSecret orgs: - name: your-github-org ``` @@ -195,10 +215,99 @@ NOTES: * There is no need to set `redirectURI` in the `connectors.config` as shown in the dex documentation. Argo CD will automatically use the correct `redirectURI` for any OAuth2 connectors, to match the correct external callback URL (e.g. `https://argocd.example.com/api/dex/callback`) +* When using a custom secret (e.g., `some_K8S_secret` above,) it *must* have the label `app.kubernetes.io/part-of: argocd`. + +## OIDC Configuration with DEX + +Dex can be used for OIDC authentication instead of ArgoCD directly. This provides a separate set of +features such as fetching information from the `UserInfo` endpoint and +[federated tokens](https://dexidp.io/docs/custom-scopes-claims-clients/#cross-client-trust-and-authorized-party) + +### Configuration: +* In the `argocd-cm` ConfigMap add the `OIDC` connector to the `connectors` sub field inside `dex.config`. +See Dex's [OIDC connect documentation](https://dexidp.io/docs/connectors/oidc/) to see what other +configuration options might be useful. We're going to be using a minimal configuration here. +* The issuer URL should be where Dex talks to the OIDC provider. There would normally be a +`.well-known/openid-configuration` under this URL which has information about what the provider supports. +e.g. 
https://accounts.google.com/.well-known/openid-configuration + + +```yaml +data: + url: "https://argocd.example.com" + dex.config: | + connectors: + # OIDC + - type: oidc + id: oidc + name: OIDC + config: + issuer: https://example-OIDC-provider.com + clientID: aaaabbbbccccddddeee + clientSecret: $dex.oidc.clientSecret +``` + +### Requesting additional ID token claims + +By default Dex only retrieves the profile and email scopes. In order to retrieve more claims you +can add them under the `scopes` entry in the Dex configuration. To enable group claims through Dex, +`insecureEnableGroups` also needs to be enabled. Group information is currently only refreshed at authentication +time and support to refresh group information more dynamically can be tracked here: [dexidp/dex#1065](https://github.com/dexidp/dex/issues/1065). + +```yaml +data: + url: "https://argocd.example.com" + dex.config: | + connectors: + # OIDC + - type: OIDC + id: oidc + name: OIDC + config: + issuer: https://example-OIDC-provider.com + clientID: aaaabbbbccccddddeee + clientSecret: $dex.oidc.clientSecret + insecureEnableGroups: true + scopes: + - profile + - email + - groups +``` + +!!! warning + Because group information is only refreshed at authentication time, just adding or removing an account from a group will not change a user's membership until they reauthenticate. Depending on your organization's needs this could be a security risk and could be mitigated by changing the authentication token's lifetime. + +### Retrieving claims that are not in the token + +When an IdP does not or cannot support certain claims in an IDToken they can be retrieved separately using +the UserInfo endpoint. Dex supports this functionality using the `getUserInfo` endpoint. One of the most +common claims that is not supported in the IDToken is the `groups` claim and both `getUserInfo` and `insecureEnableGroups` +must be set to true. 
+ +```yaml +data: + url: "https://argocd.example.com" + dex.config: | + connectors: + # OIDC + - type: OIDC + id: oidc + name: OIDC + config: + issuer: https://example-OIDC-provider.com + clientID: aaaabbbbccccddddeee + clientSecret: $dex.oidc.clientSecret + insecureEnableGroups: true + scopes: + - profile + - email + - groups + getUserInfo: true +``` ## Existing OIDC Provider -To configure Argo CD to delegate authenticate to your existing OIDC provider, add the OAuth2 +To configure Argo CD to delegate authentication to your existing OIDC provider, add the OAuth2 configuration to the `argocd-cm` ConfigMap under the `oidc.config` key: ```yaml @@ -210,6 +319,19 @@ data: issuer: https://dev-123456.oktapreview.com clientID: aaaabbbbccccddddeee clientSecret: $oidc.okta.clientSecret + + # Optional list of allowed aud claims. If omitted or empty, defaults to the clientID value above (and the + # cliClientID, if that is also specified). If you specify a list and want the clientID to be allowed, you must + # explicitly include it in the list. + # Token verification will pass if any of the token's audiences matches any of the audiences in this list. + allowedAudiences: + - aaaabbbbccccddddeee + - qqqqwwwweeeerrrrttt + + # Optional. If false, tokens without an audience will always fail validation. If true, tokens without an audience + # will always pass validation. + # Defaults to true for Argo CD < 2.6.0. Defaults to false for Argo CD >= 2.6.0. + skipAudienceCheckWhenTokenHasNoAudience: true # Optional set of OIDC scopes to request. If omitted, defaults to: ["openid", "profile", "email", "groups"] requestedScopes: ["openid", "profile", "email", "groups"] @@ -286,18 +408,36 @@ You are not required to specify a logoutRedirectURL as this is automatically gen !!! note The post logout redirect URI may need to be whitelisted against your OIDC provider's client settings for ArgoCD. 
+### Configuring a custom root CA certificate for communicating with the OIDC provider + +If your OIDC provider is setup with a certificate which is not signed by one of the well known certificate authorities +you can provide a custom certificate which will be used in verifying the OIDC provider's TLS certificate when +communicating with it. +Add a `rootCA` to your `oidc.config` which contains the PEM encoded root certificate: + +```yaml + oidc.config: | + ... + rootCA: | + -----BEGIN CERTIFICATE----- + ... encoded certificate data here ... + -----END CERTIFICATE----- +``` ## SSO Further Reading ### Sensitive Data and SSO Client Secrets -You can use the `argocd-secret` to store any sensitive data. ArgoCD knows to check the keys under `data` in the `argocd-secret` secret for a corresponding key whenever a value in a configmap starts with `$`. This can be used to store things such as your `clientSecret`. +`argocd-secret` can be used to store sensitive data which can be referenced by ArgoCD. Values starting with `$` in configmaps are interpreted as follows: -Data should be base64 encoded before it is added to `argocd-secret`. You can do so by running `printf RAW_SECRET_STRING | base64`. +- If value has the form: `$:a.key.in.k8s.secret`, look for a k8s secret with the name `` (minus the `$`), and read its value. +- Otherwise, look for a key in the k8s secret named `argocd-secret`. #### Example +SSO `clientSecret` can thus be stored as a kubernetes secret with the following manifests + `argocd-secret`: ```yaml apiVersion: v1 @@ -309,6 +449,55 @@ metadata: app.kubernetes.io/name: argocd-secret app.kubernetes.io/part-of: argocd type: Opaque +data: + ... + # The secret value must be base64 encoded **once** + # this value corresponds to: `printf "hello-world" | base64` + oidc.auth0.clientSecret: "aGVsbG8td29ybGQ=" + ... 
+``` + +`argocd-cm`: +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-cm + namespace: argocd + labels: + app.kubernetes.io/name: argocd-cm + app.kubernetes.io/part-of: argocd +data: + ... + oidc.config: | + name: Auth0 + clientID: aabbccddeeff00112233 + + # Reference key in argocd-secret + clientSecret: $oidc.auth0.clientSecret + ... +``` + +#### Alternative + +If you want to store sensitive data in **another** Kubernetes `Secret`, instead of `argocd-secret`. ArgoCD knows to check the keys under `data` in your Kubernetes `Secret` for a corresponding key whenever a value in a configmap starts with `$`, then your Kubernetes `Secret` name and `:` (colon). + +Syntax: `$:` + +> NOTE: Secret must have label `app.kubernetes.io/part-of: argocd` + +##### Example + +`another-secret`: +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: another-secret + namespace: argocd + labels: + app.kubernetes.io/part-of: argocd +type: Opaque data: ... # Store client secret like below. @@ -332,7 +521,24 @@ data: oidc.config: | name: Auth0 clientID: aabbccddeeff00112233 - # Reference key in argocd-secret - clientSecret: $oidc.auth0.clientSecret + # Reference key in another-secret (and not argocd-secret) + clientSecret: $another-secret:oidc.auth0.clientSecret # Mind the ':' ... ``` + +### Skipping certificate verification on OIDC provider connections + +By default, all connections made by the API server to OIDC providers (either external providers or the bundled Dex +instance) must pass certificate validation. These connections occur when getting the OIDC provider's well-known +configuration, when getting the OIDC provider's keys, and when exchanging an authorization code or verifying an ID +token as part of an OIDC login flow. 
+ +Disabling certificate verification might make sense if: +* You are using the bundled Dex instance **and** your Argo CD instance has TLS configured with a self-signed certificate + **and** you understand and accept the risks of skipping OIDC provider cert verification. +* You are using an external OIDC provider **and** that provider uses an invalid certificate **and** you cannot solve + the problem by setting `oidcConfig.rootCA` **and** you understand and accept the risks of skipping OIDC provider cert + verification. + +If either of those two applies, then you can disable OIDC provider certificate verification by setting +`oidc.tls.insecure.skip.verify` to `"true"` in the `argocd-cm` ConfigMap. diff --git a/docs/operator-manual/user-management/keycloak.md b/docs/operator-manual/user-management/keycloak.md index 84c6bc1d0ea85..6f0c99de0dec2 100644 --- a/docs/operator-manual/user-management/keycloak.md +++ b/docs/operator-manual/user-management/keycloak.md @@ -9,17 +9,24 @@ to determine privileges in Argo. ## Creating a new client in Keycloak First we need to setup a new client. Start by logging into your keycloak server, select the realm you want to use (`master` by default) -and then go to __Clients__ and click the __create__ button top right. +and then go to __Clients__ and click the __Create client__ button at the top. ![Keycloak add client](../../assets/keycloak-add-client.png "Keycloak add client") -Configure the client by setting the __Access Type__ to _confidential_ and set the Valid Redirect URIs to the callback url for your ArgoCD -hostname. It should be https://{hostname}/auth/callback (you can also leave the default less secure https://{hostname}/* ). You can also set the -__Base URL__ to _/applications_. +Enable the __Client authentication__. 
+ +![Keycloak add client Step 2](../../assets/keycloak-add-client_2.png "Keycloak add client Step 2") + +Configure the client by setting the __Root URL__, __Web origins__, __Admin URL__ to the hostname (https://{hostname}). + +Also you can set __Home URL__ to your _/applications_ path and __Valid Post logout redirect URIs__ to "+". + +The Valid Redirect URIs should be set to https://{hostname}/auth/callback (you can also set the less secure https://{hostname}/* for testing/development purposes, +but it's not recommended in production). ![Keycloak configure client](../../assets/keycloak-configure-client.png "Keycloak configure client") -Make sure to click __Save__. You should now have a new tab called __Credentials__. You can copy the Secret that we'll use in our ArgoCD +Make sure to click __Save__. There should be a tab called __Credentials__. You can copy the Secret that we'll use in our ArgoCD configuration. ![Keycloak client secret](../../assets/keycloak-client-secret.png "Keycloak client secret") @@ -32,21 +39,18 @@ To do this we'll start by creating a new __Client Scope__ called _groups_. ![Keycloak add scope](../../assets/keycloak-add-scope.png "Keycloak add scope") Once you've created the client scope you can now add a Token Mapper which will add the groups claim to the token when the client requests -the groups scope. Make sure to set the __Name__ as well as the __Token Claim Name__ to _groups_. +the groups scope. In the Tab "Mappers", click on "Configure a new mapper" and choose __Group Membership__. +Make sure to set the __Name__ as well as the __Token Claim Name__ to _groups_. Also disable the "Full group path". ![Keycloak groups mapper](../../assets/keycloak-groups-mapper.png "Keycloak groups mapper") -We can now configure the client to provide the _groups_ scope. You can now assign the _groups_ scope either to the __Assigned Default Client Scopes__ -or to the __Assigned Optional Client Scopes__. 
If you put it in the Optional category you will need to make sure that ArgoCD requests the scope in -it's OIDC configuration. +We can now configure the client to provide the _groups_ scope. Go back to the client we've created earlier and go to the Tab "Client Scopes". +Click on "Add client scope", choose the _groups_ scope and add it either to the __Default__ or to the __Optional__ Client Scope. If you put it in the Optional +category you will need to make sure that ArgoCD requests the scope in its OIDC configuration. Since we will always want group information, I recommend +using the Default category. ![Keycloak client scope](../../assets/keycloak-client-scope.png "Keycloak client scope") -Since we will always want group information, I recommend using the Default category. Make sure you click __Add selected__ -and that the _groups_ claim is in the correct list on the __right__. - -![Keycloak client scope selected](../../assets/keycloak-client-scope-selected.png "Keycloak client scope selected") - Create a group called _ArgoCDAdmins_ and have your current user join the group. ![Keycloak user group](../../assets/keycloak-user-group.png "Keycloak user group") @@ -57,17 +61,19 @@ Let's start by storing the client secret you generated earlier in the argocd sec 1. First you'll need to encode the client secret in base64: `$ echo -n '83083958-8ec6-47b0-a411-a8c55381fbd2' | base64` 2. Then you can edit the secret and add the base64 value to a new key called _oidc.keycloak.clientSecret_ using `$ kubectl edit secret argocd-secret`. - Your Secret should look something like this: - ```yaml - apiVersion: v1 - kind: Secret - metadata: - name: argocd-secret - data: - ... - oidc.keycloak.clientSecret: ODMwODM5NTgtOGVjNi00N2IwLWE0MTEtYThjNTUzODFmYmQy - ... - ``` + +Your Secret should look something like this: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: argocd-secret +data: + ... + oidc.keycloak.clientSecret: ODMwODM5NTgtOGVjNi00N2IwLWE0MTEtYThjNTUzODFmYmQy + ... 
+``` Now we can configure the config map and add the oidc configuration to enable our keycloak authentication. You can use `$ kubectl edit configmap argocd-cm`. @@ -83,14 +89,16 @@ data: url: https://argocd.example.com oidc.config: | name: Keycloak - issuer: https://keycloak.example.com/auth/realms/master + issuer: https://keycloak.example.com/realms/master clientID: argocd clientSecret: $oidc.keycloak.clientSecret requestedScopes: ["openid", "profile", "email", "groups"] ``` Make sure that: + - __issuer__ ends with the correct realm (in this example _master_) +- __issuer__ on Keycloak releases older than version 17 the URL must include /auth (in this example /auth/realms/master) - __clientID__ is set to the Client ID you configured in Keycloak - __clientSecret__ points to the right key you created in the _argocd-secret_ Secret - __requestedScopes__ contains the _groups_ claim if you didn't add it to the Default scopes diff --git a/docs/operator-manual/user-management/microsoft.md b/docs/operator-manual/user-management/microsoft.md index 4526c7a227f03..33a6b3e945940 100644 --- a/docs/operator-manual/user-management/microsoft.md +++ b/docs/operator-manual/user-management/microsoft.md @@ -5,160 +5,178 @@ * [Azure AD App Registration Auth using Dex](#azure-ad-app-registration-auth-using-dex) ## Azure AD SAML Enterprise App Auth using Dex - ### Configure a new Azure AD Enterprise App 1. From the `Azure Active Directory` > `Enterprise applications` menu, choose `+ New application` 2. Select `Non-gallery application` -3. Enter a `Name` for the application (eg. `Argo CD`), then choose `Add` +3. Enter a `Name` for the application (e.g. `Argo CD`), then choose `Add` 4. Once the application is created, open it from the `Enterprise applications` menu. 5. From the `Users and groups` menu of the app, add any users or groups requiring access to the service. - ![Azure Enterprise SAML Users](../../assets/azure-enterprise-users.png "Azure Enterprise SAML Users") - 6. 
From the `Single sign-on` menu, edit the `Basic SAML Configuration` section as follows (replacing `my-argo-cd-url` with your Argo URL): - - **Identifier (Entity ID):** https://``/api/dex/callback - - **Reply URL (Assertion Consumer Service URL):** https://``/api/dex/callback - - **Sign on URL:** https://``/auth/login - - **Relay State:** `` - - **Logout Url:** `` - - ![Azure Enterprise SAML URLs](../../assets/azure-enterprise-saml-urls.png "Azure Enterprise SAML URLs") - + - **Identifier (Entity ID):** https://``/api/dex/callback + - **Reply URL (Assertion Consumer Service URL):** https://``/api/dex/callback + - **Sign on URL:** https://``/auth/login + - **Relay State:** `` + - **Logout Url:** `` + ![Azure Enterprise SAML URLs](../../assets/azure-enterprise-saml-urls.png "Azure Enterprise SAML URLs") 7. From the `Single sign-on` menu, edit the `User Attributes & Claims` section to create the following claims: - - `+ Add new claim` | **Name:** email | **Source:** Attribute | **Source attribute:** user.mail - - `+ Add group claim` | **Which groups:** All groups | **Source attribute:** Group ID | **Customize:** True | **Name:** Group | **Namespace:** `` | **Emit groups as role claims:** False - - *Note: The `Unique User Identifier` required claim can be left as the default `user.userprincipalname`* - - ![Azure Enterprise SAML Claims](../../assets/azure-enterprise-claims.png "Azure Enterprise SAML Claims") - + - `+ Add new claim` | **Name:** email | **Source:** Attribute | **Source attribute:** user.mail + - `+ Add group claim` | **Which groups:** All groups | **Source attribute:** Group ID | **Customize:** True | **Name:** Group | **Namespace:** `` | **Emit groups as role claims:** False + - *Note: The `Unique User Identifier` required claim can be left as the default `user.userprincipalname`* + ![Azure Enterprise SAML Claims](../../assets/azure-enterprise-claims.png "Azure Enterprise SAML Claims") 8. 
From the `Single sign-on` menu, download the SAML Signing Certificate (Base64) - - Base64 encode the contents of the downloaded certificate file, for example: - - `$ cat ArgoCD.cer | base64` - - *Keep a copy of the encoded output to be used in the next section.* + - Base64 encode the contents of the downloaded certificate file, for example: + - `$ cat ArgoCD.cer | base64` + - *Keep a copy of the encoded output to be used in the next section.* 9. From the `Single sign-on` menu, copy the `Login URL` parameter, to be used in the next section. ### Configure Argo to use the new Azure AD Enterprise App 1. Edit `argocd-cm` and add the following `dex.config` to the data section, replacing the `caData`, `my-argo-cd-url` and `my-login-url` your values from the Azure AD App: -``` -data: - url: https://my-argo-cd-url - dex.config: | - logger: - level: debug - format: json - connectors: - - type: saml - id: saml - name: saml - config: - entityIssuer: https://my-argo-cd-url/api/dex/callback - ssoURL: https://my-login-url (eg. https://login.microsoftonline.com/xxxxx/a/saml2) - caData: | - MY-BASE64-ENCODED-CERTIFICATE-DATA - redirectURI: https://my-argo-cd-url/api/dex/callback - usernameAttr: email - emailAttr: email - groupsAttr: Group -``` + data: + url: https://my-argo-cd-url + dex.config: | + logger: + level: debug + format: json + connectors: + - type: saml + id: saml + name: saml + config: + entityIssuer: https://my-argo-cd-url/api/dex/callback + ssoURL: https://my-login-url (e.g. https://login.microsoftonline.com/xxxxx/a/saml2) + caData: | + MY-BASE64-ENCODED-CERTIFICATE-DATA + redirectURI: https://my-argo-cd-url/api/dex/callback + usernameAttr: email + emailAttr: email + groupsAttr: Group 2. Edit `argocd-rbac-cm` to configure permissions, similar to example below. - - - Use Azure AD `Group IDs` for assigning roles. - - See [RBAC Configurations](../rbac.md) for more detailed scenarios. 
- -``` -# example policy -policy.default: role:readonly - policy.csv: | - p, role:org-admin, applications, *, */*, allow - p, role:org-admin, clusters, get, *, allow - p, role:org-admin, repositories, get, *, allow - p, role:org-admin, repositories, create, *, allow - p, role:org-admin, repositories, update, *, allow - p, role:org-admin, repositories, delete, *, allow - g, "84ce98d1-e359-4f3b-85af-985b458de3c6", role:org-admin # (azure group assigned to role) -``` + - Use Azure AD `Group IDs` for assigning roles. + - See [RBAC Configurations](../rbac.md) for more detailed scenarios. + + # example policy + policy.default: role:readonly + policy.csv: | + p, role:org-admin, applications, *, */*, allow + p, role:org-admin, clusters, get, *, allow + p, role:org-admin, repositories, get, *, allow + p, role:org-admin, repositories, create, *, allow + p, role:org-admin, repositories, update, *, allow + p, role:org-admin, repositories, delete, *, allow + g, "84ce98d1-e359-4f3b-85af-985b458de3c6", role:org-admin # (azure group assigned to role) ## Azure AD App Registration Auth using OIDC +### Configure a new Azure AD App registration +#### Add a new Azure AD App registration + +1. From the `Azure Active Directory` > `App registrations` menu, choose `+ New registration` +2. Enter a `Name` for the application (e.g. `Argo CD`). +3. Specify who can use the application (e.g. `Accounts in this organizational directory only`). +4. Enter Redirect URI (optional) as follows (replacing `my-argo-cd-url` with your Argo URL), then choose `Add`. + - **Platform:** `Web` + - **Redirect URI:** https://``/auth/callback +5. When registration finishes, the Azure portal displays the app registration's Overview pane. You see the Application (client) ID. + ![Azure App registration's Overview](../../assets/azure-app-registration-overview.png "Azure App registration's Overview") + +#### Configure additional platform settings for ArgoCD CLI + +1. 
In the Azure portal, in App registrations, select your application. +2. Under Manage, select Authentication. +3. Under Platform configurations, select Add a platform. +4. Under Configure platforms, select the "Mobile and desktop applications" tile. Use the below value. You shouldn't change it. + - **Redirect URI:** `http://localhost:8085/auth/callback` + ![Azure App registration's Authentication](../../assets/azure-app-registration-authentication.png "Azure App registration's Authentication") + +#### Add credentials a new Azure AD App registration + +1. From the `Certificates & secrets` menu, choose `+ New client secret` +2. Enter a `Name` for the secret (e.g. `ArgoCD-SSO`). + - Make sure to copy and save generated value. This is a value for the `client_secret`. + ![Azure App registration's Secret](../../assets/azure-app-registration-secret.png "Azure App registration's Secret") + +#### Setup permissions for Azure AD Application + +1. From the `API permissions` menu, choose `+ Add a permission` +2. Find `User.Read` permission (under `Microsoft Graph`) and grant it to the created application: + ![Azure AD API permissions](../../assets/azure-api-permissions.png "Azure AD API permissions") +3. From the `Token Configuration` menu, choose `+ Add groups claim` + ![Azure AD token configuration](../../assets/azure-token-configuration.png "Azure AD token configuration") + +### Associate an Azure AD group to your Azure AD App registration + +1. From the `Azure Active Directory` > `Enterprise applications` menu, search the App that you created (e.g. `Argo CD`). + - An Enterprise application with the same name of the Azure AD App registration is created when you add a new Azure AD App registration. +2. From the `Users and groups` menu of the app, add any users or groups requiring access to the service. + ![Azure Enterprise SAML Users](../../assets/azure-enterprise-users.png "Azure Enterprise SAML Users") -1. 
Register a new Azure AD Application - - [Quickstart: Register an application](https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-register-app) - - App Registrations Inputs - Redirect URI: https://argocd.example.com/auth/callback - Outputs - Application (client) ID: aaaaaaaa-1111-bbbb-2222-cccccccccccc - Directory (tenant) ID: 33333333-dddd-4444-eeee-555555555555 - Secret: some_secret - -2. Setup permissions for Azure AD Application - - On "API permissions" page find `User.Read` permission (under `Microsoft Graph`) and grant it to the created application: - - ![Azure AD API permissions](../../assets/azure-api-permissions.png "Azure AD API permissions") - - Also, on "Token Configuration" page add groups claim for the groups assigned to the application: - - ![Azure AD token configuration](../../assets/azure-token-configuration.png "Azure AD token configuration") - -3. Edit `argocd-cm` and configure the `data.oidc.config` section: - - ConfigMap -> argocd-cm - - data: - url: https://argocd.example.com/ - oidc.config: | - name: Azure - issuer: https://login.microsoftonline.com/{directory_tenant_id}/v2.0 - clientID: {azure_ad_application_client_id} - clientSecret: $oidc.azure.clientSecret - requestedIDTokenClaims: - groups: - essential: true - requestedScopes: - - openid - - profile - - email - -4. Edit `argocd-secret` and configure the `data.oidc.azure.clientSecret` section: - - Secret -> argocd-secret - - data: - oidc.azure.clientSecret: {client_secret | base64_encoded} - -5. Edit `argocd-rbac-cm` to configure permissions. 
Use group ID from Azure for assigning roles - - [RBAC Configurations](../rbac.md) - - ConfigMap -> argocd-rbac-cm - - policy.default: role:readonly - policy.csv: | - p, role:org-admin, applications, *, */*, allow - p, role:org-admin, clusters, get, *, allow - p, role:org-admin, repositories, get, *, allow - p, role:org-admin, repositories, create, *, allow - p, role:org-admin, repositories, update, *, allow - p, role:org-admin, repositories, delete, *, allow - g, "84ce98d1-e359-4f3b-85af-985b458de3c6", role:org-admin - -6. Mapping role from jwt token to argo - - If you want to map the roles from the jwt token to match the default roles (readonly and admin) then you must change the scope variable in the rbac-configmap. - - scopes: '[roles, email]' +### Configure Argo to use the new Azure AD App registration + +1. Edit `argocd-cm` and configure the `data.oidc.config` and `data.url` section: + + ConfigMap -> argocd-cm + + data: + url: https://argocd.example.com/ # Replace with the external base URL of your Argo CD + oidc.config: | + name: Azure + issuer: https://login.microsoftonline.com/{directory_tenant_id}/v2.0 + clientID: {azure_ad_application_client_id} + clientSecret: $oidc.azure.clientSecret + requestedIDTokenClaims: + groups: + essential: true + requestedScopes: + - openid + - profile + - email + +2. Edit `argocd-secret` and configure the `data.oidc.azure.clientSecret` section: + + Secret -> argocd-secret + + data: + oidc.azure.clientSecret: {client_secret | base64_encoded} + +3. Edit `argocd-rbac-cm` to configure permissions. 
Use group ID from Azure for assigning roles + [RBAC Configurations](../rbac.md) + + ConfigMap -> argocd-rbac-cm + + policy.default: role:readonly + policy.csv: | + p, role:org-admin, applications, *, */*, allow + p, role:org-admin, clusters, get, *, allow + p, role:org-admin, repositories, get, *, allow + p, role:org-admin, repositories, create, *, allow + p, role:org-admin, repositories, update, *, allow + p, role:org-admin, repositories, delete, *, allow + g, "84ce98d1-e359-4f3b-85af-985b458de3c6", role:org-admin + +4. Mapping role from jwt token to argo + If you want to map the roles from the jwt token to match the default roles (readonly and admin) then you must change the scope variable in the rbac-configmap. + + policy.default: role:readonly + policy.csv: | + p, role:org-admin, applications, *, */*, allow + p, role:org-admin, clusters, get, *, allow + p, role:org-admin, repositories, get, *, allow + p, role:org-admin, repositories, create, *, allow + p, role:org-admin, repositories, update, *, allow + p, role:org-admin, repositories, delete, *, allow + g, "84ce98d1-e359-4f3b-85af-985b458de3c6", role:org-admin + scopes: '[groups, email]' + + Refer to [operator-manual/argocd-rbac-cm.yaml](https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/argocd-rbac-cm.yaml) for all of the available variables. ## Azure AD App Registration Auth using Dex Configure a new AD App Registration, as above. - Then, add the `dex.config` to `argocd-cm`: - ```yaml ConfigMap -> argocd-cm @@ -176,3 +194,33 @@ data: groups: - DevOps ``` + +## Validation +### Log in to ArgoCD UI using SSO + +1. Open a new browser tab and enter your ArgoCD URI: https://`` + ![Azure SSO Web Log In](../../assets/azure-sso-web-log-in-via-azure.png "Azure SSO Web Log In") +3. Click `LOGIN VIA AZURE` button to log in with your Azure Active Directory account. You’ll see the ArgoCD applications screen. 
+    ![Azure SSO Web Application](../../assets/azure-sso-web-application.png "Azure SSO Web Application")
+4. Navigate to User Info and verify Group ID. Groups will have your group’s Object ID that you added in the `Setup permissions for Azure AD Application` step.
+    ![Azure SSO Web User Info](../../assets/azure-sso-web-user-info.png "Azure SSO Web User Info")
+
+### Log in to ArgoCD using CLI
+
+1. Open terminal, execute the below command.
+
+        argocd login --grpc-web-root-path / --sso
+
+2. You will see the below message after entering your credentials from the browser.
+    ![Azure SSO CLI Log In](../../assets/azure-sso-cli-log-in-success.png "Azure SSO CLI Log In")
+3. Your terminal output will be similar to below.
+
+        WARNING: server certificate had error: x509: certificate is valid for ingress.local, not my-argo-cd-url. Proceed insecurely (y/n)? y
+        Opening browser for authentication
+        INFO[0003] RequestedClaims: map[groups:essential:true ]
+        Performing authorization_code flow login: https://login.microsoftonline.com/XXXXXXXXXXXXX/oauth2/v2.0/authorize?access_type=offline&claims=%7B%22id_token%22%3A%7B%22groups%22%3A%7B%22essential%22%3Atrue%7D%7D%7D&client_id=XXXXXXXXXXXXX&code_challenge=XXXXXXXXXXXXX&code_challenge_method=S256&redirect_uri=http%3A%2F%2Flocalhost%3A8085%2Fauth%2Fcallback&response_type=code&scope=openid+profile+email+offline_access&state=XXXXXXXX
+        Authentication successful
+        'yourid@example.com' logged in successfully
+        Context 'my-argo-cd-url' updated
+
+    You may get a warning if you are not using correctly signed certificates. Refer to [Why Am I Getting x509: certificate signed by unknown authority When Using The CLI?](https://argo-cd.readthedocs.io/en/stable/faq/#why-am-i-getting-x509-certificate-signed-by-unknown-authority-when-using-the-cli).
diff --git a/docs/operator-manual/user-management/okta.md b/docs/operator-manual/user-management/okta.md index ffec1e8eb8e3b..09d7099d19954 100644 --- a/docs/operator-manual/user-management/okta.md +++ b/docs/operator-manual/user-management/okta.md @@ -19,7 +19,15 @@ A working Single Sign-On configuration using Okta via at least two methods was a * ![Okta SAML App 2](../../assets/saml-2.png) 1. Click `View setup instructions` after creating the application in Okta. * ![Okta SAML App 3](../../assets/saml-3.png) -1. Copy the SSO URL to the `argocd-cm` in the data.oicd +1. Copy the Argo CD URL to the `argocd-cm` in the data.url + + +```yaml +data: + url: https://argocd.example.com +``` + + 1. Download the CA certificate to use in the `argocd-cm` configuration. * If you are using this in the caData field, you will need to pass the entire certificate (including `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----` stanzas) through base64 encoding, for example, `base64 my_cert.pem`. * If you are using the ca field and storing the CA certificate separately as a secret, you will need to mount the secret to the `dex` container in the `argocd-dex-server` Deployment. 
diff --git a/docs/operator-manual/user-management/onelogin.md b/docs/operator-manual/user-management/onelogin.md index d487e0d5e7e70..21432d7312732 100644 --- a/docs/operator-manual/user-management/onelogin.md +++ b/docs/operator-manual/user-management/onelogin.md @@ -111,11 +111,14 @@ apiVersion: v1 kind: ConfigMap metadata: name: argocd-cm + namespace: argocd + labels: + app.kubernetes.io/part-of: argocd data: url: https:// oidc.config: | name: OneLogin - issuer: https://openid-connect.onelogin.com/oidc + issuer: https://.onelogin.com/oidc/2 clientID: aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaaaaaaaa clientSecret: abcdef123456 @@ -143,6 +146,8 @@ kind: ConfigMap metadata: name: argocd-rbac-cm namespace: argocd + labels: + app.kubernetes.io/part-of: argocd data: policy.default: role:readonly policy.csv: | diff --git a/docs/operator-manual/user-management/openunison.md b/docs/operator-manual/user-management/openunison.md index 469d85f14935b..fecaafd074aa1 100644 --- a/docs/operator-manual/user-management/openunison.md +++ b/docs/operator-manual/user-management/openunison.md @@ -19,7 +19,7 @@ metadata: spec: accessTokenSkewMillis: 120000 accessTokenTimeToLive: 1200000 - authChainName: LoginService + authChainName: login-service clientId: argocd codeLastMileKeyName: lastmile-oidc codeTokenSkewMilis: 60000 diff --git a/docs/operator-manual/user-management/zitadel.md b/docs/operator-manual/user-management/zitadel.md new file mode 100644 index 0000000000000..08841983bc95f --- /dev/null +++ b/docs/operator-manual/user-management/zitadel.md @@ -0,0 +1,210 @@ +# Zitadel +Please also consult the [Zitadel Documentation](https://zitadel.com/docs). +## Integrating Zitadel and ArgoCD +These instructions will take you through the entire process of getting your ArgoCD application authenticating and authorizing with Zitadel. You will create an application within Zitadel and configure ArgoCD to use Zitadel for authentication using roles set in Zitadel to determine privileges in ArgoCD. 
+ +The following steps are required to integrate ArgoCD with Zitadel: +1. Create a new project and a new application in Zitadel +2. Configure the application in Zitadel +3. Set up roles in Zitadel +4. Set up an action in Zitadel +5. Configure ArgoCD configmaps +6. Test the setup + +The following values will be used in this example: +- Zitadel FQDN: `auth.example.com` +- Zitadel Project: `argocd-project` +- Zitadel Application: `argocd-application` +- Zitadel Action: `groupsClaim` +- ArgoCD FQDN: `argocd.example.com` +- ArgoCD Administrator Role: `argocd_administrators` +- ArgoCD User Role: `argocd_users` + +You may choose different values in your setup; these are used to keep the guide consistent. + +## Setting up your project and application in Zitadel +First, we will create a new project within Zitadel. Go to **Projects** and select **Create New Project**. +You should now see the following screen. + +![Zitadel Project](../../assets/zitadel-project.png "Zitadel Project") + +Check the following options: +- Assert Roles on Authentication +- Check authorization on Authentication + +![Zitadel Project Settings](../../assets/zitadel-project-settings.png "Zitadel Project Settings") + +### Roles + +Go to **Roles** and click **New**. Create the following two roles. Use the specified values below for both fields **Key** and **Group**. +- `argocd_administrators` +- `argocd_users` + +Your roles should now look like this: + +![Zitadel Project Roles](../../assets/zitadel-project-roles.png "Zitadel Project Roles") + +### Authorizations + +Next, go to **Authorizations** and assign your user the role `argocd_administrators`. +Click **New**, enter the name of your user and click **Continue**. Select the role `argocd_administrators` and click **Save**. 
+ +Your authorizations should now look like this: + +![Zitadel Project Authorizations](../../assets/zitadel-project-authorizations.png "Zitadel Project Authorizations") + +### Creating an application + +Go to **General** and create a new application. Name the application `argocd-application`. + +For type of the application, select **WEB** and click continue. + +![Zitadel Application Setup Step 1](../../assets/zitadel-application-1.png "Zitadel Application Setup Step 1") + +Select **CODE** and continue. + +![Zitadel Application Setup Step 2](../../assets/zitadel-application-2.png "Zitadel Application Setup Step 2") + +Next, we will set up the redirect and post-logout URIs. Set the following values: +- Redirect URI: `https://argocd.example.com/auth/callback` +- Post Logout URI: `https://argocd.example.com` + +The post logout URI is optional. In the example setup users will be taken back to the ArgoCD login page after logging out. + +![Zitadel Application Setup Step 3](../../assets/zitadel-application-3.png "Zitadel Application Setup Step 3") + +Verify your configuration on the next screen and click **Create** to create the application. + +![Zitadel Application Setup Step 4](../../assets/zitadel-application-4.png "Zitadel Application Setup Step 4") + +After clicking **Create** you will be shown the `ClientId` and the `ClientSecret` for your application. Make sure to copy the ClientSecret as you will not be able to retrieve it after closing this window. +For our example, the following values are used: +- ClientId: `227060711795262483@argocd-project` +- ClientSecret: `UGvTjXVFAQ8EkMv2x4GbPcrEwrJGWZ0sR2KbwHRNfYxeLsDurCiVEpa5bkgW0pl0` + +![Zitadel Application Secrets](../../assets/zitadel-application-secrets.png "Zitadel Application Secrets") + +Once you have saved the ClientSecret in a safe place, click **Close** to complete creating the application. 
+
+Go to **Token Settings** and enable the following options:
+- User roles inside ID Token
+- User Info inside ID Token
+
+![Zitadel Application Settings](../../assets/zitadel-application-settings.png "Zitadel Application Settings")
+
+## Setting up an action in Zitadel
+
+To include the role of the user in the token issued by Zitadel, we will need to set up a Zitadel Action. The authorization in ArgoCD will be determined by the role contained within the auth token.
+Go to **Actions**, click **New** and choose `groupsClaim` as the name of your action.
+
+Paste the following code into the action:
+
+```javascript
+/**
+ * sets the roles as an additional claim in the token with roles as value and project as key
+ *
+ * The role claims of the token look like the following:
+ *
+ * // added by the code below
+ * "groups": ["{roleName}", "{roleName}", ...],
+ *
+ * Flow: Complement token, Triggers: Pre Userinfo creation, Pre access token creation
+ *
+ * @param ctx
+ * @param api
+ */
+function groupsClaim(ctx, api) {
+  if (ctx.v1.user.grants === undefined || ctx.v1.user.grants.count == 0) {
+    return;
+  }
+
+  let grants = [];
+  ctx.v1.user.grants.grants.forEach((claim) => {
+    claim.roles.forEach((role) => {
+      grants.push(role);
+    });
+  });
+
+  api.v1.claims.setClaim("groups", grants);
+}
+```
+
+Check **Allowed To Fail** and click **Add** to add your action.
+
+*Note: If **Allowed To Fail** is not checked and a user does not have a role assigned, it may be possible that the user is no longer able to log in to Zitadel as the login flow fails when the action fails.*
+
+Next, add your action to the **Complement Token** flow. Select the **Complement Token** flow from the dropdown and click **Add trigger**.
+Add your action to both triggers **Pre Userinfo creation** and **Pre access token creation**.
+ +Your Actions page should now look like the following screenshot: + +![Zitadel Actions](../../assets/zitadel-actions.png "Zitadel Actions") + + +## Configuring the ArgoCD configmaps + +Next, we will configure two ArgoCD configmaps: +- [argocd-cm.yaml](https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/argocd-cm.yaml) +- [argocd-rbac-cm.yaml](https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/argocd-rbac-cm.yaml) + +Configure your configmaps as follows while making sure to replace the relevant values such as `url`, `issuer`, `clientID`, `clientSecret` and `logoutURL` with ones matching your setup. + +### argocd-cm.yaml +```yaml +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-cm + namespace: argocd + labels: + app.kubernetes.io/part-of: argocd +data: + admin.enabled: "false" + url: https://argocd.example.com + oidc.config: | + name: Zitadel + issuer: https://auth.example.com + clientID: 227060711795262483@argocd-project + clientSecret: UGvTjXVFAQ8EkMv2x4GbPcrEwrJGWZ0sR2KbwHRNfYxeLsDurCiVEpa5bkgW0pl0 + requestedScopes: + - openid + - profile + - email + - groups + logoutURL: https://auth.example.com/oidc/v1/end_session +``` + +### argocd-rbac-cm.yaml +```yaml +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-rbac-cm + namespace: argocd + labels: + app.kubernetes.io/part-of: argocd +data: + scopes: '[groups]' + policy.csv: | + g, argocd_administrators, role:admin + g, argocd_users, role:readonly + policy.default: '' +``` + +The roles specified under `policy.csv` must match the roles configured in Zitadel. +The Zitadel role `argocd_administrators` will be assigned the ArgoCD role `admin` granting admin access to ArgoCD. +The Zitadel role `argocd_users` will be assigned the ArgoCD role `readonly` granting read-only access to ArgoCD. + +Deploy your ArgoCD configmaps. ArgoCD and Zitadel should now be set up correctly to allow users to log in to ArgoCD using Zitadel. 
+ +## Testing the setup + +Go to your ArgoCD instance. You should now see the **LOG IN WITH ZITADEL** button above the usual username/password login. + +![Zitadel ArgoCD Login](../../assets/zitadel-argocd-login.png "Zitadel ArgoCD Login") + +After logging in with your Zitadel user go to **User Info**. If everything is set up correctly you should now see the group `argocd_administrators` as shown below. + +![Zitadel ArgoCD User Info](../../assets/zitadel-argocd-user-info.png "Zitadel ArgoCD User Info") diff --git a/docs/operator-manual/web_based_terminal.md b/docs/operator-manual/web_based_terminal.md new file mode 100644 index 0000000000000..5c791e9faa00f --- /dev/null +++ b/docs/operator-manual/web_based_terminal.md @@ -0,0 +1,46 @@ +# Web-based Terminal + +![Argo CD Terminal](../assets/terminal.png) + +Since v2.4, Argo CD has a web-based terminal that allows you to get a shell inside a running pod just like you would with +`kubectl exec`. It's basically SSH from your browser, full ANSI color support and all! However, for security this feature +is disabled by default. + +This is a powerful privilege. It allows the user to run arbitrary code on any Pod managed by an Application for which +they have the `exec/create` privilege. If the Pod mounts a ServiceAccount token (which is the default behavior of +Kubernetes), then the user effectively has the same privileges as that ServiceAccount. + +## Enabling the terminal + + +1. Set the `exec.enabled` key to `"true"` on the `argocd-cm` ConfigMap. + +2. Patch the `argocd-server` Role (if using namespaced Argo) or ClusterRole (if using clustered Argo) to allow `argocd-server` +to exec into pods + + - apiGroups: + - "" + resources: + - pods/exec + verbs: + - create + + +3. Add RBAC rules to allow your users to `create` the `exec` resource, i.e. + + p, role:myrole, exec, create, */*, allow + + +See [RBAC Configuration](rbac.md#exec-resource) for more info. 
+ +## Changing allowed shells + +By default, Argo CD attempts to execute shells in this order: + +1. bash +2. sh +3. powershell +4. cmd + +If none of the shells are found, the terminal session will fail. To add to or change the allowed shells, change the +`exec.shells` key in the `argocd-cm` ConfigMap, separating them with commas. diff --git a/docs/operator-manual/webhook.md b/docs/operator-manual/webhook.md index d870c0d8ea8b7..1d5ad5ec79c96 100644 --- a/docs/operator-manual/webhook.md +++ b/docs/operator-manual/webhook.md @@ -4,22 +4,36 @@ Argo CD polls Git repositories every three minutes to detect changes to the manifests. To eliminate this delay from polling, the API server can be configured to receive webhook events. Argo CD supports -Git webhook notifications from GitHub, GitLab, Bitbucket, Bitbucket Server and Gogs. The following explains how to configure +Git webhook notifications from GitHub, GitLab, Bitbucket, Bitbucket Server, Azure DevOps and Gogs. The following explains how to configure a Git webhook for GitHub, but the same process should be applicable to other providers. -### 1. Create The WebHook In The Git Provider +!!! note + The webhook handler does not differentiate between branch events and tag events where the branch and tag names are + the same. A hook event for a push to branch `x` will trigger a refresh for an app pointing at the same repo with + `targetRevision: refs/tags/x`. + +## 1. Create The WebHook In The Git Provider In your Git provider, navigate to the settings page where webhooks can be configured. The payload URL configured in the Git provider should use the `/api/webhook` endpoint of your Argo CD instance (e.g. `https://argocd.example.com/api/webhook`). If you wish to use a shared secret, input an arbitrary value in the secret. This value will be used when configuring the webhook in the next step. +## Github + ![Add Webhook](../assets/webhook-config.png "Add Webhook") !!! 
note - When creating the webhook in Github, the "Content type" needs to be set to "application/json". The default value "application/x-www-form-urlencoded" is not supported by the library used to handle the hooks + When creating the webhook in GitHub, the "Content type" needs to be set to "application/json". The default value "application/x-www-form-urlencoded" is not supported by the library used to handle the hooks + +## Azure DevOps -### 2. Configure Argo CD With The WebHook Secret (Optional) +![Add Webhook](../assets/azure-devops-webhook-config.png "Add Webhook") + +Azure DevOps optionally supports securing the webhook using basic authentication. To use it, specify the username and password in the webhook configuration and configure the same username/password in `argocd-secret` Kubernetes secret in +`webhook.azuredevops.username` and `webhook.azuredevops.password` keys. + +## 2. Configure Argo CD With The WebHook Secret (Optional) Configuring a webhook shared secret is optional, since Argo CD will still refresh applications related to the Git repository, even with unauthenticated webhook events. This is safe to do since @@ -31,12 +45,14 @@ In the `argocd-secret` kubernetes secret, configure one of the following keys wi provider's webhook secret configured in step 1. | Provider | K8s Secret Key | -|-----------------| ---------------------------------| +|-----------------|----------------------------------| | GitHub | `webhook.github.secret` | | GitLab | `webhook.gitlab.secret` | | BitBucket | `webhook.bitbucket.uuid` | | BitBucketServer | `webhook.bitbucketserver.secret` | | Gogs | `webhook.gogs.secret` | +| Azure DevOps | `webhook.azuredevops.username` | +| | `webhook.azuredevops.password` | Edit the Argo CD kubernetes secret: @@ -61,19 +77,23 @@ data: stringData: # github webhook secret - webhook.github.secret: shhhh! it's a github secret + webhook.github.secret: shhhh! it's a GitHub secret # gitlab webhook secret - webhook.gitlab.secret: shhhh! 
it's a gitlab secret + webhook.gitlab.secret: shhhh! it's a GitLab secret # bitbucket webhook secret webhook.bitbucket.uuid: your-bitbucket-uuid # bitbucket server webhook secret - webhook.bitbucketserver.secret: shhhh! it's a bitbucket server secret + webhook.bitbucketserver.secret: shhhh! it's a Bitbucket server secret # gogs server webhook secret webhook.gogs.secret: shhhh! it's a gogs server secret + + # azuredevops username and password + webhook.azuredevops.username: admin + webhook.azuredevops.password: secret-password ``` After saving, the changes should take effect automatically. diff --git a/docs/proposals/001-proposal-template.md b/docs/proposals/001-proposal-template.md index 5d0d25f935db1..9db805bcafa6b 100644 --- a/docs/proposals/001-proposal-template.md +++ b/docs/proposals/001-proposal-template.md @@ -3,7 +3,7 @@ title: Neat-enhancement-idea authors: - "@sbose78" # Authors' github accounts here. sponsors: - - TBD # List all intereste parties here. + - TBD # List all interested parties here. reviewers: - "@alexmt" - TBD @@ -58,10 +58,10 @@ This is where we get down to details of what the proposal is about. Add a list of detailed use cases this enhancement intends to take care of. -## Use case 1: +#### Use case 1: As a user, I would like to understand the drift. (This is an example) -## Use case 2: +#### Use case 2: As a user, I would like to take an action on the deviation/drift. (This is an example) ### Implementation Details/Notes/Constraints [optional] @@ -77,11 +77,11 @@ You may have a work-in-progress Pull Request to demonstrate the functioning of t ### Security Considerations * How does this proposal impact the security aspects of Argo CD workloads ? -* Are there any unresolved follow-ups that need to be done to make the enhancement more robust ? +* Are there any unresolved follow-ups that need to be done to make the enhancement more robust ? ### Risks and Mitigations -What are the risks of this proposal and how do we mitigate. Think broadly. 
+What are the risks of this proposal and how do we mitigate. Think broadly. For example, consider both security and how this will impact the larger Kubernetes ecosystem. diff --git a/docs/proposals/002-ui-extensions.md b/docs/proposals/002-ui-extensions.md new file mode 100644 index 0000000000000..583888da68c66 --- /dev/null +++ b/docs/proposals/002-ui-extensions.md @@ -0,0 +1,209 @@ +--- +title: Argo CD Extensions +authors: + - "@rbreeze" + - "@jsuen" + - "@alexmt" +sponsors: + - TBD +reviewers: + - "@alexmt" + - "@jsuen" + - TBD +approvers: + - TBD + +creation-date: 2021-05-13 +last-updated: 2021-05-27 +--- + +# Argo CD Extensions + + +## Summary + +This proposal is to provide a mechanism to extend Argo CD such that it can provide resource-specific visualizations, capabilities and interactions in the following ways: + +1. Richer and context-sensitive UI components can be displayed in the user interface about custom resources. +2. Custom health checks can be configured to assess the health of the resource. +3. Custom actions could be performed to manipulate resources in predefined ways. + + +## Motivation + +Argo CD is commonly used as a dashboard to Kubernetes applications. The current UI is limited in that it only displays very general information about Kubernetes objects. Any special visualizations can currently only be done for native Kubernetes kinds. + +For custom resources, Argo CD does not by default have any special handling or understanding of CRs, such as how to assess the health of the object or visualizations. When examining a resource, a user can only see a YAML view of the object, which is not helpful unless they are familiar with the object's spec and status information. + +Note that Argo CD does currently have a resource customizations feature, which allows operators to define health checks and actions via lua scripts in the argocd-cm ConfigMap. 
However, the current mechanism of configuring resource customizations is difficult and highly error prone. + +This proposal would allow operators to more easily configure Argo CD to understand custom resources, as well as provide more powerful visualization of objects. + + +## Use cases + +### Use case 1: +As a user, I would like to see visual information about my Rollout without having to use the CLI or otherwise leave Argo CD. + +### Use case 2: +As an operator, I would like to configure Argo CD to be able to assess the health of a custom resource based on its status. + +### Use case 3: +As an operator, I would like to configure Argo CD to perform pre-defined actions (object mutations) on a custom resource, for example restarting a Rollout. + + +### Goals + +- Enable new visualizations in the UI for resources that do not have baked-in support +- Extensions can be configured by operators at runtime, without a feature being built directly into Argo CD, and with no need to recompile UI code. +- Extensions should be easy to develop and install. +- Loose coupling between Argo CD and extensions. +- Replace current resource customizations in `argocd-cm` ConfigMap with extensions + + +## Proposal + +A new `ArgoCDExtension` CRD would be introduced which will allow operators configure Argo CD to understand how to handle and visualize custom resources. Visualizing a object requires javascript to render the object, and health/actions require lua scripts. As such, the extension CR would need to point to some location where the javascript/lua code would be hosted. + +It is proposed that a git repository be used to contain the javascript code, as well as the lua scripts necessary to assess health or perform actions of a resource. 
+ + +### ArgoCDExtension CRD + +In the most simplest form, an Argo CD extension could simply be a pointer to a git repository at a revision: + +```yaml +kind: ArgoCDExtension +metadata: + name: argo-rollouts +spec: + repository: https://github.com/argoproj-labs/rollout-extension + revision: HEAD +``` + +### Git Repository Structure + +The git repository would have an expected structure, such that the scripts and UI component could be discovered easily by Argo CD based on resource kind. + +``` +├── README.md +├── argoproj.io +│   ├── AnalysisRun +│   │   ├── actions +│   │   │   ├── discovery.lua +│   │   │   └── terminate +│   │   │      └── action.lua +│   │   └── health.lua +│   ├── Experiment +│   │   └── health.lua +│   └── Rollout +│   ├── ui +│   │   └── extension.js # dynamically loaded by argocd-server +│   ├── actions +│   │   ├── abort +│   │   │   └── action.lua +│   │   ├── discovery.lua +│   │   ├── promote-full +│   │   │   └── action.lua +│   │   ├── restart +│   │   │   └── action.lua +│   │   ├── resume +│   │   │   └── action.lua +│   │   ├── retry +│   │   │   └── action.lua +``` + +Note that it may be necessary to support multiple versions of a resource (e.g. v1alpha1 vs. a v1 version of a custom esource), and so the path structure may need to also support incorporating the version in the path. For example: + +``` +├── argoproj.io +│   ├── v1alpha1 +│   │   ├── AnalysisRun +``` + +### User Interface + +In the UI, a new tab in the Resource View will be made available. The contents of that tab would dynamically loaded by the Argo CD API server at the git URL specified in the extension, which would be cloned locally by the API server. + + +## Implementation Details + +At a high level an Argo CD extension is simply a React component, which is dynamically loaded at runtime by the Argo CD API server. + +In order for the component to render some visualization about a resource, it needs to be supplied at least two key peices of information: +1. 
The full resource object itself +2. The entire Application Resource Tree + +We provide the entire application tree to accomplish two things: + +1. Extensions get basic (shallow) live updates for free because the Resource Tree is already live updated +2. Extensions may wish to display richer hierarchical structure for other related objects (e.g. the Rollout extension would want to display ReplicaSets and Pods) + +Further, if an Extension needs richer information than that provided by the Resource Tree, it can request additional information about a resource from the Argo CD API server. + +```typescript +interface Extention { + ResourceTab: React.Component<{resource: any}>; +} +``` + + +The UI will dynamically import an Extension React component from the Argo CD API Server. This is accomplished by specifying the generic Extension component as a Webpack external, and including a ` diff --git a/ui/src/app/index.tsx b/ui/src/app/index.tsx index 1826bf409f150..7331e1c6c173a 100644 --- a/ui/src/app/index.tsx +++ b/ui/src/app/index.tsx @@ -1,5 +1,6 @@ import * as React from 'react'; import * as ReactDOM from 'react-dom'; +import * as Moment from 'moment'; import {App} from './app'; ReactDOM.render(, document.getElementById('app')); @@ -11,3 +12,7 @@ if (mdl.hot) { ReactDOM.render(, document.getElementById('app')); }); } + +(window as any).React = React; +(window as any).ReactDOM = ReactDOM; +(window as any).Moment = Moment; diff --git a/ui/src/app/login/components/login.tsx b/ui/src/app/login/components/login.tsx index 8662d4c2f132d..db67ff185cf78 100644 --- a/ui/src/app/login/components/login.tsx +++ b/ui/src/app/login/components/login.tsx @@ -20,7 +20,7 @@ interface State { loginError: string; loginInProgress: boolean; returnUrl: string; - ssoLoginError: string; + hasSsoLoginError: boolean; } export class Login extends React.Component, State> { @@ -31,13 +31,13 @@ export class Login extends React.Component, State> { public static getDerivedStateFromProps(props: 
RouteComponentProps<{}>): Partial { const search = new URLSearchParams(props.history.location.search); const returnUrl = search.get('return_url') || ''; - const ssoLoginError = search.get('sso_error') || ''; - return {ssoLoginError, returnUrl}; + const hasSsoLoginError = search.get('has_sso_error') === 'true'; + return {hasSsoLoginError, returnUrl}; } constructor(props: RouteComponentProps<{}>) { super(props); - this.state = {authSettings: null, loginError: null, returnUrl: null, ssoLoginError: null, loginInProgress: false}; + this.state = {authSettings: null, loginError: null, returnUrl: null, hasSsoLoginError: false, loginInProgress: false}; } public async componentDidMount() { @@ -69,7 +69,7 @@ export class Login extends React.Component, State> { )} - {this.state.ssoLoginError &&
{this.state.ssoLoginError}
} + {this.state.hasSsoLoginError &&
Login failed.
} {authSettings && !authSettings.userLoginsDisabled && (
or diff --git a/ui/src/app/settings/components/account-details/account-details.tsx b/ui/src/app/settings/components/account-details/account-details.tsx index e418de21834b5..e81993f07053e 100644 --- a/ui/src/app/settings/components/account-details/account-details.tsx +++ b/ui/src/app/settings/components/account-details/account-details.tsx @@ -48,7 +48,7 @@ export const AccountDetails = (props: RouteComponentProps<{name: string}>) => {
{ const expiresIn = convertExpiresInToSeconds(params.expiresIn); - const confirmed = await ctx.popup.confirm('Generate new token?', 'Are you sure you want to generate new token?'); + const confirmed = await ctx.popup.confirm('Generate new token', 'Are you sure you want to generate a new token?'); if (!confirmed) { return; } diff --git a/ui/src/app/settings/components/accounts-list/accounts-list.tsx b/ui/src/app/settings/components/accounts-list/accounts-list.tsx index 4a86bcfbb9ee6..f6e362d7743ca 100644 --- a/ui/src/app/settings/components/accounts-list/accounts-list.tsx +++ b/ui/src/app/settings/components/accounts-list/accounts-list.tsx @@ -29,7 +29,7 @@ export const AccountsList = () => {
{account.name}
{(account.enabled && 'true') || 'false'}
-
{account.capabilities.join(', ')}
+
{account.capabilities && account.capabilities.join(', ')}
))} diff --git a/ui/src/app/settings/components/appearance-list/appearance-list.scss b/ui/src/app/settings/components/appearance-list/appearance-list.scss new file mode 100644 index 0000000000000..dc70dcd77696c --- /dev/null +++ b/ui/src/app/settings/components/appearance-list/appearance-list.scss @@ -0,0 +1,23 @@ +@import 'node_modules/argo-ui/src/styles/config'; +@import 'node_modules/argo-ui/src/styles/theme'; + +.appearance-list { + &__panel { + margin: 18px 0; + position: relative; + padding: 20px; + font-size: 16px; + @include themify($themes) { + background-color: themed('background-2'); + color: themed('text-1'); + } + border-radius: 4px; + box-shadow: 1px 2px 3px rgba(#000, 0.1); + } + + &__button { + position: absolute; + top: 25%; + right: 30px; + } +} \ No newline at end of file diff --git a/ui/src/app/settings/components/appearance-list/appearance-list.tsx b/ui/src/app/settings/components/appearance-list/appearance-list.tsx new file mode 100644 index 0000000000000..5e6fb06502f84 --- /dev/null +++ b/ui/src/app/settings/components/appearance-list/appearance-list.tsx @@ -0,0 +1,37 @@ +import * as React from 'react'; +import {DataLoader, Page} from '../../../shared/components'; +import {services} from '../../../shared/services'; + +require('./appearance-list.scss'); + +export const AppearanceList = () => { + return ( + + services.viewPreferences.getPreferences()}> + {pref => ( +
+
+
+
Dark Theme
+
+ +
+
+
+
+ )} +
+
+ ); +}; diff --git a/ui/src/app/settings/components/certs-list/certs-list.tsx b/ui/src/app/settings/components/certs-list/certs-list.tsx index 135172f883376..14a57facf124e 100644 --- a/ui/src/app/settings/components/certs-list/certs-list.tsx +++ b/ui/src/app/settings/components/certs-list/certs-list.tsx @@ -35,18 +35,20 @@ export class CertsList extends React.Component> { public render() { return ( (this.showAddTLSCertificate = true) }, { title: 'Add SSH known hosts', + iconClassName: 'fa fa-plus', action: () => (this.showAddSSHKnownHosts = true) } ] @@ -123,33 +125,31 @@ export class CertsList extends React.Component> { }> -
-
-

Create TLS repository certificate

- this.addTLSCertificate(params as NewTLSCertParams)} - getApi={api => (this.formApiTLS = api)} - preSubmit={(params: NewTLSCertParams) => ({ - serverName: params.serverName, - certData: btoa(params.certData) - })} - validateError={(params: NewTLSCertParams) => ({ - serverName: !params.serverName && 'Repository server name is required', - certData: !params.certData && 'Certificate data is required' - })}> - {formApiTLS => ( - -
- -
-
- -
- - )} - -
-
+
this.addTLSCertificate(params as NewTLSCertParams)} + getApi={api => (this.formApiTLS = api)} + preSubmit={(params: NewTLSCertParams) => ({ + serverName: params.serverName, + certData: btoa(params.certData) + })} + validateError={(params: NewTLSCertParams) => ({ + serverName: !params.serverName && 'Repository Server Name is required', + certData: !params.certData && 'TLS Certificate is required' + })}> + {formApiTLS => ( + +
+

CREATE TLS REPOSITORY CERTIFICATE

+
+ +
+
+ +
+
+
+ )} + > { }> -
-
-

Create SSH known host entries

- -

- Paste SSH known hosts data in the text area below, one entry per line. You can use output from ssh-keyscan or the contents on an{' '} - ssh_known_hosts file verbatim. Lines starting with # will be treated as comments and ignored. -

-

- Make sure there are no linebreaks in the keys. -

-
this.addSSHKnownHosts(params as NewSSHKnownHostParams)} - getApi={api => (this.formApiSSH = api)} - preSubmit={(params: NewSSHKnownHostParams) => ({ - certData: btoa(params.certData) - })} - validateError={(params: NewSSHKnownHostParams) => ({ - certData: !params.certData && 'SSH known hosts data is required' - })}> - {formApiSSH => ( - -
- -
-
- )} - -
-
+
this.addSSHKnownHosts(params as NewSSHKnownHostParams)} + getApi={api => (this.formApiSSH = api)} + preSubmit={(params: NewSSHKnownHostParams) => ({ + certData: btoa(params.certData) + })} + validateError={(params: NewSSHKnownHostParams) => ({ + certData: !params.certData && 'SSH known hosts data is required' + })}> + {formApiSSH => ( + +
+

CREATE SSH KNOWN HOST ENTRIES

+

+ Paste SSH known hosts data in the text area below, one entry per line. You can use output from ssh-keyscan or the contents on + an ssh_known_hosts file verbatim. Lines starting with # will be treated as comments and ignored. +

+

+ Make sure there are no linebreaks in the keys. +

+
+ +
+
+
+ )} +
); diff --git a/ui/src/app/settings/components/cluster-details/cluster-details.tsx b/ui/src/app/settings/components/cluster-details/cluster-details.tsx index 58562b8cadfc3..472ccf26ee283 100644 --- a/ui/src/app/settings/components/cluster-details/cluster-details.tsx +++ b/ui/src/app/settings/components/cluster-details/cluster-details.tsx @@ -3,10 +3,11 @@ import * as moment from 'moment'; import * as React from 'react'; import {FieldApi, FormField as ReactFormField, Text} from 'react-form'; import {RouteComponentProps} from 'react-router-dom'; -import {Observable} from 'rxjs'; +import {from, timer} from 'rxjs'; +import {mergeMap} from 'rxjs/operators'; import {FormField, Ticker} from 'argo-ui'; -import {ConnectionStateIcon, DataLoader, EditablePanel, Page, Timestamp} from '../../../shared/components'; +import {ConnectionStateIcon, DataLoader, EditablePanel, Page, Timestamp, MapInputField} from '../../../shared/components'; import {Cluster} from '../../../shared/models'; import {services} from '../../../shared/services'; @@ -24,12 +25,12 @@ export const ClusterDetails = (props: RouteComponentProps<{server: string}>) => const loaderRef = React.useRef(); const [updating, setUpdating] = React.useState(false); return ( - Observable.timer(0, 1000).flatMap(() => Observable.fromPromise(services.clusters.get(url, '')))}> + timer(0, 1000).pipe(mergeMap(() => from(services.clusters.get(url, ''))))}> {(cluster: Cluster) => ( ) => const item = await services.clusters.get(updated.server, ''); item.name = updated.name; item.namespaces = updated.namespaces; - loaderRef.current.setData(await services.clusters.update(item, 'name', 'namespaces')); + item.labels = updated.labels; + item.annotations = updated.annotations; + loaderRef.current.setData(await services.clusters.update(item, 'name', 'namespaces', 'labels', 'annotations')); }} title='GENERAL' items={[ @@ -82,6 +85,20 @@ export const ClusterDetails = (props: RouteComponentProps<{server: string}>) => title: 'NAMESPACES', view: 
((cluster.namespaces || []).length === 0 && 'All namespaces') || cluster.namespaces.join(', '), edit: formApi => + }, + { + title: 'LABELS', + view: Object.keys(cluster.labels || []) + .map(label => `${label}=${cluster.labels[label]}`) + .join(' '), + edit: formApi => + }, + { + title: 'ANNOTATIONS', + view: Object.keys(cluster.annotations || []) + .map(annotation => `${annotation}=${cluster.annotations[annotation]}`) + .join(' '), + edit: formApi => } ]} /> diff --git a/ui/src/app/settings/components/clusters-list/clusters-list.tsx b/ui/src/app/settings/components/clusters-list/clusters-list.tsx index fe8734ccfba82..c6dea9ab372aa 100644 --- a/ui/src/app/settings/components/clusters-list/clusters-list.tsx +++ b/ui/src/app/settings/components/clusters-list/clusters-list.tsx @@ -1,4 +1,5 @@ -import {DropDownMenu} from 'argo-ui'; +import {DropDownMenu, ErrorNotification, NotificationType} from 'argo-ui'; +import {Tooltip} from 'argo-ui'; import * as React from 'react'; import {RouteComponentProps} from 'react-router-dom'; import {clusterName, ConnectionStateIcon, DataLoader, EmptyState, Page} from '../../../shared/components'; @@ -36,9 +37,16 @@ export const ClustersList = (props: RouteComponentProps<{}>) => { onClick={() => ctx.navigation.goto(`./${encodeURIComponent(cluster.server)}`)}>
- {clusterName(cluster.name)} + + + {clusterName(cluster.name)} + +
+
+ + {cluster.server} +
-
{cluster.server}
{cluster.info.serverVersion}
{cluster.info.connectionState.status} @@ -51,13 +59,27 @@ export const ClustersList = (props: RouteComponentProps<{}>) => { items={[ { title: 'Delete', - action: () => - services.clusters.delete(cluster.server).finally(() => { - ctx.navigation.goto('.', {new: null}); - if (clustersLoaderRef.current) { - clustersLoaderRef.current.reload(); + action: async () => { + const confirmed = await ctx.popup.confirm( + 'Delete cluster?', + `Are you sure you want to delete cluster: ${cluster.name}` + ); + if (confirmed) { + try { + await services.clusters.delete(cluster.server).finally(() => { + ctx.navigation.goto('.', {new: null}, {replace: true}); + if (clustersLoaderRef.current) { + clustersLoaderRef.current.reload(); + } + }); + } catch (e) { + ctx.notifications.show({ + content: , + type: NotificationType.Error + }); } - }) + } + } } ]} /> diff --git a/ui/src/app/settings/components/gpgkeys-list/gpgkeys-list.tsx b/ui/src/app/settings/components/gpgkeys-list/gpgkeys-list.tsx index cc648d0a9ff1d..248052a6afc3b 100644 --- a/ui/src/app/settings/components/gpgkeys-list/gpgkeys-list.tsx +++ b/ui/src/app/settings/components/gpgkeys-list/gpgkeys-list.tsx @@ -36,6 +36,7 @@ export class GpgKeysList extends React.Component> { items: [ { title: 'Add GnuPG key', + iconClassName: 'fa fa-plus', action: () => (this.showAddGnuPGKey = true) } ] @@ -107,7 +108,6 @@ export class GpgKeysList extends React.Component> {
}> -

Add GnuPG public key

this.addGnuPGPublicKey({keyData: params.keyData})} getApi={api => (this.formApi = api)} @@ -115,12 +115,15 @@ export class GpgKeysList extends React.Component> { keyData: params.keyData })} validateError={(params: NewGnuPGPublicKeyParams) => ({ - keyData: !params.keyData && 'Key data is required' + keyData: !params.keyData && 'GnuPG public key data is required' })}> {formApi => ( -
- +
+

ADD GnuPG PUBLIC KEY

+
+ +
)} diff --git a/ui/src/app/settings/components/project-details/project-details.tsx b/ui/src/app/settings/components/project-details/project-details.tsx index 037f72ee2e1e6..224c2e1e45e12 100644 --- a/ui/src/app/settings/components/project-details/project-details.tsx +++ b/ui/src/app/settings/components/project-details/project-details.tsx @@ -7,7 +7,7 @@ import {RouteComponentProps} from 'react-router'; import {BadgePanel, CheckboxField, DataLoader, EditablePanel, ErrorNotification, MapInputField, Page, Query} from '../../../shared/components'; import {AppContext, Consumer} from '../../../shared/context'; -import {GroupKind, Groups, Project, ProjectSpec, ResourceKinds} from '../../../shared/models'; +import {GroupKind, Groups, Project, DetailedProjectsResponse, ProjectSpec, ResourceKinds} from '../../../shared/models'; import {CreateJWTTokenParams, DeleteJWTTokenParams, ProjectRoleParams, services} from '../../../shared/services'; import {SyncWindowStatusIcon} from '../../../applications/components/utils'; @@ -16,6 +16,7 @@ import {ProjectEvents} from '../project-events/project-events'; import {ProjectRoleEditPanel} from '../project-role-edit-panel/project-role-edit-panel'; import {ProjectSyncWindowsEditPanel} from '../project-sync-windows-edit-panel/project-sync-windows-edit-panel'; import {ResourceListsPanel} from './resource-lists-panel'; +import {DeepLinks} from '../../../shared/components/deep-links'; require('./project-details.scss'); @@ -42,87 +43,85 @@ function emptyMessage(title: string) { return

Project has no {title}

; } -function loadGlobal(name: string) { - return services.projects.getGlobalProjects(name).then(projs => - (projs || []).reduce( - (merged, proj) => { - merged.clusterResourceBlacklist = merged.clusterResourceBlacklist.concat(proj.spec.clusterResourceBlacklist || []); - merged.clusterResourceWhitelist = merged.clusterResourceWhitelist.concat(proj.spec.clusterResourceWhitelist || []); - merged.namespaceResourceBlacklist = merged.namespaceResourceBlacklist.concat(proj.spec.namespaceResourceBlacklist || []); - merged.namespaceResourceWhitelist = merged.namespaceResourceWhitelist.concat(proj.spec.namespaceResourceWhitelist || []); - merged.sourceRepos = merged.sourceRepos.concat(proj.spec.sourceRepos || []); - merged.destinations = merged.destinations.concat(proj.spec.destinations || []); - - merged.sourceRepos = merged.sourceRepos.filter((item, index) => { - return ( - index === - merged.sourceRepos.findIndex(obj => { - return obj === item; - }) - ); - }); - - merged.destinations = merged.destinations.filter((item, index) => { - return ( - index === - merged.destinations.findIndex(obj => { - return obj.server === item.server && obj.namespace === item.namespace; - }) - ); - }); - - merged.clusterResourceBlacklist = merged.clusterResourceBlacklist.filter((item, index) => { - return ( - index === - merged.clusterResourceBlacklist.findIndex(obj => { - return obj.kind === item.kind && obj.group === item.group; - }) - ); - }); - - merged.clusterResourceWhitelist = merged.clusterResourceWhitelist.filter((item, index) => { - return ( - index === - merged.clusterResourceWhitelist.findIndex(obj => { - return obj.kind === item.kind && obj.group === item.group; - }) - ); - }); - - merged.namespaceResourceBlacklist = merged.namespaceResourceBlacklist.filter((item, index) => { - return ( - index === - merged.namespaceResourceBlacklist.findIndex(obj => { - return obj.kind === item.kind && obj.group === item.group; - }) - ); - }); - - merged.namespaceResourceWhitelist = 
merged.namespaceResourceWhitelist.filter((item, index) => { - return ( - index === - merged.namespaceResourceWhitelist.findIndex(obj => { - return obj.kind === item.kind && obj.group === item.group; - }) - ); - }); - merged.count += 1; - - return merged; - }, - { - clusterResourceBlacklist: new Array(), - namespaceResourceBlacklist: new Array(), - namespaceResourceWhitelist: new Array(), - clusterResourceWhitelist: new Array(), - sourceRepos: [], - signatureKeys: [], - destinations: [], - description: '', - roles: [], - count: 0 - } - ) +function reduceGlobal(projs: Project[]): ProjectSpec & {count: number} { + return (projs || []).reduce( + (merged, proj) => { + merged.clusterResourceBlacklist = merged.clusterResourceBlacklist.concat(proj.spec.clusterResourceBlacklist || []); + merged.clusterResourceWhitelist = merged.clusterResourceWhitelist.concat(proj.spec.clusterResourceWhitelist || []); + merged.namespaceResourceBlacklist = merged.namespaceResourceBlacklist.concat(proj.spec.namespaceResourceBlacklist || []); + merged.namespaceResourceWhitelist = merged.namespaceResourceWhitelist.concat(proj.spec.namespaceResourceWhitelist || []); + merged.sourceRepos = merged.sourceRepos.concat(proj.spec.sourceRepos || []); + merged.destinations = merged.destinations.concat(proj.spec.destinations || []); + + merged.sourceRepos = merged.sourceRepos.filter((item, index) => { + return ( + index === + merged.sourceRepos.findIndex(obj => { + return obj === item; + }) + ); + }); + + merged.destinations = merged.destinations.filter((item, index) => { + return ( + index === + merged.destinations.findIndex(obj => { + return obj.server === item.server && obj.namespace === item.namespace; + }) + ); + }); + + merged.clusterResourceBlacklist = merged.clusterResourceBlacklist.filter((item, index) => { + return ( + index === + merged.clusterResourceBlacklist.findIndex(obj => { + return obj.kind === item.kind && obj.group === item.group; + }) + ); + }); + + merged.clusterResourceWhitelist = 
merged.clusterResourceWhitelist.filter((item, index) => { + return ( + index === + merged.clusterResourceWhitelist.findIndex(obj => { + return obj.kind === item.kind && obj.group === item.group; + }) + ); + }); + + merged.namespaceResourceBlacklist = merged.namespaceResourceBlacklist.filter((item, index) => { + return ( + index === + merged.namespaceResourceBlacklist.findIndex(obj => { + return obj.kind === item.kind && obj.group === item.group; + }) + ); + }); + + merged.namespaceResourceWhitelist = merged.namespaceResourceWhitelist.filter((item, index) => { + return ( + index === + merged.namespaceResourceWhitelist.findIndex(obj => { + return obj.kind === item.kind && obj.group === item.group; + }) + ); + }); + merged.count += 1; + + return merged; + }, + { + clusterResourceBlacklist: new Array(), + namespaceResourceBlacklist: new Array(), + namespaceResourceWhitelist: new Array(), + clusterResourceWhitelist: new Array(), + sourceRepos: [], + signatureKeys: [], + destinations: [], + description: '', + roles: [], + count: 0 + } ); } @@ -149,8 +148,8 @@ export class ProjectDetails extends React.Component ctx.navigation.goto('.', {newRole: true})}, - {title: 'Add Sync Window', iconClassName: 'fa fa-plus', action: () => ctx.navigation.goto('.', {newWindow: true})}, + {title: 'Add Role', iconClassName: 'fa fa-plus', action: () => ctx.navigation.goto('.', {newRole: true}, {replace: true})}, + {title: 'Add Sync Window', iconClassName: 'fa fa-plus', action: () => ctx.navigation.goto('.', {newWindow: true}, {replace: true})}, { title: 'Delete', iconClassName: 'fa fa-times-circle', @@ -159,7 +158,7 @@ export class ProjectDetails extends React.Component, @@ -174,206 +173,212 @@ export class ProjectDetails extends React.Component { - return Promise.all([services.projects.get(this.props.match.params.name), loadGlobal(this.props.match.params.name)]); + return services.projects.getDetailed(this.props.match.params.name); }} ref={loader => (this.loader = loader)}> - {([proj, 
globalProj]) => ( + {scopedProj => ( - {params => ( -
- ctx.navigation.goto('.', {tab})} - navCenter={true} - tabs={[ - { - key: 'summary', - title: 'Summary', - content: this.summaryTab(proj, globalProj) - }, - { - key: 'roles', - title: 'Roles', - content: this.rolesTab(proj, ctx) - }, - { - key: 'windows', - title: 'Windows', - content: this.SyncWindowsTab(proj, ctx) - }, - { - key: 'events', - title: 'Events', - content: this.eventsTab(proj) - } - ].map(tab => ({...tab, isOnlyContentScrollable: true, extraVerticalScrollPadding: 160}))} - /> - { - this.setState({token: ''}); - ctx.navigation.goto('.', {editRole: null, newRole: null}); - }} - header={ -
- {' '} - {' '} - {params.get('newRole') === null ? ( + {params => { + const {project: proj, globalProjects: globalProj} = scopedProj; + return ( +
+ ctx.navigation.goto('.', {tab}, {replace: true})} + navCenter={true} + tabs={[ + { + key: 'summary', + title: 'Summary', + content: this.summaryTab(proj, reduceGlobal(globalProj), scopedProj) + }, + { + key: 'roles', + title: 'Roles', + content: this.rolesTab(proj, ctx) + }, + { + key: 'windows', + title: 'Windows', + content: this.SyncWindowsTab(proj, ctx) + }, + { + key: 'events', + title: 'Events', + content: this.eventsTab(proj) + } + ].map(tab => ({...tab, isOnlyContentScrollable: true, extraVerticalScrollPadding: 160}))} + /> + { + this.setState({token: ''}); + ctx.navigation.goto('.', {editRole: null, newRole: null}, {replace: true}); + }} + header={ +
+ {' '} - ) : null} -
- }> - {(params.get('editRole') !== null || params.get('newRole') === 'true') && ( - params.get('editRole') === x.name) - : undefined, - jwtTokens: - params.get('newRole') === null && proj.spec.roles !== undefined && proj.status.jwtTokensByRole !== undefined - ? proj.status.jwtTokensByRole[params.get('editRole')].items - : undefined - }} - getApi={(api: FormApi) => (this.projectRoleFormApi = api)} - submit={async (projRoleParams: ProjectRoleParams) => { - try { - await services.projects.updateRole(projRoleParams); - ctx.navigation.goto('.', {editRole: null, newRole: null}); - this.loader.reload(); - } catch (e) { - ctx.notifications.show({ - content: , - type: NotificationType.Error - }); - } - }} - token={this.state.token} - createJWTToken={async (jwtTokenParams: CreateJWTTokenParams) => this.createJWTToken(jwtTokenParams, ctx.notifications)} - deleteJWTToken={async (jwtTokenParams: DeleteJWTTokenParams) => this.deleteJWTToken(jwtTokenParams, ctx.notifications)} - hideJWTToken={() => this.setState({token: ''})} - /> - )} -
- { - this.setState({token: ''}); - ctx.navigation.goto('.', {editWindow: null, newWindow: null}); - }} - header={ -
- {' '} + {params.get('newRole') === null ? ( + + ) : null} +
+ }> + {(params.get('editRole') !== null || params.get('newRole') === 'true') && ( + params.get('editRole') === x.name) + : undefined, + jwtTokens: + params.get('newRole') === null && proj.spec.roles !== undefined && proj.status.jwtTokensByRole !== undefined + ? proj.status.jwtTokensByRole[params.get('editRole')].items + : undefined }} - className='argo-button argo-button--base-o'> - Cancel - {' '} - {' '} - {params.get('newWindow') === null ? ( + token={this.state.token} + createJWTToken={async (jwtTokenParams: CreateJWTTokenParams) => this.createJWTToken(jwtTokenParams, ctx.notifications)} + deleteJWTToken={async (jwtTokenParams: DeleteJWTTokenParams) => this.deleteJWTToken(jwtTokenParams, ctx.notifications)} + hideJWTToken={() => this.setState({token: ''})} + /> + )} +
+ { + this.setState({token: ''}); + ctx.navigation.goto('.', {editWindow: null, newWindow: null}, {replace: true}); + }} + header={ +
- ) : null} -
- }> - {(params.get('editWindow') !== null || params.get('newWindow') === 'true') && ( - (this.projectSyncWindowsFormApi = api)} - submit={async (projectSyncWindowsParams: ProjectSyncWindowsParams) => { - try { - await services.projects.updateWindow(projectSyncWindowsParams); - ctx.navigation.goto('.', {editWindow: null, newWindow: null}); - this.loader.reload(); - } catch (e) { - ctx.notifications.show({ - content: , - type: NotificationType.Error - }); - } - }} - /> - )} -
-
- )} + {params.get('newWindow') != null ? 'Create' : 'Update'} + {' '} + {' '} + {params.get('newWindow') === null ? ( + + ) : null} +
+ }> + {(params.get('editWindow') !== null || params.get('newWindow') === 'true') && ( + (this.projectSyncWindowsFormApi = api)} + submit={async (projectSyncWindowsParams: ProjectSyncWindowsParams) => { + try { + await services.projects.updateWindow(projectSyncWindowsParams); + ctx.navigation.goto('.', {editWindow: null, newWindow: null}, {replace: true}); + this.loader.reload(); + } catch (e) { + ctx.notifications.show({ + content: , + type: NotificationType.Error + }); + } + }} + /> + )} +
+
+ ); + }}
)}
@@ -386,9 +391,8 @@ export class ProjectDetails extends React.Component, @@ -400,9 +404,8 @@ export class ProjectDetails extends React.Component
- {window.kind}:{window.schedule}:{window.duration} + {window.kind}:{window.schedule}:{window.duration}:{window.timeZone}
{(window.applications || ['-']).join(',')}
{(window.namespaces || ['-']).join(',')}
@@ -535,9 +538,9 @@ export class ProjectDetails extends React.Component, @@ -546,7 +549,7 @@ export class ProjectDetails extends React.Component `${label}=${proj.metadata.labels[label]}`) .join(' '), edit: (formApi: FormApi) => + }, + { + title: 'LINKS', + view: ( +
+ services.projects.getLinks(proj.metadata.name)}>{links => } +
+ ) } ]} /> @@ -621,6 +632,23 @@ export class ProjectDetails extends React.Component + SCOPED REPOSITORIES{helpTip('Git repositories where application manifests are permitted to be retrieved from')}} + view={ + + {scopedProj.repositories && scopedProj.repositories.length + ? scopedProj.repositories.map((repo, i) => ( +
+
{repo.repo}
+
+ )) + : emptyMessage('source repositories')} +
+ } + items={[]} + /> + this.saveProject(item)} values={proj} @@ -631,12 +659,14 @@ export class ProjectDetails extends React.Component
Server
-
Namespace
+
Name
+
Namespace
{proj.spec.destinations.map((dest, i) => (
{dest.server}
-
{dest.namespace}
+
{dest.name}
+
{dest.namespace}
))} @@ -651,7 +681,8 @@ export class ProjectDetails extends React.Component
Server
-
Namespace
+
Name
+
Namespace
{(formApi.values.spec.destinations || []).map((_: Project, i: number) => (
@@ -663,7 +694,15 @@ export class ProjectDetails extends React.Component cluster.server)}} />
-
+
+ cluster.name)}} + /> +
+
formApi.setValue('spec.destinations', removeEl(formApi.values.spec.destinations, i))} /> @@ -676,7 +715,8 @@ export class ProjectDetails extends React.Component @@ -689,6 +729,23 @@ export class ProjectDetails extends React.Component + SCOPED CLUSTERS{helpTip('Cluster and namespaces where applications are permitted to be deployed to')}} + view={ + + {scopedProj.clusters && scopedProj.clusters.length + ? scopedProj.clusters.map((cluster, i) => ( +
+
{cluster.server}
+
+ )) + : emptyMessage('destinations')} +
+ } + items={[]} + /> + this.saveProject(item)} /> {globalProj.count > 0 && ( { groups: (props.defaultParams.role && props.defaultParams.role.groups) || [] }} validateError={(params: ProjectRoleParams) => ({ - projName: !params.projName && 'Project name is required', - roleName: !params.roleName && 'Role name is required' + projName: !params.projName && 'Project Name is required', + roleName: !params.roleName && 'Role Name is required' })}> {api => (
-
- +
+

GENERAL

+
+ +
+
+ +
-
- +
+ +
+
+
- - )} diff --git a/ui/src/app/settings/components/project-role-groups-edit/project-role-groups-edit.tsx b/ui/src/app/settings/components/project-role-groups-edit/project-role-groups-edit.tsx index ffcdd7f795001..131d8ed3e496e 100644 --- a/ui/src/app/settings/components/project-role-groups-edit/project-role-groups-edit.tsx +++ b/ui/src/app/settings/components/project-role-groups-edit/project-role-groups-edit.tsx @@ -13,7 +13,7 @@ interface ProjectRoleGroupsProps { export const ProjectRoleGroupsEdit = (props: ProjectRoleGroupsProps) => ( -

Groups

+

GROUPS

OIDC group names to bind to this role
{
@@ -38,11 +38,12 @@ export const ProjectRoleGroupsEdit = (props: ProjectRoleGroupsProps) => (
-
+
-
+
{ if (api.values.groupName.length > 0) { props.formApi.setValue('groups', (props.formApi.values.groups || []).concat(api.values.groupName)); diff --git a/ui/src/app/settings/components/project-role-policies-edit/project-role-policies-edit.tsx b/ui/src/app/settings/components/project-role-policies-edit/project-role-policies-edit.tsx index 23c647a44256f..d8a9b75e25cad 100644 --- a/ui/src/app/settings/components/project-role-policies-edit/project-role-policies-edit.tsx +++ b/ui/src/app/settings/components/project-role-policies-edit/project-role-policies-edit.tsx @@ -23,7 +23,7 @@ export const ProjectRolePoliciesEdit = (props: ProjectRolePoliciesProps) => ( services.applications.list([props.projName], {fields: ['items.metadata.name']}).then(list => list.items)}> {applications => ( -

Policy Rules

+

POLICY RULES

Manage this role's permissions to applications
@@ -50,6 +50,7 @@ export const ProjectRolePoliciesEdit = (props: ProjectRolePoliciesProps) => (
{ const newPolicy = generatePolicy(props.projName, props.roleName); props.formApi.setValue('policies', (props.formApi.values.policies || []).concat(newPolicy)); diff --git a/ui/src/app/settings/components/project-sync-windows-edit-panel/project-sync-windows-edit-panel.tsx b/ui/src/app/settings/components/project-sync-windows-edit-panel/project-sync-windows-edit-panel.tsx index 1eaa7dc627bfa..1a0fb520dc314 100644 --- a/ui/src/app/settings/components/project-sync-windows-edit-panel/project-sync-windows-edit-panel.tsx +++ b/ui/src/app/settings/components/project-sync-windows-edit-panel/project-sync-windows-edit-panel.tsx @@ -14,6 +14,8 @@ import { ProjectSyncWindowScheduleEdit } from '../project-sync-windows-edit/project-sync-windows-edit'; +import timezones from 'timezones-list'; + interface ProjectSyncWindowsDefaultParams { projName: string; window: models.SyncWindow; @@ -49,22 +51,44 @@ export const ProjectSyncWindowsEditPanel = (props: ProjectSyncWindowsEditPanelPr })}> {api => (
-
- +
+

GENERAL

+
+ +
+ +
+
+ +
+
+ +
+
+ +
+
+
+
- -
- +
+
-
- +
+
- - - )}
); + + function generateTimezones(): string[] { + const zones: string[] = []; + for (const tz of timezones) { + zones.push(tz.tzCode); + } + zones.sort(); + return zones; + } }; diff --git a/ui/src/app/settings/components/project-sync-windows-edit/project-sync-windows-edit.scss b/ui/src/app/settings/components/project-sync-windows-edit/project-sync-windows-edit.scss index 621d113e35c98..45aecc9cdc126 100644 --- a/ui/src/app/settings/components/project-sync-windows-edit/project-sync-windows-edit.scss +++ b/ui/src/app/settings/components/project-sync-windows-edit/project-sync-windows-edit.scss @@ -16,4 +16,14 @@ form > .row > .columns { padding-left: 0; } -} \ No newline at end of file + + &__options-wrapper { + display: block; + height: 206px; + max-height: 206px; + } + + &__text-wrapper { + white-space: normal; + } +} diff --git a/ui/src/app/settings/components/project-sync-windows-edit/project-sync-windows-edit.tsx b/ui/src/app/settings/components/project-sync-windows-edit/project-sync-windows-edit.tsx index d245c784bcec8..7253b05f08a4d 100644 --- a/ui/src/app/settings/components/project-sync-windows-edit/project-sync-windows-edit.tsx +++ b/ui/src/app/settings/components/project-sync-windows-edit/project-sync-windows-edit.tsx @@ -3,6 +3,7 @@ import * as React from 'react'; import * as ReactForm from 'react-form'; import {SyncWindow} from '../../../shared/models'; +require('./project-sync-windows-edit.scss'); interface ProjectSyncWindowProps { projName: string; window: SyncWindow; @@ -22,7 +23,7 @@ function helpTip(text: string) { export const ProjectSyncWindowApplicationsEdit = (props: ProjectSyncWindowProps) => ( -
Applications
+

APPLICATIONS

Manage applications assigned to this window ("*" for any)
{(props.window.applications || []).map((a, i) => ( @@ -37,6 +38,7 @@ export const ProjectSyncWindowApplicationsEdit = (props: ProjectSyncWindowProps)
{ const newA = ''; props.formApi.setValue('window.applications', (props.formApi.values.window.applications || []).concat(newA)); @@ -51,7 +53,7 @@ export const ProjectSyncWindowApplicationsEdit = (props: ProjectSyncWindowProps) export const ProjectSyncWindowNamespaceEdit = (props: ProjectSyncWindowProps) => ( -
Namespaces
+

NAMESPACES

Manage namespaces assigned to this window ("*" for any)
{(props.window.namespaces || []).map((n, i) => ( @@ -66,6 +68,7 @@ export const ProjectSyncWindowNamespaceEdit = (props: ProjectSyncWindowProps) =>
{ const newN = ''; props.formApi.setValue('window.namespaces', (props.formApi.values.window.namespaces || []).concat(newN)); @@ -80,7 +83,7 @@ export const ProjectSyncWindowNamespaceEdit = (props: ProjectSyncWindowProps) => export const ProjectSyncWindowClusterEdit = (props: ProjectSyncWindowProps) => ( -
Clusters
+

CLUSTERS

Manage clusters assigned to this window ("*" for any)
{(props.window.clusters || []).map((c, i) => ( @@ -95,6 +98,7 @@ export const ProjectSyncWindowClusterEdit = (props: ProjectSyncWindowProps) => (
{ const newC = ''; props.formApi.setValue('window.clusters', (props.formApi.values.window.clusters || []).concat(newC)); @@ -156,7 +160,7 @@ function generateSchedule(minute?: string, hour?: string, dom?: string, month?: export const ProjectSyncWindowScheduleEdit = (props: ProjectSyncWindowProps) => ( -
Schedule
+

Schedule

Minute{helpTip('The minute/minutes assigned to the schedule')}
@@ -240,7 +244,7 @@ class ScheduleWrapper extends React.Component {
{ } this.setValues(hourValues, 1); }}> - {generateRange(24, true).map(m => ( @@ -290,7 +294,7 @@ class ScheduleWrapper extends React.Component {
{ } this.setValues(monthValues, 3); }}> -
onChange({...item, name: e.target.value})} - title='Name' - /> -   =   +export const NameValueEditor = (item: NameValue, onChange?: (item: NameValue) => any) => { + return ( + + onChange({...item, name: e.target.value})} + // onBlur={e=>onChange({...item, name: e.target.value})} + title='Name' + readOnly={!onChange} + /> +   =   + onChange({...item, value: e.target.value})} + title='Value' + readOnly={!onChange} + /> +   + + ); +}; + +export const ValueEditor = (item: string, onChange: (item: string) => any) => { + return ( onChange({...item, value: e.target.value})} + value={item || ''} + onChange={e => onChange(e.target.value)} title='Value' + readOnly={!onChange} /> -   - -); + ); +}; interface Props { items: T[]; @@ -97,6 +119,50 @@ export function ArrayInput(props: Props) { ); } +export const ResetOrDeleteButton = (props: { + isPluginPar: boolean; + getValue: () => FormValue; + name: string; + index: number; + setValue: (value: FormValue) => void; + setAppParamsDeletedState: any; +}) => { + const handleDeleteChange = () => { + if (props.index >= 0) { + props.setAppParamsDeletedState((val: string[]) => val.concat(props.name)); + } + }; + + const handleResetChange = () => { + if (props.index >= 0) { + const items = [...props.getValue()]; + items.splice(props.index, 1); + props.setValue(items); + } + }; + + const disabled = props.index === -1; + + const content = props.isPluginPar ? 'Reset' : 'Delete'; + let tooltip = ''; + if (content === 'Reset' && !disabled) { + tooltip = 'Resets the parameter to the value provided by the plugin. 
This removes the parameter override from the application manifest'; + } else if (content === 'Delete' && !disabled) { + tooltip = 'Deletes this parameter values from the application manifest.'; + } + + return ( + + ); +}; + export const ArrayInputField = ReactForm.FormField((props: {fieldApi: ReactForm.FieldApi}) => { const { fieldApi: {getValue, setValue} @@ -104,6 +170,95 @@ export const ArrayInputField = ReactForm.FormField((props: {fieldApi: ReactForm. return ; }); +export const ArrayValueField = ReactForm.FormField( + (props: {fieldApi: ReactForm.FieldApi; name: string; defaultVal: string[]; isPluginPar: boolean; setAppParamsDeletedState: any}) => { + const { + fieldApi: {getValue, setValue} + } = props; + + let liveParamArray; + const liveParam = getValue()?.find((val: {name: string; array: object}) => val.name === props.name); + if (liveParam) { + liveParamArray = liveParam?.array ?? []; + } + const index = getValue()?.findIndex((val: {name: string; array: object}) => val.name === props.name) ?? -1; + const values = liveParamArray ?? props.defaultVal ?? []; + + return ( + + + { + const update = change.map((val: string | object) => (typeof val !== 'string' ? '' : val)); + if (index >= 0) { + getValue()[index].array = update; + setValue([...getValue()]); + } else { + setValue([...(getValue() || []), {name: props.name, array: update}]); + } + }} + /> + + ); + } +); + +export const StringValueField = ReactForm.FormField( + (props: {fieldApi: ReactForm.FieldApi; name: string; defaultVal: string; isPluginPar: boolean; setAppParamsDeletedState: any}) => { + const { + fieldApi: {getValue, setValue} + } = props; + let liveParamString; + const liveParam = getValue()?.find((val: {name: string; string: string}) => val.name === props.name); + if (liveParam) { + liveParamString = liveParam?.string ? liveParam?.string : ''; + } + const values = liveParamString ?? props.defaultVal ?? 
''; + const index = getValue()?.findIndex((val: {name: string; string: string}) => val.name === props.name) ?? -1; + + return ( + + +
+ { + if (index >= 0) { + getValue()[index].string = e.target.value; + setValue([...getValue()]); + } else { + setValue([...(getValue() || []), {name: props.name, string: e.target.value}]); + } + }} + title='Value' + /> +
+
+ ); + } +); + export const MapInputField = ReactForm.FormField((props: {fieldApi: ReactForm.FieldApi}) => { const { fieldApi: {getValue, setValue} @@ -123,3 +278,55 @@ export const MapInputField = ReactForm.FormField((props: {fieldApi: ReactForm.Fi /> ); }); + +export const MapValueField = ReactForm.FormField( + (props: {fieldApi: ReactForm.FieldApi; name: string; defaultVal: Map; isPluginPar: boolean; setAppParamsDeletedState: any}) => { + const { + fieldApi: {getValue, setValue} + } = props; + const items = new Array(); + const liveParam = getValue()?.find((val: {name: string; map: object}) => val.name === props.name); + const index = getValue()?.findIndex((val: {name: string; map: object}) => val.name === props.name) ?? -1; + if (liveParam) { + liveParam.map = liveParam.map ? liveParam.map : new Map(); + } + if (liveParam?.array) { + items.push(...liveParam.array); + } else { + const map = liveParam?.map ?? props.defaultVal ?? new Map(); + Object.keys(map).forEach(item => items.push({name: item || '', value: map[item] || ''})); + if (liveParam?.map) { + getValue()[index].array = items; + } + } + + return ( + + + + { + if (index === -1) { + getValue().push({ + name: props.name, + array: change + }); + } else { + getValue()[index].array = change; + } + setValue([...getValue()]); + }} + /> + + ); + } +); diff --git a/ui/src/app/shared/components/badge-panel/badge-panel.tsx b/ui/src/app/shared/components/badge-panel/badge-panel.tsx index 0c3ef11d2553a..ad6a4f6c187e2 100644 --- a/ui/src/app/shared/components/badge-panel/badge-panel.tsx +++ b/ui/src/app/shared/components/badge-panel/badge-panel.tsx @@ -9,66 +9,67 @@ require('./badge-panel.scss'); export const BadgePanel = ({app, project}: {app?: string; project?: string}) => { const [badgeType, setBadgeType] = React.useState('URL'); const context = React.useContext(Context); - const root = `${location.protocol}//${location.host}${context.baseHref}`; + if (!app && !project) { + throw new Error('Either app or project 
property must be specified'); + } - let badgeURL = ''; - let entityURL = ''; - let alt = ''; - if (app) { - badgeURL = `${root}api/badge?name=${app}&revision=true`; - entityURL = `${root}applications/${app}`; - alt = 'App Status'; - } else if (project) { - badgeURL = `${root}api/badge?project=${project}&revision=true`; - entityURL = `${root}projects/${project}`; - alt = 'Project Status'; - } else { - throw new Error('Either app of project property must be specified'); + function badgeContent(statusBadgeRootUrl: string) { + const root = statusBadgeRootUrl ? statusBadgeRootUrl : `${location.protocol}//${location.host}${context.baseHref}`; + let badgeURL = ''; + let entityURL = ''; + let alt = ''; + if (app) { + badgeURL = `${root}api/badge?name=${app}&revision=true`; + entityURL = `${root}applications/${app}`; + alt = 'App Status'; + } else if (project) { + badgeURL = `${root}api/badge?project=${project}&revision=true`; + entityURL = `${root}projects/${project}`; + alt = 'Project Status'; + } + return ( +
+
+

STATUS BADGE

+

+ {' '} +

+
+ ( +

+ {badgeType} +

+ )} + items={['URL', 'Markdown', 'Textile', 'Rdoc', 'AsciiDoc'].map(type => ({title: type, action: () => setBadgeType(type)}))} + /> +