diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 56e93483c234..0d60bf282af4 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,9 +1,17 @@ -Tips: +Don't bother creating a PR until you've done this: + +* [ ] Run `make pre-commit -B` to fix codegen, lint, and commit message problems. + +Create your PR as a draft. + -* Maybe add you organization to [USERS.md](https://github.com/argoproj/argo-workflows/blob/master/USERS.md). -* Your PR needs to pass the required checks before it can be approved. If the check is not required (e.g. E2E tests) it does not need to pass -* Sign-off your commits to pass the DCO check: `git commit --signoff`. -* Run `make pre-commit -B` to fix codegen or lint problems. +* Your PR needs to pass the required checks before it can be approved. If the check is not required (e.g. E2E tests) it + does not need to pass. +* Once required tests have passed, you can make it "Ready for review". * Say how how you tested your changes. If you changed the UI, attach screenshots. -* If changes were requested, and you've made them, then dismis the review to get it looked at again. -* You can ask for help! + +Tips: + +* If changes were requested, and you've made them, then dismiss the review to get it looked at again. +* Add your organization to [USERS.md](https://github.com/argoproj/argo-workflows/blob/master/USERS.md) if you like. +* You can ask for help! diff --git a/.github/workflows/ci-build.yaml b/.github/workflows/ci-build.yaml index c3bf974b1a62..2282ac400a16 100644 --- a/.github/workflows/ci-build.yaml +++ b/.github/workflows/ci-build.yaml @@ -13,7 +13,7 @@ on: jobs: tests: name: Unit Tests - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest # 5m30 timeout-minutes: 8 steps: - run: make server/static/files.go STATIC_FILES=false - run: go build -v ./... 
- run: make test STATIC_FILES=false GOTEST='go test -covermode=atomic -coverprofile=coverage.out' + # engineers just ignore this in PRs, so lets not even run it - run: bash <(curl -s https://codecov.io/bash) + if: github.ref == 'refs/heads/master' e2e-tests: name: E2E Tests - runs-on: ubuntu-20.04 - # test-api: 7m (1m10s locally) - # test-cli: 12m (4m locally) - # test-cron: 8m - # test-executor 8m (2m locally) - # test-functional: 13m (7m locally) + runs-on: ubuntu-latest timeout-minutes: 20 env: KUBECONFIG: /home/runner/.kubeconfig @@ -57,14 +54,20 @@ jobs: containerRuntimeExecutor: docker profile: mysql - test: test-cron - containerRuntimeExecutor: docker + containerRuntimeExecutor: emissary profile: minimal - - test: test-examples + - test: test-executor + containerRuntimeExecutor: emissary + profile: minimal + - test: test-functional containerRuntimeExecutor: emissary profile: minimal - test: test-executor containerRuntimeExecutor: docker profile: minimal + - test: test-examples + containerRuntimeExecutor: emissary + profile: minimal - test: test-executor containerRuntimeExecutor: k8sapi profile: minimal @@ -74,9 +77,6 @@ jobs: - test: test-executor containerRuntimeExecutor: pns profile: minimal - - test: test-functional - containerRuntimeExecutor: docker - profile: minimal steps: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 @@ -94,9 +94,27 @@ jobs: with: path: /home/runner/go/bin key: go-bin-v1-${{ hashFiles('**/go.mod') }} + - name: Cache Docker layers + uses: actions/cache@v2 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildx- + - uses: docker/setup-buildx-action@v1 - run: mkdir -p /tmp/log/argo-e2e + # we never want to pull images by accident on CI after we built them + # so imagePullPolicy=Never + # but, we can pull it if we don't need it + - run: docker pull quay.io/argoproj/argoexec:latest + if: ${{!(matrix.test == 'test-executor' || matrix.test == 
'test-functional')}} + - name: make argoexec-image + if: ${{matrix.test == 'test-executor' || matrix.test == 'test-functional'}} + # retry this once, as it can be flakey + run: | + make argoexec-image STATIC_FILES=false || make argoexec-image STATIC_FILES=false + docker image prune -f - name: Install and start K3S - timeout-minutes: 3 run: | curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=v1.21.2+k3s1 INSTALL_K3S_CHANNEL=stable INSTALL_K3S_EXEC=--docker K3S_KUBECONFIG_MODE=644 sh - until kubectl --kubeconfig=/etc/rancher/k3s/k3s.yaml cluster-info ; do sleep 10s ; done @@ -113,21 +131,20 @@ jobs: echo '127.0.0.1 mysql' | sudo tee -a /etc/hosts - run: make install controller cli $(go env GOPATH)/bin/goreman PROFILE=${{matrix.profile}} E2E_EXECUTOR=${{matrix.containerRuntimeExecutor}} AUTH_MODE=client STATIC_FILES=false LOG_LEVEL=info - run: make start PROFILE=${{matrix.profile}} E2E_EXECUTOR=${{matrix.containerRuntimeExecutor}} AUTH_MODE=client STATIC_FILES=false LOG_LEVEL=info UI=false > /tmp/log/argo-e2e/argo.log 2>&1 & - timeout-minutes: 4 - - name: make argoexec-image - run: make argoexec-image STATIC_FILES=false - run: make wait timeout-minutes: 4 - run: make ${{matrix.test}} E2E_TIMEOUT=1m STATIC_FILES=false - name: cat argo.log if: ${{ failure() }} run: cat /tmp/log/argo-e2e/argo.log - + - name: MinIO logs + if: ${{ failure() }} + run: kubectl -n argo logs deploy/minio codegen: name: Codegen - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest needs: [ tests ] - timeout-minutes: 9 + timeout-minutes: 15 env: GOPATH: /home/runner/go PROTOC_ZIP: protoc-3.11.1-linux-x86_64.zip @@ -173,7 +190,7 @@ jobs: lint: name: Lint - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest needs: [ tests, codegen ] timeout-minutes: 6 env: @@ -187,7 +204,7 @@ jobs: ui: name: UI - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest timeout-minutes: 6 env: NODE_OPTIONS: --max-old-space-size=4096 diff --git a/.github/workflows/gh-pages.yaml b/.github/workflows/gh-pages.yaml index 
b1b87ea6f373..dd396fa887a6 100644 --- a/.github/workflows/gh-pages.yaml +++ b/.github/workflows/gh-pages.yaml @@ -8,7 +8,7 @@ on: jobs: deploy: if: github.repository == 'argoproj/argo-workflows' - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: Setup Python diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 8360c8cdb543..bf110a03d3e5 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -17,7 +17,7 @@ jobs: build-linux-amd64: name: Build & push linux/amd64 if: github.repository == 'codefresh-io/argo-workflows' - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest strategy: matrix: platform: [ linux/amd64 ] @@ -85,7 +85,7 @@ jobs: build-linux-arm64: name: Build & push linux/arm64 if: github.repository == 'codefresh-io/argo-workflows' - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest strategy: matrix: platform: [ linux/arm64 ] @@ -200,7 +200,7 @@ jobs: push-images: name: Push manifest with all images if: github.repository == 'codefresh-io/argo-workflows' - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest needs: [ build-linux-amd64, build-linux-arm64, build-windows ] steps: - uses: actions/checkout@v2 @@ -253,7 +253,7 @@ jobs: test-images-linux-amd64: name: Try pulling linux/amd64 if: github.repository == 'codefresh-io/argo-workflows' - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest needs: [ push-images ] strategy: matrix: @@ -328,7 +328,7 @@ jobs: done publish-release: - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest if: github.repository == 'codefresh-io/argo-workflows' needs: [ push-images, test-images-linux-amd64, test-images-windows ] env: diff --git a/.github/workflows/sdks.yaml b/.github/workflows/sdks.yaml index 30f0cdb94ae1..c9653335c2f9 100644 --- a/.github/workflows/sdks.yaml +++ b/.github/workflows/sdks.yaml @@ -3,10 +3,8 @@ on: push: tags: - v* - - 'v3.2.*' - - 'v3.1.*' branches: - - dev-* + - master jobs: sdk: if: github.repository == 'argoproj/argo-workflows' 
@@ -21,9 +19,3 @@ jobs: - run: make --directory sdks/${{matrix.name}} publish -B env: JAVA_SDK_MAVEN_PASSWORD: ${{ secrets.GITHUB_TOKEN }} - - uses: peter-evans/create-pull-request@v3 - with: - title: 'chore: updated ${{matrix.name}} SDK' - commit-message: 'chore: updated ${{matrix.name}} SDK' - branch: create-pull-request/sdk/${{matrix.name}} - signoff: true diff --git a/.golangci.yml b/.golangci.yml index f8bf17c54041..e5c7b134f2cc 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,7 +1,7 @@ # https://golangci-lint.run/usage/quick-start/ run: concurrency: 4 - timeout: 5m + timeout: 8m skip-dirs: - pkg/client - vendor diff --git a/Makefile b/Makefile index 3e0c638de4f0..2a70a4a15526 100644 --- a/Makefile +++ b/Makefile @@ -219,7 +219,13 @@ argoexec-image: %-image: [ ! -e dist/$* ] || mv dist/$* . - docker buildx build -t $(IMAGE_NAMESPACE)/$*:$(VERSION) --target $* --output=type=docker . + docker buildx install + docker build \ + -t $(IMAGE_NAMESPACE)/$*:$(VERSION) \ + --target $* \ + --cache-from "type=local,src=/tmp/.buildx-cache" \ + --cache-to "type=local,dest=/tmp/.buildx-cache" \ + --output=type=docker . [ ! -e $* ] || mv $* dist/ docker run --rm -t $(IMAGE_NAMESPACE)/$*:$(VERSION) version if [ $(K3D) = true ]; then k3d image import $(IMAGE_NAMESPACE)/$*:$(VERSION); fi @@ -396,7 +402,7 @@ test: server/static/files.go dist/argosay env KUBECONFIG=/dev/null $(GOTEST) ./... 
.PHONY: install -install: +install: githooks kubectl get ns $(KUBE_NAMESPACE) || kubectl create ns $(KUBE_NAMESPACE) kubectl config set-context --current --namespace=$(KUBE_NAMESPACE) @echo "installing PROFILE=$(PROFILE), E2E_EXECUTOR=$(E2E_EXECUTOR)" @@ -570,15 +576,15 @@ validate-examples: api/jsonschema/schema.json cd examples && go test # pre-push +.git/hooks/commit-msg: hack/git/hooks/commit-msg + cp -v hack/git/hooks/commit-msg .git/hooks/commit-msg -.PHONY: pre-commit -pre-commit: codegen lint test start -ifeq ($(GIT_BRANCH),master) -LOG_OPTS := '-n10' -else -LOG_OPTS := 'origin/master..' -endif +.PHONY: githooks +githooks: .git/hooks/commit-msg + +.PHONY: pre-commit +pre-commit: githooks codegen lint release-notes: /dev/null version=$(VERSION) envsubst < hack/release-notes.md > release-notes diff --git a/USERS.md b/USERS.md index a69f6539862a..f2c7f35b7ab4 100644 --- a/USERS.md +++ b/USERS.md @@ -147,6 +147,7 @@ Currently, the following organizations are **officially** using Argo Workflows: 1. [WooliesX](https://wooliesx.com.au/) 1. [Woolworths Group](https://www.woolworthsgroup.com.au/) 1. [Workiva](https://www.workiva.com/) +1. [Voyager](https://investvoyager.com/) 1. [Zhihu](https://www.zhihu.com/) ### Projects Using Argo diff --git a/cmd/argo/commands/cron/delete.go b/cmd/argo/commands/cron/delete.go index 5e55e73b418b..9e3bf4790ade 100644 --- a/cmd/argo/commands/cron/delete.go +++ b/cmd/argo/commands/cron/delete.go @@ -38,6 +38,6 @@ func NewDeleteCommand() *cobra.Command { }, } - command.Flags().BoolVar(&all, "all", false, "Delete all workflow templates") + command.Flags().BoolVar(&all, "all", false, "Delete all cron workflows") return command } diff --git a/cmd/argo/commands/cron/util.go b/cmd/argo/commands/cron/util.go index 4f5d2df7b467..bd0fc8b98300 100644 --- a/cmd/argo/commands/cron/util.go +++ b/cmd/argo/commands/cron/util.go @@ -11,11 +11,7 @@ import ( // GetNextRuntime returns the next time the workflow should run in local time. 
It assumes the workflow-controller is in // UTC, but nevertheless returns the time in the local timezone. func GetNextRuntime(cwf *v1alpha1.CronWorkflow) (time.Time, error) { - cronScheduleString := cwf.Spec.Schedule - if cwf.Spec.Timezone != "" { - cronScheduleString = "CRON_TZ=" + cwf.Spec.Timezone + " " + cronScheduleString - } - cronSchedule, err := cron.ParseStandard(cronScheduleString) + cronSchedule, err := cron.ParseStandard(cwf.Spec.GetScheduleString()) if err != nil { return time.Time{}, err } diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index ff5d5c27cc75..fc3c3aebd3e1 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -8,13 +8,19 @@ Please [raise an issue in Github](https://github.com/argoproj/argo-workflows/iss See [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). +## Contributor Meetings + +A weekly opportunity for committers and maintainers of Workflows, Events, and Dataflow to discuss their current work and talk +about what’s next. Feel free to join us! For Contributor Meeting information, minutes and recordings +please [see here](https://bit.ly/argo-data-weekly). + ## How To Contribute We're always looking for contributors. * Documentation - something missing or unclear? Please submit a pull request! * Code contribution - investigate a [help wanted issue](https://github.com/argoproj/argo-workflows/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22+label%3A%22good+first+issue%22), or anything labelled with "good first issue"? -* Join the #argo-devs channel on [our Slack](https://argoproj.github.io/community/join-slack). +* Join the `#argo-contributors` channel on [our Slack](https://argoproj.github.io/community/join-slack). ### Running Locally diff --git a/docs/cli/argo_cron_delete.md b/docs/cli/argo_cron_delete.md index 76a8ef9772ef..ea41fd86c6cc 100644 --- a/docs/cli/argo_cron_delete.md +++ b/docs/cli/argo_cron_delete.md @@ -9,7 +9,7 @@ argo cron delete [CRON_WORKFLOW... 
| --all] [flags] ### Options ``` - --all Delete all workflow templates + --all Delete all cron workflows -h, --help help for delete ``` diff --git a/docs/fields.md b/docs/fields.md index d1fd5fa5a503..5ffc46a9c45a 100644 --- a/docs/fields.md +++ b/docs/fields.md @@ -2185,6 +2185,8 @@ Outputs hold parameters, artifacts, and results from a step - [`dag-conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/dag-conditional-parameters.yaml) +- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/data-transformations.yaml) + - [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/exit-handler-with-artifacts.yaml) - [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/exit-handler-with-param.yaml) diff --git a/docs/node-field-selector.md b/docs/node-field-selector.md index 5465c06f2e52..086f5cf04e2c 100644 --- a/docs/node-field-selector.md +++ b/docs/node-field-selector.md @@ -61,7 +61,7 @@ Consider the following workflow: Here we have two steps with the same `displayName`: `wait-approval`. To select one to suspend, we need to use their `name`, either `appr-promotion-ffsv4.app1.wait-approval` or `appr-promotion-ffsv4.app3.wait-approval`. If it is not clear -what the full name of a done is, it can be found using `kubectl`: +what the full name of a node is, it can be found using `kubectl`: ``` $ kubectl get wf appr-promotion-ffsv4 -o yaml diff --git a/docs/releases.md b/docs/releases.md index 479f0c745643..3c3ec7b00e8d 100644 --- a/docs/releases.md +++ b/docs/releases.md @@ -17,12 +17,13 @@ Both the `argo-server` and `argocli` should be the same version as the controlle # Release Cycle +For **stable**, use the latest patch version. For **unstable**, we build and tag `latest` images for every commit to master. -New major versions are released roughly every 3 months. 
Release candidates for each major release are typically available -for 6 weeks before the release becomes generally available. +New minor versions are released roughly every 3 months. Release candidates for each major release are typically available +for 4-6 weeks before the release becomes generally available. -Otherwise, we typically release once a week: +Otherwise, we typically release weekly: -* Patch fixes for the current stable version. These are tagged `stable`. +* Patch fixes for the current stable version. * The next release candidate, if we are currently in a release-cycle. diff --git a/docs/security.md b/docs/security.md index b132eaa53fb4..44f88d57ecb2 100644 --- a/docs/security.md +++ b/docs/security.md @@ -75,7 +75,7 @@ Argo Workflows requires various levels of network access depending on configurat The argo server is commonly exposed to end-users to provide users with a user interface for visualizing and managing their workflows. It must also be exposed if leveraging [webhooks](webhooks.md) to trigger workflows. Both of these use cases require that the argo-server Service to be exposed for ingress traffic (e.g. with an Ingress object or load balancer). Note that the Argo UI is also available to be accessed by running the server locally (i.e. `argo server`) using local kubeconfig credentials, and visiting the UI over https://localhost:2746. -The argo server additionally has a feature to allow downloading of artifacts through the user interface. This feature requires that the argo-server be given egress access to the underlying artifact provider (e.g. S3, GCS, MinIO, Arfactory) in order to download and stream the artifact. +The argo server additionally has a feature to allow downloading of artifacts through the user interface. This feature requires that the argo-server be given egress access to the underlying artifact provider (e.g. S3, GCS, MinIO, Artifactory) in order to download and stream the artifact. 
### Workflow Controller diff --git a/docs/variables.md b/docs/variables.md index f42fdf51b626..b49e057cc6ac 100644 --- a/docs/variables.md +++ b/docs/variables.md @@ -234,7 +234,8 @@ For `Template`-level metrics: | `workflow.annotations.` | Workflow annotations | | `workflow.labels.` | Workflow labels | | `workflow.creationTimestamp` | Workflow creation timestamp formatted in RFC 3339 (e.g. `2018-08-23T05:42:49Z`) | -| `workflow.creationTimestamp.` | Creation timestamp formatted with a [strftime](http://strftime.org) format character | +| `workflow.creationTimestamp.` | Creation timestamp formatted with a [strftime](http://strftime.org) format character. | +| `workflow.creationTimestamp.RFC3339` | Creation timestamp formatted in RFC 3339. | | `workflow.priority` | Workflow priority | | `workflow.duration` | Workflow duration estimate, may differ from actual duration by a couple of seconds | | `workflow.scheduledTime` | Scheduled runtime formatted in RFC 3339 (only available for CronWorkflows) | diff --git a/examples/README.md b/examples/README.md index c81bea74448f..04c4b114c8d4 100644 --- a/examples/README.md +++ b/examples/README.md @@ -235,12 +235,12 @@ spec: parameters: - name: message value: "hello2a" - - name: hello2b # single dash => run in parallel with previous step - template: whalesay - arguments: - parameters: - - name: message - value: "hello2b" + - name: hello2b # single dash => run in parallel with previous step + template: whalesay + arguments: + parameters: + - name: message + value: "hello2b" # This is the same template as from the previous example - name: whalesay diff --git a/examples/data-transformations.yaml b/examples/data-transformations.yaml index 7c0921ac499f..9abb3ff43805 100644 --- a/examples/data-transformations.yaml +++ b/examples/data-transformations.yaml @@ -42,6 +42,10 @@ spec: transformation: - expression: "filter(data, {# endsWith \"main.log\"})" + outputs: + artifacts: + - name: file + path: /file - name: process-logs 
inputs: diff --git a/examples/expression-tag-template-workflow.yaml b/examples/expression-tag-template-workflow.yaml index 914f291a720e..87e58e40a01a 100644 --- a/examples/expression-tag-template-workflow.yaml +++ b/examples/expression-tag-template-workflow.yaml @@ -3,14 +3,14 @@ kind: Workflow metadata: generateName: expression-tag-template- labels: - workflows.argoproj.io/test: "true" + workflows.argoproj.io/test-local: "true" annotations: # available in v3.1.0 workflows.argoproj.io/version: ">= 3.1.0" workflows.argoproj.io/verify.py: | assert status["phase"] == "Succeeded" assert nodes["task-0(0:3)"]["phase"] == "Succeeded" - assert nodes["task-0(0:3)"]["outputs"]["parameters"][0]["value"] == "hello 30 @ 2021\n" + assert nodes["task-0(0:3)"]["outputs"]["parameters"][0]["value"] == "hello 30 @ 2022\n" spec: entrypoint: main templates: diff --git a/go.mod b/go.mod index 855ec3ac8652..e66a3252acf2 100644 --- a/go.mod +++ b/go.mod @@ -70,7 +70,6 @@ require ( gopkg.in/jcmturner/gokrb5.v5 v5.3.0 gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 // indirect gopkg.in/square/go-jose.v2 v2.5.1 - gopkg.in/src-d/go-git.v4 v4.13.1 k8s.io/api v0.21.5 k8s.io/apimachinery v0.21.5 k8s.io/client-go v0.21.5 @@ -82,3 +81,11 @@ require ( sigs.k8s.io/yaml v1.2.0 upper.io/db.v3 v3.6.3+incompatible ) + +require github.com/go-git/go-git/v5 v5.3.0 + +require ( + github.com/onsi/ginkgo v1.16.4 // indirect + github.com/onsi/gomega v1.13.0 // indirect + google.golang.org/protobuf v1.27.1 // indirect +) diff --git a/go.sum b/go.sum index f317d39cd335..91091367f5bc 100644 --- a/go.sum +++ b/go.sum @@ -352,10 +352,14 @@ github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod 
h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-billy/v5 v5.1.0 h1:4pl5BV4o7ZG/lterP4S6WzJ6xr49Ba5ET9ygheTYahk= github.com/go-git/go-billy/v5 v5.1.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12 h1:PbKy9zOy4aAKrJ5pibIRpVO2BXnK1Tlcg+caKI7Ox5M= github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= +github.com/go-git/go-git/v5 v5.3.0 h1:8WKMtJR2j8RntEXR/uvTKagfEt4GYlwQ7mntE4+0GWc= github.com/go-git/go-git/v5 v5.3.0/go.mod h1:xdX4bWJ48aOrdhnl2XqHYstHbbp6+LFS4r4X+lNVprw= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -464,6 +468,7 @@ github.com/go-swagger/go-swagger v0.25.0 h1:FxhyrWWV8V/A9P6GtI5szWordAdbb6Y0nqdY github.com/go-swagger/go-swagger v0.25.0/go.mod h1:9639ioXrPX9E6BbnbaDklGXjNz7upAXoNBwL4Ok11Vk= github.com/go-swagger/scan-repo-boundary v0.0.0-20180623220736-973b3573c013 h1:l9rI6sNaZgNC0LnF3MiE+qTmyBA/tZAg1rtyrGbUMK0= github.com/go-swagger/scan-repo-boundary v0.0.0-20180623220736-973b3573c013/go.mod h1:b65mBPzqzZWxOZGxSWrqs4GInLIn+u99Q9q7p+GKni0= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod 
h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= @@ -731,7 +736,6 @@ github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q github.com/karrick/godirwalk v1.7.8/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= -github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck= github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= @@ -763,7 +767,6 @@ github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= @@ -885,8 +888,9 @@ github.com/nicksnyder/go-i18n v1.10.1-0.20190510212457-b280125b035a/go.mod h1:e4 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ= github.com/nsqio/go-nsq v1.0.8/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQTuDEY= -github.com/nxadm/tail v1.4.4 
h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= @@ -900,16 +904,19 @@ github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4= github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs= github.com/onsi/gomega v1.10.2/go.mod 
h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= +github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= @@ -925,7 +932,6 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/getopt v0.0.0-20180729010549-6fdd0a2c7117/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs= @@ -1071,8 +1077,6 @@ github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/y github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= -github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4= -github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp 
v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -1127,7 +1131,6 @@ github.com/valyala/gozstd v1.7.0/go.mod h1:y5Ew47GLlP37EkTB+B4s7r6A5rdaeB7ftbl9z github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/xanzy/go-gitlab v0.33.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= -github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= @@ -1337,6 +1340,7 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1380,7 +1384,6 @@ golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1437,6 +1440,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1502,7 +1506,6 @@ golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools 
v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190808195139-e713427fea3f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1547,6 +1550,7 @@ golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82u golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= @@ -1681,8 +1685,9 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1725,12 +1730,6 @@ gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76 gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/src-d/go-billy.v4 v4.3.2 h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg= -gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98= -gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOAJK+LsJg= -gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g= -gopkg.in/src-d/go-git.v4 v4.13.1 h1:SRtFyV8Kxc0UP7aCHcijOMQGPxHSmMOPrzulQWolkYE= -gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= diff --git a/hack/git/hooks/commit-msg b/hack/git/hooks/commit-msg new file mode 100755 index 000000000000..5ea0ef6e5b4d --- /dev/null +++ b/hack/git/hooks/commit-msg @@ -0,0 +1,14 @@ +#!/bin/sh +set -eu + +grep -q 'Signed-off-by: ' "$1" || { + echo >&2 'Commits must be signed-off: https://probot.github.io/apps/dco/' + exit 1 +} + +grep -qE '^(?:feat|fix|docs|style|refactor|perf|test|chore)\(?(?:\w+|\s|\-|_)?\)?:\s\w+' "$1" || grep -q 'Merge' "$1" || { + echo >&2 'Commit message must be semantic: 
https://github.com/zeke/semantic-pull-requests' + exit 1 +} + +echo 'Your commit message is acceptable' \ No newline at end of file diff --git a/hack/port-forward.sh b/hack/port-forward.sh index 2978e0c88af8..8fbb64f379e1 100755 --- a/hack/port-forward.sh +++ b/hack/port-forward.sh @@ -9,7 +9,7 @@ pf() { ./hack/free-port.sh $port echo "port-forward $resource $port" kubectl -n argo port-forward "svc/$resource" "$port:$dest_port" > /dev/null & - until lsof -i ":$port" > /dev/null ; do sleep 1s ; done + until lsof -i ":$port" > /dev/null ; do sleep 1 ; done } wait-for() { diff --git a/hack/ssh_known_hosts b/hack/ssh_known_hosts index 31a7bae3fce5..a89bb62154a5 100644 --- a/hack/ssh_known_hosts +++ b/hack/ssh_known_hosts @@ -1,5 +1,7 @@ # This file was automatically generated. DO NOT EDIT bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw== +github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg= +github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ== gitlab.com ecdsa-sha2-nistp256 
AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY= gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf diff --git a/hack/test-examples.sh b/hack/test-examples.sh index db66b2f942b8..fad30c3d8c84 100755 --- a/hack/test-examples.sh +++ b/hack/test-examples.sh @@ -6,6 +6,6 @@ set -eu -o pipefail # Load the configmaps that contains the parameter values used for certain examples. kubectl apply -f examples/configmaps/simple-parameters-configmap.yaml -grep -lR 'workflows.argoproj.io/test' examples/* | while read f ; do +grep -lR 'workflows.argoproj.io/test-local' examples/* | while read f ; do ./dist/argo submit --watch --verify $f done diff --git a/hack/update-ssh-known-hosts.sh b/hack/update-ssh-known-hosts.sh index aa74c6489add..d90467f765c1 100755 --- a/hack/update-ssh-known-hosts.sh +++ b/hack/update-ssh-known-hosts.sh @@ -15,6 +15,8 @@ chmod 0644 $KNOWN_HOSTS_FILE # - ssh.dev.azure.com, vs-ssh.visualstudio.com: https://docs.microsoft.com/en-us/azure/devops/repos/git/use-ssh-keys-to-authenticate?view=azure-devops diff - <(ssh-keygen -l -f $KNOWN_HOSTS_FILE | sort -k 3) <- + https://raw.githubusercontent.com/argoproj/argo-workflows/4e450e250168e6b4d51a126b784e90b11a0162bc/pkg/apis/workflow/v1alpha1/generated.swagger.json + - name: echo + template: echo + arguments: + parameters: + - name: msg + value: '{{tasks.http.outputs.result}}' + dependencies: + - http + - name: http + inputs: + parameters: + - name: url + http: + url: '{{inputs.parameters.url}}' + - name: echo + inputs: + parameters: + - name: msg + container: + image: 'argoproj/argosay:v2' + args: + - echo + - '{{inputs.parameters.msg}}' diff --git a/ui/src/app/archived-workflows/components/archived-workflow-details/archived-workflow-details.tsx b/ui/src/app/archived-workflows/components/archived-workflow-details/archived-workflow-details.tsx index 1d27137b011e..f357ecda9f88 100644 
--- a/ui/src/app/archived-workflows/components/archived-workflow-details/archived-workflow-details.tsx +++ b/ui/src/app/archived-workflows/components/archived-workflow-details/archived-workflow-details.tsx @@ -12,7 +12,8 @@ import {ResourceEditor} from '../../../shared/components/resource-editor/resourc import {services} from '../../../shared/services'; import {WorkflowArtifacts} from '../../../workflows/components/workflow-artifacts'; -import {getPodName} from '../../../shared/pod-name'; +import {ANNOTATION_KEY_POD_NAME_VERSION} from '../../../shared/annotations'; +import {getPodName, getTemplateNameFromNode} from '../../../shared/pod-name'; import {WorkflowResourcePanel} from '../../../workflows/components/workflow-details/workflow-resource-panel'; import {WorkflowLogsViewer} from '../../../workflows/components/workflow-logs-viewer/workflow-logs-viewer'; import {WorkflowNodeInfo} from '../../../workflows/components/workflow-node-info/workflow-node-info'; @@ -252,8 +253,13 @@ export class ArchivedWorkflowDetails extends BasePage, private get podName() { if (this.nodeId && this.state.workflow) { const workflowName = this.state.workflow.metadata.name; - const {name, templateName} = this.node; - return getPodName(workflowName, name, templateName, this.nodeId); + let annotations: {[name: string]: string} = {}; + if (typeof this.state.workflow.metadata.annotations !== 'undefined') { + annotations = this.state.workflow.metadata.annotations; + } + const version = annotations[ANNOTATION_KEY_POD_NAME_VERSION]; + const templateName = getTemplateNameFromNode(this.node); + return getPodName(workflowName, this.node.name, templateName, this.nodeId, version); } } diff --git a/ui/src/app/event-sources/components/event-source-list/event-source-list.tsx b/ui/src/app/event-sources/components/event-source-list/event-source-list.tsx index b449eb9817b7..065a42e20f72 100644 --- a/ui/src/app/event-sources/components/event-source-list/event-source-list.tsx +++ 
b/ui/src/app/event-sources/components/event-source-list/event-source-list.tsx @@ -3,8 +3,7 @@ import * as classNames from 'classnames'; import * as React from 'react'; import {useContext, useEffect, useState} from 'react'; import {Link, RouteComponentProps} from 'react-router-dom'; -import {EventSource} from '../../../../models'; -import {kubernetes} from '../../../../models'; +import {EventSource, kubernetes} from '../../../../models'; import {ID} from '../../../event-flow/components/event-flow-details/id'; import {Utils as EventsUtils} from '../../../sensors/components/utils'; import {uiUrl} from '../../../shared/base'; @@ -80,6 +79,9 @@ export const EventSourceList = ({match, location, history}: RouteComponentProps< return {value, ...x}; })(); + const loading = !error && !eventSources; + const zeroState = (eventSources || []).length === 0; + return ( ] }}> - {!eventSources ? ( - - ) : eventSources.length === 0 ? ( + {loading && } + {zeroState && (

An event source defines what events can be used to trigger actions. Typical event sources are calender (to create events on schedule) GitHub or GitLab (to @@ -111,7 +112,8 @@ export const EventSourceList = ({match, location, history}: RouteComponentProps<

{learnMore}.

- ) : ( + )} + {eventSources && eventSources.length > 0 && ( <>
diff --git a/ui/src/app/pipelines/components/pipeline-details/pipeline-graph.ts b/ui/src/app/pipelines/components/pipeline-details/pipeline-graph.ts index d8e599888eb1..7736596f74ff 100644 --- a/ui/src/app/pipelines/components/pipeline-details/pipeline-graph.ts +++ b/ui/src/app/pipelines/components/pipeline-details/pipeline-graph.ts @@ -87,7 +87,7 @@ export const graph = (pipeline: Pipeline, steps: Step[]) => { g.nodes.set(subjectId, {genre: 'stan', icon: 'stream', label: x.stan.subject}); g.edges.set({v: subjectId, w: stepId}, {classNames}); } else if (x.http) { - const y = new URL('http://' + (x.http.serviceName || pipeline.metadata.name + '-' + step.spec.name) + '/sources/' + x.name); + const y = new URL('https://' + (x.http.serviceName || pipeline.metadata.name + '-' + step.spec.name) + '/sources/' + x.name); const subjectId = 'http/' + y; g.nodes.set(subjectId, {genre: 'http', icon: 'cloud', label: y.hostname}); g.edges.set({v: subjectId, w: stepId}, {classNames}); diff --git a/ui/src/app/pipelines/components/pipeline-list/pipeline-list.tsx b/ui/src/app/pipelines/components/pipeline-list/pipeline-list.tsx index bdcbc597186d..aada836b49c1 100644 --- a/ui/src/app/pipelines/components/pipeline-list/pipeline-list.tsx +++ b/ui/src/app/pipelines/components/pipeline-list/pipeline-list.tsx @@ -46,6 +46,9 @@ export const PipelineList = ({match, history}: RouteComponentProps) => { return () => lw.stop(); }, [namespace]); + const loading = !error && !pipelines; + const zeroState = (pipelines || []).length === 0; + return ( ) => { tools: [] }}> - {!pipelines ? ( - - ) : pipelines.length === 0 ? ( + {loading && } + {zeroState && (

Argo Dataflow is a Kubernetes native platform for executing large parallel data-processing pipelines.

@@ -74,7 +76,8 @@ export const PipelineList = ({match, history}: RouteComponentProps) => { Learn more

- ) : ( + )} + {pipelines && pipelines.length > 0 && ( <>
diff --git a/ui/src/app/sensors/components/sensor-list/sensor-list.tsx b/ui/src/app/sensors/components/sensor-list/sensor-list.tsx index 42600d25e3f6..90a6b008b6dc 100644 --- a/ui/src/app/sensors/components/sensor-list/sensor-list.tsx +++ b/ui/src/app/sensors/components/sensor-list/sensor-list.tsx @@ -3,8 +3,7 @@ import * as classNames from 'classnames'; import * as React from 'react'; import {useContext, useEffect, useState} from 'react'; import {Link, RouteComponentProps} from 'react-router-dom'; -import {Sensor} from '../../../../models'; -import {kubernetes} from '../../../../models'; +import {kubernetes, Sensor} from '../../../../models'; import {ID} from '../../../event-flow/components/event-flow-details/id'; import {uiUrl} from '../../../shared/base'; import {ErrorNotice} from '../../../shared/components/error-notice'; @@ -76,6 +75,9 @@ export const SensorList = ({match, location, history}: RouteComponentProps) return {value, ...x}; })(); + const loading = !error && !sensors; + const zeroState = (sensors || []).length === 0; + return ( ) tools: [] }}> - {!sensors ? ( - - ) : sensors.length === 0 ? ( + {loading && } + {zeroState && (

A sensor defines what actions to trigger when certain events occur. Typical events are a Git push, a file dropped into a bucket, or a message on a queue or @@ -107,7 +108,8 @@ export const SensorList = ({match, location, history}: RouteComponentProps)

{learnMore}.

- ) : ( + )} + {sensors && sensors.length > 0 && ( <>
diff --git a/ui/src/app/shared/annotations.ts b/ui/src/app/shared/annotations.ts new file mode 100644 index 000000000000..95e7e033000c --- /dev/null +++ b/ui/src/app/shared/annotations.ts @@ -0,0 +1 @@ +export const ANNOTATION_KEY_POD_NAME_VERSION = 'workflows.argoproj.io/pod-name-format'; diff --git a/ui/src/app/shared/components/graph/graph-panel.tsx b/ui/src/app/shared/components/graph/graph-panel.tsx index e9b07a3cc28b..8840527ea6f4 100644 --- a/ui/src/app/shared/components/graph/graph-panel.tsx +++ b/ui/src/app/shared/components/graph/graph-panel.tsx @@ -134,16 +134,16 @@ export const GraphPanel = (props: Props) => { ]} /> setHorizontal(s => !s)} title='Horizontal/vertical layout'> - + setNodeSize(s => s * 1.2)} title='Zoom in'> - + setNodeSize(s => s / 1.2)} title='Zoom out'> - + setFast(s => !s)} title='Use faster, but less pretty renderer' className={fast ? 'active' : ''}> - + {props.options}
diff --git a/ui/src/app/shared/pod-name.test.ts b/ui/src/app/shared/pod-name.test.ts index b9dd4ae19468..6aed683c5f44 100644 --- a/ui/src/app/shared/pod-name.test.ts +++ b/ui/src/app/shared/pod-name.test.ts @@ -1,4 +1,4 @@ -import {createFNVHash, ensurePodNamePrefixLength, getPodName, k8sNamingHashLength, maxK8sResourceNameLength} from './pod-name'; +import {createFNVHash, ensurePodNamePrefixLength, getPodName, k8sNamingHashLength, maxK8sResourceNameLength, POD_NAME_V1, POD_NAME_V2} from './pod-name'; describe('pod names', () => { test('createFNVHash', () => { @@ -27,6 +27,11 @@ describe('pod names', () => { }); test('getPodName', () => { - expect(getPodName(shortWfName, nodeName, shortTemplateName, nodeID)).toEqual(nodeID); + expect(getPodName(shortWfName, nodeName, shortTemplateName, nodeID, POD_NAME_V2)).toEqual('wfname-templatename-1454367246'); + expect(getPodName(shortWfName, nodeName, shortTemplateName, nodeID, POD_NAME_V1)).toEqual(nodeID); + expect(getPodName(shortWfName, nodeName, shortTemplateName, nodeID, '')).toEqual(nodeID); + + const name = getPodName(longWfName, nodeName, longTemplateName, nodeID, POD_NAME_V2); + expect(name.length).toEqual(maxK8sResourceNameLength); }); }); diff --git a/ui/src/app/shared/pod-name.ts b/ui/src/app/shared/pod-name.ts index d10dd54f8bdd..09dd2f94fda9 100644 --- a/ui/src/app/shared/pod-name.ts +++ b/ui/src/app/shared/pod-name.ts @@ -1,8 +1,24 @@ +import {NodeStatus} from '../../models'; + +export const POD_NAME_V1 = 'v1'; +export const POD_NAME_V2 = 'v2'; + export const maxK8sResourceNameLength = 253; export const k8sNamingHashLength = 10; // getPodName returns a deterministic pod name -export const getPodName = (workflowName: string, nodeName: string, templateName: string, nodeID: string): string => { +export const getPodName = (workflowName: string, nodeName: string, templateName: string, nodeID: string, version: string): string => { + if (version === POD_NAME_V2) { + if (workflowName === nodeName) { + return 
workflowName; + } + + const prefix = ensurePodNamePrefixLength(`${workflowName}-${templateName}`); + + const hash = createFNVHash(nodeName); + return `${prefix}-${hash}`; + } + return nodeID; }; @@ -29,3 +45,11 @@ export const createFNVHash = (input: string): number => { return hashint >>> 0; }; + +export const getTemplateNameFromNode = (node: NodeStatus): string => { + if (node.templateName && node.templateName !== '') { + return node.templateName; + } + + return node.templateRef.template; +}; diff --git a/ui/src/app/shared/services/workflows-service.ts b/ui/src/app/shared/services/workflows-service.ts index e2a02e77201e..f1aaf6d2cd06 100644 --- a/ui/src/app/shared/services/workflows-service.ts +++ b/ui/src/app/shared/services/workflows-service.ts @@ -135,10 +135,10 @@ export class WorkflowsService { .then(res => res.body as Workflow); } - public getContainerLogsFromCluster(workflow: Workflow, nodeId: string, container: string, grep: string): Observable { + public getContainerLogsFromCluster(workflow: Workflow, podName: string, container: string, grep: string): Observable { const namespace = workflow.metadata.namespace; const name = workflow.metadata.name; - const podLogsURL = `api/v1/workflows/${namespace}/${name}/log?logOptions.container=${container}&grep=${grep}&logOptions.follow=true${nodeId ? `&podName=${nodeId}` : ''}`; + const podLogsURL = `api/v1/workflows/${namespace}/${name}/log?logOptions.container=${container}&grep=${grep}&logOptions.follow=true${podName ? `&podName=${podName}` : ''}`; return requests .loadEventSource(podLogsURL) .filter(line => !!line) @@ -149,9 +149,9 @@ export class WorkflowsService { // that the connection to the server was interrupted while the node is still pending or running, this is not // correct since we actually want the EventSource to re-connect and continue streaming logs. In the event // that the pod has completed, then we want to allow the unsubscribe to happen since no additional logs exist. 
- return Observable.fromPromise(this.isWorkflowNodePendingOrRunning(workflow, nodeId)).switchMap(isPendingOrRunning => { + return Observable.fromPromise(this.isWorkflowNodePendingOrRunning(workflow, podName)).switchMap(isPendingOrRunning => { if (isPendingOrRunning) { - return this.getContainerLogsFromCluster(workflow, nodeId, container, grep); + return this.getContainerLogsFromCluster(workflow, podName, container, grep); } // If our workflow is completed, then simply complete the Observable since nothing else @@ -186,7 +186,7 @@ export class WorkflowsService { .filter(x => !!x.content.match(grep)); } - public getContainerLogs(workflow: Workflow, nodeId: string, container: string, grep: string, archived: boolean): Observable { + public getContainerLogs(workflow: Workflow, podName: string, nodeId: string, container: string, grep: string, archived: boolean): Observable { const getLogsFromArtifact = () => this.getContainerLogsFromArtifact(workflow, nodeId, container, grep, archived); // If our workflow is archived, don't even bother inspecting the cluster for logs since it's likely @@ -199,7 +199,7 @@ export class WorkflowsService { if (!isPendingOrRunning && this.hasArtifactLogs(workflow, nodeId, container) && container === 'main') { return getLogsFromArtifact(); } - return this.getContainerLogsFromCluster(workflow, nodeId, container, grep).catch(getLogsFromArtifact); + return this.getContainerLogsFromCluster(workflow, podName, container, grep).catch(getLogsFromArtifact); }); } diff --git a/ui/src/app/workflows/components/events-panel.tsx b/ui/src/app/workflows/components/events-panel.tsx index da67a07f0f54..ec50cc2ca086 100644 --- a/ui/src/app/workflows/components/events-panel.tsx +++ b/ui/src/app/workflows/components/events-panel.tsx @@ -110,6 +110,7 @@ export const EventsPanel = ({namespace, name, kind}: {namespace: string; name: s
Message
{events + .filter(e => e && e.lastTimestamp) .sort((a, b) => -a.lastTimestamp.localeCompare(b.lastTimestamp)) .map(e => (
diff --git a/ui/src/app/workflows/components/workflow-dag/workflow-dag-render-options-panel.tsx b/ui/src/app/workflows/components/workflow-dag/workflow-dag-render-options-panel.tsx index e9efee3166e3..09e3f0bf3378 100644 --- a/ui/src/app/workflows/components/workflow-dag/workflow-dag-render-options-panel.tsx +++ b/ui/src/app/workflows/components/workflow-dag/workflow-dag-render-options-panel.tsx @@ -17,7 +17,7 @@ export class WorkflowDagRenderOptionsPanel extends React.Component - + @@ -27,7 +27,7 @@ export class WorkflowDagRenderOptionsPanel extends React.Component - + ); diff --git a/ui/src/app/workflows/components/workflow-details/workflow-details.tsx b/ui/src/app/workflows/components/workflow-details/workflow-details.tsx index f1858156509e..8a4a978b96ac 100644 --- a/ui/src/app/workflows/components/workflow-details/workflow-details.tsx +++ b/ui/src/app/workflows/components/workflow-details/workflow-details.tsx @@ -4,6 +4,7 @@ import * as React from 'react'; import {useContext, useEffect, useState} from 'react'; import {RouteComponentProps} from 'react-router'; import {execSpec, Link, NodeStatus, Workflow} from '../../../../models'; +import {ANNOTATION_KEY_POD_NAME_VERSION} from '../../../shared/annotations'; import {uiUrl} from '../../../shared/base'; import {CostOptimisationNudge} from '../../../shared/components/cost-optimisation-nudge'; import {ErrorNotice} from '../../../shared/components/error-notice'; @@ -13,7 +14,7 @@ import {SecurityNudge} from '../../../shared/components/security-nudge'; import {hasWarningConditionBadge} from '../../../shared/conditions-panel'; import {Context} from '../../../shared/context'; import {historyUrl} from '../../../shared/history'; -import {getPodName} from '../../../shared/pod-name'; +import {getPodName, getTemplateNameFromNode} from '../../../shared/pod-name'; import {RetryWatch} from '../../../shared/retry-watch'; import {services} from '../../../shared/services'; import {useQueryParams} from 
'../../../shared/use-query-params'; @@ -226,7 +227,13 @@ export const WorkflowDetails = ({history, location, match}: RouteComponentProps< }; const ensurePodName = (wf: Workflow, node: NodeStatus, nodeID: string): string => { if (workflow && node) { - return getPodName(wf.metadata.name, node.name, node.templateName, node.id); + let annotations: {[name: string]: string} = {}; + if (typeof workflow.metadata.annotations !== 'undefined') { + annotations = workflow.metadata.annotations; + } + const version = annotations[ANNOTATION_KEY_POD_NAME_VERSION]; + const templateName = getTemplateNameFromNode(node); + return getPodName(wf.metadata.name, node.name, templateName, node.id, version); } return nodeID; diff --git a/ui/src/app/workflows/components/workflow-logs-viewer/workflow-logs-viewer.tsx b/ui/src/app/workflows/components/workflow-logs-viewer/workflow-logs-viewer.tsx index 13118764ef57..71d9d10fd20b 100644 --- a/ui/src/app/workflows/components/workflow-logs-viewer/workflow-logs-viewer.tsx +++ b/ui/src/app/workflows/components/workflow-logs-viewer/workflow-logs-viewer.tsx @@ -5,10 +5,11 @@ import {Autocomplete} from 'argo-ui'; import {Observable} from 'rxjs'; import * as models from '../../../../models'; import {execSpec} from '../../../../models'; +import {ANNOTATION_KEY_POD_NAME_VERSION} from '../../../shared/annotations'; import {ErrorNotice} from '../../../shared/components/error-notice'; import {InfoIcon, WarningIcon} from '../../../shared/components/fa-icons'; import {Links} from '../../../shared/components/links'; -import {getPodName} from '../../../shared/pod-name'; +import {getPodName, getTemplateNameFromNode} from '../../../shared/pod-name'; import {services} from '../../../shared/services'; import {FullHeightLogsViewer} from './full-height-logs-viewer'; @@ -36,10 +37,15 @@ export const WorkflowLogsViewer = ({workflow, nodeId, initialPodName, container, setError(null); setLoaded(false); const source = services.workflows - .getContainerLogs(workflow, podName, 
selectedContainer, grep, archived) + .getContainerLogs(workflow, podName, nodeId, selectedContainer, grep, archived) .map(e => (!podName ? e.podName + ': ' : '') + e.content + '\n') // this next line highlights the search term in bold with a yellow background, white text - .map(x => x.replace(new RegExp(grep, 'g'), y => '\u001b[1m\u001b[43;1m\u001b[37m' + y + '\u001b[0m')) + .map(x => { + if (grep !== '') { + return x.replace(new RegExp(grep, 'g'), y => '\u001b[1m\u001b[43;1m\u001b[37m' + y + '\u001b[0m'); + } + return x; + }) .publishReplay() .refCount(); const subscription = source.subscribe( @@ -58,12 +64,19 @@ export const WorkflowLogsViewer = ({workflow, nodeId, initialPodName, container, return () => clearTimeout(x); }, [filter]); + let annotations: {[name: string]: string} = {}; + if (typeof workflow.metadata.annotations !== 'undefined') { + annotations = workflow.metadata.annotations; + } + const podNameVersion = annotations[ANNOTATION_KEY_POD_NAME_VERSION]; + const podNames = [{value: '', label: 'All'}].concat( Object.values(workflow.status.nodes || {}) .filter(x => x.type === 'Pod') .map(targetNode => { - const {name, id, templateName, displayName} = targetNode; - const targetPodName = getPodName(workflow.metadata.name, name, templateName, id); + const {name, id, displayName} = targetNode; + const templateName = getTemplateNameFromNode(targetNode); + const targetPodName = getPodName(workflow.metadata.name, name, templateName, id, podNameVersion); return {value: targetPodName, label: (displayName || name) + ' (' + targetPodName + ')'}; }) ); diff --git a/ui/src/app/workflows/components/workflow-node-info/workflow-node-info.tsx b/ui/src/app/workflows/components/workflow-node-info/workflow-node-info.tsx index 48be1b314a43..7f4474d12769 100644 --- a/ui/src/app/workflows/components/workflow-node-info/workflow-node-info.tsx +++ b/ui/src/app/workflows/components/workflow-node-info/workflow-node-info.tsx @@ -4,6 +4,7 @@ import * as React from 'react'; import * as 
models from '../../../../models'; import {Artifact, NodeStatus, Workflow} from '../../../../models'; +import {ANNOTATION_KEY_POD_NAME_VERSION} from '../../../shared/annotations'; import {Button} from '../../../shared/components/button'; import {ClipboardText} from '../../../shared/components/clipboard-text'; import {DropDownButton} from '../../../shared/components/drop-down-button'; @@ -12,7 +13,7 @@ import {InlineTable} from '../../../shared/components/inline-table/inline-table' import {Links} from '../../../shared/components/links'; import {Phase} from '../../../shared/components/phase'; import {Timestamp} from '../../../shared/components/timestamp'; -import {getPodName} from '../../../shared/pod-name'; +import {getPodName, getTemplateNameFromNode} from '../../../shared/pod-name'; import {ResourcesDuration} from '../../../shared/resources-duration'; import {services} from '../../../shared/services'; import {getResolvedTemplates} from '../../../shared/template-resolution'; @@ -81,7 +82,14 @@ const AttributeRows = (props: {attributes: {title: string; value: any}[]}) => ( const WorkflowNodeSummary = (props: Props) => { const {workflow, node} = props; - const podName = getPodName(workflow.metadata.name, node.name, node.templateName, node.id); + let annotations: {[name: string]: string} = {}; + if (typeof workflow.metadata.annotations !== 'undefined') { + annotations = workflow.metadata.annotations; + } + const version = annotations[ANNOTATION_KEY_POD_NAME_VERSION]; + const templateName = getTemplateNameFromNode(node); + + const podName = getPodName(workflow.metadata.name, node.name, templateName, node.id, version); const attributes = [ {title: 'NAME', value: }, diff --git a/ui/src/app/workflows/components/workflows-toolbar/workflows-toolbar.tsx b/ui/src/app/workflows/components/workflows-toolbar/workflows-toolbar.tsx index 6938bc15b07e..c0ab53e9cd92 100644 --- a/ui/src/app/workflows/components/workflows-toolbar/workflows-toolbar.tsx +++ 
b/ui/src/app/workflows/components/workflows-toolbar/workflows-toolbar.tsx @@ -53,7 +53,7 @@ export class WorkflowsToolbar extends React.Component private performActionOnSelectedWorkflows(ctx: any, title: string, action: WorkflowOperationAction): Promise { if (!confirm(`Are you sure you want to ${title.toLowerCase()} all selected workflows?`)) { - return Promise.resolve(); + return Promise.resolve(false); } const promises: Promise[] = []; this.props.selectedWorkflows.forEach((wf: Workflow) => { @@ -82,13 +82,15 @@ export class WorkflowsToolbar extends React.Component groupIsDisabled: disabled[actionName], action, groupAction: () => { - return this.performActionOnSelectedWorkflows(ctx, action.title, action.action).then(() => { - this.props.clearSelection(); - this.appContext.apis.notifications.show({ - content: `Performed '${action.title}' on selected workflows.`, - type: NotificationType.Success - }); - this.props.loadWorkflows(); + return this.performActionOnSelectedWorkflows(ctx, action.title, action.action).then(confirmed => { + if (confirmed) { + this.props.clearSelection(); + this.appContext.apis.notifications.show({ + content: `Performed '${action.title}' on selected workflows.`, + type: NotificationType.Success + }); + this.props.loadWorkflows(); + } }); }, className: action.title, diff --git a/util/env/env.go b/util/env/env.go index 281aaca50037..76e23461f64c 100644 --- a/util/env/env.go +++ b/util/env/env.go @@ -10,7 +10,7 @@ import ( func LookupEnvDurationOr(key string, o time.Duration) time.Duration { v, found := os.LookupEnv(key) - if found { + if found && v != "" { d, err := time.ParseDuration(v) if err != nil { log.WithField(key, v).WithError(err).Panic("failed to parse") @@ -23,7 +23,7 @@ func LookupEnvDurationOr(key string, o time.Duration) time.Duration { func LookupEnvIntOr(key string, o int) int { v, found := os.LookupEnv(key) - if found { + if found && v != "" { d, err := strconv.Atoi(v) if err != nil { log.WithField(key, 
v).WithError(err).Panic("failed to convert to int") @@ -36,7 +36,7 @@ func LookupEnvIntOr(key string, o int) int { func LookupEnvFloatOr(key string, o float64) float64 { v, found := os.LookupEnv(key) - if found { + if found && v != "" { d, err := strconv.ParseFloat(v, 64) if err != nil { log.WithField(key, v).WithError(err).Panic("failed to convert to float") diff --git a/util/env/env_test.go b/util/env/env_test.go index d5daa4788b5a..8a51b3655ff2 100644 --- a/util/env/env_test.go +++ b/util/env/env_test.go @@ -15,6 +15,8 @@ func TestLookupEnvDurationOr(t *testing.T) { assert.Panics(t, func() { LookupEnvDurationOr("FOO", time.Second) }, "bad value") _ = os.Setenv("FOO", "1h") assert.Equal(t, time.Hour, LookupEnvDurationOr("FOO", time.Second), "env var value") + _ = os.Setenv("FOO", "") + assert.Equal(t, time.Second, LookupEnvDurationOr("FOO", time.Second), "empty var value; default value") } func TestLookupEnvIntOr(t *testing.T) { @@ -24,6 +26,8 @@ func TestLookupEnvIntOr(t *testing.T) { assert.Panics(t, func() { LookupEnvIntOr("FOO", 1) }, "bad value") _ = os.Setenv("FOO", "2") assert.Equal(t, 2, LookupEnvIntOr("FOO", 1), "env var value") + _ = os.Setenv("FOO", "") + assert.Equal(t, 1, LookupEnvIntOr("FOO", 1), "empty var value; default value") } func TestLookupEnvFloatOr(t *testing.T) { @@ -33,4 +37,6 @@ func TestLookupEnvFloatOr(t *testing.T) { assert.Panics(t, func() { LookupEnvFloatOr("FOO", 1.) }, "bad value") _ = os.Setenv("FOO", "2.0") assert.Equal(t, 2., LookupEnvFloatOr("FOO", 1.), "env var value") + _ = os.Setenv("FOO", "") + assert.Equal(t, 1., LookupEnvFloatOr("FOO", 1.), "empty var value; default value") } diff --git a/util/kubeconfig/kubeconfig.go b/util/kubeconfig/kubeconfig.go index acb1ec2ce29a..18b93d54ca97 100644 --- a/util/kubeconfig/kubeconfig.go +++ b/util/kubeconfig/kubeconfig.go @@ -165,18 +165,21 @@ func GetBearerToken(in *restclient.Config, explicitKubeConfigPath string) (strin // This code is not making actual request. We can ignore it. 
_ = auth.UpdateTransportConfig(tc) - rt, err := transport.New(tc) + tp, err := transport.New(tc) if err != nil { return "", err } - req := http.Request{Header: map[string][]string{}} - - newT := NewUserAgentRoundTripper("dummy", rt) - resp, err := newT.RoundTrip(&req) + req, err := http.NewRequest("GET", in.Host, nil) + if err != nil { + return "", err + } + resp, err := tc.WrapTransport(tp).RoundTrip(req) if err != nil { return "", err } - resp.Body.Close() + if err := resp.Body.Close(); err != nil { + return "", err + } token := req.Header.Get("Authorization") return strings.TrimPrefix(token, "Bearer "), nil diff --git a/util/kubeconfig/roundtripper.go b/util/kubeconfig/roundtripper.go deleted file mode 100644 index 1eb4bb2fa8a6..000000000000 --- a/util/kubeconfig/roundtripper.go +++ /dev/null @@ -1,17 +0,0 @@ -package kubeconfig - -import "net/http" - -type userAgentRoundTripper struct { - agent string - rt http.RoundTripper -} - -func (rt userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - req.Header.Set("User-Agent", rt.agent) - return rt.rt.RoundTrip(req) -} - -func NewUserAgentRoundTripper(agent string, rt http.RoundTripper) http.RoundTripper { - return &userAgentRoundTripper{agent, rt} -} diff --git a/util/runtime/panic.go b/util/runtime/panic.go new file mode 100644 index 000000000000..e66a4cf52869 --- /dev/null +++ b/util/runtime/panic.go @@ -0,0 +1,22 @@ +package runtime + +import ( + "runtime" + + log "github.com/sirupsen/logrus" +) + +func RecoverFromPanic(log *log.Entry) { + if r := recover(); r != nil { + // Same as stdlib http server code. Manually allocate stack trace buffer size + // to prevent excessively large logs + const size = 64 << 10 + stackTraceBuffer := make([]byte, size) + stackSize := runtime.Stack(stackTraceBuffer, false) + // Free up the unused spaces + stackTraceBuffer = stackTraceBuffer[:stackSize] + log.Errorf("recovered from panic %q. 
Call stack:\n%s", + r, + stackTraceBuffer) + } +} diff --git a/util/tls/tls.go b/util/tls/tls.go index 227b04e2c9c7..b5a77495fe92 100644 --- a/util/tls/tls.go +++ b/util/tls/tls.go @@ -22,7 +22,7 @@ func pemBlockForKey(priv interface{}) *pem.Block { case *ecdsa.PrivateKey: b, err := x509.MarshalECPrivateKey(k) if err != nil { - log.Fatal(err) + log.Print(err) os.Exit(2) } return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} diff --git a/workflow/artifacts/git/git.go b/workflow/artifacts/git/git.go index 9931c49bc1af..271294853578 100644 --- a/workflow/artifacts/git/git.go +++ b/workflow/artifacts/git/git.go @@ -10,14 +10,14 @@ import ( "regexp" "strings" + "github.com/go-git/go-git/v5" + "github.com/go-git/go-git/v5/config" + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/transport" + "github.com/go-git/go-git/v5/plumbing/transport/http" + ssh2 "github.com/go-git/go-git/v5/plumbing/transport/ssh" log "github.com/sirupsen/logrus" "golang.org/x/crypto/ssh" - "gopkg.in/src-d/go-git.v4" - "gopkg.in/src-d/go-git.v4/config" - "gopkg.in/src-d/go-git.v4/plumbing" - "gopkg.in/src-d/go-git.v4/plumbing/transport" - "gopkg.in/src-d/go-git.v4/plumbing/transport/http" - ssh2 "gopkg.in/src-d/go-git.v4/plumbing/transport/ssh" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo-workflows/v3/workflow/artifacts/common" diff --git a/workflow/common/common.go b/workflow/common/common.go index 1b12141a7498..d36da67f374c 100644 --- a/workflow/common/common.go +++ b/workflow/common/common.go @@ -39,6 +39,9 @@ const ( // AnnotationKeyWorkflowUID is the uid of the workflow AnnotationKeyWorkflowUID = workflow.WorkflowFullName + "/workflow-uid" + // AnnotationKeyPodNameVersion stores the pod naming convention version + AnnotationKeyPodNameVersion = workflow.WorkflowFullName + "/pod-name-format" + // LabelKeyControllerInstanceID is the label the controller will carry forward to workflows/pod labels // for the purposes 
of workflow segregation LabelKeyControllerInstanceID = workflow.WorkflowFullName + "/controller-instanceid" @@ -72,6 +75,13 @@ const ( // LabelKeyOnExit is a label applied to Pods that are run from onExit nodes, so that they are not shut down when stopping a Workflow LabelKeyOnExit = workflow.WorkflowFullName + "/on-exit" + // LabelKeyConfigMapType is the label key for the type of configmap. + LabelKeyConfigMapType = "workflows.argoproj.io/configmap-type" + // LabelValueTypeConfigMapCache is a key for configmaps that are memoization cache. + LabelValueTypeConfigMapCache = "Cache" + // LabelValueTypeConfigMapParameter is a key for configmaps that contains parameter values. + LabelValueTypeConfigMapParameter = "Parameter" + // ExecutorArtifactBaseDir is the base directory in the init container in which artifacts will be copied to. // Each artifact will be named according to its input name (e.g: /argo/inputs/artifacts/CODE) ExecutorArtifactBaseDir = "/argo/inputs/artifacts" diff --git a/util/configmap.go b/workflow/common/configmap.go similarity index 72% rename from util/configmap.go rename to workflow/common/configmap.go index edf4f481fbc4..1440874045f3 100644 --- a/util/configmap.go +++ b/workflow/common/configmap.go @@ -1,4 +1,4 @@ -package util +package common import ( "fmt" @@ -18,6 +18,11 @@ func GetConfigMapValue(configMapInformer cache.SharedIndexInformer, namespace, n if !ok { return "", fmt.Errorf("unable to convert object %s to configmap when syncing ConfigMaps", name) } + if cmType := cm.Labels[LabelKeyConfigMapType]; cmType != LabelValueTypeConfigMapParameter { + return "", fmt.Errorf( + "ConfigMap '%s' needs to have the label %s: %s to load parameters", + name, LabelKeyConfigMapType, LabelValueTypeConfigMapParameter) + } cmValue, ok := cm.Data[key] if !ok { return "", fmt.Errorf("ConfigMap '%s' does not have the key '%s'", name, key) diff --git a/workflow/common/parse.go b/workflow/common/parse.go index 56836ca8fd56..29d5de552e95 100644 --- 
a/workflow/common/parse.go +++ b/workflow/common/parse.go @@ -22,7 +22,7 @@ type ParseResult struct { } func ParseObjects(body []byte, strict bool) []ParseResult { - res := []ParseResult{} + var res []ParseResult if jsonpkg.IsJSON(body) { un := &unstructured.Unstructured{} err := jsonpkg.Unmarshal(body, un) @@ -34,15 +34,19 @@ func ParseObjects(body []byte, strict bool) []ParseResult { return append(res, ParseResult{v, err}) } - for _, text := range yamlSeparator.Split(string(body), -1) { + for i, text := range yamlSeparator.Split(string(body), -1) { if strings.TrimSpace(text) == "" { continue } un := &unstructured.Unstructured{} err := yaml.Unmarshal([]byte(text), un) - if un.GetKind() != "" && err != nil { - // only return an error if this is a kubernetes object, otherwise, ignore - res = append(res, ParseResult{nil, err}) + if err != nil { + // Only return an error if this is a kubernetes object, otherwise, print the error + if un.GetKind() != "" { + res = append(res, ParseResult{nil, err}) + } else { + log.Errorf("yaml file at index %d is not valid: %s", i, err) + } continue } v, err := toWorkflowTypeYAML([]byte(text), un.GetKind(), strict) diff --git a/workflow/common/util.go b/workflow/common/util.go index a3fcd5f98ff5..12bc14c74d1a 100644 --- a/workflow/common/util.go +++ b/workflow/common/util.go @@ -124,18 +124,22 @@ func ProcessArgs(tmpl *wfv1.Template, args wfv1.ArgumentsProvider, globalParams, // 3) if no default value, it is an error newTmpl := tmpl.DeepCopy() for i, inParam := range newTmpl.Inputs.Parameters { - if inParam.Default != nil { + if inParam.Value == nil && inParam.Default != nil { // first set to default value inParam.Value = inParam.Default } // overwrite value from argument (if supplied) argParam := args.GetParameterByName(inParam.Name) - if argParam != nil && argParam.Value != nil { - inParam.Value = argParam.Value + if argParam != nil { + if argParam.Value != nil { + inParam.Value = argParam.Value + } else { + inParam.ValueFrom = 
argParam.ValueFrom + } } if inParam.ValueFrom != nil && inParam.ValueFrom.ConfigMapKeyRef != nil { if configMapInformer != nil { - cmValue, err := util.GetConfigMapValue(configMapInformer, namespace, inParam.ValueFrom.ConfigMapKeyRef.Name, inParam.ValueFrom.ConfigMapKeyRef.Key) + cmValue, err := GetConfigMapValue(configMapInformer, namespace, inParam.ValueFrom.ConfigMapKeyRef.Name, inParam.ValueFrom.ConfigMapKeyRef.Key) if err != nil { return nil, errors.Errorf(errors.CodeBadRequest, "unable to retrieve inputs.parameters.%s from ConfigMap: %s", inParam.Name, err) } diff --git a/workflow/controller/agent.go b/workflow/controller/agent.go index a207ed931d49..9a2de98cfa00 100644 --- a/workflow/controller/agent.go +++ b/workflow/controller/agent.go @@ -69,11 +69,9 @@ func (woc *wfOperationCtx) createAgentPod(ctx context.Context) (*apiv1.Pod, erro podName := woc.getAgentPodName() obj, exists, err := woc.controller.podInformer.GetStore().Get(cache.ExplicitKey(woc.wf.Namespace + "/" + podName)) - if err != nil { return nil, fmt.Errorf("failed to get pod from informer store: %w", err) } - if exists { existing, ok := obj.(*apiv1.Pod) if ok { @@ -99,10 +97,11 @@ func (woc *wfOperationCtx) createAgentPod(ctx context.Context) (*apiv1.Pod, erro ImagePullSecrets: woc.execWf.Spec.ImagePullSecrets, Containers: []apiv1.Container{ { - Name: "main", - Command: []string{"argoexec"}, - Args: []string{"agent"}, - Image: woc.controller.executorImage(), + Name: "main", + Command: []string{"argoexec"}, + Args: []string{"agent"}, + Image: woc.controller.executorImage(), + ImagePullPolicy: woc.controller.executorImagePullPolicy(), Env: []apiv1.EnvVar{ {Name: common.EnvVarWorkflowName, Value: woc.wf.Name}, }, diff --git a/workflow/controller/controller.go b/workflow/controller/controller.go index d715c7671ef5..91fe1c4e7883 100644 --- a/workflow/controller/controller.go +++ b/workflow/controller/controller.go @@ -774,24 +774,6 @@ func (wfc *WorkflowController) processNextItem(ctx 
context.Context) bool { if err != nil { log.WithError(err).Warn("error to complete the taskset") } - // Send all completed pods to gcPods channel to delete it later depend on the PodGCStrategy. - var doPodGC bool - if woc.execWf.Spec.PodGC != nil { - switch woc.execWf.Spec.PodGC.Strategy { - case wfv1.PodGCOnWorkflowCompletion: - doPodGC = true - case wfv1.PodGCOnWorkflowSuccess: - if woc.wf.Status.Successful() { - doPodGC = true - } - } - } - if doPodGC { - for podName := range woc.completedPods { - delay := woc.controller.Config.GetPodGCDeleteDelayDuration() - woc.controller.queuePodForCleanupAfter(woc.wf.Namespace, podName, deletePod, delay) - } - } } // TODO: operate should return error if it was unable to operate properly diff --git a/workflow/controller/dag.go b/workflow/controller/dag.go index 1813c713482e..d8d474a842fd 100644 --- a/workflow/controller/dag.go +++ b/workflow/controller/dag.go @@ -9,11 +9,13 @@ import ( "time" "github.com/antonmedv/expr" + log "github.com/sirupsen/logrus" "github.com/argoproj/argo-workflows/v3/errors" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo-workflows/v3/util/template" "github.com/argoproj/argo-workflows/v3/workflow/common" + controllercache "github.com/argoproj/argo-workflows/v3/workflow/controller/cache" "github.com/argoproj/argo-workflows/v3/workflow/templateresolution" ) @@ -299,6 +301,14 @@ func (woc *wfOperationCtx) executeDAG(ctx context.Context, nodeName string, tmpl node = woc.wf.GetNodeByName(nodeName) node.Outputs = outputs woc.wf.Status.Nodes[node.ID] = *node + if node.MemoizationStatus != nil { + c := woc.controller.cacheFactory.GetCache(controllercache.ConfigMapCache, node.MemoizationStatus.CacheName) + err := c.Save(ctx, node.MemoizationStatus.Key, node.ID, node.Outputs) + if err != nil { + woc.log.WithFields(log.Fields{"nodeID": node.ID}).WithError(err).Error("Failed to save node outputs to cache") + node.Phase = wfv1.NodeError + } + } } 
woc.updateOutboundNodesForTargetTasks(dagCtx, targetTasks, nodeName) @@ -437,7 +447,12 @@ func (woc *wfOperationCtx) executeDAGTask(ctx context.Context, dagCtx *dagContex // For example, if we had task A with withItems of ['foo', 'bar'] which expanded to ['A(0:foo)', 'A(1:bar)'], we still // need to create a node for A. if task.ShouldExpand() { - if taskGroupNode == nil { + // DAG task with empty withParams list should be skipped + if len(expandedTasks) == 0 { + skipReason := "Skipped, empty params" + woc.initializeNode(nodeName, wfv1.NodeTypeSkipped, dagTemplateScope, task, dagCtx.boundaryID, wfv1.NodeSkipped, skipReason) + connectDependencies(nodeName) + } else if taskGroupNode == nil { connectDependencies(nodeName) taskGroupNode = woc.initializeNode(nodeName, wfv1.NodeTypeTaskGroup, dagTemplateScope, task, dagCtx.boundaryID, wfv1.NodeRunning, "") } diff --git a/workflow/controller/dag_test.go b/workflow/controller/dag_test.go index d51571feb4f7..6e96d3435dda 100644 --- a/workflow/controller/dag_test.go +++ b/workflow/controller/dag_test.go @@ -3320,3 +3320,85 @@ func TestDAGReferTaskAggregatedOutputs(t *testing.T) { } } } + +var dagHttpChildrenAssigned = `apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + name: http-template-nv52d +spec: + entrypoint: main + templates: + - dag: + tasks: + - arguments: + parameters: + - name: url + value: https://raw.githubusercontent.com/argoproj/argo-workflows/4e450e250168e6b4d51a126b784e90b11a0162bc/pkg/apis/workflow/v1alpha1/generated.swagger.json + name: good1 + template: http + - arguments: + parameters: + - name: url + value: https://raw.githubusercontent.com/argoproj/argo-workflows/4e450e250168e6b4d51a126b784e90b11a0162bc/pkg/apis/workflow/v1alpha1/generated.swagger.json + dependencies: + - good1 + name: good2 + template: http + name: main + - http: + url: '{{inputs.parameters.url}}' + inputs: + parameters: + - name: url + name: http +status: + nodes: + http-template-nv52d: + children: + - 
http-template-nv52d-444770636 + displayName: http-template-nv52d + id: http-template-nv52d + name: http-template-nv52d + outboundNodes: + - http-template-nv52d-478325874 + phase: Running + startedAt: "2021-10-27T13:46:08Z" + templateName: main + templateScope: local/http-template-nv52d + type: DAG + http-template-nv52d-444770636: + boundaryID: http-template-nv52d + children: + - http-template-nv52d-495103493 + displayName: good1 + finishedAt: null + id: http-template-nv52d-444770636 + name: http-template-nv52d.good1 + phase: Succeeded + startedAt: "2021-10-27T13:46:08Z" + templateName: http + templateScope: local/http-template-nv52d + type: HTTP + phase: Running + startedAt: "2021-10-27T13:46:08Z" +` + +func TestDagHttpChildrenAssigned(t *testing.T) { + wf := wfv1.MustUnmarshalWorkflow(dagHttpChildrenAssigned) + cancel, controller := newController(wf) + defer cancel() + + ctx := context.Background() + woc := newWorkflowOperationCtx(wf, controller) + woc.operate(ctx) + + dagNode := woc.wf.Status.Nodes.FindByDisplayName("good2") + assert.NotNil(t, dagNode) + + dagNode = woc.wf.Status.Nodes.FindByDisplayName("good1") + if assert.NotNil(t, dagNode) { + if assert.Len(t, dagNode.Children, 1) { + assert.Equal(t, "http-template-nv52d-495103493", dagNode.Children[0]) + } + } +} diff --git a/workflow/controller/exit_handler_test.go b/workflow/controller/exit_handler_test.go index 9e39042dbaa2..294d34a00b95 100644 --- a/workflow/controller/exit_handler_test.go +++ b/workflow/controller/exit_handler_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/assert" apiv1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" ) @@ -687,3 +688,63 @@ func TestDagOnExitAndRetryStrategy(t *testing.T) { assert.Equal(t, wfv1.WorkflowSucceeded, woc.wf.Status.Phase) } + +var testWorkflowOnExitHttpReconciliation = `apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + name: hello-world-sx6lw 
+spec: + entrypoint: whalesay + onExit: exit-handler + templates: + - container: + args: + - hello world + command: + - cowsay + image: docker/whalesay:latest + name: whalesay + - http: + url: https://example.com + name: exit-handler +status: + nodes: + hello-world-sx6lw: + displayName: hello-world-sx6lw + finishedAt: "2021-10-27T14:38:30Z" + hostNodeName: k3d-k3s-default-server-0 + id: hello-world-sx6lw + name: hello-world-sx6lw + phase: Succeeded + progress: 1/1 + resourcesDuration: + cpu: 2 + memory: 1 + startedAt: "2021-10-27T14:38:27Z" + templateName: whalesay + templateScope: local/hello-world-sx6lw + type: Pod + phase: Running + startedAt: "2021-10-27T14:38:27Z" +` + +func TestWorkflowOnExitHttpReconciliation(t *testing.T) { + wf := wfv1.MustUnmarshalWorkflow(testWorkflowOnExitHttpReconciliation) + cancel, controller := newController(wf) + defer cancel() + + ctx := context.Background() + woc := newWorkflowOperationCtx(wf, controller) + + taskSets, err := woc.controller.wfclientset.ArgoprojV1alpha1().WorkflowTaskSets("").List(ctx, v1.ListOptions{}) + if assert.NoError(t, err) { + assert.Len(t, taskSets.Items, 0) + } + woc.operate(ctx) + + assert.Len(t, woc.wf.Status.Nodes, 2) + taskSets, err = woc.controller.wfclientset.ArgoprojV1alpha1().WorkflowTaskSets("").List(ctx, v1.ListOptions{}) + if assert.NoError(t, err) { + assert.Len(t, taskSets.Items, 1) + } +} diff --git a/workflow/controller/http_template.go b/workflow/controller/http_template.go index 65ffbca85754..6a545c5afdee 100644 --- a/workflow/controller/http_template.go +++ b/workflow/controller/http_template.go @@ -1,6 +1,8 @@ package controller import ( + "context" + wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" ) @@ -12,3 +14,37 @@ func (woc *wfOperationCtx) executeHTTPTemplate(nodeName string, templateScope st } return node } + +func (woc *wfOperationCtx) httpReconciliation(ctx context.Context) { + err := woc.reconcileTaskSet(ctx) + if err != nil { + 
woc.log.WithError(err).Error("error in workflowtaskset reconciliation") + return + } + + err = woc.reconcileAgentPod(ctx) + if err != nil { + woc.log.WithError(err).Error("error in agent pod reconciliation") + woc.markWorkflowError(ctx, err) + return + } +} + +func (woc *wfOperationCtx) nodeRequiresHttpReconciliation(nodeName string) bool { + node := woc.wf.GetNodeByName(nodeName) + if node == nil { + return false + } + // If this node is of type HTTP, it will need an HTTP reconciliation + if node.Type == wfv1.NodeTypeHTTP { + return true + } + for _, child := range node.Children { + // If any of the node's children need an HTTP reconciliation, the parent node will also need one + if woc.nodeRequiresHttpReconciliation(child) { + return true + } + } + // If neither of the children need one -- or if there are no children -- no HTTP reconciliation is needed. + return false +} diff --git a/workflow/controller/http_template_test.go b/workflow/controller/http_template_test.go new file mode 100644 index 000000000000..9fc8f84d86ff --- /dev/null +++ b/workflow/controller/http_template_test.go @@ -0,0 +1,41 @@ +package controller + +import ( + "testing" + + "github.com/stretchr/testify/assert" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" +) + +func TestNodeRequiresHttpReconciliation(t *testing.T) { + woc := &wfOperationCtx{ + wf: &v1alpha1.Workflow{ + ObjectMeta: v1.ObjectMeta{ + Name: "test-wf", + }, + Status: v1alpha1.WorkflowStatus{ + Nodes: v1alpha1.Nodes{ + "test-wf-1996333140": v1alpha1.NodeStatus{ + Name: "not-needed", + Type: v1alpha1.NodeTypePod, + }, + "test-wf-3939368189": v1alpha1.NodeStatus{ + Name: "parent", + Type: v1alpha1.NodeTypeSteps, + Children: []string{"child-http"}, + }, + "test-wf-1430055856": v1alpha1.NodeStatus{ + Name: "child-http", + Type: v1alpha1.NodeTypeHTTP, + }, + }, + }, + }, + } + + assert.False(t, woc.nodeRequiresHttpReconciliation("not-needed")) + assert.True(t, 
woc.nodeRequiresHttpReconciliation("child-http")) + assert.True(t, woc.nodeRequiresHttpReconciliation("parent")) +} diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index 9631e9493192..498f58d44a4b 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -46,6 +46,7 @@ import ( "github.com/argoproj/argo-workflows/v3/util/intstr" "github.com/argoproj/argo-workflows/v3/util/resource" "github.com/argoproj/argo-workflows/v3/util/retry" + argoruntime "github.com/argoproj/argo-workflows/v3/util/runtime" "github.com/argoproj/argo-workflows/v3/util/template" waitutil "github.com/argoproj/argo-workflows/v3/util/wait" "github.com/argoproj/argo-workflows/v3/workflow/common" @@ -84,7 +85,7 @@ type wfOperationCtx struct { // ArtifactRepository contains the default location of an artifact repository for container artifacts artifactRepository *wfv1.ArtifactRepository // map of completed pods with their corresponding phases - completedPods map[string]apiv1.PodPhase + completedPods map[string]*apiv1.Pod // deadline is the dealine time in which this operation should relinquish // its hold on the workflow so that an operation does not run for too long // and starve other workqueue items. 
It also enables workflow progress to @@ -142,6 +143,7 @@ func newWorkflowOperationCtx(wf *wfv1.Workflow, wfc *WorkflowController) *wfOper // You can use DeepCopy() to make a deep copy of original object and modify this copy // Or create a copy manually for better performance wfCopy := wf.DeepCopyObject().(*wfv1.Workflow) + woc := wfOperationCtx{ wf: wfCopy, orig: wf, @@ -154,7 +156,7 @@ func newWorkflowOperationCtx(wf *wfv1.Workflow, wfc *WorkflowController) *wfOper controller: wfc, globalParams: make(map[string]string), volumes: wf.Spec.DeepCopy().Volumes, - completedPods: make(map[string]apiv1.PodPhase), + completedPods: make(map[string]*apiv1.Pod), deadline: time.Now().UTC().Add(maxOperationTime), eventRecorder: wfc.eventRecorderManager.Get(wf.Namespace), preExecutionNodePhases: make(map[string]wfv1.NodePhase), @@ -177,6 +179,8 @@ func newWorkflowOperationCtx(wf *wfv1.Workflow, wfc *WorkflowController) *wfOper // later time // As you must not call `persistUpdates` twice, you must not call `operate` twice. 
func (woc *wfOperationCtx) operate(ctx context.Context) { + defer argoruntime.RecoverFromPanic(woc.log) + defer func() { if woc.wf.Status.Fulfilled() { woc.killDaemonedChildren("") @@ -263,6 +267,8 @@ func (woc *wfOperationCtx) operate(ctx context.Context) { if woc.wf.Status.Phase == wfv1.WorkflowUnknown { woc.markWorkflowRunning(ctx) + setWfPodNamesAnnotation(woc.wf) + err := woc.createPDBResource(ctx) if err != nil { msg := fmt.Sprintf("Unable to create PDB resource for workflow, %s error: %s", woc.wf.Name, err) @@ -352,18 +358,8 @@ func (woc *wfOperationCtx) operate(ctx context.Context) { return } - err = woc.taskSetReconciliation(ctx) - if err != nil { - woc.log.WithError(err).Error("error in workflowtaskset reconciliation") - return - } - - err = woc.reconcileAgentPod(ctx) - if err != nil { - woc.log.WithError(err).Error("error in agent pod reconciliation") - woc.markWorkflowError(ctx, err) - return - } + // Reconcile TaskSet and Agent for HTTP templates + woc.httpReconciliation(ctx) if node == nil || !node.Fulfilled() { // node can be nil if a workflow created immediately in a parallelism == 0 state @@ -423,6 +419,12 @@ func (woc *wfOperationCtx) operate(ctx context.Context) { } return } + + // If the onExit node (or any child of the onExit node) requires HTTP reconciliation, do it here + if onExitNode != nil && woc.nodeRequiresHttpReconciliation(onExitNode.Name) { + woc.httpReconciliation(ctx) + } + if onExitNode == nil || !onExitNode.Fulfilled() { return } @@ -515,6 +517,7 @@ func (woc *wfOperationCtx) setGlobalParameters(executionParameters wfv1.Argument woc.globalParams[cTimeVar] = strftime.Format("%"+string(char), woc.wf.ObjectMeta.CreationTimestamp.Time) } woc.globalParams[common.GlobalVarWorkflowCreationTimestamp+".s"] = strconv.FormatInt(woc.wf.ObjectMeta.CreationTimestamp.Time.Unix(), 10) + woc.globalParams[common.GlobalVarWorkflowCreationTimestamp+".RFC3339"] = woc.wf.ObjectMeta.CreationTimestamp.Format(time.RFC3339) if workflowParameters, err := 
json.Marshal(woc.execWf.Spec.Arguments.Parameters); err == nil { woc.globalParams[common.GlobalVarWorkflowParameters] = string(workflowParameters) @@ -524,7 +527,7 @@ func (woc *wfOperationCtx) setGlobalParameters(executionParameters wfv1.Argument woc.globalParams["workflow.parameters."+param.Name] = param.Value.String() } else if param.ValueFrom != nil { if param.ValueFrom.ConfigMapKeyRef != nil { - cmValue, err := util.GetConfigMapValue(woc.controller.configMapInformer, woc.wf.ObjectMeta.Namespace, param.ValueFrom.ConfigMapKeyRef.Name, param.ValueFrom.ConfigMapKeyRef.Key) + cmValue, err := common.GetConfigMapValue(woc.controller.configMapInformer, woc.wf.ObjectMeta.Namespace, param.ValueFrom.ConfigMapKeyRef.Name, param.ValueFrom.ConfigMapKeyRef.Key) if err != nil { return fmt.Errorf("failed to set global parameter %s from configmap with name %s and key %s: %w", param.Name, param.ValueFrom.ConfigMapKeyRef.Name, param.ValueFrom.ConfigMapKeyRef.Key, err) @@ -641,28 +644,8 @@ func (woc *wfOperationCtx) persistUpdates(ctx context.Context) { // It is important that we *never* label pods as completed until we successfully updated the workflow // Failing to do so means we can have inconsistent state. - // TODO: The completedPods will be labeled multiple times. I think it would be improved in the future. - // Send succeeded pods or completed pods to gcPods channel to delete it later depend on the PodGCStrategy. - // Notice we do not need to label the pod if we will delete it later for GC. Otherwise, that may even result in - // errors if we label a pod that was deleted already. 
- for podName, podPhase := range woc.completedPods { - if woc.execWf.Spec.PodGC != nil { - switch woc.execWf.Spec.PodGC.Strategy { - case wfv1.PodGCOnPodSuccess: - if podPhase == apiv1.PodSucceeded { - delay := woc.controller.Config.GetPodGCDeleteDelayDuration() - woc.controller.queuePodForCleanupAfter(woc.wf.Namespace, podName, deletePod, delay) - } - case wfv1.PodGCOnPodCompletion: - delay := woc.controller.Config.GetPodGCDeleteDelayDuration() - woc.controller.queuePodForCleanupAfter(woc.wf.Namespace, podName, deletePod, delay) - } - } else { - // label pods which will not be deleted - woc.controller.queuePodForCleanup(woc.wf.Namespace, podName, labelPodCompleted) - } - } - + // Pods may be be labeled multiple times. + woc.queuePodsForCleanup() } func (woc *wfOperationCtx) writeBackToInformer() error { @@ -981,15 +964,6 @@ func (woc *wfOperationCtx) podReconciliation(ctx context.Context) error { woc.updated = true } node := woc.wf.Status.Nodes[nodeID] - match := true - if woc.execWf.Spec.PodGC.GetLabelSelector() != nil { - var podLabels labels.Set = pod.GetLabels() - match, err = woc.execWf.Spec.PodGC.Matches(podLabels) - if err != nil { - woc.markWorkflowFailed(ctx, fmt.Sprintf("failed to parse label selector %s for pod GC: %v", woc.execWf.Spec.PodGC.LabelSelector, err)) - return - } - } if node.Type == wfv1.NodeTypePod { if node.HostNodeName != pod.Spec.NodeName { node.HostNodeName = pod.Spec.NodeName @@ -998,18 +972,13 @@ func (woc *wfOperationCtx) podReconciliation(ctx context.Context) error { } } if node.Fulfilled() && !node.IsDaemoned() { - if pod.GetLabels()[common.LabelKeyCompleted] == "true" { - return - } - if match { - woc.completedPods[pod.Name] = pod.Status.Phase - } if woc.shouldPrintPodSpec(node) { printPodSpecLog(pod, woc.wf.Name) } } - if node.Succeeded() && match { - woc.completedPods[pod.Name] = pod.Status.Phase + switch pod.Status.Phase { + case apiv1.PodSucceeded, apiv1.PodFailed: + woc.completedPods[pod.Name] = pod } } } @@ -1621,7 +1590,7 
@@ func (woc *wfOperationCtx) executeTemplate(ctx context.Context, nodeName string, // Inject the pod name. If the pod has a retry strategy, the pod name will be changed and will be injected when it // is determined if resolvedTmpl.IsPodType() && woc.retryStrategy(resolvedTmpl) == nil { - localParams[common.LocalVarPodName] = woc.wf.NodeID(nodeName) + localParams[common.LocalVarPodName] = wfutil.PodName(woc.wf.Name, nodeName, resolvedTmpl.Name, woc.wf.NodeID(nodeName)) } // Merge Template defaults to template @@ -1810,7 +1779,7 @@ func (woc *wfOperationCtx) executeTemplate(ctx context.Context, nodeName string, localParams := make(map[string]string) // Change the `pod.name` variable to the new retry node name if processedTmpl.IsPodType() { - localParams[common.LocalVarPodName] = woc.wf.NodeID(nodeName) + localParams[common.LocalVarPodName] = wfutil.PodName(woc.wf.Name, nodeName, processedTmpl.Name, woc.wf.NodeID(nodeName)) } // Inject the retryAttempt number localParams[common.LocalVarRetries] = strconv.Itoa(len(retryParentNode.Children)) @@ -2038,6 +2007,21 @@ func (woc *wfOperationCtx) findTemplate(pod *apiv1.Pod) *wfv1.Template { if node == nil { return nil // I don't expect this to happen in production, just in tests } + return woc.GetNodeTemplate(node) +} + +func (woc *wfOperationCtx) GetNodeTemplate(node *wfv1.NodeStatus) *wfv1.Template { + if node.TemplateRef != nil { + tmplCtx, err := woc.createTemplateContext(node.GetTemplateScope()) + if err != nil { + woc.markNodeError(node.Name, err) + } + tmpl, err := tmplCtx.GetTemplateFromRef(node.TemplateRef) + if err != nil { + woc.markNodeError(node.Name, err) + } + return tmpl + } return woc.wf.GetTemplateByName(node.TemplateName) } @@ -2390,7 +2374,7 @@ func (woc *wfOperationCtx) executeContainer(ctx context.Context, nodeName string func (woc *wfOperationCtx) getOutboundNodes(nodeID string) []string { node := woc.wf.Status.Nodes[nodeID] switch node.Type { - case wfv1.NodeTypeSkipped, wfv1.NodeTypeSuspend: + case 
wfv1.NodeTypeSkipped, wfv1.NodeTypeSuspend, wfv1.NodeTypeHTTP: return []string{node.ID} case wfv1.NodeTypePod: @@ -3520,3 +3504,15 @@ func (woc *wfOperationCtx) substituteGlobalVariables() error { } return nil } + +// setWfPodNamesAnnotation sets an annotation on a workflow with the pod naming +// convention version +func setWfPodNamesAnnotation(wf *wfv1.Workflow) { + podNameVersion := wfutil.GetPodNameVersion() + + if wf.Annotations == nil { + wf.Annotations = map[string]string{} + } + + wf.Annotations[common.AnnotationKeyPodNameVersion] = podNameVersion.String() +} diff --git a/workflow/controller/operator_test.go b/workflow/controller/operator_test.go index d0b65bdb4aef..4b8fa2aa4400 100644 --- a/workflow/controller/operator_test.go +++ b/workflow/controller/operator_test.go @@ -315,6 +315,7 @@ func TestGlobalParams(t *testing.T) { assert.Contains(t, woc.globalParams, fmt.Sprintf("%s.%s", "workflow.creationTimestamp", string(char))) } assert.Contains(t, woc.globalParams, "workflow.creationTimestamp.s") + assert.Contains(t, woc.globalParams, "workflow.creationTimestamp.RFC3339") assert.Contains(t, woc.globalParams, "workflow.duration") assert.Contains(t, woc.globalParams, "workflow.name") @@ -2789,20 +2790,30 @@ spec: ` func TestResolvePodNameInRetries(t *testing.T) { - ctx := context.Background() - wf := wfv1.MustUnmarshalWorkflow(podNameInRetries) - woc := newWoc(*wf) - woc.operate(ctx) - assert.Equal(t, wfv1.WorkflowRunning, woc.wf.Status.Phase) - pods, err := woc.controller.kubeclientset.CoreV1().Pods(wf.ObjectMeta.Namespace).List(ctx, metav1.ListOptions{}) - assert.NoError(t, err) - assert.True(t, len(pods.Items) > 0, "pod was not created successfully") + tests := []struct { + podNameVersion string + wantPodName string + }{ + {"v1", "output-value-placeholders-wf-3033990984"}, + {"v2", "output-value-placeholders-wf-tell-pod-name-3033990984"}, + } + for _, tt := range tests { + _ = os.Setenv("POD_NAMES", tt.podNameVersion) + ctx := context.Background() + wf := 
wfv1.MustUnmarshalWorkflow(podNameInRetries) + woc := newWoc(*wf) + woc.operate(ctx) + assert.Equal(t, wfv1.WorkflowRunning, woc.wf.Status.Phase) + pods, err := woc.controller.kubeclientset.CoreV1().Pods(wf.ObjectMeta.Namespace).List(ctx, metav1.ListOptions{}) + assert.NoError(t, err) + assert.True(t, len(pods.Items) > 0, "pod was not created successfully") - template, err := getPodTemplate(&pods.Items[0]) - assert.NoError(t, err) - parameterValue := template.Outputs.Parameters[0].Value - assert.NotNil(t, parameterValue) - assert.Equal(t, "output-value-placeholders-wf-3033990984", parameterValue.String()) + template, err := getPodTemplate(&pods.Items[0]) + assert.NoError(t, err) + parameterValue := template.Outputs.Parameters[0].Value + assert.NotNil(t, parameterValue) + assert.Equal(t, tt.wantPodName, parameterValue.String()) + } } var outputStatuses = ` @@ -7607,3 +7618,58 @@ func TestExitHandlerWithRetryNodeParam(t *testing.T) { onExitNode := woc.wf.GetNodeByName("exit-handler-with-param-xbh52[0].step-1.onExit") assert.Equal(t, "hello world", onExitNode.Inputs.Parameters[0].Value.String()) } + +func TestReOperateCompletedWf(t *testing.T) { + wf := wfv1.MustUnmarshalWorkflow(` +metadata: + name: my-wf + namespace: my-ns +spec: + entrypoint: main + templates: + - name: main + dag: + tasks: + - name: pod + template: pod + - name: pod + container: + image: my-image +`) + wf.Status.Phase = wfv1.WorkflowError + wf.Status.FinishedAt = metav1.Now() + cancel, controller := newController(wf) + defer cancel() + + ctx := context.Background() + woc := newWorkflowOperationCtx(wf, controller) + assert.NotPanics(t, func() { woc.operate(ctx) }) +} + +func TestSetWFPodNamesAnnotation(t *testing.T) { + defer func() { + _ = os.Unsetenv("POD_NAMES") + }() + + tests := []struct { + podNameVersion string + }{ + {"v1"}, + {"v2"}, + } + + for _, tt := range tests { + _ = os.Setenv("POD_NAMES", tt.podNameVersion) + + wf := wfv1.MustUnmarshalWorkflow(exitHandlerWithRetryNodeParam) + 
cancel, controller := newController(wf) + defer cancel() + + ctx := context.Background() + woc := newWorkflowOperationCtx(wf, controller) + + woc.operate(ctx) + annotations := woc.wf.ObjectMeta.GetAnnotations() + assert.Equal(t, annotations[common.AnnotationKeyPodNameVersion], tt.podNameVersion) + } +} diff --git a/workflow/controller/pod_cleanup.go b/workflow/controller/pod_cleanup.go new file mode 100644 index 000000000000..c167db4384b8 --- /dev/null +++ b/workflow/controller/pod_cleanup.go @@ -0,0 +1,52 @@ +package controller + +import ( + apiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + + wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" +) + +func (woc *wfOperationCtx) queuePodsForCleanup() { + delay := woc.controller.Config.GetPodGCDeleteDelayDuration() + podGC := woc.execWf.Spec.PodGC + strategy := podGC.GetStrategy() + selector, _ := podGC.GetLabelSelector() + workflowPhase := woc.wf.Status.Phase + for _, pod := range woc.completedPods { + switch determinePodCleanupAction(selector, pod.Labels, strategy, workflowPhase, pod.Status.Phase) { + case deletePod: + woc.controller.queuePodForCleanupAfter(pod.Namespace, pod.Name, deletePod, delay) + case labelPodCompleted: + woc.controller.queuePodForCleanup(pod.Namespace, pod.Name, labelPodCompleted) + } + } +} + +func determinePodCleanupAction( + selector labels.Selector, + podLabels map[string]string, + strategy wfv1.PodGCStrategy, + workflowPhase wfv1.WorkflowPhase, + podPhase apiv1.PodPhase, +) podCleanupAction { + switch { + case !selector.Matches(labels.Set(podLabels)): // if the pod will never be deleted, label it now + return labelPodCompleted + case strategy == wfv1.PodGCOnPodNone: + return labelPodCompleted + case strategy == wfv1.PodGCOnWorkflowCompletion && workflowPhase.Completed(): + return deletePod + case strategy == wfv1.PodGCOnWorkflowSuccess && workflowPhase == wfv1.WorkflowSucceeded: + return deletePod + case strategy == wfv1.PodGCOnPodCompletion: + return 
deletePod + case strategy == wfv1.PodGCOnPodSuccess && podPhase == apiv1.PodSucceeded: + return deletePod + case strategy == wfv1.PodGCOnPodSuccess && podPhase == apiv1.PodFailed: + return labelPodCompleted + case workflowPhase.Completed(): + return labelPodCompleted + } + return "" +} diff --git a/workflow/controller/pod_cleanup_test.go b/workflow/controller/pod_cleanup_test.go new file mode 100644 index 000000000000..80c69d02d467 --- /dev/null +++ b/workflow/controller/pod_cleanup_test.go @@ -0,0 +1,72 @@ +package controller + +import ( + "testing" + + "github.com/stretchr/testify/assert" + apiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + + wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" +) + +func Test_determinePodCleanupAction(t *testing.T) { + + assert.Equal(t, labelPodCompleted, determinePodCleanupAction(labels.Nothing(), nil, wfv1.PodGCOnPodCompletion, wfv1.WorkflowSucceeded, apiv1.PodSucceeded)) + assert.Equal(t, labelPodCompleted, determinePodCleanupAction(labels.Everything(), nil, wfv1.PodGCOnPodNone, wfv1.WorkflowSucceeded, apiv1.PodSucceeded)) + + type fields = struct { + Strategy wfv1.PodGCStrategy `json:"strategy,omitempty"` + WorkflowPhase wfv1.WorkflowPhase `json:"workflowPhase,omitempty"` + PodPhase apiv1.PodPhase `json:"podPhase,omitempty"` + } + for _, tt := range []struct { + Fields fields `json:"fields"` + Want podCleanupAction `json:"want,omitempty"` + }{ + + // strategy = 4 options + // workflow phase = 3 options + // pod phase = 2 options + + // 4 * 3 * 2 = 24 options + + {fields{wfv1.PodGCOnWorkflowSuccess, wfv1.WorkflowRunning, apiv1.PodSucceeded}, ""}, + {fields{wfv1.PodGCOnWorkflowSuccess, wfv1.WorkflowRunning, apiv1.PodFailed}, ""}, + {fields{wfv1.PodGCOnWorkflowSuccess, wfv1.WorkflowSucceeded, apiv1.PodSucceeded}, deletePod}, + {fields{wfv1.PodGCOnWorkflowSuccess, wfv1.WorkflowSucceeded, apiv1.PodFailed}, deletePod}, + {fields{wfv1.PodGCOnWorkflowSuccess, wfv1.WorkflowFailed, apiv1.PodSucceeded}, 
labelPodCompleted}, + {fields{wfv1.PodGCOnWorkflowSuccess, wfv1.WorkflowFailed, apiv1.PodFailed}, labelPodCompleted}, + + {fields{wfv1.PodGCOnWorkflowCompletion, wfv1.WorkflowRunning, apiv1.PodSucceeded}, ""}, + {fields{wfv1.PodGCOnWorkflowCompletion, wfv1.WorkflowRunning, apiv1.PodFailed}, ""}, + {fields{wfv1.PodGCOnWorkflowCompletion, wfv1.WorkflowSucceeded, apiv1.PodSucceeded}, deletePod}, + {fields{wfv1.PodGCOnWorkflowCompletion, wfv1.WorkflowSucceeded, apiv1.PodFailed}, deletePod}, + {fields{wfv1.PodGCOnWorkflowCompletion, wfv1.WorkflowFailed, apiv1.PodSucceeded}, deletePod}, + {fields{wfv1.PodGCOnWorkflowCompletion, wfv1.WorkflowFailed, apiv1.PodFailed}, deletePod}, + + {fields{wfv1.PodGCOnPodSuccess, wfv1.WorkflowRunning, apiv1.PodSucceeded}, deletePod}, + {fields{wfv1.PodGCOnPodSuccess, wfv1.WorkflowRunning, apiv1.PodFailed}, labelPodCompleted}, + {fields{wfv1.PodGCOnPodSuccess, wfv1.WorkflowSucceeded, apiv1.PodSucceeded}, deletePod}, + {fields{wfv1.PodGCOnPodSuccess, wfv1.WorkflowSucceeded, apiv1.PodFailed}, labelPodCompleted}, + {fields{wfv1.PodGCOnPodSuccess, wfv1.WorkflowFailed, apiv1.PodSucceeded}, deletePod}, + {fields{wfv1.PodGCOnPodSuccess, wfv1.WorkflowFailed, apiv1.PodFailed}, labelPodCompleted}, + + {fields{wfv1.PodGCOnPodCompletion, wfv1.WorkflowRunning, apiv1.PodSucceeded}, deletePod}, + {fields{wfv1.PodGCOnPodCompletion, wfv1.WorkflowRunning, apiv1.PodFailed}, deletePod}, + {fields{wfv1.PodGCOnPodCompletion, wfv1.WorkflowSucceeded, apiv1.PodSucceeded}, deletePod}, + {fields{wfv1.PodGCOnPodCompletion, wfv1.WorkflowSucceeded, apiv1.PodFailed}, deletePod}, + {fields{wfv1.PodGCOnPodCompletion, wfv1.WorkflowFailed, apiv1.PodSucceeded}, deletePod}, + {fields{wfv1.PodGCOnPodCompletion, wfv1.WorkflowFailed, apiv1.PodFailed}, deletePod}, + } { + t.Run(wfv1.MustMarshallJSON(tt), func(t *testing.T) { + action := determinePodCleanupAction( + labels.Everything(), + nil, + tt.Fields.Strategy, + tt.Fields.WorkflowPhase, + tt.Fields.PodPhase) + 
assert.Equal(t, tt.Want, action) + }) + } +} diff --git a/workflow/controller/steps.go b/workflow/controller/steps.go index 3ac112a6c8cb..d995b440ace9 100644 --- a/workflow/controller/steps.go +++ b/workflow/controller/steps.go @@ -8,11 +8,13 @@ import ( "time" "github.com/Knetic/govaluate" + log "github.com/sirupsen/logrus" "github.com/argoproj/argo-workflows/v3/errors" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo-workflows/v3/util/template" "github.com/argoproj/argo-workflows/v3/workflow/common" + controllercache "github.com/argoproj/argo-workflows/v3/workflow/controller/cache" "github.com/argoproj/argo-workflows/v3/workflow/templateresolution" ) @@ -152,7 +154,16 @@ func (woc *wfOperationCtx) executeSteps(ctx context.Context, nodeName string, tm node.Outputs = outputs woc.addOutputsToGlobalScope(node.Outputs) woc.wf.Status.Nodes[node.ID] = *node + if node.MemoizationStatus != nil { + c := woc.controller.cacheFactory.GetCache(controllercache.ConfigMapCache, node.MemoizationStatus.CacheName) + err := c.Save(ctx, node.MemoizationStatus.Key, node.ID, node.Outputs) + if err != nil { + woc.log.WithFields(log.Fields{"nodeID": node.ID}).WithError(err).Error("Failed to save node outputs to cache") + node.Phase = wfv1.NodeError + } + } } + return woc.markNodePhase(nodeName, wfv1.NodeSucceeded), nil } diff --git a/workflow/controller/taskset.go b/workflow/controller/taskset.go index 0def937824bd..18a7d52fa736 100644 --- a/workflow/controller/taskset.go +++ b/workflow/controller/taskset.go @@ -69,32 +69,33 @@ func (woc *wfOperationCtx) completeTaskSet(ctx context.Context) error { } func (woc *wfOperationCtx) getWorkflowTaskSet() (*wfv1.WorkflowTaskSet, error) { - taskSet, exist, err := woc.controller.wfTaskSetInformer.Informer().GetIndexer().GetByKey(woc.wf.Namespace + "/" + woc.wf.Name) + taskSet, exists, err := woc.controller.wfTaskSetInformer.Informer().GetIndexer().GetByKey(woc.wf.Namespace + "/" + woc.wf.Name) if err 
!= nil { return nil, err } - if !exist { + if !exists { return nil, nil } - return taskSet.(*wfv1.WorkflowTaskSet), nil } -func (woc *wfOperationCtx) taskSetReconciliation(ctx context.Context) error { - workflowTaskset, err := woc.getWorkflowTaskSet() +func (woc *wfOperationCtx) reconcileTaskSet(ctx context.Context) error { + workflowTaskSet, err := woc.getWorkflowTaskSet() if err != nil { return err } woc.log.WithField("workflow", woc.wf.Name).WithField("namespace", woc.wf.Namespace).Infof("TaskSet Reconciliation") - if workflowTaskset != nil && len(workflowTaskset.Status.Nodes) > 0 { - for nodeID, taskResult := range workflowTaskset.Status.Nodes { + if workflowTaskSet != nil && len(workflowTaskSet.Status.Nodes) > 0 { + for nodeID, taskResult := range workflowTaskSet.Status.Nodes { node := woc.wf.Status.Nodes[nodeID] + node.Outputs = taskResult.Outputs.DeepCopy() node.Phase = taskResult.Phase node.Message = taskResult.Message - woc.wf.Status.Nodes[nodeID] = node node.FinishedAt = metav1.Now() + + woc.wf.Status.Nodes[nodeID] = node woc.updated = true } } @@ -105,6 +106,7 @@ func (woc *wfOperationCtx) createTaskSet(ctx context.Context) error { if len(woc.taskSet) == 0 { return nil } + key := fmt.Sprintf("%s/%s", woc.wf.Namespace, woc.wf.Name) log.WithField("workflow", woc.wf.Name).WithField("namespace", woc.wf.Namespace).WithField("TaskSet", key).Infof("Creating TaskSet") taskSet := wfv1.WorkflowTaskSet{ @@ -143,7 +145,6 @@ func (woc *wfOperationCtx) createTaskSet(ctx context.Context) error { log.WithError(err).WithField("workflow", woc.wf.Name).WithField("namespace", woc.wf.Namespace).Error("Failed to patch WorkflowTaskSet") return fmt.Errorf("failed to patch TaskSet. 
%v", err) } - } else if err != nil { log.WithError(err).WithField("workflow", woc.wf.Name).WithField("namespace", woc.wf.Namespace).Error("Failed to create WorkflowTaskSet") return err diff --git a/workflow/controller/taskset_test.go b/workflow/controller/taskset_test.go index 6b1ff83945ae..32a14bc2949e 100644 --- a/workflow/controller/taskset_test.go +++ b/workflow/controller/taskset_test.go @@ -317,9 +317,9 @@ func TestNonHTTPTemplateScenario(t *testing.T) { wf := wfv1.MustUnmarshalWorkflow(helloWorldWf) woc := newWorkflowOperationCtx(wf, controller) ctx := context.Background() - t.Run("taskSetReconciliation", func(t *testing.T) { + t.Run("reconcileTaskSet", func(t *testing.T) { woc.operate(ctx) - err := woc.taskSetReconciliation(ctx) + err := woc.reconcileTaskSet(ctx) assert.NoError(t, err) }) t.Run("completeTaskSet", func(t *testing.T) { diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go index 9ebbe8a08349..112831a79d5d 100644 --- a/workflow/controller/workflowpod.go +++ b/workflow/controller/workflowpod.go @@ -377,7 +377,7 @@ func (woc *wfOperationCtx) createWorkflowPod(ctx context.Context, nodeName strin // Final substitution for workflow level PodSpecPatch localParams := make(map[string]string) if tmpl.IsPodType() { - localParams[common.LocalVarPodName] = woc.wf.NodeID(nodeName) + localParams[common.LocalVarPodName] = pod.Name } tmpl, err := common.ProcessArgs(tmpl, &wfv1.Arguments{}, woc.globalParams, localParams, false, woc.wf.Namespace, woc.controller.configMapInformer) if err != nil { diff --git a/workflow/controller/workflowpod_test.go b/workflow/controller/workflowpod_test.go index a24f7d7c1841..a2ac1d008f24 100644 --- a/workflow/controller/workflowpod_test.go +++ b/workflow/controller/workflowpod_test.go @@ -3,6 +3,7 @@ package controller import ( "context" "fmt" + "os" "path/filepath" "strconv" "testing" @@ -1223,6 +1224,10 @@ spec: image: docker/whalesay:latest command: [cowsay] args: ["hello world"] + outputs: + 
parameters: + - name: pod-name + value: "{{pod.name}}" ` var helloWorldWfWithWFPatch = ` @@ -1308,6 +1313,59 @@ func TestPodSpecPatch(t *testing.T) { assert.EqualError(t, err, "Failed to merge the workflow PodSpecPatch with the template PodSpecPatch due to invalid format") } +var helloWorldStepWfWithPatch = ` +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + name: hello-world +spec: + entrypoint: hello + templates: + - name: hello + steps: + - - name: hello + template: whalesay + - name: whalesay + podSpecPatch: '{"containers":[{"name":"main", "resources":{"limits":{"cpu": "800m"}}}]}' + container: + image: docker/whalesay:latest + command: [cowsay] + args: ["hello world"] + outputs: + parameters: + - name: pod-name + value: "{{pod.name}}" +` + +func TestPodSpecPatchPodName(t *testing.T) { + tests := []struct { + podNameVersion string + wantPodName string + workflowYaml string + }{ + {"v1", "hello-world", helloWorldWfWithPatch}, + {"v2", "hello-world", helloWorldWfWithPatch}, + {"v1", "hello-world-3731220306", helloWorldStepWfWithPatch}, + {"v2", "hello-world-whalesay-3731220306", helloWorldStepWfWithPatch}, + } + for _, tt := range tests { + os.Setenv("POD_NAMES", tt.podNameVersion) + ctx := context.Background() + wf := wfv1.MustUnmarshalWorkflow(tt.workflowYaml) + woc := newWoc(*wf) + woc.operate(ctx) + assert.Equal(t, wfv1.WorkflowRunning, woc.wf.Status.Phase) + pods, err := listPods(woc) + assert.NoError(t, err) + assert.True(t, len(pods.Items) > 0, "pod was not created successfully") + template, err := getPodTemplate(&pods.Items[0]) + assert.NoError(t, err) + parameterValue := template.Outputs.Parameters[0].Value + assert.NotNil(t, parameterValue) + assert.Equal(t, tt.wantPodName, parameterValue.String()) + } +} + func TestMainContainerCustomization(t *testing.T) { mainCtrSpec := &apiv1.Container{ Name: common.MainContainerName, diff --git a/workflow/cron/controller.go b/workflow/cron/controller.go index 4d1ee511df63..9e16e70ef528 100644 --- 
a/workflow/cron/controller.go +++ b/workflow/cron/controller.go @@ -182,12 +182,7 @@ func (cc *Controller) processNextCronItem(ctx context.Context) bool { // The job is currently scheduled, remove it and re add it. cc.cron.Delete(key.(string)) - cronSchedule := cronWf.Spec.Schedule - if cronWf.Spec.Timezone != "" { - cronSchedule = "CRON_TZ=" + cronWf.Spec.Timezone + " " + cronSchedule - } - - lastScheduledTimeFunc, err := cc.cron.AddJob(key.(string), cronSchedule, cronWorkflowOperationCtx) + lastScheduledTimeFunc, err := cc.cron.AddJob(key.(string), cronWf.Spec.GetScheduleString(), cronWorkflowOperationCtx) if err != nil { logCtx.WithError(err).Error("could not schedule CronWorkflow") return true diff --git a/workflow/cron/operator.go b/workflow/cron/operator.go index 62abc6fc57cd..1a2fba8ad5c8 100644 --- a/workflow/cron/operator.go +++ b/workflow/cron/operator.go @@ -73,6 +73,11 @@ func (woc *cronWfOperationCtx) run(ctx context.Context, scheduledRuntime time.Ti woc.log.Infof("Running %s", woc.name) + // If the cron workflow has a schedule that was just updated, update its annotation + if woc.cronWf.IsUsingNewSchedule() { + woc.cronWf.SetSchedule(woc.cronWf.Spec.GetScheduleString()) + } + err := woc.validateCronWorkflow() if err != nil { return @@ -129,7 +134,7 @@ func getWorkflowObjectReference(wf *v1alpha1.Workflow, runWf *v1alpha1.Workflow) } func (woc *cronWfOperationCtx) persistUpdate(ctx context.Context) { - woc.patch(ctx, map[string]interface{}{"status": woc.cronWf.Status}) + woc.patch(ctx, map[string]interface{}{"status": woc.cronWf.Status, "metadata": map[string]interface{}{"annotations": woc.cronWf.Annotations}}) } func (woc *cronWfOperationCtx) persistUpdateActiveWorkflows(ctx context.Context) { @@ -214,6 +219,10 @@ func (woc *cronWfOperationCtx) runOutstandingWorkflows(ctx context.Context) (boo } func (woc *cronWfOperationCtx) shouldOutstandingWorkflowsBeRun() (time.Time, error) { + // If the CronWorkflow schedule was just updated, then do not run any 
outstanding workflows. + if woc.cronWf.IsUsingNewSchedule() { + return time.Time{}, nil + } // If this CronWorkflow has been run before, check if we have missed any scheduled executions if woc.cronWf.Status.LastScheduledTime != nil { var now time.Time @@ -225,7 +234,7 @@ func (woc *cronWfOperationCtx) shouldOutstandingWorkflowsBeRun() (time.Time, err } now = time.Now().In(loc) - cronScheduleString := "CRON_TZ=" + woc.cronWf.Spec.Timezone + " " + woc.cronWf.Spec.Schedule + cronScheduleString := woc.cronWf.Spec.GetScheduleString() cronSchedule, err = cron.ParseStandard(cronScheduleString) if err != nil { return time.Time{}, fmt.Errorf("unable to form timezone schedule '%s': %s", cronScheduleString, err) diff --git a/workflow/cron/operator_test.go b/workflow/cron/operator_test.go index f268e2ec029c..1217fab8fd04 100644 --- a/workflow/cron/operator_test.go +++ b/workflow/cron/operator_test.go @@ -32,7 +32,6 @@ var scheduledWf = ` schedule: '* * * * *' startingDeadlineSeconds: 30 workflowSpec: - entrypoint: whalesay templates: - container: @@ -76,6 +75,7 @@ func TestRunOutstandingWorkflows(t *testing.T) { cronWf: &cronWf, log: logrus.WithFields(logrus.Fields{}), } + woc.cronWf.SetSchedule(woc.cronWf.Spec.GetScheduleString()) missedExecutionTime, err := woc.shouldOutstandingWorkflowsBeRun() assert.NoError(t, err) // The missedExecutionTime should be the last complete minute mark, which we can get with inferScheduledTime @@ -92,8 +92,14 @@ func TestRunOutstandingWorkflows(t *testing.T) { assert.NoError(t, err) assert.True(t, missedExecutionTime.IsZero()) - // Run the same test in a different timezone + // Same test, but simulate a change to the schedule immediately prior by setting a different last-used-schedule annotation + // In this case, since a schedule change is detected, not workflow should be run + woc.cronWf.SetSchedule("0 * * * *") + missedExecutionTime, err = woc.shouldOutstandingWorkflowsBeRun() + assert.NoError(t, err) + assert.True(t, 
missedExecutionTime.IsZero()) + // Run the same test in a different timezone testTimezone := "Pacific/Niue" testLocation, err := time.LoadLocation(testTimezone) if err != nil { @@ -109,6 +115,8 @@ func TestRunOutstandingWorkflows(t *testing.T) { cronWf: &cronWf, log: logrus.WithFields(logrus.Fields{}), } + // Reset last-used-schedule as if the current schedule has been used before + woc.cronWf.SetSchedule(woc.cronWf.Spec.GetScheduleString()) missedExecutionTime, err = woc.shouldOutstandingWorkflowsBeRun() assert.NoError(t, err) // The missedExecutionTime should be the last complete minute mark, which we can get with inferScheduledTime @@ -124,6 +132,13 @@ func TestRunOutstandingWorkflows(t *testing.T) { missedExecutionTime, err = woc.shouldOutstandingWorkflowsBeRun() assert.NoError(t, err) assert.True(t, missedExecutionTime.IsZero()) + + // Same test, but simulate a change to the schedule immediately prior by setting a different last-used-schedule annotation + // In this case, since a schedule change is detected, not workflow should be run + woc.cronWf.SetSchedule("0 * * * *") + missedExecutionTime, err = woc.shouldOutstandingWorkflowsBeRun() + assert.NoError(t, err) + assert.True(t, missedExecutionTime.IsZero()) } type fakeLister struct{} @@ -144,7 +159,6 @@ var invalidWf = ` schedule: '* * * * *' startingDeadlineSeconds: 30 workflowSpec: - entrypoint: whalesay templates: - container: @@ -263,3 +277,56 @@ func TestScheduleTimeParam(t *testing.T) { assert.Len(t, wf.GetAnnotations(), 1) assert.NotEmpty(t, wf.GetAnnotations()[common.AnnotationKeyCronWfScheduledTime]) } + +const lastUsedSchedule = `apiVersion: argoproj.io/v1alpha1 +kind: CronWorkflow +metadata: + name: test +spec: + concurrencyPolicy: Forbid + failedJobsHistoryLimit: 1 + schedule: 41 12 * * * + successfulJobsHistoryLimit: 1 + timezone: America/New_York + workflowSpec: + arguments: {} + entrypoint: job + templates: + - container: + args: + - /bin/echo "hello argo" + command: + - /bin/sh + - -c + image: 
alpine + imagePullPolicy: Always + name: job +` + +func TestLastUsedSchedule(t *testing.T) { + var cronWf v1alpha1.CronWorkflow + v1alpha1.MustUnmarshal([]byte(lastUsedSchedule), &cronWf) + + cs := fake.NewSimpleClientset() + testMetrics := metrics.New(metrics.ServerConfig{}, metrics.ServerConfig{}) + woc := &cronWfOperationCtx{ + wfClientset: cs, + wfClient: cs.ArgoprojV1alpha1().Workflows(""), + cronWfIf: cs.ArgoprojV1alpha1().CronWorkflows(""), + cronWf: &cronWf, + log: logrus.WithFields(logrus.Fields{}), + metrics: testMetrics, + scheduledTimeFunc: inferScheduledTime, + } + + missedExecutionTime, err := woc.shouldOutstandingWorkflowsBeRun() + if assert.NoError(t, err) { + assert.Equal(t, time.Time{}, missedExecutionTime) + } + + woc.cronWf.SetSchedule(woc.cronWf.Spec.GetScheduleString()) + + if assert.NotNil(t, woc.cronWf.Annotations) { + assert.Equal(t, woc.cronWf.Spec.GetScheduleString(), woc.cronWf.GetLatestSchedule()) + } +} diff --git a/workflow/executor/agent.go b/workflow/executor/agent.go index 1b4def9c9ce9..dd2976a5d04d 100644 --- a/workflow/executor/agent.go +++ b/workflow/executor/agent.go @@ -16,6 +16,7 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + "k8s.io/utils/pointer" "github.com/argoproj/argo-workflows/v3/errors" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" @@ -108,6 +109,9 @@ func (ae *AgentExecutor) Agent(ctx context.Context) error { } func (ae *AgentExecutor) executeHTTPTemplate(ctx context.Context, tmpl wfv1.Template) (*wfv1.Outputs, error) { + if tmpl.HTTP == nil { + return nil, fmt.Errorf("attempting to execute template that is not of type HTTP") + } httpTemplate := tmpl.HTTP request, err := http.NewRequest(httpTemplate.Method, httpTemplate.URL, bytes.NewBufferString(httpTemplate.Body)) if err != nil { @@ -125,12 +129,12 @@ func (ae *AgentExecutor) executeHTTPTemplate(ctx context.Context, tmpl wfv1.Temp } request.Header.Add(header.Name, value) } - response, 
err := argohttp.SendHttpRequest(request) + response, err := argohttp.SendHttpRequest(request, httpTemplate.TimeoutSeconds) if err != nil { return nil, err } outputs := &wfv1.Outputs{} - outputs.Parameters = append(outputs.Parameters, wfv1.Parameter{Name: "result", Value: wfv1.AnyStringPtr(response)}) + outputs.Result = pointer.StringPtr(response) return outputs, nil } diff --git a/workflow/executor/executor.go b/workflow/executor/executor.go index e3fee86eb455..4beee60e48c1 100644 --- a/workflow/executor/executor.go +++ b/workflow/executor/executor.go @@ -251,7 +251,7 @@ func (we *WorkflowExecutor) StageFiles() error { default: return nil } - err := ioutil.WriteFile(filePath, body, 0o600) + err := ioutil.WriteFile(filePath, body, 0o644) if err != nil { return errors.InternalWrapError(err) } diff --git a/workflow/executor/http/http.go b/workflow/executor/http/http.go index 32225c876345..9c775cff57b3 100644 --- a/workflow/executor/http/http.go +++ b/workflow/executor/http/http.go @@ -4,20 +4,23 @@ import ( "fmt" "io/ioutil" "net/http" + "time" log "github.com/sirupsen/logrus" ) -func SendHttpRequest(request *http.Request) (string, error) { - out, err := http.DefaultClient.Do(request) - +func SendHttpRequest(request *http.Request, timeout *int64) (string, error) { + httpClient := http.DefaultClient + if timeout != nil { + httpClient.Timeout = time.Duration(*timeout) * time.Second + } + out, err := httpClient.Do(request) if err != nil { return "", err } - // Close the connection defer out.Body.Close() - log.WithFields(log.Fields{"url": request.URL, "status": out.Status}).Info("HTTP request made") + log.WithFields(log.Fields{"url": request.URL, "status": out.Status}).Info("HTTP Request Sent") data, err := ioutil.ReadAll(out.Body) if err != nil { return "", err @@ -27,5 +30,4 @@ func SendHttpRequest(request *http.Request) (string, error) { } return string(data), nil - } diff --git a/workflow/executor/http/http_test.go b/workflow/executor/http/http_test.go index 
febab81fd241..1db2b2464a82 100644 --- a/workflow/executor/http/http_test.go +++ b/workflow/executor/http/http_test.go @@ -6,22 +6,31 @@ import ( "testing" "github.com/stretchr/testify/assert" + "k8s.io/utils/pointer" ) func TestSendHttpRequest(t *testing.T) { t.Run("SuccessfulRequest", func(t *testing.T) { - request, err := http.NewRequest(http.MethodGet, "http://www.google.com", bytes.NewBuffer([]byte{})) + request, err := http.NewRequest(http.MethodGet, "http://httpstat.us/200", bytes.NewBuffer([]byte{})) assert.NoError(t, err) - response, err := SendHttpRequest(request) + _, err = SendHttpRequest(request, nil) assert.NoError(t, err) - assert.NotEmpty(t, response) }) t.Run("NotFoundRequest", func(t *testing.T) { - request, err := http.NewRequest(http.MethodGet, "http://www.notfound.com/test", bytes.NewBuffer([]byte{})) + request, err := http.NewRequest(http.MethodGet, "http://httpstat.us/404", bytes.NewBuffer([]byte{})) assert.NoError(t, err) - response, err := SendHttpRequest(request) + response, err := SendHttpRequest(request, nil) assert.Error(t, err) assert.Empty(t, response) assert.Equal(t, "404 Not Found", err.Error()) }) + t.Run("TimeoutRequest", func(t *testing.T) { + // Request sleeps for 4 seconds, but timeout is 2 + request, err := http.NewRequest(http.MethodGet, "https://httpstat.us/200?sleep=4000", bytes.NewBuffer([]byte{})) + assert.NoError(t, err) + response, err := SendHttpRequest(request, pointer.Int64Ptr(2)) + assert.Error(t, err) + assert.Empty(t, response) + assert.Equal(t, `Get "https://httpstat.us/200?sleep=4000": context deadline exceeded (Client.Timeout exceeded while awaiting headers)`, err.Error()) + }) } diff --git a/workflow/util/pod_name.go b/workflow/util/pod_name.go index b993332dd8ac..cac26c5433f7 100644 --- a/workflow/util/pod_name.go +++ b/workflow/util/pod_name.go @@ -11,22 +11,52 @@ const ( k8sNamingHashLength = 10 ) +// PodNameVersion stores which type of pod names should be used. +// v1 represents the node id. 
+// v2 is the combination of a node id and template name. +type PodNameVersion string + +const ( + // PodNameV1 is the v1 name that uses node ids for pod names + PodNameV1 PodNameVersion = "v1" + // PodNameV2 is the v2 name that uses node id combined with + // the template name + PodNameV2 PodNameVersion = "v2" +) + +// String stringifies the pod name version +func (v PodNameVersion) String() string { + return string(v) +} + +// GetPodNameVersion returns the pod name version to be used +func GetPodNameVersion() PodNameVersion { + switch os.Getenv("POD_NAMES") { + case "v2": + return PodNameV2 + case "v1": + return PodNameV1 + default: + return PodNameV1 + } +} + // PodName return a deterministic pod name func PodName(workflowName, nodeName, templateName, nodeID string) string { - if os.Getenv("POD_NAMES") == "v2" { - if workflowName == nodeName { - return workflowName - } - - prefix := fmt.Sprintf("%s-%s", workflowName, templateName) - prefix = ensurePodNamePrefixLength(prefix) + if GetPodNameVersion() == PodNameV1 { + return nodeID + } - h := fnv.New32a() - _, _ = h.Write([]byte(nodeName)) - return fmt.Sprintf("%s-%v", prefix, h.Sum32()) + if workflowName == nodeName { + return workflowName } - return nodeID + prefix := fmt.Sprintf("%s-%s", workflowName, templateName) + prefix = ensurePodNamePrefixLength(prefix) + + h := fnv.New32a() + _, _ = h.Write([]byte(nodeName)) + return fmt.Sprintf("%s-%v", prefix, h.Sum32()) } func ensurePodNamePrefixLength(prefix string) string { diff --git a/workflow/util/util.go b/workflow/util/util.go index 62451bad7169..c0044422eaf4 100644 --- a/workflow/util/util.go +++ b/workflow/util/util.go @@ -169,7 +169,7 @@ func IsWorkflowCompleted(wf *wfv1.Workflow) bool { return false } -// SubmitWorkflow validates and submit a single workflow and override some of the fields of the workflow +// SubmitWorkflow validates and submits a single workflow and overrides some of the fields of the workflow func SubmitWorkflow(ctx context.Context, wfIf 
v1alpha1.WorkflowInterface, wfClientset wfclientset.Interface, namespace string, wf *wfv1.Workflow, opts *wfv1.SubmitOpts) (*wfv1.Workflow, error) { err := ApplySubmitOpts(wf, opts) if err != nil { @@ -178,7 +178,7 @@ func SubmitWorkflow(ctx context.Context, wfIf v1alpha1.WorkflowInterface, wfClie wftmplGetter := templateresolution.WrapWorkflowTemplateInterface(wfClientset.ArgoprojV1alpha1().WorkflowTemplates(namespace)) cwftmplGetter := templateresolution.WrapClusterWorkflowTemplateInterface(wfClientset.ArgoprojV1alpha1().ClusterWorkflowTemplates()) - _, err = validate.ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, validate.ValidateOpts{}) + _, err = validate.ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, validate.ValidateOpts{Submit: true}) if err != nil { return nil, err } @@ -804,8 +804,9 @@ func retryWorkflow(ctx context.Context, kubeClient kubernetes.Interface, hydrato return nil, errors.InternalErrorf("Workflow cannot be retried with node %s in %s phase", node.Name, node.Phase) } if node.Type == wfv1.NodeTypePod { - log.Infof("Deleting pod: %s", node.ID) - podName := PodName(wf.Name, node.Name, node.TemplateName, node.ID) + templateName := getTemplateFromNode(node) + podName := PodName(wf.Name, node.Name, templateName, node.ID) + log.Infof("Deleting pod: %s", podName) err := podIf.Delete(ctx, podName, metav1.DeleteOptions{}) if err != nil && !apierr.IsNotFound(err) { return nil, errors.InternalWrapError(err) @@ -856,6 +857,13 @@ func retryWorkflow(ctx context.Context, kubeClient kubernetes.Interface, hydrato return wfClient.Update(ctx, newWF, metav1.UpdateOptions{}) } +func getTemplateFromNode(node wfv1.NodeStatus) string { + if node.TemplateRef != nil { + return node.TemplateRef.Template + } + return node.TemplateName +} + func getNodeIDsToReset(restartSuccessful bool, nodeFieldSelector string, nodes wfv1.Nodes) (map[string]bool, error) { nodeIDsToReset := make(map[string]bool) if !restartSuccessful || len(nodeFieldSelector) == 0 { diff --git 
a/workflow/util/util_test.go b/workflow/util/util_test.go index 19b355e8a7ab..b2423a7a38d0 100644 --- a/workflow/util/util_test.go +++ b/workflow/util/util_test.go @@ -839,3 +839,34 @@ func TestToUnstructured(t *testing.T) { assert.Equal(t, workflow.Version, gv.Version) } } + +func TestGetTemplateFromNode(t *testing.T) { + cases := []struct { + inputNode wfv1.NodeStatus + expectedTemplateName string + }{ + { + inputNode: wfv1.NodeStatus{ + TemplateRef: &wfv1.TemplateRef{ + Name: "foo-workflowtemplate", + Template: "foo-template", + ClusterScope: false, + }, + TemplateName: "", + }, + expectedTemplateName: "foo-template", + }, + { + inputNode: wfv1.NodeStatus{ + TemplateRef: nil, + TemplateName: "bar-template", + }, + expectedTemplateName: "bar-template", + }, + } + + for _, tc := range cases { + actual := getTemplateFromNode(tc.inputNode) + assert.Equal(t, tc.expectedTemplateName, actual) + } +} diff --git a/workflow/validate/validate.go b/workflow/validate/validate.go index ec08ee760b29..3c937e6ed265 100644 --- a/workflow/validate/validate.go +++ b/workflow/validate/validate.go @@ -33,6 +33,7 @@ type ValidateOpts struct { // skip some validations which is permissible during linting but not submission (e.g. missing // input parameters to the workflow) Lint bool + // ContainerRuntimeExecutor will trigger additional validation checks specific to different // types of executors. For example, the inability of kubelet/k8s executors to copy artifacts // out of the base image layer. If unspecified, will use docker executor validation @@ -44,6 +45,10 @@ type ValidateOpts struct { // WorkflowTemplateValidation indicates that the current context is validating a WorkflowTemplate or ClusterWorkflowTemplate WorkflowTemplateValidation bool + + // Submit indicates that the current operation is a workflow submission. This will impose + // more stringent requirements (e.g. 
require input values for all spec arguments) + Submit bool } // templateValidationCtx is the context for validating a workflow spec @@ -149,13 +154,12 @@ func ValidateWorkflow(wftmplGetter templateresolution.WorkflowTemplateNamespaced if err != nil { return nil, errors.Errorf(errors.CodeBadRequest, "spec.templates%s", err.Error()) } - if ctx.Lint { - // if we are just linting we don't care if spec.arguments.parameters.XXX doesn't have an - // explicit value. workflows without a default value is a desired use case - err = validateArgumentsFieldNames("spec.arguments.", wfArgs) - } else { - err = validateArguments("spec.arguments.", wfArgs) - } + + // if we are linting, we don't care if spec.arguments.parameters.XXX doesn't have an + // explicit value. Workflow templates without a default value are also a desired use + // case, since values will be provided during workflow submission. + allowEmptyValues := ctx.Lint || (ctx.WorkflowTemplateValidation && !ctx.Submit) + err = validateArguments("spec.arguments.", wfArgs, allowEmptyValues) if err != nil { return nil, err } @@ -213,12 +217,11 @@ func ValidateWorkflow(wftmplGetter templateresolution.WorkflowTemplateNamespaced } } - if wf.Spec.PodGC != nil { - switch wf.Spec.PodGC.Strategy { - case wfv1.PodGCOnPodCompletion, wfv1.PodGCOnPodSuccess, wfv1.PodGCOnWorkflowCompletion, wfv1.PodGCOnWorkflowSuccess: - default: - return nil, errors.Errorf(errors.CodeBadRequest, "podGC.strategy unknown strategy '%s'", wf.Spec.PodGC.Strategy) - } + if !wf.Spec.PodGC.GetStrategy().IsValid() { + return nil, errors.Errorf(errors.CodeBadRequest, "podGC.strategy unknown strategy '%s'", wf.Spec.PodGC.Strategy) + } + if _, err := wf.Spec.PodGC.GetLabelSelector(); err != nil { + return nil, errors.Errorf(errors.CodeBadRequest, "podGC.labelSelector invalid: %v", err) } // Check if all templates can be resolved. 
@@ -239,7 +242,7 @@ func ValidateWorkflowTemplateRefFields(wfSpec wfv1.WorkflowSpec) error { } // ValidateWorkflowTemplate accepts a workflow template and performs validation against it. -func ValidateWorkflowTemplate(wftmplGetter templateresolution.WorkflowTemplateNamespacedGetter, cwftmplGetter templateresolution.ClusterWorkflowTemplateGetter, wftmpl *wfv1.WorkflowTemplate) (*wfv1.Conditions, error) { +func ValidateWorkflowTemplate(wftmplGetter templateresolution.WorkflowTemplateNamespacedGetter, cwftmplGetter templateresolution.ClusterWorkflowTemplateGetter, wftmpl *wfv1.WorkflowTemplate, opts ValidateOpts) (*wfv1.Conditions, error) { if len(wftmpl.Name) > maxCharsInObjectName { return nil, fmt.Errorf("workflow template name %q must not be more than 63 characters long (currently %d)", wftmpl.Name, len(wftmpl.Name)) } @@ -251,11 +254,13 @@ func ValidateWorkflowTemplate(wftmplGetter templateresolution.WorkflowTemplateNa }, Spec: wftmpl.Spec.WorkflowSpec, } - return ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{IgnoreEntrypoint: wf.Spec.Entrypoint == "", WorkflowTemplateValidation: true}) + opts.IgnoreEntrypoint = wf.Spec.Entrypoint == "" + opts.WorkflowTemplateValidation = true + return ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, opts) } // ValidateClusterWorkflowTemplate accepts a cluster workflow template and performs validation against it. 
-func ValidateClusterWorkflowTemplate(wftmplGetter templateresolution.WorkflowTemplateNamespacedGetter, cwftmplGetter templateresolution.ClusterWorkflowTemplateGetter, cwftmpl *wfv1.ClusterWorkflowTemplate) (*wfv1.Conditions, error) { +func ValidateClusterWorkflowTemplate(wftmplGetter templateresolution.WorkflowTemplateNamespacedGetter, cwftmplGetter templateresolution.ClusterWorkflowTemplateGetter, cwftmpl *wfv1.ClusterWorkflowTemplate, opts ValidateOpts) (*wfv1.Conditions, error) { if len(cwftmpl.Name) > maxCharsInObjectName { return nil, fmt.Errorf("cluster workflow template name %q must not be more than 63 characters long (currently %d)", cwftmpl.Name, len(cwftmpl.Name)) } @@ -267,7 +272,9 @@ func ValidateClusterWorkflowTemplate(wftmplGetter templateresolution.WorkflowTem }, Spec: cwftmpl.Spec.WorkflowSpec, } - return ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{IgnoreEntrypoint: wf.Spec.Entrypoint == "", WorkflowTemplateValidation: true}) + opts.IgnoreEntrypoint = wf.Spec.Entrypoint == "" + opts.WorkflowTemplateValidation = true + return ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, opts) } // ValidateCronWorkflow validates a CronWorkflow @@ -303,6 +310,15 @@ func ValidateCronWorkflow(wftmplGetter templateresolution.WorkflowTemplateNamesp return nil } +func (ctx *templateValidationCtx) validateInitContainers(containers []wfv1.UserContainer) error { + for _, container := range containers { + if len(container.Container.Name) == 0 { + return errors.Errorf(errors.CodeBadRequest, "initContainers must all have container name") + } + } + return nil +} + func (ctx *templateValidationCtx) validateTemplate(tmpl *wfv1.Template, tmplCtx *templateresolution.Context, args wfv1.ArgumentsProvider) error { if err := validateTemplateType(tmpl); err != nil { return err @@ -313,6 +329,10 @@ func (ctx *templateValidationCtx) validateTemplate(tmpl *wfv1.Template, tmplCtx return err } + if err := ctx.validateInitContainers(tmpl.InitContainers); err != nil { + 
return err + } + localParams := make(map[string]string) if tmpl.IsPodType() { localParams[common.LocalVarPodName] = placeholderGenerator.NextPlaceholder() @@ -680,12 +700,12 @@ func (ctx *templateValidationCtx) validateLeaf(scope map[string]interface{}, tmp return nil } -func validateArguments(prefix string, arguments wfv1.Arguments) error { +func validateArguments(prefix string, arguments wfv1.Arguments, allowEmptyValues bool) error { err := validateArgumentsFieldNames(prefix, arguments) if err != nil { return err } - return validateArgumentsValues(prefix, arguments) + return validateArgumentsValues(prefix, arguments, allowEmptyValues) } func validateArgumentsFieldNames(prefix string, arguments wfv1.Arguments) error { @@ -703,10 +723,12 @@ func validateArgumentsFieldNames(prefix string, arguments wfv1.Arguments) error } // validateArgumentsValues ensures that all arguments have parameter values or artifact locations -func validateArgumentsValues(prefix string, arguments wfv1.Arguments) error { +func validateArgumentsValues(prefix string, arguments wfv1.Arguments, allowEmptyValues bool) error { for _, param := range arguments.Parameters { if param.ValueFrom == nil && param.Value == nil { - return errors.Errorf(errors.CodeBadRequest, "%s%s.value is required", prefix, param.Name) + if !allowEmptyValues { + return errors.Errorf(errors.CodeBadRequest, "%s%s.value is required", prefix, param.Name) + } } if param.Enum != nil { if len(param.Enum) == 0 { @@ -761,7 +783,7 @@ func (ctx *templateValidationCtx) validateSteps(scope map[string]interface{}, tm if err != nil { return errors.Errorf(errors.CodeBadRequest, "templates.%s.steps[%d].%s %s", tmpl.Name, i, step.Name, err.Error()) } - err = validateArguments(fmt.Sprintf("templates.%s.steps[%d].%s.arguments.", tmpl.Name, i, step.Name), step.Arguments) + err = validateArguments(fmt.Sprintf("templates.%s.steps[%d].%s.arguments.", tmpl.Name, i, step.Name), step.Arguments, false) if err != nil { return err } @@ -894,6 +916,7 @@ 
func (ctx *templateValidationCtx) addOutputsToScope(tmpl *wfv1.Template, prefix case wfv1.TemplateTypeScript, wfv1.TemplateTypeContainerSet: scope[fmt.Sprintf("%s.outputs.result", prefix)] = true scope[fmt.Sprintf("%s.exitCode", prefix)] = true + scope[fmt.Sprintf("%s.outputs.parameters", prefix)] = true default: scope[fmt.Sprintf("%s.outputs.parameters", prefix)] = true } @@ -1265,7 +1288,7 @@ func (ctx *templateValidationCtx) validateDAG(scope map[string]interface{}, tmpl if err != nil { return errors.Errorf(errors.CodeBadRequest, "templates.%s.tasks.%s %s", tmpl.Name, task.Name, err.Error()) } - err = validateArguments(fmt.Sprintf("templates.%s.tasks.%s.arguments.", tmpl.Name, task.Name), task.Arguments) + err = validateArguments(fmt.Sprintf("templates.%s.tasks.%s.arguments.", tmpl.Name, task.Name), task.Arguments, false) if err != nil { return err } diff --git a/workflow/validate/validate_test.go b/workflow/validate/validate_test.go index 997d360bf5d3..6ffd62b5ed70 100644 --- a/workflow/validate/validate_test.go +++ b/workflow/validate/validate_test.go @@ -45,9 +45,9 @@ func validateWithOptions(yamlStr string, opts ValidateOpts) (*wfv1.Conditions, e // validateWorkflowTemplate is a test helper to accept WorkflowTemplate YAML as a string and return // its validation result. 
-func validateWorkflowTemplate(yamlStr string) error { +func validateWorkflowTemplate(yamlStr string, opts ValidateOpts) error { wftmpl := unmarshalWftmpl(yamlStr) - _, err := ValidateWorkflowTemplate(wftmplGetter, cwftmplGetter, wftmpl) + _, err := ValidateWorkflowTemplate(wftmplGetter, cwftmplGetter, wftmpl, opts) return err } @@ -1671,7 +1671,7 @@ spec: ` func TestWorkflowTemplate(t *testing.T) { - err := validateWorkflowTemplate(templateRefTarget) + err := validateWorkflowTemplate(templateRefTarget, ValidateOpts{}) assert.NoError(t, err) } @@ -1814,38 +1814,42 @@ func TestInvalidResourceWorkflow(t *testing.T) { } var invalidPodGC = ` -apiVersion: argoproj.io/v1alpha1 -kind: Workflow metadata: generateName: pod-gc-strategy-unknown- spec: podGC: strategy: Foo - entrypoint: whalesay + entrypoint: main templates: - - name: whalesay + - name: main container: - image: docker/whalesay:latest - command: [cowsay] - args: ["hello world"] + image: docker/whalesay ` // TestIncorrectPodGCStrategy verifies pod gc strategy is correct. 
func TestIncorrectPodGCStrategy(t *testing.T) { wf := unmarshalWf(invalidPodGC) _, err := ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{}) - assert.EqualError(t, err, "podGC.strategy unknown strategy 'Foo'") +} - for _, start := range []wfv1.PodGCStrategy{wfv1.PodGCOnPodCompletion, wfv1.PodGCOnPodSuccess, wfv1.PodGCOnWorkflowCompletion, wfv1.PodGCOnWorkflowSuccess} { - wf.Spec.PodGC.Strategy = start - _, err = ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{}) - assert.NoError(t, err) - - wf.Spec.PodGC.LabelSelector = &metav1.LabelSelector{MatchLabels: map[string]string{"evicted": "true"}} - _, err = ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{}) - assert.NoError(t, err) - } +func TestInvalidPodGCLabelSelector(t *testing.T) { + wf := unmarshalWf(` +metadata: + generateName: pod-gc-strategy-unknown- +spec: + podGC: + labelSelector: + matchExpressions: + - {key: environment, operator: InvalidOperator, values: [dev]} + entrypoint: main + templates: + - name: main + container: + image: docker/whalesay +`) + _, err := ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{}) + assert.EqualError(t, err, "podGC.labelSelector invalid: \"InvalidOperator\" is not a valid pod selector operator") } //nolint:gosec @@ -2298,7 +2302,7 @@ spec: ` func TestWorkflowTemplateWithEntrypoint(t *testing.T) { - err := validateWorkflowTemplate(wfTemplateWithEntrypoint) + err := validateWorkflowTemplate(wfTemplateWithEntrypoint, ValidateOpts{}) assert.NoError(t, err) } @@ -2567,7 +2571,7 @@ spec: ` func TestWorkflowTemplateLabels(t *testing.T) { - err := validateWorkflowTemplate(testWorkflowTemplateLabels) + err := validateWorkflowTemplate(testWorkflowTemplateLabels, ValidateOpts{}) assert.NoError(t, err) } @@ -2733,17 +2737,29 @@ spec: ` func TestWorkflowTemplateWithEnumValue(t *testing.T) { - err := validateWorkflowTemplate(workflowTeamplateWithEnumValues) + err := validateWorkflowTemplate(workflowTeamplateWithEnumValues, 
ValidateOpts{}) + assert.NoError(t, err) + err = validateWorkflowTemplate(workflowTeamplateWithEnumValues, ValidateOpts{Lint: true}) + assert.NoError(t, err) + err = validateWorkflowTemplate(workflowTeamplateWithEnumValues, ValidateOpts{Submit: true}) assert.NoError(t, err) } func TestWorkflowTemplateWithEmptyEnumList(t *testing.T) { - err := validateWorkflowTemplate(workflowTemplateWithEmptyEnumList) + err := validateWorkflowTemplate(workflowTemplateWithEmptyEnumList, ValidateOpts{}) + assert.EqualError(t, err, "spec.arguments.message.enum should contain at least one value") + err = validateWorkflowTemplate(workflowTemplateWithEmptyEnumList, ValidateOpts{Lint: true}) + assert.EqualError(t, err, "spec.arguments.message.enum should contain at least one value") + err = validateWorkflowTemplate(workflowTemplateWithEmptyEnumList, ValidateOpts{Submit: true}) assert.EqualError(t, err, "spec.arguments.message.enum should contain at least one value") } func TestWorkflowTemplateWithArgumentValueNotFromEnumList(t *testing.T) { - err := validateWorkflowTemplate(workflowTemplateWithArgumentValueNotFromEnumList) + err := validateWorkflowTemplate(workflowTemplateWithArgumentValueNotFromEnumList, ValidateOpts{}) + assert.EqualError(t, err, "spec.arguments.message.value should be present in spec.arguments.message.enum list") + err = validateWorkflowTemplate(workflowTemplateWithArgumentValueNotFromEnumList, ValidateOpts{Lint: true}) + assert.EqualError(t, err, "spec.arguments.message.value should be present in spec.arguments.message.enum list") + err = validateWorkflowTemplate(workflowTemplateWithArgumentValueNotFromEnumList, ValidateOpts{Submit: true}) assert.EqualError(t, err, "spec.arguments.message.value should be present in spec.arguments.message.enum list") } @@ -2787,7 +2803,7 @@ spec: ` func TestValidActiveDeadlineSecondsArgoVariable(t *testing.T) { - err := validateWorkflowTemplate(validActiveDeadlineSecondsArgoVariable) + err := 
validateWorkflowTemplate(validActiveDeadlineSecondsArgoVariable, ValidateOpts{}) assert.NoError(t, err) } @@ -2797,11 +2813,11 @@ func TestMaxLengthName(t *testing.T) { assert.EqualError(t, err, "workflow name \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\" must not be more than 63 characters long (currently 70)") wftmpl := &wfv1.WorkflowTemplate{ObjectMeta: metav1.ObjectMeta{Name: strings.Repeat("a", 70)}} - _, err = ValidateWorkflowTemplate(wftmplGetter, cwftmplGetter, wftmpl) + _, err = ValidateWorkflowTemplate(wftmplGetter, cwftmplGetter, wftmpl, ValidateOpts{}) assert.EqualError(t, err, "workflow template name \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\" must not be more than 63 characters long (currently 70)") cwftmpl := &wfv1.ClusterWorkflowTemplate{ObjectMeta: metav1.ObjectMeta{Name: strings.Repeat("a", 70)}} - _, err = ValidateClusterWorkflowTemplate(wftmplGetter, cwftmplGetter, cwftmpl) + _, err = ValidateClusterWorkflowTemplate(wftmplGetter, cwftmplGetter, cwftmpl, ValidateOpts{}) assert.EqualError(t, err, "cluster workflow template name \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\" must not be more than 63 characters long (currently 70)") cwf := &wfv1.CronWorkflow{ObjectMeta: metav1.ObjectMeta{Name: strings.Repeat("a", 60)}} @@ -2904,7 +2920,7 @@ spec: invalidContainerSetTemplateWithOutputParams, } for _, manifest := range invalidManifests { - err := validateWorkflowTemplate(manifest) + err := validateWorkflowTemplate(manifest, ValidateOpts{}) if assert.NotNil(t, err) { assert.Contains(t, err.Error(), "containerSet.containers must have a container named \"main\" for input or output") } @@ -2994,3 +3010,168 @@ spec: _, err := validate(wf) assert.NoError(t, err) } + +var templateReferenceWorkflowConfigMapRefArgument = ` +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: arguments-parameters-from-configmap- +spec: + entrypoint: whalesay + 
serviceAccountName: argo + arguments: + parameters: + - name: message + valueFrom: + configMapKeyRef: + name: simple-parameters + key: msg + templates: + - name: whalesay + inputs: + parameters: + - name: message + container: + image: docker/whalesay:latest + command: [cowsay] + args: ["{{inputs.parameters.message}}"] +` + +func TestTemplateReferenceWorkflowConfigMapRefArgument(t *testing.T) { + _, err := validate(templateReferenceWorkflowConfigMapRefArgument) + assert.NoError(t, err) +} + +var stepsOutputParametersForScript = ` +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: parameter-aggregation- +spec: + entrypoint: parameter-aggregation + templates: + - name: parameter-aggregation + steps: + - - name: echo-num + template: echo-num + arguments: + parameters: + - name: num + value: "{{item}}" + withItems: [1, 2, 3, 4] + - - name: echo-num-from-param + template: echo-num + arguments: + parameters: + - name: num + value: "{{item.num}}" + withParam: "{{steps.echo-num.outputs.parameters}}" + + - name: echo-num + inputs: + parameters: + - name: num + script: + image: argoproj/argosay:v1 + command: [sh, -x] + source: | + sleep 1 + echo {{inputs.parameters.num}} > /tmp/num + outputs: + parameters: + - name: num + valueFrom: + path: /tmp/num +` + +func TestStepsOutputParametersForScript(t *testing.T) { + _, err := validate(stepsOutputParametersForScript) + assert.NoError(t, err) +} + +var stepsOutputParametersForContainerSet = ` +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: parameter-aggregation- +spec: + entrypoint: parameter-aggregation + templates: + - name: parameter-aggregation + steps: + - - name: echo-num + template: echo-num + arguments: + parameters: + - name: num + value: "{{item}}" + withItems: [1, 2, 3, 4] + - - name: echo-num-from-param + template: echo-num + arguments: + parameters: + - name: num + value: "{{item.num}}" + withParam: "{{steps.echo-num.outputs.parameters}}" + + - name: echo-num + 
inputs: + parameters: + - name: num + containerSet: + containers: + - name: main + image: 'docker/whalesay:latest' + command: + - sh + - '-c' + args: + - 'sleep 1; echo {{inputs.parameters.num}} > /tmp/num' + outputs: + parameters: + - name: num + valueFrom: + path: /tmp/num +` + +func TestStepsOutputParametersForContainerSet(t *testing.T) { + _, err := validate(stepsOutputParametersForContainerSet) + assert.NoError(t, err) +} + +var testInitContainerHasName = ` +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: spurious- +spec: + entrypoint: main + + templates: + - name: main + dag: + tasks: + - name: spurious + template: spurious + + - name: spurious + retryStrategy: + retryPolicy: Always + initContainers: + - image: alpine:latest + # name: sleep + command: + - sleep + - "15" + container: + image: alpine:latest + command: + - echo + - "i am running" +` + +func TestInitContainerHasName(t *testing.T) { + + _, err := validate(testInitContainerHasName) + assert.EqualError(t, err, "templates.main.tasks.spurious initContainers must all have container name") +}