Skip to content

Commit

Permalink
Merge branch 'master' into upstream-nodestatus
Browse files Browse the repository at this point in the history
  • Loading branch information
isubasinghe authored Aug 1, 2023
2 parents 92a903e + ce9e50c commit d26a57c
Show file tree
Hide file tree
Showing 77 changed files with 1,971 additions and 919 deletions.
11 changes: 9 additions & 2 deletions .devcontainer/devcontainer.json
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
"version": "1.20"
},
"ghcr.io/devcontainers/features/node:1": {
"version": "16"
"version": "20"
},
"ghcr.io/devcontainers/features/docker-in-docker:2": {},
"ghcr.io/devcontainers/features/python:1": {}
Expand All @@ -15,7 +15,14 @@
"hostRequirements": {
"cpus": 4
},
"runArgs": ["--add-host=host.docker.internal:host-gateway"],
"runArgs": [
"--add-host=host.docker.internal:host-gateway",
"--add-host=dex:127.0.0.1",
"--add-host=minio:127.0.0.1",
"--add-host=postgres:127.0.0.1",
"--add-host=mysql:127.0.0.1",
"--add-host=azurite:127.0.0.1"
],
"onCreateCommand": ".devcontainer/pre-build.sh",
"workspaceMount": "source=${localWorkspaceFolder},target=/home/vscode/go/src/github.com/argoproj/argo-workflows,type=bind",
"workspaceFolder": "/home/vscode/go/src/github.com/argoproj/argo-workflows",
Expand Down
7 changes: 0 additions & 7 deletions .devcontainer/pre-build.sh
Original file line number Diff line number Diff line change
@@ -1,13 +1,6 @@
#!/usr/bin/env sh
set -eux

# Add hosts
sudo bash -c 'echo "127.0.0.1 dex" >> /etc/hosts'
sudo bash -c 'echo "127.0.0.1 minio" >> /etc/hosts'
sudo bash -c 'echo "127.0.0.1 postgres" >> /etc/hosts'
sudo bash -c 'echo "127.0.0.1 mysql" >> /etc/hosts'
sudo bash -c 'echo "127.0.0.1 azurite" >> /etc/hosts'

# install kubernetes
wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash
k3d cluster get k3s-default || k3d cluster create --image rancher/k3s:v1.27.3-k3s1 --wait
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/ci-build.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -250,7 +250,7 @@ jobs:
- uses: actions/checkout@v3
- uses: actions/setup-node@v3
with:
node-version: "16"
node-version: "20" # change in all GH Workflows
cache: yarn
cache-dependency-path: ui/yarn.lock
- run: yarn --cwd ui install
Expand Down
6 changes: 3 additions & 3 deletions .github/workflows/release.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ jobs:
login-server: quay.io
username: ${{ secrets.QUAYIO_USERNAME }}
password: ${{ secrets.QUAYIO_PASSWORD }}

- name: Build & Push Windows Docker Images
env:
DOCKERIO_ORG: ${{ secrets.DOCKERIO_ORG }}
Expand All @@ -133,7 +133,7 @@ jobs:
-t $image_name \
-f Dockerfile.windows \
.
docker push $image_name
docker tag $image_name quay.io/$image_name
Expand Down Expand Up @@ -287,7 +287,7 @@ jobs:
- uses: actions/checkout@v3
- uses: actions/setup-node@v3
with:
node-version: "16"
node-version: "20" # change in all GH Workflows
- uses: actions/setup-go@v4
with:
go-version: "1.20"
Expand Down
5 changes: 5 additions & 0 deletions .github/workflows/snyk.yml
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,11 @@ jobs:
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
steps:
- uses: actions/checkout@v3
- uses: actions/setup-node@v3
with:
node-version: "20" # change in all GH Workflows
cache: yarn
cache-dependency-path: ui/yarn.lock
- run: yarn --cwd ui install
- name: Run Snyk to check for vulnerabilities
uses: snyk/actions/node@master
Expand Down
3 changes: 2 additions & 1 deletion .spelling
Original file line number Diff line number Diff line change
Expand Up @@ -198,6 +198,7 @@ v1.2
v1.3
v1.24
v2
v2.0
v2.10
v2.11
v2.12
Expand Down Expand Up @@ -236,6 +237,6 @@ devenv
vendored
nix.conf
LDFlags
dev
dev
vendorSha256
dependabot
4 changes: 2 additions & 2 deletions Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ COPY . .

####################################################################################################

FROM node:16-alpine as argo-ui
FROM node:20-alpine as argo-ui

RUN apk update && apk add --no-cache git

Expand All @@ -39,7 +39,7 @@ COPY api api

RUN --mount=type=cache,target=/root/.yarn \
YARN_CACHE_FOLDER=/root/.yarn JOBS=max \
NODE_OPTIONS="--max-old-space-size=2048" JOBS=max yarn --cwd ui build
NODE_OPTIONS="--openssl-legacy-provider --max-old-space-size=2048" JOBS=max yarn --cwd ui build

####################################################################################################

Expand Down
8 changes: 8 additions & 0 deletions api/jsonschema/schema.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

8 changes: 8 additions & 0 deletions api/openapi-spec/swagger.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

3 changes: 3 additions & 0 deletions cmd/argo/commands/retry.go
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,9 @@ func NewRetryCommand() *cobra.Command {
# Retry the latest workflow:
argo retry @latest
# Restart node with id 5 on successful workflow, using node-field-selector
argo retry my-wf --restart-successful --node-field-selector id=5
`,
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 && !retryOpts.hasSelector() {
Expand Down
6 changes: 4 additions & 2 deletions cmd/workflow-controller/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,7 @@ func NewRootCommand() *cobra.Command {
workflowWorkers int // --workflow-workers
workflowTTLWorkers int // --workflow-ttl-workers
podCleanupWorkers int // --pod-cleanup-workers
cronWorkflowWorkers int // --cron-workflow-workers
burst int
qps float32
namespaced bool // --namespaced
Expand Down Expand Up @@ -116,7 +117,7 @@ func NewRootCommand() *cobra.Command {
if leaderElectionOff == "true" {
log.Info("Leader election is turned off. Running in single-instance mode")
log.WithField("id", "single-instance").Info("starting leading")
go wfController.Run(ctx, workflowWorkers, workflowTTLWorkers, podCleanupWorkers)
go wfController.Run(ctx, workflowWorkers, workflowTTLWorkers, podCleanupWorkers, cronWorkflowWorkers)
go wfController.RunMetricsServer(ctx, false)
} else {
nodeID, ok := os.LookupEnv("LEADER_ELECTION_IDENTITY")
Expand Down Expand Up @@ -146,7 +147,7 @@ func NewRootCommand() *cobra.Command {
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: func(ctx context.Context) {
dummyCancel()
go wfController.Run(ctx, workflowWorkers, workflowTTLWorkers, podCleanupWorkers)
go wfController.Run(ctx, workflowWorkers, workflowTTLWorkers, podCleanupWorkers, cronWorkflowWorkers)
go wfController.RunMetricsServer(ctx, false)
},
OnStoppedLeading: func() {
Expand Down Expand Up @@ -183,6 +184,7 @@ func NewRootCommand() *cobra.Command {
command.Flags().IntVar(&workflowWorkers, "workflow-workers", 32, "Number of workflow workers")
command.Flags().IntVar(&workflowTTLWorkers, "workflow-ttl-workers", 4, "Number of workflow TTL workers")
command.Flags().IntVar(&podCleanupWorkers, "pod-cleanup-workers", 4, "Number of pod cleanup workers")
command.Flags().IntVar(&cronWorkflowWorkers, "cron-workflow-workers", 8, "Number of cron workflow workers")
command.Flags().IntVar(&burst, "burst", 30, "Maximum burst for throttle.")
command.Flags().Float32Var(&qps, "qps", 20.0, "Queries per second")
command.Flags().BoolVar(&namespaced, "namespaced", false, "run workflow-controller as namespaced mode")
Expand Down
3 changes: 3 additions & 0 deletions dev/nix/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
# Argo Nixfiles

See [Try Argo using Nix](../../docs/running-nix.md).
3 changes: 3 additions & 0 deletions docs/cli/argo_retry.md
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,9 @@ argo retry [WORKFLOW...] [flags]
argo retry @latest
# Restart node with id 5 on successful workflow, using node-field-selector
argo retry my-wf --restart-successful --node-field-selector id=5
```

### Options
Expand Down
45 changes: 45 additions & 0 deletions docs/configure-artifact-repository.md
Original file line number Diff line number Diff line change
Expand Up @@ -274,6 +274,51 @@ $ k apply -f default-artifact-repository.yaml -n demo

You can also set `createBucketIfNotPresent` to `true` to tell the artifact driver to automatically create the OSS bucket if it doesn't exist yet when saving artifacts. Note that you'll need to set additional permission for your OSS account to create new buckets.

### Alibaba Cloud OSS RRSA

If you wish to use OSS RRSA instead of passing in an `accessKey` and `secretKey`, you need to perform the following actions:

- Install [pod-identity-webhook](https://www.alibabacloud.com/help/en/ack/product-overview/ack-pod-identity-webhook) in your cluster to automatically inject the OIDC tokens and environment variables.
- Add the label `pod-identity.alibabacloud.com/injection: 'on'` to the target workflow namespace.
- Add the annotation `pod-identity.alibabacloud.com/role-name: $your_ram_role_name` to the service account of running workflow.
- Set `useSDKCreds: true` in your target artifact repository cm and remove the secret references to AK/SK.

```yaml

apiVersion: v1
kind: Namespace
metadata:
name: my-ns
labels:
pod-identity.alibabacloud.com/injection: 'on'

---
apiVersion: v1
kind: ServiceAccount
metadata:
name: my-sa
namespace: rrsa-demo
annotations:
pod-identity.alibabacloud.com/role-name: $your_ram_role_name

---
apiVersion: v1
kind: ConfigMap
metadata:
# If you want to use this config map by default, name it "artifact-repositories". Otherwise, you can provide a reference to a
# different config map in `artifactRepositoryRef.configMap`.
name: artifact-repositories
annotations:
# v3.0 and after - if you want to use a specific key, put that key into this annotation.
workflows.argoproj.io/default-artifact-repository: default-oss-artifact-repository
data:
default-oss-artifact-repository: |
oss:
endpoint: http://oss-cn-zhangjiakou-internal.aliyuncs.com
bucket: $mybucket
useSDKCreds: true
```
## Configuring Azure Blob Storage
Create an Azure Storage account and a container within that account. There are a number of
Expand Down
1 change: 1 addition & 0 deletions docs/executor_swagger.md
Original file line number Diff line number Diff line change
Expand Up @@ -2715,6 +2715,7 @@ save/load the directory appropriately.
| lifecycleRule | [OSSLifecycleRule](#o-s-s-lifecycle-rule)| `OSSLifecycleRule` | | | | |
| secretKeySecret | [SecretKeySelector](#secret-key-selector)| `SecretKeySelector` | | | | |
| securityToken | string| `string` | | | SecurityToken is the user's temporary security token. For more details, check out: https://www.alibabacloud.com/help/doc-detail/100624.htm | |
| useSDKCreds | boolean| `bool` | | | UseSDKCreds tells the driver to figure out credentials based on sdk defaults. | |



Expand Down
2 changes: 2 additions & 0 deletions docs/fields.md
Original file line number Diff line number Diff line change
Expand Up @@ -3458,6 +3458,7 @@ OSSArtifact is the location of an Alibaba Cloud OSS artifact
|`lifecycleRule`|[`OSSLifecycleRule`](#osslifecyclerule)|LifecycleRule specifies how to manage bucket's lifecycle|
|`secretKeySecret`|[`SecretKeySelector`](#secretkeyselector)|SecretKeySecret is the secret selector to the bucket's secret key|
|`securityToken`|`string`|SecurityToken is the user's temporary security token. For more details, check out: https://www.alibabacloud.com/help/doc-detail/100624.htm|
|`useSDKCreds`|`boolean`|UseSDKCreds tells the driver to figure out credentials based on sdk defaults.|

## RawArtifact

Expand Down Expand Up @@ -4200,6 +4201,7 @@ OSSArtifactRepository defines the controller configuration for an OSS artifact r
|`lifecycleRule`|[`OSSLifecycleRule`](#osslifecyclerule)|LifecycleRule specifies how to manage bucket's lifecycle|
|`secretKeySecret`|[`SecretKeySelector`](#secretkeyselector)|SecretKeySecret is the secret selector to the bucket's secret key|
|`securityToken`|`string`|SecurityToken is the user's temporary security token. For more details, check out: https://www.alibabacloud.com/help/doc-detail/100624.htm|
|`useSDKCreds`|`boolean`|UseSDKCreds tells the driver to figure out credentials based on sdk defaults.|

## S3ArtifactRepository

Expand Down
3 changes: 2 additions & 1 deletion docs/quick-start.md
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,8 @@ Before you start you need a Kubernetes cluster and `kubectl` set up to be able t

Alternatively, if you want to try out Argo Workflows and don't want to set up a Kubernetes cluster, try the [Killercoda course](training.md#hands-on).

⚠️ These instructions are intended to help you get started quickly. They are not suitable in production. For production installs, please refer to [the installation documentation](installation.md) ⚠️
!!! Warning "Development vs. Production"
These instructions are intended to help you get started quickly. They are not suitable in production. For production installs, please refer to [the installation documentation](installation.md).

## Install Argo Workflows

Expand Down
5 changes: 5 additions & 0 deletions docs/resource-template.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# Resource Template

> v2.0
See [Kubernetes Resources](walk-through/kubernetes-resources.md).
23 changes: 10 additions & 13 deletions docs/running-locally.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,22 +2,17 @@

You have two options:

1. If you're using VSCode, you use the [Dev-Container](#development-container). This takes about 7 minutes. This can also be used from the dev-container CLI.
1. Use the [Dev Container](#development-container). This takes about 7 minutes. This can be used with VSCode or with the `devcontainer` CLI.
1. Install the [requirements](#requirements) on your computer manually. This takes about 1 hour.

## Git Clone

Clone the Git repo into: `$GOPATH/src/github.com/argoproj/argo-workflows`. Any other path will mean the code
generation does not work.

## Development Container

A development container is a running Docker container with a well-defined tool/runtime stack and its prerequisites. It should be able to do everything you need to do to develop argo workflows using the development container without installing tools on your local machine. It takes quite a long time to build the container. It will run k3d inside the container so you'll have a cluster to use to test against. To communicate with services running either in other development containers or directly on the local developer machine (e.g., a database) the following URL can be used in the workflow spec: `host.docker.internal:<PORT>`. This facilitates the implementation of workflows, which need to connect to a database or an API server.
The development container should be able to do everything you need to do to develop Argo Workflows without installing tools on your local machine. It takes quite a long time to build the container. It runs `k3d` inside the container so you have a cluster to test against. To communicate with services running either in other development containers or directly on the local machine (e.g. a database), the following URL can be used in the workflow spec: `host.docker.internal:<PORT>`. This facilitates the implementation of workflows which need to connect to a database or an API server.

You can use the development container in a few different ways:

1. [Visual Studio Code](https://code.visualstudio.com/) with [Dev Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) extension lets you use a Docker container as a full-featured development environment. Open the clone of argo-workflows folder in VS code and it should offer to use the development container automatically. System requirements can be found [here](https://code.visualstudio.com/docs/remote/containers#_system-requirements). Visual Studio will allow you to forward ports to allow your external browser to access the running components.
1. [Dev-container CLI](https://github.com/devcontainers/cli). Install the tool and from the argo-workflow folder do `devcontainer up --workspace-folder .` followed by `devcontainer exec --workspace-folder . /bin/bash` to get a shell where you can start building the code. You can use your choice editor outside the docker image to edit code, the changes are mirrored inside the container. Due to a limitation in the CLI only port 8080 (the Web UI) will get exposed for you to access if you run this way. Other services are usable from the shell inside.
1. [Visual Studio Code](https://code.visualstudio.com/) with [Dev Containers extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers). Open your `argo-workflows` folder in VSCode and it should offer to use the development container automatically. VSCode will allow you to forward ports to allow your external browser to access the running components.
1. [`devcontainer` CLI](https://github.com/devcontainers/cli). Once installed, go to your `argo-workflows` folder and run `devcontainer up --workspace-folder .` followed by `devcontainer exec --workspace-folder . /bin/bash` to get a shell where you can build the code. You can use any editor outside the container to edit code; any changes will be mirrored inside the container. Due to a limitation of the CLI, only port 8080 (the Web UI) will be exposed for you to access if you run this way. Other services are usable from the shell inside.

Once you have entered the container, continue to [Developing Locally](#developing-locally).

Expand All @@ -43,6 +38,8 @@ Note:

## Requirements

Clone the Git repo into: `$GOPATH/src/github.com/argoproj/argo-workflows`. Any other path will break the code generation.

Add the following to your `/etc/hosts`:

```text
Expand All @@ -53,7 +50,7 @@ Add the following to your `/etc/hosts`:
127.0.0.1 azurite
```

To build on your own machine without using the dev-container you will need
To build on your own machine without using the Dev Container you will need:

* [Go](https://golang.org/dl/)
* [Yarn](https://classic.yarnpkg.com/en/docs/install/#mac-stable)
Expand All @@ -73,15 +70,15 @@ Alternatively, you can use [Minikube](https://github.com/kubernetes/minikube) to
Once a local Kubernetes cluster has started via `minikube start`, your kube config will use Minikube's context
automatically.

⚠️ Do not use Docker for Desktop with its embedded Kubernetes, it does not support Kubernetes RBAC (i.e. `kubectl auth can-i` always
returns `allowed`).
!!! Warning
Do not use Docker Desktop's embedded Kubernetes, it does not support Kubernetes RBAC (i.e. `kubectl auth can-i` always returns `allowed`).

## Developing locally

To start:

* The controller, so you can run workflows.
* MinIO (<http://localhost:9000>, use admin/password) so you can use artifacts:
* MinIO (<http://localhost:9000>, use admin/password) so you can use artifacts.

Run:

Expand Down
Loading

0 comments on commit d26a57c

Please sign in to comment.