Skip to content

Commit

Permalink
feat(kfp): update kfp upstream manifests to 1.0.4 (#1605)
Browse files Browse the repository at this point in the history
* feat(kfp): update kfp upstream manifests to 1.0.4

* update snapshots
  • Loading branch information
Bobgy authored Nov 3, 2020
1 parent eea545d commit 82c6f35
Show file tree
Hide file tree
Showing 142 changed files with 615 additions and 181 deletions.
2 changes: 1 addition & 1 deletion hack/pull_kfp_upstream.sh
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ set -ex

# Please edit the following version before running the script to pull new
# pipelines version.
export PIPELINES_VERSION=1.0.0
export PIPELINES_VERSION=1.0.4
export PIPELINES_SRC_REPO=https://github.com/kubeflow/pipelines.git

if [ -d pipeline/upstream ]; then
Expand Down
4 changes: 2 additions & 2 deletions pipeline/upstream/Kptfile
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ metadata:
upstream:
type: git
git:
commit: 181c35002490cf7f1b5af8c88cb8b7cf29332f2b
commit: b604c6171244cc1cd80bfdc46248eaebf5f985d6
repo: https://github.com/kubeflow/pipelines
directory: /manifests/kustomize
ref: 1.0.0
ref: 1.0.4
8 changes: 7 additions & 1 deletion pipeline/upstream/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@ To install Kubeflow Pipelines, you have several options.
- Via [GCP AI Platform UI](http://console.cloud.google.com/ai-platform/pipelines).
- Via an upcoming commandline tool.
- Via Kubectl with Kustomize, it's detailed here.
- Community maintains a repo [here](https://github.com/e2fyi/kubeflow-aws/tree/master/pipelines) for AWS.

## Install via Kustomize

Expand Down Expand Up @@ -51,6 +50,13 @@ Its storage is based on CloudSQL & GCS. It's better than others for production u

Please follow the [sample](sample/README.md) for a customized installation.

### Option-4 Install it to AWS with S3 and RDS MySQL
Its storage is based on S3 & AWS RDS. It's more natural for AWS users to use this option.

Please follow the [AWS Instructions](env/aws/README.md) for installation.

Note: Community maintains a repo [e2fyi/kubeflow-aws](https://github.com/e2fyi/kubeflow-aws/tree/master/pipelines) for AWS.

## Uninstall

If the installation is based on CloudSQL/GCS, after the uninstall, the data is still there,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ data:
{
namespace: $(kfp-namespace),
executorImage: gcr.io/ml-pipeline/argoexec:v2.7.5-license-compliance,
containerRuntimeExecutor: $(kfp-container-runtime-executor),
artifactRepository:
{
s3: {
Expand Down
1 change: 1 addition & 0 deletions pipeline/upstream/base/argo/workflow-controller-role.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ rules:
- ""
resources:
- persistentvolumeclaims
- events
verbs:
- create
- delete
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,5 +11,7 @@ rules:
- secrets
verbs:
- create
- delete
- get
- patch
- list
Original file line number Diff line number Diff line change
Expand Up @@ -21,4 +21,15 @@ rules:
- mutatingwebhookconfigurations
verbs:
- create
- delete
- get
- list
- patch
- apiGroups:
- certificates.k8s.io
resources:
- signers
resourceNames:
- kubernetes.io/*
verbs:
- approve
2 changes: 1 addition & 1 deletion pipeline/upstream/base/cache-deployer/kustomization.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -6,4 +6,4 @@ resources:
- cache-deployer-deployment.yaml
images:
- name: gcr.io/ml-pipeline/cache-deployer
newTag: 1.0.0
newTag: 1.0.4
2 changes: 1 addition & 1 deletion pipeline/upstream/base/cache/kustomization.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,4 +8,4 @@ resources:
- cache-sa.yaml
images:
- name: gcr.io/ml-pipeline/cache-server
newTag: 1.0.0
newTag: 1.0.4
7 changes: 7 additions & 0 deletions pipeline/upstream/base/kustomization.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -46,5 +46,12 @@ vars:
apiVersion: v1
fieldref:
fieldpath: data.bucketName
- name: kfp-container-runtime-executor
objref:
kind: ConfigMap
name: pipeline-install-config
apiVersion: v1
fieldref:
fieldpath: data.containerRuntimeExecutor
configurations:
- params.yaml
2 changes: 1 addition & 1 deletion pipeline/upstream/base/metadata/kustomization.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,4 +8,4 @@ resources:
- metadata-envoy-service.yaml
images:
- name: gcr.io/ml-pipeline/metadata-envoy
newTag: 1.0.0
newTag: 1.0.4
5 changes: 3 additions & 2 deletions pipeline/upstream/base/metadata/metadata-grpc-deployment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ spec:
spec:
containers:
- name: container
image: gcr.io/tfx-oss-public/ml_metadata_store_server:0.21.1
image: gcr.io/tfx-oss-public/ml_metadata_store_server:0.22.1
env:
- name: DBCONFIG_USER
valueFrom:
Expand Down Expand Up @@ -49,7 +49,8 @@ spec:
"--mysql_config_host=$(MYSQL_HOST)",
"--mysql_config_port=$(MYSQL_PORT)",
"--mysql_config_user=$(DBCONFIG_USER)",
"--mysql_config_password=$(DBCONFIG_PASSWORD)"
"--mysql_config_password=$(DBCONFIG_PASSWORD)",
"--enable_database_upgrade=true"
]
ports:
- name: grpc-api
Expand Down
9 changes: 8 additions & 1 deletion pipeline/upstream/base/params.env
Original file line number Diff line number Diff line change
@@ -1,8 +1,15 @@
appName=pipeline
appVersion=1.0.0
appVersion=1.0.4
dbHost=mysql
dbPort=3306
mlmdDb=metadb
cacheDb=cachedb
pipelineDb=mlpipeline
bucketName=mlpipeline


## containerRuntimeExecutor: A workflow executor is a process
## that allows Argo to perform certain actions like monitoring pod logs,
## artifacts, container lifecycles, etc..
## Doc: https://github.com/argoproj/argo/blob/master/docs/workflow-executors.md
containerRuntimeExecutor=docker
2 changes: 0 additions & 2 deletions pipeline/upstream/base/pipeline-application.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -39,8 +39,6 @@ spec:
kind: RoleBinding
- group: v1
kind: Service
- group: v1
kind: PersistentVolumeClaim
- group: v1
kind: ConfigMap
- group: v1
Expand Down
12 changes: 6 additions & 6 deletions pipeline/upstream/base/pipeline/kustomization.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -36,14 +36,14 @@ resources:
- viewer-sa.yaml
images:
- name: gcr.io/ml-pipeline/api-server
newTag: 1.0.0
newTag: 1.0.4
- name: gcr.io/ml-pipeline/persistenceagent
newTag: 1.0.0
newTag: 1.0.4
- name: gcr.io/ml-pipeline/scheduledworkflow
newTag: 1.0.0
newTag: 1.0.4
- name: gcr.io/ml-pipeline/frontend
newTag: 1.0.0
newTag: 1.0.4
- name: gcr.io/ml-pipeline/viewer-crd-controller
newTag: 1.0.0
newTag: 1.0.4
- name: gcr.io/ml-pipeline/visualization-server
newTag: 1.0.0
newTag: 1.0.4
Original file line number Diff line number Diff line change
Expand Up @@ -7,4 +7,4 @@ resources:
- metadata-writer-sa.yaml
images:
- name: gcr.io/ml-pipeline/metadata-writer
newTag: 1.0.0
newTag: 1.0.4
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,8 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: TTL_SECONDS_AFTER_WORKFLOW_FINISH
value: "86400"
image: gcr.io/ml-pipeline/persistenceagent:dummy
imagePullPolicy: IfNotPresent
name: ml-pipeline-persistenceagent
Expand Down
3 changes: 3 additions & 0 deletions pipeline/upstream/env/aws/OWNERS
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
approvers:
- Jeffwan
- PatrickXYS
82 changes: 82 additions & 0 deletions pipeline/upstream/env/aws/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
# Sample installation

1. Create an EKS cluster and setup kubectl context

Using configuration file to simplify EKS cluster creation process:
```
apiVersion: eksctl.io/v1alpha5
kind: ClusterConfig
metadata:
name: kfworkshop
region: us-west-2
version: '1.17'
# If your region has multiple availability zones, you can specify 3 of them.
availabilityZones: ["us-west-2b", "us-west-2c", "us-west-2d"]
# NodeGroup holds all configuration attributes that are specific to a nodegroup
# You can have several node groups in your cluster.
nodeGroups:
- name: cpu-nodegroup
instanceType: m5.xlarge
desiredCapacity: 2
minSize: 0
maxSize: 4
volumeSize: 50
# ssh:
# allow: true
# publicKeyPath: '~/.ssh/id_rsa.pub'
# Example of GPU node group
- name: Tesla-V100
instanceType: p3.8xlarge
# Make sure the availability zone here is one of cluster availability zones.
availabilityZones: ["us-west-2b"]
desiredCapacity: 0
minSize: 0
maxSize: 4
volumeSize: 50
# ssh:
# allow: true
# publicKeyPath: '~/.ssh/id_rsa.pub'
```
Run this command to create EKS cluster
```
eksctl create cluster -f cluster.yaml
```

2. Prepare S3

Create S3 bucket. [Console](https://console.aws.amazon.com/s3/home).

Run this command to create an S3 bucket, changing `<YOUR_S3_BUCKET_NAME>` to your preferred S3 bucket name.

```
export S3_BUCKET=<YOUR_S3_BUCKET_NAME>
export AWS_REGION=us-west-2
aws s3 mb s3://$S3_BUCKET --region $AWS_REGION
```

3. Prepare RDS

Follow this [doc](https://www.kubeflow.org/docs/aws/rds/#deploy-amazon-rds-mysql-in-your-environment) to set up an AWS RDS instance.

4. Customize your values
- Edit [params.env](params.env), [secret.env](secret.env) and [minio-artifact-secret-patch.env](minio-artifact-secret-patch.env)

5. Install

```
kubectl apply -k ../../cluster-scoped-resources
kubectl wait crd/applications.app.k8s.io --for condition=established --timeout=60s
kubectl apply -k ./
# If the above command failed (e.g. you used a wrong value), delete the resources, fix the value, and apply again:
# kubectl delete -k ./
kubectl wait applications/mypipeline -n kubeflow --for condition=Ready --timeout=1800s
kubectl port-forward -n kubeflow svc/ml-pipeline-ui 8080:80
```

Now you can access the Kubeflow Pipelines UI at `http://localhost:8080`.
61 changes: 61 additions & 0 deletions pipeline/upstream/env/aws/aws-configuration-patch.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: ml-pipeline-ui
spec:
template:
metadata:
labels:
app: ml-pipeline-ui
spec:
volumes:
- name: config-volume
configMap:
name: ml-pipeline-ui-configmap
containers:
- name: ml-pipeline-ui
env:
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: mlpipeline-minio-artifact
key: accesskey
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: mlpipeline-minio-artifact
key: secretkey

---
apiVersion: apps/v1
kind: Deployment
metadata:
name: ml-pipeline
spec:
template:
metadata:
labels:
app: ml-pipeline
spec:
containers:
- env:
- name: OBJECTSTORECONFIG_SECURE
value: "true"
- name: OBJECTSTORECONFIG_BUCKETNAME
valueFrom:
configMapKeyRef:
name: pipeline-install-config
key: bucketName
- name: OBJECTSTORECONFIG_HOST
valueFrom:
configMapKeyRef:
name: pipeline-install-config
key: minioServiceHost
- name: OBJECTSTORECONFIG_REGION
valueFrom:
configMapKeyRef:
name: pipeline-install-config
key: minioServiceRegion
- name: OBJECTSTORECONFIG_PORT
value: ""
name: ml-pipeline-api-server
23 changes: 23 additions & 0 deletions pipeline/upstream/env/aws/config
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
{
namespace: $(kfp-namespace),
executorImage: gcr.io/ml-pipeline/argoexec:v2.7.5-license-compliance,
containerRuntimeExecutor: $(kfp-container-runtime-executor),
artifactRepository:
{
s3: {
bucket: $(kfp-artifact-bucket-name),
keyPrefix: artifacts,
endpoint: s3.amazonaws.com,
insecure: true,
accessKeySecret: {
name: mlpipeline-minio-artifact,
key: accesskey
},
secretKeySecret: {
name: mlpipeline-minio-artifact,
key: secretkey
}
},
archiveLogs: true
}
}
Loading

0 comments on commit 82c6f35

Please sign in to comment.