From cf2058987321e9cc3dd2b7a8a3d4fd91ddbdeccd Mon Sep 17 00:00:00 2001
From: Thibault CAMALON
Date: Mon, 19 Jun 2023 17:00:09 +0200
Subject: [PATCH] chore: use buildkit instead of kaniko

fix(image_builder): add f-string formatting
chore(charts): finalize deployment-registry-prepopulate
refactor(settings): rename environment variables
fix(settings): TASK key
---
 backend/backend/settings/common.py            |   6 +-
 .../substrapp/compute_tasks/image_builder.py  | 129 +++++++++++++-----
 backend/substrapp/kubernetes_utils.py         |   1 +
 charts/substra-backend/Chart.lock             |   2 +-
 .../templates/configmap-settings.yaml         |   4 +-
 .../deployment-registry-prepopulate.yaml      |  40 ++++--
 .../deployment-scheduler-worker.yaml          |   2 +
 .../templates/statefulset-worker.yaml         |  24 ++--
 charts/substra-backend/values.yaml            |  54 ++++----
 docker/substra-backend/Dockerfile             |   8 +-
 10 files changed, 174 insertions(+), 96 deletions(-)

diff --git a/backend/backend/settings/common.py b/backend/backend/settings/common.py
index 6df283ab8..56f6143a8 100644
--- a/backend/backend/settings/common.py
+++ b/backend/backend/settings/common.py
@@ -241,9 +241,9 @@ def build_broker_url(user: str, password: str, host: str, port: str) -> str:
     "CACHE_DOCKER_IMAGES": to_bool(os.environ.get("TASK_CACHE_DOCKER_IMAGES", False)),
     "CHAINKEYS_ENABLED": to_bool(os.environ.get("TASK_CHAINKEYS_ENABLED", False)),
     "LIST_WORKSPACE": to_bool(os.environ.get("TASK_LIST_WORKSPACE", True)),
-    "KANIKO_MIRROR": to_bool(os.environ.get("KANIKO_MIRROR", False)),
-    "KANIKO_IMAGE": os.environ.get("KANIKO_IMAGE"),
-    "KANIKO_DOCKER_CONFIG_SECRET_NAME": os.environ.get("KANIKO_DOCKER_CONFIG_SECRET_NAME"),
+    "BUILDER_MIRROR": to_bool(os.environ.get("BUILDER_MIRROR", False)),
+    "BUILDER_IMAGE": os.environ.get("BUILDER_IMAGE"),
+    "BUILDER_DOCKER_CONFIG_SECRET_NAME": os.environ.get("BUILDER_DOCKER_CONFIG_SECRET_NAME"),
     "COMPUTE_POD_STARTUP_TIMEOUT_SECONDS": int(os.environ.get("COMPUTE_POD_STARTUP_TIMEOUT_SECONDS", 300)),
 }

diff --git a/backend/substrapp/compute_tasks/image_builder.py b/backend/substrapp/compute_tasks/image_builder.py
index 691788911..11cfdc4e5 100644
--- a/backend/substrapp/compute_tasks/image_builder.py
+++ b/backend/substrapp/compute_tasks/image_builder.py
@@ -31,14 +31,14 @@
 REGISTRY = settings.REGISTRY
 REGISTRY_SCHEME = settings.REGISTRY_SCHEME
 NAMESPACE = settings.NAMESPACE
-KANIKO_MIRROR = settings.TASK["KANIKO_MIRROR"]
-KANIKO_IMAGE = settings.TASK["KANIKO_IMAGE"]
-KANIKO_DOCKER_CONFIG_SECRET_NAME = settings.TASK["KANIKO_DOCKER_CONFIG_SECRET_NAME"]
-KANIKO_DOCKER_CONFIG_VOLUME_NAME = "docker-config"
+BUILDER_MIRROR = settings.TASK["BUILDER_MIRROR"]
+BUILDER_IMAGE = settings.TASK["BUILDER_IMAGE"]
+BUILDER_DOCKER_CONFIG_SECRET_NAME = settings.TASK["BUILDER_DOCKER_CONFIG_SECRET_NAME"]
+BUILDER_DOCKER_CONFIG_VOLUME_NAME = "docker-config"
 CELERY_WORKER_CONCURRENCY = settings.CELERY_WORKER_CONCURRENCY
 SUBTUPLE_TMP_DIR = settings.SUBTUPLE_TMP_DIR
 MAX_IMAGE_BUILD_TIME = 3 * 60 * 60  # 3 hours
-KANIKO_CONTAINER_NAME = "kaniko"
+BUILDER_CONTAINER_NAME = "buildkit"
 HOSTNAME = settings.HOSTNAME
@@ -115,11 +115,11 @@ def _get_entrypoint_from_dockerfile(dockerfile_dir: str) -> list[str]:
     raise compute_task_errors.BuildError("Invalid Dockerfile: Cannot find ENTRYPOINT")
 
 
-def _delete_kaniko_pod(create_pod: bool, k8s_client: kubernetes.client.CoreV1Api, pod_name: str) -> str:
+def _delete_builder_pod(create_pod: bool, k8s_client: kubernetes.client.CoreV1Api, pod_name: str) -> str:
     logs = ""
     if create_pod:
-        logs = get_pod_logs(k8s_client, pod_name, KANIKO_CONTAINER_NAME, ignore_pod_not_found=True)
+        logs = get_pod_logs(k8s_client, pod_name, BUILDER_CONTAINER_NAME, ignore_pod_not_found=True)
         delete_pod(k8s_client, pod_name)
     logger.info(logs or "", pod_name=pod_name)
     return logs
@@ -156,14 +156,14 @@ def _build_container_image(path: str, tag: str) -> None:
             )
             return
 
-        logs = _delete_kaniko_pod(create_pod, k8s_client, pod_name)
+        logs = _delete_builder_pod(create_pod, k8s_client, pod_name)
 
         if isinstance(e, exceptions.PodTimeoutError):
             raise compute_task_errors.BuildRetryError(logs) from e
         else:  # exceptions.PodError or other
             raise compute_task_errors.BuildError(logs) from e
 
-    _delete_kaniko_pod(create_pod, k8s_client, pod_name)
+    _delete_builder_pod(create_pod, k8s_client, pod_name)
 
 
 def _assert_dockerfile_exist(dockerfile_path):
@@ -185,6 +185,7 @@ def _build_pod(dockerfile_mount_path: str, image_tag: str) -> kubernetes.client.V1Pod:
                 Label.PodType: "image-build",
                 Label.Component: Label.Component_Compute,
             },
+            annotations={f"container.apparmor.security.beta.kubernetes.io/{BUILDER_CONTAINER_NAME}": "unconfined"},
         ),
         spec=pod_spec,
     )
@@ -192,7 +193,7 @@ def _build_pod(dockerfile_mount_path: str, image_tag: str) -> kubernetes.client.V1Pod:
 
 def _build_pod_name(image_tag: str) -> str:
     dns_1123_compliant_tag = image_tag.split("/")[-1].replace("_", "-")
-    return f"kaniko-{dns_1123_compliant_tag}"
+    return f"{BUILDER_CONTAINER_NAME}-{dns_1123_compliant_tag}"
 
 
 def _build_pod_spec(dockerfile_mount_path: str, image_tag: str) -> kubernetes.client.V1PodSpec:
@@ -215,13 +216,23 @@ def _build_pod_spec(dockerfile_mount_path: str, image_tag: str) -> kubernetes.client.V1PodSpec:
         persistent_volume_claim=kubernetes.client.V1PersistentVolumeClaimVolumeSource(claim_name=dockerfile_pvc_name),
     )
 
-    volumes = [cache, dockerfile]
+    # Should not be needed unless using Container-Optimized OS from Google:
+    # https://github.com/moby/buildkit/blob/master/docs/rootless.md#container-optimized-os-from-google
+    buildkit_daemon = kubernetes.client.V1Volume(
+        name="buildkit-daemon", empty_dir=kubernetes.client.V1EmptyDirVolumeSource()
+    )
+    # Should not be needed once the runAsUser / runAsGroup issue is addressed (see `deployment-scheduler-worker.yaml`#L32-L43)
+    buildkitd_tmp = kubernetes.client.V1Volume(
+        name="buildkit-tmp", empty_dir=kubernetes.client.V1EmptyDirVolumeSource()
+    )
+
+    volumes = [dockerfile, cache, buildkit_daemon, buildkitd_tmp]
 
-    if KANIKO_DOCKER_CONFIG_SECRET_NAME:
+    if BUILDER_DOCKER_CONFIG_SECRET_NAME:
         docker_config = kubernetes.client.V1Volume(
-            name=KANIKO_DOCKER_CONFIG_VOLUME_NAME,
+            name=BUILDER_DOCKER_CONFIG_VOLUME_NAME,
             secret=kubernetes.client.V1SecretVolumeSource(
-                secret_name=KANIKO_DOCKER_CONFIG_SECRET_NAME,
+                secret_name=BUILDER_DOCKER_CONFIG_SECRET_NAME,
                 items=[kubernetes.client.V1KeyToPath(key=".dockerconfigjson", path="config.json")],
             ),
         )
@@ -259,52 +270,96 @@ def _build_container(dockerfile_mount_path: str, image_tag: str) -> kubernetes.client.V1Container:
     # https://github.com/moby/moby/blob/master/oci/caps/defaults.go
     # https://man7.org/linux/man-pages/man7/capabilities.7.html
     capabilities = ["CHOWN", "SETUID", "SETGID", "FOWNER", "DAC_OVERRIDE", "SETFCAP"]
-    container_security_context = get_security_context(root=True, capabilities=capabilities)
+    # BuildKit runs rootless, so no root security context is required.
+    container_security_context = get_security_context(root=False, capabilities=capabilities)
 
+    cmd = _build_container_cmd()
     args = _build_container_args(dockerfile_mount_path, image_tag)
+    env = _build_container_env()
+
     dockerfile_mount_subpath = dockerfile_mount_path.split("/subtuple/")[-1]
 
     dockerfile = kubernetes.client.V1VolumeMount(
         name="dockerfile", mount_path=dockerfile_mount_path, sub_path=dockerfile_mount_subpath, read_only=True
     )
     cache = kubernetes.client.V1VolumeMount(name="cache", mount_path="/cache", read_only=True)
-    volume_mounts = [dockerfile, cache]
 
-    if KANIKO_DOCKER_CONFIG_SECRET_NAME:
+    # Should not be needed unless using Container-Optimized OS from Google:
+    # https://github.com/moby/buildkit/blob/master/docs/rootless.md#container-optimized-os-from-google
+    # buildkit_d = kubernetes.client.V1VolumeMount(
+    #     name="buildkit-daemon", mount_path="/home/user/.local/share/buildkit"
+    # )
+    # Should not be needed once the runAsUser / runAsGroup issue is addressed (see `deployment-scheduler-worker.yaml`#L32-L43)
+    # buildkitd_tmp = kubernetes.client.V1VolumeMount(name="buildkit-tmp", mount_path="/home/user/.local/tmp")
+
+    volume_mounts = [
+        dockerfile,
+        cache,
+        # buildkit_d,
+        # buildkitd_tmp,
+    ]
+
+    if BUILDER_DOCKER_CONFIG_SECRET_NAME:
         docker_config = kubernetes.client.V1VolumeMount(
-            name=KANIKO_DOCKER_CONFIG_VOLUME_NAME, mount_path="/kaniko/.docker"
+            name=BUILDER_DOCKER_CONFIG_VOLUME_NAME, mount_path=f"/{BUILDER_CONTAINER_NAME}/.docker"
         )
         volume_mounts.append(docker_config)
 
     return kubernetes.client.V1Container(
-        name=KANIKO_CONTAINER_NAME,
-        image=KANIKO_IMAGE,
-        command=None,
+        name=BUILDER_CONTAINER_NAME,
+        image=BUILDER_IMAGE,
+        command=cmd,
         args=args,
         volume_mounts=volume_mounts,
         security_context=container_security_context,
+        env=env,
     )
 
 
+def _build_container_cmd() -> list[str]:
+    return ["buildctl-daemonless.sh"]
+
+
 def _build_container_args(dockerfile_mount_path: str, image_tag: str) -> list[str]:
-    dockerfile_fullpath = os.path.join(dockerfile_mount_path, "Dockerfile")
+    # The whole image reference, tag included, must stay inside the double
+    # quotes: buildctl parses the --output value as CSV, and the quotes keep
+    # the reference (which contains ':' for the registry port and the tag)
+    # in a single field.
+    full_image_name = f'"name={REGISTRY}/{USER_IMAGE_REPOSITORY}:{image_tag}"'
+
     args = [
-        f"--dockerfile={dockerfile_fullpath}",
-        f"--context=dir://{dockerfile_mount_path}",
-        f"--destination={REGISTRY}/{USER_IMAGE_REPOSITORY}:{image_tag}",
-        "--cache=true",
-        "--log-timestamp=true",
-        "--snapshotMode=redo",
-        "--push-retry=3",
-        "--cache-copy-layers",
-        "--log-format=text",
-        f"--verbosity={('debug' if settings.LOG_LEVEL == 'DEBUG' else 'info')}",
+        "build",
+        "--frontend",
+        "dockerfile.v0",
+        "--local",
+        f"context={dockerfile_mount_path}",
+        "--local",
+        f"dockerfile={dockerfile_mount_path}",
+        "--output",
     ]
+
+    output_arg = f"type=image,{full_image_name},push=true"
     if REGISTRY_SCHEME == "http":
-        args.append("--insecure")
+        output_arg = ",".join((output_arg, "registry.insecure=true"))
+
+    args.append(output_arg)
+
+    # Not yet tested:
+    # cache_ref = f"{REGISTRY}/{USER_IMAGE_REPOSITORY}:buildcache"
+    # args.extend(["--export-cache", f"type=registry,ref={cache_ref}",
+    #              "--import-cache", f"type=registry,ref={cache_ref}"])
 
-    if KANIKO_MIRROR:
-        args.append(f"--registry-mirror={REGISTRY}")
-        if REGISTRY_SCHEME == "http":
-            args.append("--insecure-pull")
+    if BUILDER_MIRROR:
+        # kaniko's --registry-mirror and --insecure-pull flags have no direct
+        # buildctl equivalent: with BuildKit, registry mirrors are configured
+        # on the daemon side (buildkitd.toml), so nothing is appended here.
+        # Not validated yet.
+        pass
+
     return args
 
 
+def _build_container_env() -> list[kubernetes.client.V1EnvVar]:
+    # --oci-worker-no-process-sandbox is a buildkitd flag: it is passed to the
+    # daemon through BUILDKITD_FLAGS rather than as a buildctl argument.
+    return [kubernetes.client.V1EnvVar(name="BUILDKITD_FLAGS", value="--oci-worker-no-process-sandbox")]
diff --git a/backend/substrapp/kubernetes_utils.py b/backend/substrapp/kubernetes_utils.py
index ad76f7070..d99706e47 100644
--- a/backend/substrapp/kubernetes_utils.py
+++ b/backend/substrapp/kubernetes_utils.py
@@ -49,6 +49,9 @@ def get_security_context(root: bool = False, capabilities: list[str] = None) ->
         privileged=False,
         allow_privilege_escalation=False,
         capabilities=kubernetes.client.V1Capabilities(drop=["ALL"], add=capabilities),
+        # Required by rootless BuildKit; note this disables seccomp filtering
+        # for every container using this security context, not only the builder.
+        seccomp_profile=kubernetes.client.V1SeccompProfile(type="Unconfined"),
     )
 
     if root:
diff --git a/charts/substra-backend/Chart.lock b/charts/substra-backend/Chart.lock
index ab9783a01..201c990f1 100644
--- a/charts/substra-backend/Chart.lock
+++ b/charts/substra-backend/Chart.lock
@@ -12,4 +12,4 @@ dependencies:
   repository: https://charts.bitnami.com/bitnami
   version: 11.6.5
 digest: sha256:b561368b783d0059338aded55c7649355a5f341d8d61e72bfcd63b757371e561
-generated: "2022-08-03T14:41:27.225387+02:00"
+generated: "2023-06-26T15:00:03.019974+02:00"
diff --git a/charts/substra-backend/templates/configmap-settings.yaml b/charts/substra-backend/templates/configmap-settings.yaml
index 9a3b2ffd0..fda08c875 100644
--- a/charts/substra-backend/templates/configmap-settings.yaml
+++ b/charts/substra-backend/templates/configmap-settings.yaml
@@ -16,8 +16,8 @@ data:
   COMPUTE_POD_RUN_AS_GROUP: {{ .Values.worker.computePod.securityContext.runAsGroup | quote }}
   COMPUTE_POD_FS_GROUP: {{ .Values.worker.computePod.securityContext.fsGroup | quote }}
 
-  KANIKO_IMAGE: {{ include "common.images.name" .Values.kaniko.image }}
-  KANIKO_MIRROR: {{ .Values.kaniko.mirror | quote }}
+  BUILDER_IMAGE: {{ include "common.images.name" .Values.builder.image }}
+  BUILDER_MIRROR: {{ .Values.builder.mirror | quote }}
 
   OBJECTSTORE_URL: {{ .Release.Name }}-minio:9000
diff --git a/charts/substra-backend/templates/deployment-registry-prepopulate.yaml b/charts/substra-backend/templates/deployment-registry-prepopulate.yaml
index a8c16ec74..8f4773dda 100644
--- a/charts/substra-backend/templates/deployment-registry-prepopulate.yaml
+++ b/charts/substra-backend/templates/deployment-registry-prepopulate.yaml
@@ -24,22 +24,40 @@ spec:
       - name: wait-registry
         image: jwilder/dockerize:0.6.1
         command: ['dockerize', '-wait', 'tcp://{{ $.Release.Name }}-docker-registry:5000']
-      - name: kaniko
-        image: {{ include "common.images.name" $.Values.kaniko.image }}
-        args: ["--context=/docker-context",
-               {{- if .dstImage }}
-               "--destination={{ $.Release.Name }}-docker-registry:5000/{{ .dstImage }}",
-               {{- else }}
-               "--destination={{ $.Release.Name }}-docker-registry:5000/{{ .image }}",
-               {{ end }}
-               "--insecure",
-               "--verbosity=debug"]
+      - name: {{ $.Values.builder.name }}
+        image: {{ include "common.images.name" $.Values.builder.image }}
+        command: ['buildctl-daemonless.sh', 'build']
+        args: [
+          '--frontend',
+          'dockerfile.v0',
+          '--local',
+          'context=/docker-context',
+          '--local',
+          'dockerfile=/docker-context',
+          '--output',
+          {{- if .dstImage }}
+          'type=image,"name={{ $.Release.Name }}-docker-registry:5000/{{ .dstImage }}",push=true,registry.insecure=true',
+          # Can be used for cache optimization: https://github.com/moby/buildkit/tree/master#export-cache
+          # Currently both {{ .dstImage }} and {{ .image }} already contain the ':' tag separator,
+          # so the repository and the tag would have to be split (or another cache I/O
+          # strategy found) before these options can work:
+          # '--export-cache type=registry,ref={{ $.Release.Name }}-docker-registry:5000/{{ .dstImage }}:buildcache',
+          # '--import-cache type=registry,ref={{ $.Release.Name }}-docker-registry:5000/{{ .dstImage }}:buildcache',
+          {{- else }}
+          'type=image,"name={{ $.Release.Name }}-docker-registry:5000/{{ .image }}",push=true,registry.insecure=true',
+          # '--export-cache type=registry,ref={{ $.Release.Name }}-docker-registry:5000/{{ .image }}:buildcache',
+          # '--import-cache type=registry,ref={{ $.Release.Name }}-docker-registry:5000/{{ .image }}:buildcache',
+          {{ end }}
+        ]
+        env:
+          - name: "BUILDKITD_FLAGS"
+            value: "--oci-worker-no-process-sandbox"
         volumeMounts:
         - name: dockerfile
           mountPath: /docker-context
       {{- if .dockerConfigSecretName }}
         - name: docker-config
-          mountPath: /kaniko/.docker
+          mountPath: '/{{ $.Values.builder.name }}/.docker'
       {{- end }}
       containers:
       - image: gcr.io/google-containers/pause:3.2
diff --git a/charts/substra-backend/templates/deployment-scheduler-worker.yaml b/charts/substra-backend/templates/deployment-scheduler-worker.yaml
index 861049361..f11385db1 100644
--- a/charts/substra-backend/templates/deployment-scheduler-worker.yaml
+++ b/charts/substra-backend/templates/deployment-scheduler-worker.yaml
@@ -29,6 +29,8 @@ spec:
     spec:
       {{- if .Values.schedulerWorker.podSecurityContext.enabled }}
       securityContext:
+        # fsGroup, runAsUser and runAsGroup must be 1000 to match the UID/GID
+        # mapped by rootless BuildKit: https://github.com/moby/buildkit/blob/master/docs/rootless.md#change-uidgid
        fsGroup: {{ .Values.schedulerWorker.podSecurityContext.fsGroup }}
        runAsUser: {{ .Values.schedulerWorker.podSecurityContext.runAsUser }}
        runAsGroup: {{ .Values.schedulerWorker.podSecurityContext.runAsGroup }}
diff --git a/charts/substra-backend/templates/statefulset-worker.yaml b/charts/substra-backend/templates/statefulset-worker.yaml
index 05efc9b61..6c50de04a 100644
--- a/charts/substra-backend/templates/statefulset-worker.yaml
+++ b/charts/substra-backend/templates/statefulset-worker.yaml
@@ -79,12 +79,12 @@ spec:
         - mountPath: /tmp/certs/
           name: ssl-certs
       {{- end }}
-      {{- if .Values.kaniko.cache.warmer.cachedImages }}
-      - name: kaniko-cache-warmer
-        image: {{ include "common.images.name" .Values.kaniko.cache.warmer.image }}
+      {{- if .Values.builder.cache.warmer.cachedImages }}
+      - name: '{{ $.Values.builder.name }}-cache-warmer'
+        image: {{ include "common.images.name" .Values.builder.cache.warmer.image }}
         args:
         - "--cache-dir=/cache"
-        {{- range .Values.kaniko.cache.warmer.cachedImages }}
+        {{- range .Values.builder.cache.warmer.cachedImages }}
         - "--image={{ . }}"
}}" {{- end }} - "--verbosity=debug" @@ -92,9 +92,9 @@ spec: - name: docker-cache mountPath: /cache readOnly: False - {{- if .Values.kaniko.dockerConfigSecretName }} + {{- if .Values.builder.dockerConfigSecretName }} - name: docker-config - mountPath: /kaniko/.docker + mountPath: '{{ $.Values.builder.name }}/.docker' {{- end }} {{- end}} - name: wait-postgresql @@ -153,8 +153,8 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName - - name: KANIKO_DOCKER_CONFIG_SECRET_NAME - value: {{ .Values.kaniko.dockerConfigSecretName | quote }} + - name: BUILDER_DOCKER_CONFIG_SECRET_NAME + value: {{ .Values.builder.dockerConfigSecretName | quote }} - name: COMPUTE_POD_MAX_STARTUP_WAIT_SECONDS value: {{ .Values.worker.computePod.maxStartupWaitSeconds | quote }} - name: OBJECTSTORE_URL @@ -208,10 +208,10 @@ spec: secretName: {{ .Values.orchestrator.tls.mtls.clientCertificate }} {{ end }} {{ end }} - {{- if .Values.kaniko.dockerConfigSecretName }} + {{- if .Values.builder.dockerConfigSecretName }} - name: docker-config secret: - secretName: {{ .Values.kaniko.dockerConfigSecretName }} + secretName: {{ .Values.builder.dockerConfigSecretName }} items: - key: .dockerconfigjson path: config.json @@ -254,8 +254,8 @@ spec: name: docker-cache spec: accessModes: [ "ReadWriteOnce" ] - {{ include "common.storage.class" .Values.kaniko.cache.persistence }} + {{ include "common.storage.class" .Values.builder.cache.persistence }} resources: requests: - storage: {{ .Values.kaniko.cache.persistence.size }} + storage: {{ .Values.builder.cache.persistence.size }} {{- end }} diff --git a/charts/substra-backend/values.yaml b/charts/substra-backend/values.yaml index 7b6ccb4b2..ebef887eb 100644 --- a/charts/substra-backend/values.yaml +++ b/charts/substra-backend/values.yaml @@ -80,9 +80,9 @@ server: ## podSecurityContext: enabled: true - runAsUser: 1001 - runAsGroup: 1001 - fsGroup: 1001 + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 service: ## @param server.service.type Kubernetes Service type @@ -299,9 +299,9 @@ worker: ## podSecurityContext: enabled: true - runAsUser: 1001 - runAsGroup: 1001 - fsGroup: 1001 + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 ## @param worker.resources Worker container resources requests and limits ## resources: {} @@ -332,9 +332,9 @@ worker: ## @param worker.computePod.securityContext.runAsGroup Set the group for the Compute pod ## securityContext: - fsGroup: 1001 - runAsUser: 1001 - runAsGroup: 1001 + fsGroup: 1000 + runAsUser: 1000 + runAsGroup: 1000 events: ## @param worker.events.enabled Enable event service ## @@ -358,9 +358,9 @@ worker: ## podSecurityContext: enabled: true - runAsUser: 1001 - runAsGroup: 1001 - fsGroup: 1001 + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 ## @param worker.events.nodeSelector Node labels for pod assignment ## nodeSelector: {} @@ -423,9 +423,9 @@ schedulerWorker: ## podSecurityContext: enabled: true - runAsUser: 1001 - runAsGroup: 1001 - fsGroup: 1001 + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 ## @section Celery task scheduler settings @@ -467,9 +467,9 @@ scheduler: ## podSecurityContext: enabled: true - runAsUser: 1001 - runAsGroup: 1001 - fsGroup: 1001 + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 ## @section Substra container registry settings ## @@ -524,9 +524,9 @@ api: ## podSecurityContext: enabled: true - runAsUser: 1001 - runAsGroup: 1001 - fsGroup: 1001 + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 ## @param api.events.nodeSelector Node labels for pod assignment ## nodeSelector: {} @@ -589,15 +589,17 @@ 
 
-## @section Kaniko settings
+## @section BuildKit builder settings
 ##
-kaniko:
-  ## @param kaniko.image.registry Kaniko image registry
-  ## @param kaniko.image.repository Kaniko image repository
-  ## @param kaniko.image.tag Kaniko image tag
+builder:
+  ## @param builder.name Name of the builder container
+  ## @param builder.image.repository BuildKit image repository
+  ## @param builder.image.tag BuildKit image tag
   ##
+  name: buildkit
   image:
-    registry: gcr.io
-    repository: kaniko-project/executor
-    tag: v1.8.1
+    repository: moby/buildkit
+    tag: master-rootless
+    # ARM64 digest, if ever needed:
+    # tag: master-rootless@sha256:cb63454a66f5bba35eba3926962fb5a18912aa3e57629f69c714a0cd303241ce
-  ## @param kaniko.mirror If set to `true` pull base images from the local registry.
+  ## @param builder.mirror If set to `true` pull base images from the local registry.
   ##
   mirror: false
@@ -609,8 +611,8 @@ kaniko:
-  ## @param kaniko.cache.warmer.image.registry Kaniko cache warmer registry
-  ## @param kaniko.cache.warmer.image.repository Kaniko cache warmer repository
-  ## @param kaniko.cache.warmer.image.tag Kaniko cache warmer image tag
-  ##
+  ## @param builder.cache.warmer.image.registry Kaniko cache warmer registry
+  ## @param builder.cache.warmer.image.repository Kaniko cache warmer repository
+  ## @param builder.cache.warmer.image.tag Kaniko cache warmer image tag
   image:
+    # Not yet addressed: this is still kaniko's cache warmer
     registry: gcr.io
     repository: kaniko-project/warmer
     tag: v1.8.1
diff --git a/docker/substra-backend/Dockerfile b/docker/substra-backend/Dockerfile
index fb90ffc1a..a4e779530 100644
--- a/docker/substra-backend/Dockerfile
+++ b/docker/substra-backend/Dockerfile
@@ -33,8 +33,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
 RUN pip3 install --upgrade --no-cache-dir pip==21.3.1 \
     && pip3 install --prefix /usr/local --no-cache-dir psycopg2==2.9.3
 
-ARG USER_ID=1001
-ARG GROUP_ID=1001
+ARG USER_ID=1000
+ARG GROUP_ID=1000
 
 RUN mkdir -p /var/substra/ /tmp/django_cache \
     && chown -R ${USER_ID}:${GROUP_ID} /usr/src/app /var/substra/ /tmp/django_cache
@@ -53,8 +53,8 @@ FROM python:3.9-slim
 ARG VERSION
 ENV BACKEND_VERSION=${VERSION:-dev}
 
-ARG USER_ID=1001
-ARG GROUP_ID=1001
+ARG USER_ID=1000
+ARG GROUP_ID=1000
 
 ENV GRPC_POLL_STRATEGY=epoll1
 
 COPY --from=build /usr/local/lib/python3.9/ /usr/local/lib/python3.9/
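-- 

Reviewer note, not part of the patch itself: a minimal standalone sketch, using
hypothetical registry and repository values, of the --output argument built by
the new _build_container_args(). It shows why the whole image reference, tag
included, must sit inside the double quotes (the same quoting used in
deployment-registry-prepopulate.yaml): buildctl parses the value as CSV, and a
':tag' left outside the closing quote would corrupt the name field.

    # Standalone sketch; all values below are hypothetical.
    REGISTRY = "registry.local:5000"
    USER_IMAGE_REPOSITORY = "substra/user-image"
    REGISTRY_SCHEME = "http"
    image_tag = "algo-1234"

    # Keep the full reference, tag included, inside the quotes.
    full_image_name = f'"name={REGISTRY}/{USER_IMAGE_REPOSITORY}:{image_tag}"'
    output_arg = f"type=image,{full_image_name},push=true"
    if REGISTRY_SCHEME == "http":
        output_arg = ",".join((output_arg, "registry.insecure=true"))

    assert output_arg == (
        'type=image,"name=registry.local:5000/substra/user-image:algo-1234"'
        ",push=true,registry.insecure=true"
    )
    print(output_arg)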