From e0e43aa497d1d33ed94aa1284a05ee40aba74d39 Mon Sep 17 00:00:00 2001
From: Yih-Dar <2521628+ydshieh@users.noreply.github.com>
Date: Fri, 13 May 2022 17:26:26 +0200
Subject: [PATCH] fix --gpus option for docker (#17235)

Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
---
 .github/workflows/self-scheduled.yml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/self-scheduled.yml b/.github/workflows/self-scheduled.yml
index 62469f8e83331d..20e85ef9c7660f 100644
--- a/.github/workflows/self-scheduled.yml
+++ b/.github/workflows/self-scheduled.yml
@@ -126,7 +126,7 @@ jobs:
     runs-on: ${{ matrix.machines }}
     container:
       image: huggingface/transformers-all-latest-gpu
-      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
+      options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
     needs: setup
     steps:
       - name: Echo folder ${{ matrix.folders }}
@@ -208,7 +208,7 @@ jobs:
     runs-on: ${{ matrix.machines }}
     container:
       image: huggingface/transformers-pytorch-gpu
-      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
+      options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
     needs: setup
     steps:
       # Set machine type, i.e. `single-gpu` or `multi-gpu`. Here we just remove `-docker`.
@@ -252,7 +252,7 @@ jobs:
     runs-on: ${{ matrix.machines }}
     container:
       image: huggingface/transformers-tensorflow-gpu
-      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
+      options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
     needs: setup
     steps:
       # Set machine type, i.e. `single-gpu` or `multi-gpu`. Here we just remove `-docker`.