Skip to content

Commit

Permalink
fix --gpus option for docker (huggingface#17235)
Browse files Browse the repository at this point in the history
Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
  • Loading branch information
2 people authored and elusenji committed Jun 12, 2022
1 parent 57f188f commit e0e43aa
Showing 1 changed file with 3 additions and 3 deletions.
6 changes: 3 additions & 3 deletions .github/workflows/self-scheduled.yml
Original file line number Diff line number Diff line change
Expand Up @@ -126,7 +126,7 @@ jobs:
runs-on: ${{ matrix.machines }}
container:
image: huggingface/transformers-all-latest-gpu
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
needs: setup
steps:
- name: Echo folder ${{ matrix.folders }}
Expand Down Expand Up @@ -208,7 +208,7 @@ jobs:
runs-on: ${{ matrix.machines }}
container:
image: huggingface/transformers-pytorch-gpu
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
needs: setup
steps:
# Set machine type, i.e. `single-gpu` or `multi-gpu`. Here we just remove `-docker`.
Expand Down Expand Up @@ -252,7 +252,7 @@ jobs:
runs-on: ${{ matrix.machines }}
container:
image: huggingface/transformers-tensorflow-gpu
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
needs: setup
steps:
# Set machine type, i.e. `single-gpu` or `multi-gpu`. Here we just remove `-docker`.
Expand Down

0 comments on commit e0e43aa

Please sign in to comment.