CI: Update to use pip instead of conda (fairinternal/xformers#1270)
__original_commit__ = fairinternal/xformers@0fce003
danthe3rd authored and xFormers Bot committed Dec 19, 2024
1 parent 9a25558 commit 89fc994
Showing 4 changed files with 15 additions and 20 deletions.
14 changes: 4 additions & 10 deletions .github/actions/setup-env-build/action.yml
@@ -11,12 +11,6 @@ inputs:
description: 'CUDA version'
required: false
default: "11.8"
- pytorch_version:
-     description: 'PyTorch version'
-     default: "2"
- pytorch_channel:
-     description: 'PyTorch channel on conda'
-     default: "pytorch"

runs:
using: composite
@@ -34,15 +28,14 @@ runs:
import datetime
from pathlib import Path
CONDA_INSTALL_CMD = "micromamba create python=${{ inputs.python }} zlib pip ninja pytorch=${{ inputs.pytorch_version }} ccache=4.8 pytorch-mutex==1.0=cuda pytorch-cuda=${{ inputs.cuda }} -c ${{ inputs.pytorch_channel }} -c nvidia -c conda-forge -q -y"
CONDA_INSTALL_CMD = "micromamba create python=${{ inputs.python }} zlib pip ninja ccache=4.8 -c conda-forge -q -y"
conda_env_key = CONDA_INSTALL_CMD
for file in sorted(glob.glob("requirement*.txt")):
    conda_env_key += f"\n########## {file}\n"
    conda_env_key += Path(file).read_text()
- if "${{ inputs.pytorch_channel }}" != "pytorch":
-     # Nightly or Test, update every week
-     conda_env_key += datetime.date.today().strftime("%Y-week%W") + "\n"
+ # Nightly or Test, update every week
+ conda_env_key += datetime.date.today().strftime("%Y-week%W") + "\n"
conda_env_hash = hashlib.sha224(conda_env_key.encode("ascii")).hexdigest()[:8]
shared_dir = os.environ.get("GHRUNNER_SHARED_DIR", os.getcwd())
env_path = os.path.join(shared_dir, "tmp", os.environ["GITHUB_RUN_ID"])
@@ -93,6 +86,7 @@ runs:
# Retry if failed after removing downloaded packages cache
$CONDA_INSTALL_CMD || (rm -rf $CONDA_PKGS_DIRS && rm -rf $CONDA_PREFIX && $CONDA_INSTALL_CMD)
$PY -m pip install cmake
+ $PY -m pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu118
$PY -m pip install -r requirements-benchmark.txt --progress-bar off
- name: Activate environment
shell: bash -l {0}
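Note: the environment cache key shown in this diff is the micromamba create command, plus the contents of every requirement*.txt, plus a weekly-rotating date stamp (needed because PyTorch now comes from nightly pip wheels), hashed with SHA-224. A minimal standalone sketch of that scheme, with the templated Python version replaced by a placeholder value, is:

    import datetime
    import glob
    import hashlib
    from pathlib import Path

    # Illustrative only; "python=3.11" stands in for the ${{ inputs.python }} template.
    CONDA_INSTALL_CMD = "micromamba create python=3.11 zlib pip ninja ccache=4.8 -c conda-forge -q -y"

    conda_env_key = CONDA_INSTALL_CMD
    for file in sorted(glob.glob("requirement*.txt")):
        conda_env_key += f"\n########## {file}\n"
        conda_env_key += Path(file).read_text()
    # Rotate the key weekly so the cached env picks up fresh nightly wheels.
    conda_env_key += datetime.date.today().strftime("%Y-week%W") + "\n"
    conda_env_hash = hashlib.sha224(conda_env_key.encode("ascii")).hexdigest()[:8]
    print(conda_env_hash)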
13 changes: 5 additions & 8 deletions .github/workflows/gpu_test_gh.yml
@@ -31,13 +31,9 @@ jobs:
sm: "9.0a"
- runner: "4-core-ubuntu-gpu-t4"
sm: "7.5"
- pytorch:
-   - channel: pytorch-nightly
-     cuda: 12.1
-     version: 2
python: [3.11]

- name: test_sm${{ matrix.gpu.sm }}_cu${{ matrix.pytorch.cuda }}
+ name: test_sm${{ matrix.gpu.sm }}
runs-on: ${{ matrix.gpu.runner }}

timeout-minutes: 360
@@ -62,9 +58,8 @@ jobs:
set -ex
micromamba config set channel_priority strict
micromamba create -n env python=${{ matrix.python }} \
-     zlib pip ninja pytorch=${{ matrix.pytorch.version }} ccache=4.8 pytorch-mutex==1.0=cuda pytorch-cuda=${{ matrix.pytorch.cuda }} \
-     cuda-libraries-dev cuda-nvcc \
-     -c nvidia -c ${{ matrix.pytorch.channel }} -c conda-forge -q -y
+     zlib pip ninja ccache=4.8 cuda-toolkit \
+     -c "nvidia/label/cuda-12.6" -c conda-forge -q -y
- name: Activate environment
shell: bash -l {0}
run: |
@@ -74,6 +69,8 @@ jobs:
- name: Setup test requirements
run: |
which python
+ which nvcc
+ pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu126
pip install -r requirements-test.txt --progress-bar off
- run: TORCH_CUDA_ARCH_LIST=${{ matrix.gpu.sm }} python setup.py develop
- run: python -m xformers.info
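Since PyTorch now comes from nightly cu126 pip wheels while the CUDA toolkit comes from the "nvidia/label/cuda-12.6" conda channel, a quick sanity check before the build steps (illustrative only, not part of the workflow; the example version string is hypothetical) is:

    import torch

    print(torch.__version__)   # nightly wheels look like "2.6.0.dev20241219+cu126" (example)
    print(torch.version.cuda)  # CUDA version the wheel was built against, e.g. "12.6"
    assert torch.cuda.is_available(), "expected a CUDA device on the GPU runner"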
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -20,7 +20,7 @@ repos:
rev: 22.3.0
hooks:
- id: black
- language_version: python3.9
+ language_version: python3.11

- repo: https://github.com/pycqa/flake8
rev: 6.1.0
6 changes: 5 additions & 1 deletion setup.py
@@ -438,7 +438,11 @@ def get_extensions():
use_pt_flash = False

if (
- (torch.cuda.is_available() and (CUDA_HOME is not None) and (torch.version.cuda is not None))
+ (
+     torch.cuda.is_available()
+     and (CUDA_HOME is not None)
+     and (torch.version.cuda is not None)
+ )
or os.getenv("FORCE_CUDA", "0") == "1"
or os.getenv("TORCH_CUDA_ARCH_LIST", "") != ""
):
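The setup.py change is formatting only: CUDA extensions are still built when a full CUDA toolchain is detected, or when FORCE_CUDA=1 or an explicit TORCH_CUDA_ARCH_LIST overrides detection. A self-contained sketch of that gate (the CUDA_HOME import location is assumed, as the diff does not show it):

    import os

    import torch
    from torch.utils.cpp_extension import CUDA_HOME

    # Build CUDA extensions if a toolchain is detected, or if the user forces it.
    cuda_detected = (
        torch.cuda.is_available()
        and CUDA_HOME is not None
        and torch.version.cuda is not None
    )
    build_cuda = (
        cuda_detected
        or os.getenv("FORCE_CUDA", "0") == "1"
        or os.getenv("TORCH_CUDA_ARCH_LIST", "") != ""
    )
    print("build CUDA extensions:", build_cuda)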
