diff --git a/.github/actions/setup-env-build/action.yml b/.github/actions/setup-env-build/action.yml
index 57702712d6..a6ae9b72fe 100644
--- a/.github/actions/setup-env-build/action.yml
+++ b/.github/actions/setup-env-build/action.yml
@@ -11,12 +11,6 @@ inputs:
     description: 'CUDA version'
     required: false
     default: "11.8"
-  pytorch_version:
-    description: 'PyTorch version'
-    default: "2"
-  pytorch_channel:
-    description: 'PyTorch channel on conda'
-    default: "pytorch"
 
 runs:
   using: composite
@@ -34,15 +28,14 @@ runs:
         import datetime
         from pathlib import Path
 
-        CONDA_INSTALL_CMD = "micromamba create python=${{ inputs.python }} zlib pip ninja pytorch=${{ inputs.pytorch_version }} ccache=4.8 pytorch-mutex==1.0=cuda pytorch-cuda=${{ inputs.cuda }} -c ${{ inputs.pytorch_channel }} -c nvidia -c conda-forge -q -y"
+        CONDA_INSTALL_CMD = "micromamba create python=${{ inputs.python }} zlib pip ninja ccache=4.8 -c conda-forge -q -y"
 
         conda_env_key = CONDA_INSTALL_CMD
         for file in sorted(glob.glob("requirement*.txt")):
             conda_env_key += f"\n########## {file}\n"
             conda_env_key += Path(file).read_text()
-        if "${{ inputs.pytorch_channel }}" != "pytorch":
-            # Nightly or Test, update every week
-            conda_env_key += datetime.date.today().strftime("%Y-week%W") + "\n"
+        # Nightly or Test, update every week
+        conda_env_key += datetime.date.today().strftime("%Y-week%W") + "\n"
         conda_env_hash = hashlib.sha224(conda_env_key.encode("ascii")).hexdigest()[:8]
         shared_dir = os.environ.get("GHRUNNER_SHARED_DIR", os.getcwd())
         env_path = os.path.join(shared_dir, "tmp", os.environ["GITHUB_RUN_ID"])
@@ -93,6 +86,7 @@ runs:
         # Retry if failed after removing downloaded packages cache
         $CONDA_INSTALL_CMD || (rm -rf $CONDA_PKGS_DIRS && rm -rf $CONDA_PREFIX && $CONDA_INSTALL_CMD)
         $PY -m pip install cmake
+        $PY -m pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu118
         $PY -m pip install -r requirements-benchmark.txt --progress-bar off
     - name: Activate environment
       shell: bash -l {0}
diff --git a/.github/workflows/gpu_test_gh.yml b/.github/workflows/gpu_test_gh.yml
index bb5c4e273d..ef1dc1b490 100644
--- a/.github/workflows/gpu_test_gh.yml
+++ b/.github/workflows/gpu_test_gh.yml
@@ -31,13 +31,9 @@ jobs:
           sm: "9.0a"
         - runner: "4-core-ubuntu-gpu-t4"
           sm: "7.5"
-        pytorch:
-          - channel: pytorch-nightly
-            cuda: 12.1
-            version: 2
         python: [3.11]
 
-    name: test_sm${{ matrix.gpu.sm }}_cu${{ matrix.pytorch.cuda }}
+    name: test_sm${{ matrix.gpu.sm }}
     runs-on: ${{ matrix.gpu.runner }}
     timeout-minutes: 360
 
@@ -62,9 +58,8 @@ jobs:
           set -ex
           micromamba config set channel_priority strict
           micromamba create -n env python=${{ matrix.python }} \
-            zlib pip ninja pytorch=${{ matrix.pytorch.version }} ccache=4.8 pytorch-mutex==1.0=cuda pytorch-cuda=${{ matrix.pytorch.cuda }} \
-            cuda-libraries-dev cuda-nvcc \
-            -c nvidia -c ${{ matrix.pytorch.channel }} -c conda-forge -q -y
+            zlib pip ninja ccache=4.8 cuda-toolkit \
+            -c "nvidia/label/cuda-12.6" -c conda-forge -q -y
       - name: Activate environment
         shell: bash -l {0}
         run: |
@@ -74,6 +69,8 @@ jobs:
       - name: Setup test requirements
         run: |
           which python
+          which nvcc
+          pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu126
           pip install -r requirements-test.txt --progress-bar off
       - run: TORCH_CUDA_ARCH_LIST=${{ matrix.gpu.sm }} python setup.py develop
       - run: python -m xformers.info
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index e8c4cf036b..f21a3d862c 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -20,7 +20,7 @@ repos:
     rev: 22.3.0
     hooks:
       - id: black
-        language_version: python3.9
+        language_version: python3.11
 
   - repo: https://github.com/pycqa/flake8
     rev: 6.1.0
diff --git a/setup.py b/setup.py
index 273c869c6a..1afc501fbc 100644
--- a/setup.py
+++ b/setup.py
@@ -438,7 +438,11 @@ def get_extensions():
     use_pt_flash = False
 
     if (
-        (torch.cuda.is_available() and (CUDA_HOME is not None) and (torch.version.cuda is not None))
+        (
+            torch.cuda.is_available()
+            and (CUDA_HOME is not None)
+            and (torch.version.cuda is not None)
+        )
         or os.getenv("FORCE_CUDA", "0") == "1"
         or os.getenv("TORCH_CUDA_ARCH_LIST", "") != ""
     ):