From 6e8dfb261b43d93d13942c00502258e3cb1e6f8c Mon Sep 17 00:00:00 2001
From: Tri Dao
Date: Sun, 30 Jun 2024 17:35:25 -0700
Subject: [PATCH] Drop support for pytorch 1.12 and 1.13

---
 .github/workflows/publish.yaml | 30 ++++--------------------------
 setup.py                       |  3 +--
 2 files changed, 5 insertions(+), 28 deletions(-)

diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml
index 7e1b3793..7319ccc4 100644
--- a/.github/workflows/publish.yaml
+++ b/.github/workflows/publish.yaml
@@ -43,8 +43,8 @@ jobs:
           # Using ubuntu-20.04 instead of 22.04 for more compatibility (glibc). Ideally we'd use the
           # manylinux docker image, but I haven't figured out how to install CUDA on manylinux.
           os: [ubuntu-20.04]
-          python-version: ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12']
-          torch-version: ['1.12.1', '1.13.1', '2.0.1', '2.1.2', '2.2.2', '2.3.0', '2.4.0.dev20240420']
+          python-version: ['3.8', '3.9', '3.10', '3.11', '3.12']
+          torch-version: ['2.0.1', '2.1.2', '2.2.2', '2.3.1', '2.4.0.dev20240505']
           cuda-version: ['11.8.0', '12.2.2']
           # We need separate wheels that either uses C++11 ABI (-D_GLIBCXX_USE_CXX11_ABI) or not.
           # Pytorch wheels currently don't use it, but nvcr images have Pytorch compiled with C++11 ABI.
@@ -53,33 +53,11 @@ jobs:
           cxx11_abi: ['FALSE', 'TRUE']
           exclude:
             # Pytorch < 2.2 does not support Python 3.12
-            - torch-version: '1.12.1'
-              python-version: '3.12'
-            - torch-version: '1.13.1'
-              python-version: '3.12'
             - torch-version: '2.0.1'
               python-version: '3.12'
             - torch-version: '2.1.2'
               python-version: '3.12'
-            # Pytorch <= 1.12 does not support Python 3.11
-            - torch-version: '1.12.1'
-              python-version: '3.11'
-            # Pytorch >= 2.0 only supports Python >= 3.8
-            - torch-version: '2.0.1'
-              python-version: '3.7'
-            - torch-version: '2.1.2'
-              python-version: '3.7'
-            - torch-version: '2.2.2'
-              python-version: '3.7'
-            - torch-version: '2.3.0'
-              python-version: '3.7'
-            - torch-version: '2.4.0.dev20240420'
-              python-version: '3.7'
             # Pytorch <= 2.0 only supports CUDA <= 11.8
-            - torch-version: '1.12.1'
-              cuda-version: '12.2.2'
-            - torch-version: '1.13.1'
-              cuda-version: '12.2.2'
             - torch-version: '2.0.1'
               cuda-version: '12.2.2'
@@ -138,8 +116,8 @@ jobs:
           # e.g. we can have system CUDA version being 11.7 but if torch==1.12 then we need to download the wheel from cu116
           # This code is ugly, maybe there's a better way to do this.
           export TORCH_CUDA_VERSION=$(python -c "from os import environ as env; \
-            minv = {'1.12': 113, '1.13': 116, '2.0': 117, '2.1': 118, '2.2': 118, '2.3': 118, '2.4': 118}[env['MATRIX_TORCH_VERSION']]; \
-            maxv = {'1.12': 116, '1.13': 117, '2.0': 118, '2.1': 121, '2.2': 121, '2.3': 121, '2.4': 121}[env['MATRIX_TORCH_VERSION']]; \
+            minv = {'2.0': 117, '2.1': 118, '2.2': 118, '2.3': 118, '2.4': 118}[env['MATRIX_TORCH_VERSION']]; \
+            maxv = {'2.0': 118, '2.1': 121, '2.2': 121, '2.3': 121, '2.4': 121}[env['MATRIX_TORCH_VERSION']]; \
             print(max(min(int(env['MATRIX_CUDA_VERSION']), maxv), minv))" \
           )
           if [[ ${{ matrix.torch-version }} == *"dev"* ]]; then
diff --git a/setup.py b/setup.py
index 58772cb8..dd8d8128 100755
--- a/setup.py
+++ b/setup.py
@@ -207,7 +207,6 @@ def append_nvcc_threads(nvcc_extra_args):
                     f"--offload-arch={os.getenv('HIP_ARCHITECTURES', 'native')}",
                     "-U__CUDA_NO_HALF_OPERATORS__",
                     "-U__CUDA_NO_HALF_CONVERSIONS__",
-                    "-DCK_FMHA_FWD_FAST_EXP2=1",
                     "-fgpu-flush-denormals-to-zero",
                 ]
                 + cc_flag,
@@ -367,7 +366,7 @@ def run(self):
         else {
             "bdist_wheel": CachedWheelsCommand,
         },
-        python_requires=">=3.7",
+        python_requires=">=3.8",
         install_requires=[
             "torch",
             "packaging",
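
Note on the TORCH_CUDA_VERSION hunk above: the workflow inlines this as a `python -c` one-liner that clamps the runner's CUDA version to the range of CUDA builds published for the selected PyTorch minor version, so dropping 1.12/1.13 simply removes their dictionary entries. Below is a minimal standalone sketch of that clamping logic under the patched version tables. The function name `torch_cuda_version` and the module-level dicts are illustrative only (the workflow reads `MATRIX_TORCH_VERSION` and `MATRIX_CUDA_VERSION` from the environment instead); this is not part of the patch.

```python
# Sketch of the CUDA-version clamping done by the workflow's `python -c` one-liner.
# Versions are encoded as integers: CUDA 11.8 -> 118, CUDA 12.1 -> 121, CUDA 12.2 -> 122.
MIN_CUDA = {"2.0": 117, "2.1": 118, "2.2": 118, "2.3": 118, "2.4": 118}
MAX_CUDA = {"2.0": 118, "2.1": 121, "2.2": 121, "2.3": 121, "2.4": 121}


def torch_cuda_version(matrix_torch_version: str, matrix_cuda_version: str) -> int:
    """Clamp the system CUDA version to the range of wheel builds available for this torch minor version."""
    minv = MIN_CUDA[matrix_torch_version]
    maxv = MAX_CUDA[matrix_torch_version]
    return max(min(int(matrix_cuda_version), maxv), minv)


# torch 2.0 wheels only go up to cu118, so a CUDA 12.2 runner still downloads cu118.
assert torch_cuda_version("2.0", "122") == 118
# torch 2.3 on a CUDA 11.8 runner stays on cu118.
assert torch_cuda_version("2.3", "118") == 118
```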