
Rebuild for windows_cuda #19

12 changes: 12 additions & 0 deletions .azure-pipelines/azure-pipelines-win.yml

45 changes: 45 additions & 0 deletions .ci_support/migrations/windows_cuda.yaml
@@ -0,0 +1,45 @@
migrator_ts: 1604255168
__migrator:
  kind:
    version
  migration_number:
    1
  build_number:
    1
  override_cbc_keys:
    - cuda_compiler_stub

cuda_compiler: # [linux64 or win]
- nvcc # [linux64 or win]
cuda_compiler_version:
- None
- 9.2 # [linux64]
- 10.0 # [linux64 or win]
- 10.1 # [linux64 or win]
- 10.2 # [linux64 or win]
- 11.0 # [linux64 or win]


# cdt_name: # [os.environ.get("BUILD_PLATFORM", "").startswith("linux") or (os.environ.get("CONFIG_VERSION", "1") == "1" and linux)]
# - cos6 # [os.environ.get("BUILD_PLATFORM") == "linux-64" or (os.environ.get("CONFIG_VERSION", "1") == "1" and linux64)]
# - cos7 # [os.environ.get("BUILD_PLATFORM") == "linux-aarch64" or (os.environ.get("CONFIG_VERSION", "1") == "1" and aarch64)]
# - cos7 # [os.environ.get("BUILD_PLATFORM") == "linux-ppc64le" or (os.environ.get("CONFIG_VERSION", "1") == "1" and ppc64le)]
# - cos7 # [os.environ.get("BUILD_PLATFORM") == "linux-armv7l" or (os.environ.get("CONFIG_VERSION", "1") == "1" and armv7l)]
#
# - cos6 # [linux64 and (os.environ.get("BUILD_PLATFORM") == "linux-64" or os.environ.get("CONFIG_VERSION", "1") == "1")]
# - cos6 # [linux64 and (os.environ.get("BUILD_PLATFORM") == "linux-64" or os.environ.get("CONFIG_VERSION", "1") == "1")]
# - cos6 # [linux64 and (os.environ.get("BUILD_PLATFORM") == "linux-64" or os.environ.get("CONFIG_VERSION", "1") == "1")]
# - cos6 # [linux64 and (os.environ.get("BUILD_PLATFORM") == "linux-64" or os.environ.get("CONFIG_VERSION", "1") == "1")]
# - cos7 # [linux64 and (os.environ.get("BUILD_PLATFORM") == "linux-64" or os.environ.get("CONFIG_VERSION", "1") == "1")]

docker_image: # [os.environ.get("BUILD_PLATFORM", "").startswith("linux") or (os.environ.get("CONFIG_VERSION", "1") == "1" and linux)]
- quay.io/condaforge/linux-anvil-comp7 # [os.environ.get("BUILD_PLATFORM") == "linux-64" or (os.environ.get("CONFIG_VERSION", "1") == "1" and linux64)]
- quay.io/condaforge/linux-anvil-aarch64 # [os.environ.get("BUILD_PLATFORM") == "linux-aarch64" or (os.environ.get("CONFIG_VERSION", "1") == "1" and aarch64)]
- quay.io/condaforge/linux-anvil-ppc64le # [os.environ.get("BUILD_PLATFORM") == "linux-ppc64le" or (os.environ.get("CONFIG_VERSION", "1") == "1" and ppc64le)]
- quay.io/condaforge/linux-anvil-armv7l # [os.environ.get("BUILD_PLATFORM") == "linux-armv7l" or (os.environ.get("CONFIG_VERSION", "1") == "1" and armv7l)]

- quay.io/condaforge/linux-anvil-cuda:9.2 # [linux64 and (os.environ.get("BUILD_PLATFORM") == "linux-64" or os.environ.get("CONFIG_VERSION", "1") == "1")]
- quay.io/condaforge/linux-anvil-cuda:10.0 # [linux64 and (os.environ.get("BUILD_PLATFORM") == "linux-64" or os.environ.get("CONFIG_VERSION", "1") == "1")]
- quay.io/condaforge/linux-anvil-cuda:10.1 # [linux64 and (os.environ.get("BUILD_PLATFORM") == "linux-64" or os.environ.get("CONFIG_VERSION", "1") == "1")]
- quay.io/condaforge/linux-anvil-cuda:10.2 # [linux64 and (os.environ.get("BUILD_PLATFORM") == "linux-64" or os.environ.get("CONFIG_VERSION", "1") == "1")]
- quay.io/condaforge/linux-anvil-cuda:11.0 # [linux64 and (os.environ.get("BUILD_PLATFORM") == "linux-64" or os.environ.get("CONFIG_VERSION", "1") == "1")]
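Note for readers: the trailing bracketed comments in this migrator (e.g. `# [linux64 or win]`) are conda-build preprocessing selectors, i.e. boolean expressions evaluated per target platform that decide whether a line survives rendering. Below is a minimal sketch of that behaviour with an illustrative namespace; it is not conda-build's actual implementation.

```python
# Minimal sketch of conda-build-style line selectors.
# NOT conda-build's real implementation; the namespace flags are illustrative.
import re

def keep_line(line, namespace):
    """Return True if the line has no selector or its selector evaluates truthy."""
    match = re.search(r"#\s*\[(.*)\]\s*$", line)
    if match is None:
        return True  # no selector -> always kept
    # Selectors are Python boolean expressions over platform flags.
    return bool(eval(match.group(1), {}, namespace))

# Example: rendering for a 64-bit Windows CI job.
ns = {"win": True, "win64": True, "linux": False, "linux64": False, "osx": False}
print(keep_line("- nvcc    # [linux64 or win]", ns))   # True
print(keep_line("- 9.2     # [linux64]", ns))          # False
```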
33 changes: 33 additions & 0 deletions .ci_support/win_64_cuda_compiler_version10.2.yaml
@@ -0,0 +1,33 @@
channel_sources:
- conda-forge,defaults
channel_targets:
- conda-forge main
cuda_compiler:
- nvcc
cuda_compiler_version:
- '10.2'
cxx_compiler:
- vs2017
libblas:
- 3.8 *netlib
liblapack:
- 3.8 *netlib
numpy:
- '1.16'
- '1.16'
- '1.16'
- '1.19'
pin_run_as_build:
  python:
    min_pin: x.x
    max_pin: x.x
python:
- 3.6.* *_cpython
- 3.7.* *_cpython
- 3.8.* *_cpython
- 3.9.* *_cpython
target_platform:
- win-64
zip_keys:
- - numpy
  - python
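Aside on the generated config above: because `numpy` and `python` appear together under `zip_keys`, the two lists are paired index-by-index rather than combined as a cross-product, which is why `numpy` is listed four times. Purely illustrative, not conda-smithy code:

```python
# zip_keys pairs the numpy and python lists in lockstep (no cross-product).
numpy = ["1.16", "1.16", "1.16", "1.19"]
python = ["3.6.* *_cpython", "3.7.* *_cpython", "3.8.* *_cpython", "3.9.* *_cpython"]

for np_ver, py_ver in zip(numpy, python):
    print(f"build against numpy {np_ver} for python {py_ver}")
# -> numpy 1.16 for python 3.6/3.7/3.8, numpy 1.19 for python 3.9
```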
33 changes: 33 additions & 0 deletions .ci_support/win_64_cuda_compiler_version11.0.yaml
@@ -0,0 +1,33 @@
channel_sources:
- conda-forge,defaults
channel_targets:
- conda-forge main
cuda_compiler:
- nvcc
cuda_compiler_version:
- '11.0'
cxx_compiler:
- vs2017
libblas:
- 3.8 *netlib
liblapack:
- 3.8 *netlib
numpy:
- '1.16'
- '1.16'
- '1.16'
- '1.19'
pin_run_as_build:
  python:
    min_pin: x.x
    max_pin: x.x
python:
- 3.6.* *_cpython
- 3.7.* *_cpython
- 3.8.* *_cpython
- 3.9.* *_cpython
target_platform:
- win-64
zip_keys:
- - numpy
  - python
33 changes: 33 additions & 0 deletions .ci_support/win_64_cuda_compiler_version11.1.yaml
@@ -0,0 +1,33 @@
channel_sources:
- conda-forge,defaults
channel_targets:
- conda-forge main
cuda_compiler:
- nvcc
cuda_compiler_version:
- '11.1'
cxx_compiler:
- vs2017
libblas:
- 3.8 *netlib
liblapack:
- 3.8 *netlib
numpy:
- '1.16'
- '1.16'
- '1.16'
- '1.19'
pin_run_as_build:
  python:
    min_pin: x.x
    max_pin: x.x
python:
- 3.6.* *_cpython
- 3.7.* *_cpython
- 3.8.* *_cpython
- 3.9.* *_cpython
target_platform:
- win-64
zip_keys:
- - numpy
  - python
33 changes: 33 additions & 0 deletions .ci_support/win_64_cuda_compiler_version11.2.yaml
@@ -0,0 +1,33 @@
channel_sources:
- conda-forge,defaults
channel_targets:
- conda-forge main
cuda_compiler:
- nvcc
cuda_compiler_version:
- '11.2'
cxx_compiler:
- vs2017
libblas:
- 3.8 *netlib
liblapack:
- 3.8 *netlib
numpy:
- '1.16'
- '1.16'
- '1.16'
- '1.19'
pin_run_as_build:
  python:
    min_pin: x.x
    max_pin: x.x
python:
- 3.6.* *_cpython
- 3.7.* *_cpython
- 3.8.* *_cpython
- 3.9.* *_cpython
target_platform:
- win-64
zip_keys:
- - numpy
  - python
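The `pin_run_as_build` block in these configs turns the exact python found at build time into a bounded run requirement (`x.x` keeps major.minor for both bounds). A simplified sketch of that pinning logic, not conda-build's real implementation:

```python
def apply_pin(version, min_pin="x.x", max_pin="x.x"):
    """Simplified sketch of conda-build's pin_run_as_build behaviour."""
    parts = version.split(".")
    # keep as many version components as there are 'x' placeholders
    lower = ".".join(parts[:min_pin.count("x")])
    upper_parts = parts[:max_pin.count("x")]
    upper_parts[-1] = str(int(upper_parts[-1]) + 1)  # bump the last kept component
    upper = ".".join(upper_parts)
    return f">={lower},<{upper}.0a0"

print("python " + apply_pin("3.8.5"))  # python >=3.8,<3.9.0a0
```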
2 changes: 2 additions & 0 deletions .ci_support/win_64_cuda_compiler_versionNone.yaml
@@ -2,6 +2,8 @@ channel_sources:
- conda-forge,defaults
channel_targets:
- conda-forge main
cuda_compiler:
- nvcc
cuda_compiler_version:
- None
cxx_compiler:
28 changes: 28 additions & 0 deletions README.md

50 changes: 49 additions & 1 deletion recipe/build-lib.bat
@@ -1,14 +1,62 @@
@echo on

SetLocal EnableDelayedExpansion

if "%cuda_compiler_version%"=="None" (
set "FAISS_ENABLE_GPU=OFF"
set "CUDA_CONFIG_ARGS="
) else (
set "FAISS_ENABLE_GPU=ON"

REM for documentation see e.g.
REM docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#building-for-maximum-compatibility
REM docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#ptxas-options-gpu-name
REM docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#gpu-feature-list

REM for -real vs. -virtual, see cmake.org/cmake/help/latest/prop_tgt/CUDA_ARCHITECTURES.html
REM this is to support PTX JIT compilation; see first link above or cf.
REM devblogs.nvidia.com/cuda-pro-tip-understand-fat-binaries-jit-caching

REM windows support start with cuda 10.0
REM %MY_VAR:~0,2% selects first two characters
if "%cuda_compiler_version:~0,2%"=="10" (
set "CMAKE_CUDA_ARCHS=35-virtual;50-virtual;52-virtual;60-virtual;61-virtual;70-virtual;75-virtual;75-real"
)
if "%cuda_compiler_version:~0,2%"=="11" (
if "%cuda_compiler_version:~0,4%"=="11.0" (
REM cuda 11.0 deprecates arches 35, 50
set "CMAKE_CUDA_ARCHS=52-virtual;60-virtual;61-virtual;70-virtual;75-virtual;80-virtual;80-real"
) else (
REM cuda>=11.1 adds arch 86
set "CMAKE_CUDA_ARCHS=52-virtual;60-virtual;61-virtual;70-virtual;75-virtual;80-virtual;86-virtual;86-real"
)
)

REM See more extensive comment in build-lib.sh
REM TODO: Fix this in nvcc-feedstock or cmake-feedstock.
del %BUILD_PREFIX%\bin\nvcc.bat

REM ... and another workaround just to cover more bases
set "CudaToolkitDir=%CUDA_PATH%"
set "CUDAToolkit_ROOT=%CUDA_PATH%"

set CUDA_CONFIG_ARGS=-DCMAKE_CUDA_ARCHITECTURES=!CMAKE_CUDA_ARCHS!
REM cmake does not generate output for the call below; echo some info
echo Set up extra cmake-args: CUDA_CONFIG_ARGS=!CUDA_CONFIG_ARGS!
)

:: Build faiss.dll
cmake -B _build ^
-DBUILD_SHARED_LIBS=ON ^
-DBUILD_TESTING=OFF ^
-DFAISS_ENABLE_GPU=OFF ^
-DFAISS_ENABLE_PYTHON=OFF ^
-DFAISS_ENABLE_GPU=!FAISS_ENABLE_GPU! ^
-DCMAKE_BUILD_TYPE=Release ^
-DCMAKE_INSTALL_PREFIX="%LIBRARY_PREFIX%" ^
-DCMAKE_INSTALL_BINDIR="%LIBRARY_BIN%" ^
-DCMAKE_INSTALL_LIBDIR="%LIBRARY_LIB%" ^
-DCMAKE_INSTALL_INCLUDEDIR="%LIBRARY_INC%" ^
!CUDA_CONFIG_ARGS! ^
.
if %ERRORLEVEL% neq 0 exit 1
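For reviewers unfamiliar with `CMAKE_CUDA_ARCHITECTURES`: a `NN-real` entry ships compiled SASS for that GPU generation, while a `NN-virtual` entry embeds PTX that newer GPUs can JIT-compile at runtime (see the PTX-JIT links in the comments above). Roughly, CMake expands the list into nvcc `-gencode` flags along these lines; this is an approximation for illustration, not CMake's exact output:

```python
def gencode_flags(cmake_cuda_archs):
    """Translate entries like '75-virtual' / '75-real' into nvcc -gencode flags.

    Approximation for illustration; only the suffixed forms used in
    build-lib.bat are handled here.
    """
    flags = []
    for entry in cmake_cuda_archs.split(";"):
        arch, _, kind = entry.partition("-")
        if kind == "virtual":
            # PTX only: newer GPUs can JIT-compile this at runtime
            flags.append(f"-gencode=arch=compute_{arch},code=compute_{arch}")
        elif kind == "real":
            # SASS only: native machine code for exactly this architecture
            flags.append(f"-gencode=arch=compute_{arch},code=sm_{arch}")
    return flags

# The CUDA 11.0 case from build-lib.bat:
for flag in gencode_flags("52-virtual;60-virtual;61-virtual;70-virtual;75-virtual;80-virtual;80-real"):
    print(flag)
```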

20 changes: 19 additions & 1 deletion recipe/build-pkg.bat
@@ -1,7 +1,25 @@
@echo on

SetLocal EnableDelayedExpansion

if "%cuda_compiler_version%"=="None" (
set FAISS_ENABLE_GPU="OFF"
) else (
set FAISS_ENABLE_GPU="ON"

REM See more extensive comment in build-pkg.sh
REM TODO: Fix this in nvcc-feedstock or cmake-feedstock.
del %BUILD_PREFIX%\bin\nvcc.bat

REM ... and another workaround just to cover more bases
set "CudaToolkitDir=%CUDA_PATH%"
set "CUDAToolkit_ROOT=%CUDA_PATH%"
)

:: Build vanilla version (no avx2).
:: Do not use the Python3_* variants for cmake
cmake -B _build_python ^
-DFAISS_ENABLE_GPU=OFF ^
-DFAISS_ENABLE_GPU=!FAISS_ENABLE_GPU! ^
-DCMAKE_BUILD_TYPE=Release ^
-DPython_EXECUTABLE="%PYTHON%" ^
faiss/python
5 changes: 1 addition & 4 deletions recipe/conda_build_config.yaml
@@ -1,6 +1,3 @@
cxx_compiler_version: # [unix]
cxx_compiler_version: # [osx]
# need to downgrade on osx due to a bug that breaks the test suite
- 10 # [osx]
# need to downgrade on linux due to nvcc 9.2 not being able to deal with gcc>7,
# and conda-build not being able to zip this with cuda_compiler_version
- 7 # [linux]
18 changes: 11 additions & 7 deletions recipe/meta.yaml
@@ -1,5 +1,5 @@
{% set version = "1.7.0" %}
{% set number = 1 %}
{% set number = 2 %}
# see github.com/conda-forge/conda-forge.github.io/issues/1059 for naming discussion
{% set faiss_proc_type = "cuda" if cuda_compiler_version != "None" else "cpu" %}

@@ -74,13 +74,17 @@ source:
    - patches/0001-use-c-14.patch
    # backport of facebookresearch/faiss#1666, can be dropped for ver>1.7.0
    - patches/0002-Add-missing-headers-in-faiss-gpu-CMakeLists.txt-1666.patch
    # update version-guard to build for compute_86
    # update version-guard to build for compute_86, can be dropped for ver>1.7.0
    - patches/0003-update-util-guard-for-compute_86.patch
    # single commit from facebookresearch/faiss#1610
    - patches/0004-Add-missing-includes-for-std-min-std-max.patch
    # skip test that fails without GPU drivers on windows
    - patches/0005-skip-test-that-fails-without-GPU-drivers.patch # [win]

build:
  number: {{ number }}
  # GPU version only for linux
  skip: true # [(win or osx) and cuda_compiler_version != "None"]
  # GPU version for linux64 & win
  skip: true # [osx and cuda_compiler_version != "None"]

requirements:
  build:
@@ -175,16 +179,16 @@ outputs:
        # - blas * *{{ blas_impl }}
        # testing with MKL, as upstream considers this the most important
        - blas =*=mkl
        # this is necessary for a single test in the test suite
        - scipy
        - pytest
      source_files:
        - tests/
      imports:
        - faiss
      commands:
        - python -m unittest discover tests
        - pytest tests -v
        # running the following test requires an actual GPU device, which is not available in CI
        # - python -m unittest discover faiss/gpu/test/
        # - pytest faiss/gpu/test/ -v

  # for compatibility with (& ease of migration from) existing packages in the pytorch channel
  - name: faiss-cpu
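To summarize the effect of the relaxed `skip` selector together with the windows_cuda migrator: CUDA variants are now only skipped on osx, so win-64 gains the 10.2/11.0/11.1/11.2 builds listed under `.ci_support/`. A quick sanity check of the selector logic; the platform and version lists here are illustrative, taken from this PR rather than generated from the recipe:

```python
# Mirrors `skip: true  # [osx and cuda_compiler_version != "None"]`.
# Platform/version lists are illustrative, not generated from the recipe.
platforms = ["linux-64", "osx-64", "win-64"]
cuda_versions = ["None", "10.2", "11.0", "11.1", "11.2"]

def skipped(platform, cuda_compiler_version):
    return platform.startswith("osx") and cuda_compiler_version != "None"

for plat in platforms:
    built = [v for v in cuda_versions if not skipped(plat, v)]
    print(plat, "->", built)
# osx-64 only builds the CPU variant; linux-64 and win-64 keep the CUDA ones.
```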