Merge branch 'ibverb-support' into add-simple-packet
dentalfloss1 authored Feb 23, 2024
2 parents 6570bab + 0c8f19b commit 390f047
Showing 126 changed files with 3,358 additions and 2,092 deletions.
26 changes: 19 additions & 7 deletions .github/workflows/main.yml
@@ -3,7 +3,7 @@ name: "Build and Test"
 "on": [push, pull_request]
 jobs:
   pre_build:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     outputs:
       should_skip: ${{ steps.skip_check.outputs.should_skip }}
     steps:
@@ -20,10 +20,8 @@ jobs:
     strategy:
       matrix:
         os: [self-hosted, ubuntu-latest, macos-latest]
-        python-version: ['3.8', '3.10']
+        python-version: ['3.8', '3.10', '3.12']
         include:
-          - os: ubuntu-20.04
-            python-version: '2.7'
           - os: ubuntu-20.04
             python-version: '3.6'
           - os: macos-latest
@@ -55,7 +53,7 @@ jobs:
             gnu-sed \
             hwloc \
             pkg-config
-      - uses: actions/setup-python@v4.3.0
+      - uses: actions/setup-python@v5.0.0
         with:
           python-version: ${{ matrix.python-version }}
       - name: "Software Install - Python"
@@ -68,9 +66,10 @@
             pint \
             graphviz \
             ctypesgen==1.0.2 \
+            pylint \
             coverage
       - name: "Software Install - Python, part 2"
-        if: ${{ matrix.os == 'self-hosted' && matrix.python-version != '2.7' }}
+        if: ${{ matrix.os == 'self-hosted' }}
         # Setting CPLUS_INCLUDE_PATH helps pycuda find the right
         # Python header files <pyconfig.h> to use with its embedded
         # subset of Boost.
@@ -102,14 +101,27 @@ jobs:
           coverage run --source=bifrost.ring,bifrost,bifrost.pipeline \
             -m unittest discover
           coverage xml
+      - name: "Test, part 2"
+        if: ${{ matrix.os == 'self-hosted' }}
+        env:
+          LD_LIBRARY_PATH: /usr/local/lib:${{ env.LD_LIBRARY_PATH }}
+        run: |
+          cd testbench
+          python generate_test_data.py
+          coverage run --source=bifrost.ring,bifrost,bifrost.pipeline test_file_read_write.py
+          coverage run --source=bifrost.ring,bifrost,bifrost.pipeline test_fft.py
+          coverage run --source=bifrost.ring,bifrost,bifrost.pipeline your_first_block.py
+          python download_breakthrough_listen_data.py -y
+          coverage run --source=bifrost.ring,bifrost,bifrost.pipeline test_fdmt.py ./testdata/pulsars/blc0_guppi_57407_61054_PSR_J1840%2B5640_0004.fil
+          coverage xml
       - name: "Upload Coverage"
         env:
           UNITTEST_OS: ${{ matrix.os }}
           UNITTEST_PY: ${{ matrix.python-version }}
         if: ${{ matrix.os == 'self-hosted' && matrix.python-version == '3.8' }}
         uses: codecov/codecov-action@v2
         with:
-          directory: ./test/
+          files: ./test/coverage.xml, ./testbench/coverage.xml
           env_vars: UNITTEST_OS,UNITTEST_PY
           fail_ci_if_error: false
           verbose: true
1 change: 1 addition & 0 deletions .gitignore
@@ -2,6 +2,7 @@ doc
 version.py
 /python/bifrost/version/__init__.py
 *_generated.py
+*_typehints.py
 .log*.txt
 test/data/
 *.bin
67 changes: 65 additions & 2 deletions config/cuda.m4
@@ -27,6 +27,9 @@ AC_DEFUN([AX_CHECK_CUDA],
   AC_SUBST([CUDA_HAVE_CXX11], [0])
   AC_SUBST([GPU_MIN_ARCH], [0])
   AC_SUBST([GPU_MAX_ARCH], [0])
+  AC_SUBST([GPU_SHAREDMEM], [0])
+  AC_SUBST([GPU_PASCAL_MANAGEDMEM], [0])
+  AC_SUBST([GPU_EXP_PINNED_ALLOC], [1])
   if test "$enable_cuda" != "no"; then
     AC_SUBST([HAVE_CUDA], [1])
@@ -36,14 +39,18 @@ AC_DEFUN([AX_CHECK_CUDA],
   fi
   if test "$HAVE_CUDA" = "1"; then
-    AC_MSG_CHECKING([for a working CUDA installation])
+    AC_MSG_CHECKING([for a working CUDA 10+ installation])
     CXXFLAGS_save="$CXXFLAGS"
     LDFLAGS_save="$LDFLAGS"
+    NVCCLIBS_save="$NVCCLIBS"
     ac_compile='$NVCC -c $NVCCFLAGS conftest.$ac_ext >&5'
-    AC_COMPILE_IFELSE([
+    LDFLAGS="-L$CUDA_HOME/lib64 -L$CUDA_HOME/lib"
+    NVCCLIBS="$LIBS -lcuda -lcudart"
+    ac_link='$NVCC -o conftest$ac_exeext $NVCCFLAGS $LDFLAGS $LIBS conftest.$ac_ext >&5'
+    AC_LINK_IFELSE([
       AC_LANG_PROGRAM([[
         #include <cuda.h>
         #include <cuda_runtime.h>]],
@@ -234,6 +241,62 @@ AC_DEFUN([AX_CHECK_CUDA],
   ar_max_valid=$(echo $ar_valid | ${SED} -e 's/.* //g;' )
   AC_SUBST([GPU_MAX_ARCH], [$ar_max_valid])
+  AC_ARG_WITH([shared_mem],
+              [AS_HELP_STRING([--with-shared-mem=N],
+                              [default GPU shared memory per block in bytes (default=detect)])],
+              [],
+              [with_shared_mem='auto'])
+  if test "$with_gpu_archs" = "auto"; then
+    AC_MSG_CHECKING([for minimum shared memory per block])
+    CXXFLAGS_save="$CXXFLAGS"
+    LDFLAGS_save="$LDFLAGS"
+    NVCCLIBS_save="$NVCCLIBS"
+    LDFLAGS="-L$CUDA_HOME/lib64 -L$CUDA_HOME/lib"
+    NVCCLIBS="-lcuda -lcudart"
+    ac_run='$NVCC -o conftest$ac_ext $LDFLAGS $LIBS conftest.$ac_ext>&5'
+    AC_RUN_IFELSE([
+      AC_LANG_PROGRAM([[
+        #include <cuda.h>
+        #include <cuda_runtime.h>
+        #include <iostream>
+        #include <fstream>
+        #include <set>]],
+        [[
+        std::set<int> smem;
+        int smemSize;
+        int deviceCount = 0;
+        cudaGetDeviceCount(&deviceCount);
+        if( deviceCount == 0 ) {
+          return 1;
+        }
+        for(int dev=0; dev<deviceCount; dev++) {
+          cudaSetDevice(dev);
+          cudaDeviceGetAttribute(&smemSize, cudaDevAttrMaxSharedMemoryPerBlock, dev);
+          if( smem.count(smemSize) == 0 ) {
+            smem.insert(smemSize);
+          }
+        }
+        std::ofstream fh;
+        fh.open("confsmem.out");
+        if( smem.empty() ) {
+          fh << 0;
+        } else {
+          fh << *smem.begin();
+        }
+        fh.close();]])],
+      [AC_SUBST([GPU_SHAREDMEM], [`cat confsmem.out`])
+       AC_MSG_RESULT([$GPU_SHAREDMEM B])],
+      [AC_MSG_ERROR(failed to determine a value)])
+    CXXFLAGS="$CXXFLAGS_save"
+    LDFLAGS="$LDFLAGS_save"
+    NVCCLIBS="$NVCCLIBS_save"
+  else
+    AC_SUBST([GPU_SHAREDMEM], [$with_shared_mem])
+  fi
   AC_MSG_CHECKING([for Pascal-style CUDA managed memory])
   cm_invalid=$( echo $GPU_ARCHS | ${SED} -e 's/\b[[1-5]][[0-9]]\b/PRE/g;' )
   if ! echo $cm_invalid | ${GREP} -q PRE; then
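For reference, the shared-memory probe that the new AC_RUN_IFELSE block embeds above is easier to read as a standalone program. The following is a minimal sketch, not a file from this diff: the name smem_probe.cu and the build line are illustrative, and it reproduces only the cudaDeviceGetAttribute logic shown in the conftest.

// smem_probe.cu -- mirrors the configure-time shared-memory probe.
// Build (illustrative): nvcc -o smem_probe smem_probe.cu
#include <cuda_runtime.h>
#include <iostream>
#include <set>

int main() {
    int deviceCount = 0;
    if( cudaGetDeviceCount(&deviceCount) != cudaSuccess || deviceCount == 0 ) {
        std::cerr << "no CUDA devices found" << std::endl;
        return 1;
    }
    // Collect the distinct per-block shared-memory limits across all devices.
    std::set<int> smem;
    for(int dev=0; dev<deviceCount; dev++) {
        int smemSize = 0;
        cudaDeviceGetAttribute(&smemSize, cudaDevAttrMaxSharedMemoryPerBlock, dev);
        smem.insert(smemSize);
    }
    // Like the conftest, report the smallest value so GPU_SHAREDMEM stays safe
    // for the most constrained device in the system.
    std::cout << *smem.begin() << " B" << std::endl;
    return 0;
}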
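The Pascal managed-memory check above only decides a configure-time flag; this excerpt does not show how the flag is consumed. As a hedged illustration, a substitution like GPU_PASCAL_MANAGEDMEM would typically gate allocation along these lines. The macro name BF_GPU_PASCAL_MANAGEDMEM and the helper function are assumptions made for this sketch, not Bifrost's actual API.

// Hypothetical use of a configure flag such as GPU_PASCAL_MANAGEDMEM.
#include <cuda_runtime.h>

#ifndef BF_GPU_PASCAL_MANAGEDMEM          // assumed macro name, set by configure
#define BF_GPU_PASCAL_MANAGEDMEM 0
#endif

// Prefer managed (unified) memory when every targeted GPU is Pascal (sm_60)
// or newer; otherwise fall back to an explicit device allocation.
cudaError_t alloc_maybe_managed(void** ptr, size_t nbytes) {
#if BF_GPU_PASCAL_MANAGEDMEM
    return cudaMallocManaged(ptr, nbytes);
#else
    return cudaMalloc(ptr, nbytes);
#endif
}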
