Drop MPI-2 support (HDFGroup#3643)
brtnfld committed Oct 30, 2023
1 parent f232722 commit 64b32f4
Showing 9 changed files with 976 additions and 201 deletions.
6 changes: 6 additions & 0 deletions CMakeLists.txt
@@ -669,6 +669,12 @@ if (HDF5_ENABLE_PARALLEL)
find_package(MPI REQUIRED)
if (MPI_C_FOUND)
set (H5_HAVE_PARALLEL 1)

# Require MPI standard 3.0 or greater
if (MPI_VERSION LESS 3)
message (FATAL_ERROR "HDF5 requires MPI standard 3.0 or greater")
endif ()

# MPI checks, only do these if MPI_C_FOUND is true, otherwise they always fail
# and once set, they are cached as false and not regenerated
set (CMAKE_REQUIRED_LIBRARIES "${MPI_C_LIBRARIES}")
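For reference, the MPI_VERSION variable tested above is derived by CMake's FindMPI module from the MPI_VERSION macro in mpi.h; the linked library also reports its standard level at run time via MPI_Get_version. A minimal standalone sketch of the same check, using only standard MPI calls (not part of this commit):

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int version = 0, subversion = 0;

    MPI_Init(&argc, &argv);

    /* Query the standard level implemented by the linked MPI library. */
    MPI_Get_version(&version, &subversion);

    if (version < 3)
        fprintf(stderr, "found MPI %d.%d; HDF5 requires MPI 3.0 or greater\n",
                version, subversion);

    MPI_Finalize();

    return (version < 3) ? 1 : 0;
}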
14 changes: 14 additions & 0 deletions configure.ac
@@ -2651,6 +2651,20 @@ if test "X${ALLOW_UNSUPPORTED}" != "Xyes"; then
fi
fi

# Require MPI standard 3.0 or greater
if test "X${enable_parallel}" = "Xyes"; then
AC_MSG_CHECKING([whether MPI meets the minimum 3.0 standard])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#include <mpi.h>
#if MPI_VERSION < 3
#error, found MPI_VERSION < 3
#endif]])],
[AC_MSG_RESULT([yes])],
[AC_MSG_RESULT([no])
AC_MSG_ERROR([HDF5 requires MPI standard 3.0 or greater])]
)
fi

AC_MSG_CHECKING([for parallel support files])
case "X-$enable_parallel" in
X-|X-no|X-none)
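The AC_COMPILE_IFELSE test above rejects pre-3.0 implementations at configure time by compiling a tiny program whose preprocessor guard trips on the MPI_VERSION macro from mpi.h. Expanded into a self-contained file, the generated test program is roughly:

#include <mpi.h>

/* mpi.h is required by the MPI standard to define MPI_VERSION and
 * MPI_SUBVERSION as integer constants for the supported standard level. */
#if MPI_VERSION < 3
#error "found MPI_VERSION < 3"
#endif

int main(void)
{
    return 0;
}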
7 changes: 7 additions & 0 deletions release_docs/RELEASE.txt
@@ -175,6 +175,13 @@ Bug Fixes since HDF5-1.13.3 release

Validate location (offset) of the accumulated metadata when comparing.

- Dropped support for MPI-2

The MPI-2 supporting artifacts have been removed, as MPI-2 builds
have not been maintained or tested since HDF5 1.12.

- Fixed a bug with the way the Subfiling VFD assigns I/O concentrators

Initially, the accumulated metadata location is initialized to HADDR_UNDEF
- the highest available address. Bogus input files may provide a location
or size matching this value. Comparing this address against such bogus
52 changes: 18 additions & 34 deletions src/H5Dmpio.c
@@ -3715,24 +3715,23 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk
H5D_filtered_collective_io_info_t **chunk_hash_table,
unsigned char ***chunk_msg_bufs, int *chunk_msg_bufs_len)
{
#if H5_CHECK_MPI_VERSION(3, 0)
H5D_filtered_collective_io_info_t *chunk_table = NULL;
H5S_sel_iter_t *mem_iter = NULL;
unsigned char **msg_send_bufs = NULL;
unsigned char **msg_recv_bufs = NULL;
MPI_Request *send_requests = NULL;
MPI_Request *recv_requests = NULL;
MPI_Request ibarrier = MPI_REQUEST_NULL;
hbool_t mem_iter_init = FALSE;
hbool_t ibarrier_posted = FALSE;
size_t send_bufs_nalloc = 0;
size_t num_send_requests = 0;
size_t num_recv_requests = 0;
size_t num_msgs_incoming = 0;
size_t last_assigned_idx;
size_t i;
int mpi_code;
herr_t ret_value = SUCCEED;
H5D_filtered_collective_chunk_info_t *chunk_table = NULL;
H5S_sel_iter_t *mem_iter = NULL;
unsigned char **msg_send_bufs = NULL;
unsigned char **msg_recv_bufs = NULL;
MPI_Request *send_requests = NULL;
MPI_Request *recv_requests = NULL;
MPI_Request ibarrier = MPI_REQUEST_NULL;
bool mem_iter_init = false;
bool ibarrier_posted = false;
size_t send_bufs_nalloc = 0;
size_t num_send_requests = 0;
size_t num_recv_requests = 0;
size_t num_msgs_incoming = 0;
size_t last_assigned_idx;
size_t i;
int mpi_code;
herr_t ret_value = SUCCEED;

FUNC_ENTER_PACKAGE

@@ -3937,20 +3936,12 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk
* post a non-blocking receive to receive it
*/
if (msg_flag) {
#if H5_CHECK_MPI_VERSION(3, 0)
MPI_Count msg_size = 0;

if (MPI_SUCCESS != (mpi_code = MPI_Get_elements_x(&status, MPI_BYTE, &msg_size)))
HMPI_GOTO_ERROR(FAIL, "MPI_Get_elements_x failed", mpi_code)

H5_CHECK_OVERFLOW(msg_size, MPI_Count, int)
#else
int msg_size = 0;

if (MPI_SUCCESS != (mpi_code = MPI_Get_elements(&status, MPI_BYTE, &msg_size)))
HMPI_GOTO_ERROR(FAIL, "MPI_Get_elements failed", mpi_code)
#endif

H5_CHECK_OVERFLOW(msg_size, MPI_Count, int);
if (msg_size <= 0)
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "invalid chunk modification message size")
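
MPI_Get_elements_x, used above, is the MPI-3 large-count variant of MPI_Get_elements: it reports the element count through an MPI_Count rather than an int, which is why the overflow-prone fallback branch could be deleted outright. A small sketch of the pattern in isolation (probe_message_size is a hypothetical helper, not HDF5 code):

#include <mpi.h>

/* Block until a message matching (source, tag) arrives, then report its
 * size in bytes through the 64-bit-capable MPI_Count out-parameter. */
static int probe_message_size(int source, int tag, MPI_Comm comm, MPI_Count *size_out)
{
    MPI_Status status;

    if (MPI_SUCCESS != MPI_Probe(source, tag, comm, &status))
        return -1;

    /* MPI-3 replacement for MPI_Get_elements, whose int out-parameter
     * cannot represent messages larger than INT_MAX bytes. */
    if (MPI_SUCCESS != MPI_Get_elements_x(&status, MPI_BYTE, size_out))
        return -1;

    return 0;
}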

@@ -4101,13 +4092,6 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk
#endif

FUNC_LEAVE_NOAPI(ret_value)
#else
FUNC_ENTER_PACKAGE
HERROR(
H5E_DATASET, H5E_WRITEERROR,
"unable to send chunk modification data between MPI ranks - MPI version < 3 (MPI_Ibarrier missing)")
FUNC_LEAVE_NOAPI(FAIL)
#endif
} /* end H5D__mpio_share_chunk_modification_data() */

/*-------------------------------------------------------------------------
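
H5D__mpio_share_chunk_modification_data() depends on MPI_Ibarrier, introduced in MPI-3, for a non-blocking-consensus style exchange: each rank posts non-blocking sends, services whatever messages arrive, and posts a non-blocking barrier once its own sends complete; when that barrier completes on every rank, no message can still be in flight. A condensed sketch of the loop with illustrative names, assuming send_requests came from synchronous sends (MPI_Issend) so that send completion implies the receiver has matched each message:

#include <mpi.h>
#include <stdbool.h>

static void nbx_drain(MPI_Comm comm, MPI_Request *send_requests, int num_sends)
{
    MPI_Request ibarrier        = MPI_REQUEST_NULL;
    bool        ibarrier_posted = false;
    int         barrier_done    = 0;

    while (!barrier_done) {
        MPI_Status status;
        int        msg_flag = 0;

        /* Service any message that has arrived from any rank. */
        MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, comm, &msg_flag, &status);
        if (msg_flag) {
            /* ... size a buffer with MPI_Get_elements_x and post a
             *     matching receive (omitted) ... */
        }

        if (ibarrier_posted) {
            /* This rank has posted the barrier; MPI_Test succeeds only
             * after every rank has posted it, at which point all sends
             * have been matched and the loop may end. */
            MPI_Test(&ibarrier, &barrier_done, MPI_STATUS_IGNORE);
        }
        else {
            int sends_done = 0;

            MPI_Testall(num_sends, send_requests, &sends_done, MPI_STATUSES_IGNORE);
            if (sends_done) {
                /* MPI_Ibarrier is the MPI-3 call whose absence motivated
                 * the pre-MPI-3 error path removed by this commit. */
                MPI_Ibarrier(comm, &ibarrier);
                ibarrier_posted = true;
            }
        }
    }
}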