diff --git a/testing/adios2/engine/staging-common/CMakeLists.txt b/testing/adios2/engine/staging-common/CMakeLists.txt
index dec7d37ff1..07bbb71f4d 100644
--- a/testing/adios2/engine/staging-common/CMakeLists.txt
+++ b/testing/adios2/engine/staging-common/CMakeLists.txt
@@ -36,6 +36,8 @@ set (PROGS TestCommonWrite
   TestLocalRead
   TestStructWrite
   TestStructRead
+  TestWriteJoined
+  TestReadJoined
   TestOnDemandWrite
   TestOnDemandRead)
 
@@ -114,7 +116,7 @@ if(ADIOS2_HAVE_MPI AND MPIEXEC_EXECUTABLE)
   endforeach()
 endif()
 
-set (SIMPLE_TESTS "1x1;1x1.Attrs;1x1.ModAttrs;1x1DefSync;1x1VarDestruction;1x1.SpanMinMax;1x1.Local2;1x1Struct")
+set (SIMPLE_TESTS "1x1;1x1.Attrs;1x1.ModAttrs;1x1DefSync;1x1VarDestruction;1x1.SpanMinMax;1x1.Local2;1x1Struct;1x1Joined")
 
 set (BP5File_ONLY_TESTS "1x1DataWrite")
 set (SIMPLE_FORTRAN_TESTS "")
@@ -131,7 +133,7 @@ endif()
 set (SIMPLE_MPI_TESTS "")
 set (SIMPLE_MPI_FORTRAN_TESTS "")
 if (ADIOS2_HAVE_MPI)
-  set (SIMPLE_MPI_TESTS "2x1;1x2;3x5;5x3;DelayedReader_3x5;3x5LockGeometry;2x1.Local;1x2.Local;3x5.Local;5x3.Local;1x1.LocalVarying;5x3.LocalVarying;2x1ZeroDataVar;2x1ZeroDataR64;2x2.NoData;2x2.HalfNoData;2x1.SharedNothing;2x1.SharedIO;2x1.SharedVar;2x1.SharedNothingSync;2x1.SharedIOSync;2x1.SharedVarSync;3x5EarlyExit")
+  set (SIMPLE_MPI_TESTS "2x1;1x2;3x5;5x3;DelayedReader_3x5;3x5LockGeometry;2x1.Local;1x2.Local;3x5.Local;5x3.Local;1x1.LocalVarying;5x3.LocalVarying;2x1ZeroDataVar;2x1ZeroDataR64;2x2.NoData;2x2.HalfNoData;2x1.SharedNothing;2x1.SharedIO;2x1.SharedVar;2x1.SharedNothingSync;2x1.SharedIOSync;2x1.SharedVarSync;3x5EarlyExit;5x1Joined")
   list (APPEND SPECIAL_TESTS "2x1.NoPreload;2x3.ForcePreload;PreciousTimestep.3x2;PreciousTimestepDiscard.3x2")
   if (ADIOS2_HAVE_Fortran)
     set (SIMPLE_MPI_FORTRAN_TESTS "FtoC.3x5;CtoF.3x5;FtoF.3x5")
@@ -170,6 +172,9 @@ MutateTestSet( COMM_MIN_SST_TESTS "CommMin" writer "CPCommPattern=Min" "${BASIC_
 # temporarily remove PreciousTimestep CommPeer tests
 list (REMOVE_ITEM COMM_PEER_SST_TESTS "PreciousTimestep")
 
+# no SST engine does Joined
+list (FILTER COMM_MIN_SST_TESTS EXCLUDE REGEX "Joined.*")
+
 MutateTestSet( FFS_SST_TESTS "FFS" writer "MarshalMethod=FFS" "${COMM_MIN_SST_TESTS};${COMM_PEER_SST_TESTS}" )
 MutateTestSet( FFS_SST_TESTS "BP5" writer "MarshalMethod=BP5" "${COMM_MIN_SST_TESTS};${COMM_PEER_SST_TESTS}" )
 MutateTestSet( BP_SST_TESTS "BP" writer "MarshalMethod=BP" "${COMM_MIN_SST_TESTS};${COMM_PEER_SST_TESTS}" )
@@ -220,6 +225,8 @@ if(NOT WIN32) # not on windows
   endforeach()
   # BP3 fails to handle lots of timesteps
   list (FILTER BP_TESTS EXCLUDE REGEX "Bulk.*")
+  # BP3 doesn't do Joined
+  list (FILTER BP_TESTS EXCLUDE REGEX "Joined.*")
   foreach(test ${BP_TESTS})
     add_common_test(${test} BP3)
   endforeach()
@@ -228,6 +235,8 @@ endif()
 # BP5 tests
 if(ADIOS2_HAVE_BP5)
   set (BP5_TESTS ${ALL_SIMPLE_TESTS} ${SPECIAL_TESTS})
+  # BP5 doesn't do Joined
+  list (FILTER BP5_TESTS EXCLUDE REGEX "Joined.*")
   # Delayed reader not worth testing on file engines
   list (FILTER BP5_TESTS EXCLUDE REGEX "DelayedReader")
   # Discard not a feature of BP5
@@ -253,6 +262,8 @@ if(NOT MSVC) # not on windows
   # BP4 streaming tests start with all the simple tests, but with a timeout added on open
   LIST (APPEND BP4_STREAM_TESTS ${ALL_SIMPLE_TESTS} ${SPECIAL_TESTS})
   MutateTestSet( BP4_STREAM_TESTS "BPS" reader "OpenTimeoutSecs=10,BeginStepPollingFrequencySecs=0.1" "${BP4_STREAM_TESTS}")
+  # BP4 streaming doesn't do Joined
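+  # (list(FILTER ... EXCLUDE REGEX) removes every test whose name matches "Joined")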
+  list (FILTER BP4_STREAM_TESTS EXCLUDE REGEX "Joined.*")
   # SharedVars fail with BP4_streaming
   list (FILTER BP4_STREAM_TESTS EXCLUDE REGEX ".*SharedVar.BPS$")
   # Discard not a feature of BP4
@@ -325,6 +336,8 @@ if(NOT MSVC) # not on windows
   list (FILTER FILESTREAM_TESTS EXCLUDE REGEX ".*VarDestruction.*")
   # BP3 and BP4 don't do structs
   list (FILTER FILESTREAM_TESTS EXCLUDE REGEX ".*1x1Struct.*")
+  # FileStream doesn't do Joined
+  list (FILTER FILESTREAM_TESTS EXCLUDE REGEX "Joined.*")
 
   foreach(test ${FILESTREAM_TESTS})
     add_common_test(${test} FileStream)
@@ -358,6 +371,9 @@ if(NOT MSVC AND ADIOS2_HAVE_HDF5 AND (NOT ADIOS2_HAVE_MPI OR HDF5_IS_PARALLEL))
   list (FILTER HDF5_TESTS EXCLUDE REGEX ".*ModAttr.*")
   # HDF5 engine doesn't do structs (but HDF5 does)
   list (FILTER HDF5_TESTS EXCLUDE REGEX ".*1x1Struct.*")
+  # HDF5 doesn't do Joined
+  list (FILTER HDF5_TESTS EXCLUDE REGEX "Joined.*")
+
   foreach(test ${HDF5_TESTS})
     add_common_test(${test} HDF5)
   endforeach()
diff --git a/testing/adios2/engine/staging-common/TestReadJoined.cpp b/testing/adios2/engine/staging-common/TestReadJoined.cpp
new file mode 100644
index 0000000000..91fc522a78
--- /dev/null
+++ b/testing/adios2/engine/staging-common/TestReadJoined.cpp
@@ -0,0 +1,158 @@
+/*
+ * Distributed under the OSI-approved Apache License, Version 2.0. See
+ * accompanying file Copyright.txt for details.
+ */
+#include <cstdint>
+#include <cstring>
+
+#include <chrono>
+#include <iostream>
+#include <stdexcept>
+#include <thread>
+
+#include <adios2.h>
+
+#include <gtest/gtest.h>
+
+#include "TestData.h"
+
+#include "ParseArgs.h"
+
+class CommonReadTest : public ::testing::Test
+{
+public:
+    CommonReadTest() = default;
+};
+
+typedef std::chrono::duration<double> Seconds;
+
+#if ADIOS2_USE_MPI
+MPI_Comm testComm;
+#endif
+
+const int nsteps = 3;
+const size_t Ncols = 4;
+const std::vector<int> nblocksPerProcess = {2, 3, 2, 1, 3, 2};
+int nMyTotalRows[nsteps];
+int nTotalRows[nsteps];
+
+// ADIOS2 Common read
+TEST_F(CommonReadTest, ADIOS2CommonRead1D8)
+{
+    // Each process would write a 1x8 array and all processes would
+    // form a mpiSize * Nx 1D array
+    int mpiRank = 0, mpiSize = 1;
+
+#if ADIOS2_USE_MPI
+    MPI_Comm_rank(testComm, &mpiRank);
+    MPI_Comm_size(testComm, &mpiSize);
+#endif
+
+    // Write test data using ADIOS2
+
+#if ADIOS2_USE_MPI
+    adios2::ADIOS adios(testComm);
+#else
+    adios2::ADIOS adios;
+#endif
+    adios2::IO inIO = adios.DeclareIO("Input");
+
+    inIO.SetEngine(engine);
+    inIO.SetParameters(engineParams);
+
+    adios2::Engine reader = inIO.Open(fname, adios2::Mode::Read);
+
+    if (!mpiRank)
+    {
+        std::cout << "Reading as stream with BeginStep/EndStep on " << mpiSize
+                  << " processes:" << std::endl;
+    }
+
+    int step = 0;
+    while (true)
+    {
+        adios2::StepStatus status = reader.BeginStep(adios2::StepMode::Read);
+
+        if (status != adios2::StepStatus::OK)
+        {
+            break;
+        }
+
+        auto rows_var = inIO.InquireVariable<int>("totalrows");
+        auto var = inIO.InquireVariable<double>("table");
+        EXPECT_TRUE(var);
+
+        if (!mpiRank)
+        {
+            std::cout << "Step " << step << " table shape (" << var.Shape()[0]
+                      << ", " << var.Shape()[1] << ")" << std::endl;
+        }
+
+        int Nrows;
+        reader.Get(rows_var, Nrows, adios2::Mode::Sync); // Sync: Nrows is used immediately
+        EXPECT_EQ(var.Shape()[0], (size_t)Nrows);
+        EXPECT_EQ(var.Shape()[1], Ncols);
+
+        var.SetSelection({{0, 0}, {(size_t)Nrows, Ncols}});
+
+        // Check data on rank 0
+        if (!mpiRank)
+        {
+            std::vector<double> data(Nrows * Ncols);
+            reader.Get(var, data.data());
+            reader.PerformGets();
+            for (size_t i = 0; i < (size_t)Nrows; ++i)
+            {
+                for (size_t j = 0; j < Ncols; ++j)
+                {
+                    EXPECT_GE(data[i * Ncols + j], (step + 1) * 1.0);
+                    EXPECT_LT(data[i * Ncols + j], (nsteps + 1) * 1.0 + 0.9999);
+                }
+            }
+        }
+
+        reader.EndStep();
+        ++step;
+    }
+    reader.Close();
+    EXPECT_EQ(step, nsteps);
+}
+
+//******************************************************************************
+// main
+//******************************************************************************
+
+int main(int argc, char **argv)
+{
+    int result;
+    ::testing::InitGoogleTest(&argc, argv);
+    ParseArgs(argc, argv);
+
+#if ADIOS2_USE_MPI
+    int provided;
+    int thread_support_level = (engine == "SST" || engine == "sst")
+                                   ? MPI_THREAD_MULTIPLE
+                                   : MPI_THREAD_SINGLE;
+
+    // MPI_THREAD_MULTIPLE is only required if you enable the SST MPI_DP
+    MPI_Init_thread(nullptr, nullptr, thread_support_level, &provided);
+
+    int key;
+    MPI_Comm_rank(MPI_COMM_WORLD, &key);
+
+    const unsigned int color = 2;
+    MPI_Comm_split(MPI_COMM_WORLD, color, key, &testComm);
+#endif
+
+    result = RUN_ALL_TESTS();
+
+#if ADIOS2_USE_MPI
+#ifdef CRAY_MPICH_VERSION
+    MPI_Barrier(MPI_COMM_WORLD);
+#else
+    MPI_Finalize();
+#endif
+#endif
+
+    return result;
+}
diff --git a/testing/adios2/engine/staging-common/TestSupp.cmake b/testing/adios2/engine/staging-common/TestSupp.cmake
index efadc464d0..4eeaeaf4b5 100644
--- a/testing/adios2/engine/staging-common/TestSupp.cmake
+++ b/testing/adios2/engine/staging-common/TestSupp.cmake
@@ -64,6 +64,7 @@ set (STAGING_COMMON_TEST_SUPP_VERBOSE OFF)
 
 set (1x1_CMD "run_test.py.$<CONFIG> -nw 1 -nr 1")
 set (1x1Struct_CMD "run_test.py.$<CONFIG> -nw 1 -nr 1 -r $<TARGET_FILE_NAME:TestStructRead> -w $<TARGET_FILE_NAME:TestStructWrite>")
+set (1x1Joined_CMD "run_test.py.$<CONFIG> -nw 1 -nr 1 -r $<TARGET_FILE_NAME:TestReadJoined> -w $<TARGET_FILE_NAME:TestWriteJoined>")
 set (1x1GetSync_CMD "run_test.py.$<CONFIG> -nw 1 -nr 1 --rarg=--read_mode --rarg=sync")
 set (1x1DontCloseWriter_CMD "run_test.py.$<CONFIG> -nw 1 -nr 1 --warg=--dont_close")
 set (1x1DontCloseReader_CMD "run_test.py.$<CONFIG> -nw 1 -nr 1 --rarg=--dont_close")
@@ -90,6 +91,7 @@ set (3x5LockGeometry_CMD "run_test.py.$<CONFIG> -nw 3 -nr 5 --warg=--num_steps -
 set (1x1EarlyExit_CMD "run_test.py.$<CONFIG> -nw 1 -nr 1 --warg=--num_steps --warg=50 --rarg=--num_steps --rarg=5 --rarg=--early_exit")
 set (3x5EarlyExit_CMD "run_test.py.$<CONFIG> -nw 3 -nr 5 --warg=--num_steps --warg=50 --rarg=--num_steps --rarg=5 --rarg=--early_exit")
 set (3x5LockGeometry_TIMEOUT 60)
+set (5x1Joined_CMD "run_test.py.$<CONFIG> -nw 5 -nr 1 -r $<TARGET_FILE_NAME:TestReadJoined> -w $<TARGET_FILE_NAME:TestWriteJoined>")
 set (5x3_CMD "run_test.py.$<CONFIG> -nw 5 -nr 3")
 set (5x3DontClose_CMD "run_test.py.$<CONFIG> -nw 5 -nr 3 --rarg=--dont_close --warg=--dont_close")
 set (1x1.Local_CMD "run_test.py.$<CONFIG> -nw 1 -nr 1 -w $<TARGET_FILE_NAME:TestLocalWrite> -r $<TARGET_FILE_NAME:TestLocalRead>")
diff --git a/testing/adios2/engine/staging-common/TestWriteJoined.cpp b/testing/adios2/engine/staging-common/TestWriteJoined.cpp
new file mode 100644
index 0000000000..1799f7958b
--- /dev/null
+++ b/testing/adios2/engine/staging-common/TestWriteJoined.cpp
@@ -0,0 +1,178 @@
+/*
+ * Distributed under the OSI-approved Apache License, Version 2.0. See
+ * accompanying file Copyright.txt for details.
+ */
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+
+#include <chrono>
+#include <iostream>
+#include <stdexcept>
+#include <thread>
+
+#include <adios2.h>
+
+#include <gtest/gtest.h>
+
+#include "TestData.h"
+
+#include "ParseArgs.h"
+
+class CommonWriteTest : public ::testing::Test
+{
+public:
+    CommonWriteTest() = default;
+};
+
+#if ADIOS2_USE_MPI
+MPI_Comm testComm;
+#endif
+
+const int nsteps = 3;
+const size_t Ncols = 4;
+const std::vector<int> nblocksPerProcess = {2, 3, 2, 1, 3, 2};
+int nMyTotalRows[nsteps];
+int nTotalRows[nsteps];
+
+// ADIOS2 COMMON write
+TEST_F(CommonWriteTest, ADIOS2CommonWrite)
+{
+    // form a mpiSize * Nx 1D array
+    int mpiRank = 0, mpiSize = 1;
+
+#if ADIOS2_USE_MPI
+    MPI_Comm_rank(testComm, &mpiRank);
+    MPI_Comm_size(testComm, &mpiSize);
+    const int nblocks = (mpiRank < static_cast<int>(nblocksPerProcess.size())
+                             ? nblocksPerProcess[mpiRank]
+                             : 1);
+#else
+    const int nblocks = nblocksPerProcess[0];
+#endif
+
+    // Write test data using ADIOS2
+
+#if ADIOS2_USE_MPI
+    adios2::ADIOS adios(testComm);
+#else
+    adios2::ADIOS adios;
+#endif
+    adios2::IO outIO = adios.DeclareIO("Output");
+
+    EXPECT_LE(static_cast<size_t>(mpiSize), nblocksPerProcess.size());
+    outIO.SetEngine(engine);
+    outIO.SetParameters(engineParams);
+
+    adios2::Engine writer = outIO.Open(fname, adios2::Mode::Write);
+    auto bpp_var = outIO.DefineVariable<int>("blocksperprocess",
+                                             {nblocksPerProcess.size()}, {0},
+                                             {nblocksPerProcess.size()});
+
+    auto rows_var = outIO.DefineVariable<int>("totalrows");
+
+    // Joined first dimension: each block supplies only its own row count and
+    // the engine concatenates the blocks, so no start offsets are given
+    auto var = outIO.DefineVariable<double>("table", {adios2::JoinedDim, Ncols},
+                                            {}, {1, Ncols});
+
+    if (!mpiRank)
+    {
+        std::cout << "Writing to " << fname << std::endl;
+    }
+
+    for (int step = 0; step < nsteps; step++)
+    {
+        // Application variables for output random size per process, 5..10
+        // each
+        std::vector<size_t> Nrows;
+        nMyTotalRows[step] = 0;
+        for (int i = 0; i < nblocks; ++i)
+        {
+            int n = rand() % 6 + 5;
+            Nrows.push_back(static_cast<size_t>(n));
+            nMyTotalRows[step] += n;
+        }
+
+        nTotalRows[step] = nMyTotalRows[step];
+#if ADIOS2_USE_MPI
+        MPI_Allreduce(&(nMyTotalRows[step]), &(nTotalRows[step]), 1, MPI_INT,
+                      MPI_SUM, MPI_COMM_WORLD);
+#endif
+
+        if (!mpiRank)
+        {
+            std::cout << "Writing " << nTotalRows[step] << " rows in step "
+                      << step << std::endl;
+        }
+
+        writer.BeginStep();
+        if ((step == 0) && (mpiRank == 0))
+        {
+            writer.Put(bpp_var, nblocksPerProcess.data());
+        }
+        if (mpiRank == 0)
+        {
+            writer.Put(rows_var, nTotalRows[step]);
+        }
+        for (int block = 0; block < nblocks; ++block)
+        {
+            std::vector<double> mytable(Nrows[block] * Ncols);
+            for (size_t row = 0; row < Nrows[block]; row++)
+            {
+                for (size_t col = 0; col < Ncols; col++)
+                {
+                    mytable[row * Ncols + col] = static_cast<double>(
+                        (step + 1) * 1.0 + mpiRank * 0.1 + block * 0.01 +
+                        row * 0.001 + col * 0.0001);
+                }
+            }
+
+            var.SetSelection({{}, {Nrows[block], Ncols}});
+
+            std::cout << "Step " << step << " rank " << mpiRank << " block "
+                      << block << " count (" << var.Count()[0] << ", "
+                      << var.Count()[1] << ")" << std::endl;
+
+            writer.Put(var, mytable.data(), adios2::Mode::Sync);
+        }
+        writer.EndStep();
+    }
+    writer.Close();
+}
+
+int main(int argc, char **argv)
+{
+    int result;
+    ::testing::InitGoogleTest(&argc, argv);
+
+    DelayMS = 0; // zero for common writer
+
+    ParseArgs(argc, argv);
+
+#if ADIOS2_USE_MPI
+    int provided;
+    int thread_support_level = (engine == "SST" || engine == "sst")
+                                   ? MPI_THREAD_MULTIPLE
+                                   : MPI_THREAD_SINGLE;
+
+    // MPI_THREAD_MULTIPLE is only required if you enable the SST MPI_DP
+    MPI_Init_thread(nullptr, nullptr, thread_support_level, &provided);
+
+    int key;
+    MPI_Comm_rank(MPI_COMM_WORLD, &key);
+
+    const unsigned int color = 1;
+    MPI_Comm_split(MPI_COMM_WORLD, color, key, &testComm);
+#endif
+
+    result = RUN_ALL_TESTS();
+
+#if ADIOS2_USE_MPI
+#ifdef CRAY_MPICH_VERSION
+    MPI_Barrier(MPI_COMM_WORLD);
+#else
+    MPI_Finalize();
+#endif
+#endif
+
+    return result;
+}