Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix/actionx compdat #5488

Merged
merged 3 commits into from
Jul 30, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 21 additions & 3 deletions compareECLFiles.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -95,13 +95,29 @@ function(add_test_compareECLFiles)
TESTNAME ${PARAM_CASENAME})
endfunction()

###########################################################################
# TEST: compareSeparateECLFiles
###########################################################################

# Input:
# - casename: basename (no extension)
# - filename1 (no extension)
# - filename2 (no extension)
#
# Details:
#   - This test class compares the result files produced by two separate simulation runs
function(add_test_compareSeparateECLFiles)
set(oneValueArgs CASENAME FILENAME1 FILENAME2 DIR1 DIR2 SIMULATOR ABS_TOL REL_TOL IGNORE_EXTRA_KW DIR_PREFIX)
set(oneValueArgs CASENAME FILENAME1 FILENAME2 DIR1 DIR2 SIMULATOR ABS_TOL REL_TOL IGNORE_EXTRA_KW DIR_PREFIX MPI_PROCS)
set(multiValueArgs TEST_ARGS)
cmake_parse_arguments(PARAM "$" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} )
if(NOT PARAM_PREFIX)
set(PARAM_PREFIX compareSeparateECLFiles)
endif()
if(PARAM_MPI_PROCS)
set(MPI_PROCS ${PARAM_MPI_PROCS})
else()
set(MPI_PROCS 1)
endif()
set(RESULT_PATH ${BASE_RESULT_PATH}${PARAM_DIR_PREFIX}/${PARAM_SIMULATOR}+${PARAM_CASENAME})
set(TEST_ARGS ${PARAM_TEST_ARGS})
set(DRIVER_ARGS -i ${OPM_TESTS_ROOT}/${PARAM_DIR1}
Expand All @@ -112,7 +128,8 @@ function(add_test_compareSeparateECLFiles)
-b ${PROJECT_BINARY_DIR}/bin
-a ${PARAM_ABS_TOL}
-t ${PARAM_REL_TOL}
-c ${COMPARE_ECL_COMMAND})
-c ${COMPARE_ECL_COMMAND}
-n ${MPI_PROCS})
if(PARAM_IGNORE_EXTRA_KW)
list(APPEND DRIVER_ARGS -y ${PARAM_IGNORE_EXTRA_KW})
endif()
Expand All @@ -124,7 +141,8 @@ function(add_test_compareSeparateECLFiles)
DIRNAME ${PARAM_DIR}
FILENAME ${PARAM_FILENAME}
SIMULATOR ${PARAM_SIMULATOR}
TESTNAME ${PARAM_CASENAME})
TESTNAME ${PARAM_CASENAME}
PROCESSORS ${MPI_PROCS})
endfunction()

###########################################################################
Expand Down
58 changes: 31 additions & 27 deletions opm/simulators/flow/GenericCpGridVanguard.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -196,13 +196,14 @@ doLoadBalance_(const Dune::EdgeWeightMethod edgeWeightsMethod,
const auto wells = ((mpiSize > 1) || partitionJacobiBlocks)
? schedule.getWellsatEnd()
: std::vector<Well>{};

const auto& possibleFutureConnections = schedule.getPossibleFutureConnections();
// Distribute the grid and switch to the distributed view.
if (mpiSize > 1) {
this->distributeGrid(edgeWeightsMethod, ownersFirst, partitionMethod,
serialPartitioning, enableDistributedWells,
imbalanceTol, loadBalancerSet != 0,
faceTrans, wells,
possibleFutureConnections,
eclState1, parallelWells);
}

Expand All @@ -215,7 +216,7 @@ doLoadBalance_(const Dune::EdgeWeightMethod edgeWeightsMethod,
#if HAVE_OPENCL || HAVE_ROCSPARSE || HAVE_CUDA
if (partitionJacobiBlocks) {
this->cell_part_ = this->grid_->
zoltanPartitionWithoutScatter(&wells, faceTrans.data(),
zoltanPartitionWithoutScatter(&wells, &possibleFutureConnections, faceTrans.data(),
numJacobiBlocks,
imbalanceTol);
}
Expand Down Expand Up @@ -282,25 +283,26 @@ extractFaceTrans(const GridView& gridView) const
template <class ElementMapper, class GridView, class Scalar>
void
GenericCpGridVanguard<ElementMapper, GridView, Scalar>::
distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
const bool ownersFirst,
const Dune::PartitionMethod partitionMethod,
const bool serialPartitioning,
const bool enableDistributedWells,
const double imbalanceTol,
const bool loadBalancerSet,
const std::vector<double>& faceTrans,
const std::vector<Well>& wells,
EclipseState& eclState1,
FlowGenericVanguard::ParallelWellStruct& parallelWells)
distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
const bool ownersFirst,
const Dune::PartitionMethod partitionMethod,
const bool serialPartitioning,
const bool enableDistributedWells,
const double imbalanceTol,
const bool loadBalancerSet,
const std::vector<double>& faceTrans,
const std::vector<Well>& wells,
const std::unordered_map<std::string, std::set<std::array<int,3>>>& possibleFutureConnections,
EclipseState& eclState1,
FlowGenericVanguard::ParallelWellStruct& parallelWells)
{
if (auto* eclState = dynamic_cast<ParallelEclipseState*>(&eclState1);
eclState != nullptr)
{
this->distributeGrid(edgeWeightsMethod, ownersFirst, partitionMethod,
serialPartitioning, enableDistributedWells,
imbalanceTol, loadBalancerSet, faceTrans,
wells, eclState, parallelWells);
wells, possibleFutureConnections, eclState, parallelWells);
}
else {
const auto message = std::string {
Expand All @@ -319,17 +321,18 @@ distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
template <class ElementMapper, class GridView, class Scalar>
void
GenericCpGridVanguard<ElementMapper, GridView, Scalar>::
distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
const bool ownersFirst,
const Dune::PartitionMethod partitionMethod,
const bool serialPartitioning,
const bool enableDistributedWells,
const double imbalanceTol,
const bool loadBalancerSet,
const std::vector<double>& faceTrans,
const std::vector<Well>& wells,
ParallelEclipseState* eclState,
FlowGenericVanguard::ParallelWellStruct& parallelWells)
distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
const bool ownersFirst,
const Dune::PartitionMethod partitionMethod,
const bool serialPartitioning,
const bool enableDistributedWells,
const double imbalanceTol,
const bool loadBalancerSet,
const std::vector<double>& faceTrans,
const std::vector<Well>& wells,
const std::unordered_map<std::string, std::set<std::array<int,3>>>& possibleFutureConnections,
ParallelEclipseState* eclState,
FlowGenericVanguard::ParallelWellStruct& parallelWells)
{
OPM_TIMEBLOCK(gridDistribute);
const auto isIORank = this->grid_->comm().rank() == 0;
Expand All @@ -347,13 +350,14 @@ distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
: std::vector<int>{};
//For this case, simple partitioning is selected automatically
parallelWells =
std::get<1>(this->grid_->loadBalance(handle, parts, &wells, ownersFirst,
std::get<1>(this->grid_->loadBalance(handle, parts, &wells, &possibleFutureConnections, ownersFirst,
addCornerCells, overlapLayers));
}
else {
parallelWells =
std::get<1>(this->grid_->loadBalance(handle, edgeWeightsMethod,
&wells, serialPartitioning,
&wells, &possibleFutureConnections,
serialPartitioning,
faceTrans.data(), ownersFirst,
addCornerCells, overlapLayers,
partitionMethod, imbalanceTol,
Expand Down
48 changes: 25 additions & 23 deletions opm/simulators/flow/GenericCpGridVanguard.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -165,29 +165,31 @@ class GenericCpGridVanguard {
private:
std::vector<double> extractFaceTrans(const GridView& gridView) const;

void distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
const bool ownersFirst,
const Dune::PartitionMethod partitionMethod,
const bool serialPartitioning,
const bool enableDistributedWells,
const double imbalanceTol,
const bool loadBalancerSet,
const std::vector<double>& faceTrans,
const std::vector<Well>& wells,
EclipseState& eclState,
FlowGenericVanguard::ParallelWellStruct& parallelWells);

void distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
const bool ownersFirst,
const Dune::PartitionMethod partitionMethod,
const bool serialPartitioning,
const bool enableDistributedWells,
const double imbalanceTol,
const bool loadBalancerSet,
const std::vector<double>& faceTrans,
const std::vector<Well>& wells,
ParallelEclipseState* eclState,
FlowGenericVanguard::ParallelWellStruct& parallelWells);
void distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
const bool ownersFirst,
const Dune::PartitionMethod partitionMethod,
const bool serialPartitioning,
const bool enableDistributedWells,
const double imbalanceTol,
const bool loadBalancerSet,
const std::vector<double>& faceTrans,
const std::vector<Well>& wells,
const std::unordered_map<std::string, std::set<std::array<int,3>>>& possibleFutureConnections,
EclipseState& eclState,
FlowGenericVanguard::ParallelWellStruct& parallelWells);

void distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
const bool ownersFirst,
const Dune::PartitionMethod partitionMethod,
const bool serialPartitioning,
const bool enableDistributedWells,
const double imbalanceTol,
const bool loadBalancerSet,
const std::vector<double>& faceTrans,
const std::vector<Well>& wells,
const std::unordered_map<std::string, std::set<std::array<int,3>>>& possibleFutureConnections,
ParallelEclipseState* eclState,
FlowGenericVanguard::ParallelWellStruct& parallelWells);

protected:
virtual const std::string& zoltanParams() const = 0;
Expand Down
15 changes: 9 additions & 6 deletions opm/simulators/linalg/ISTLSolverBda.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -78,12 +78,14 @@ void BdaSolverInfo<Matrix,Vector>::
prepare(const Grid& grid,
const Dune::CartesianIndexMapper<Grid>& cartMapper,
const std::vector<Well>& wellsForConn,
const std::unordered_map<std::string, std::set<std::array<int,3>>>& possibleFutureConnections,
const std::vector<int>& cellPartition,
const std::size_t nonzeroes,
const bool useWellConn)
{
if (numJacobiBlocks_ > 1) {
detail::setWellConnections(grid, cartMapper, wellsForConn,
possibleFutureConnections,
useWellConn,
wellConnectionsGraph_,
numJacobiBlocks_);
Expand Down Expand Up @@ -239,12 +241,13 @@ using BM = Dune::BCRSMatrix<MatrixBlock<Scalar,Dim,Dim>>;
template<class Scalar, int Dim>
using BV = Dune::BlockVector<Dune::FieldVector<Scalar,Dim>>;

#define INSTANTIATE_GRID(T, Dim, Grid) \
template void BdaSolverInfo<BM<T,Dim>,BV<T,Dim>>:: \
prepare(const Grid&, \
const Dune::CartesianIndexMapper<Grid>&, \
const std::vector<Well>&, \
const std::vector<int>&, \
#define INSTANTIATE_GRID(T, Dim, Grid) \
template void BdaSolverInfo<BM<T,Dim>,BV<T,Dim>>:: \
prepare(const Grid&, \
const Dune::CartesianIndexMapper<Grid>&, \
const std::vector<Well>&, \
const std::unordered_map<std::string, std::set<std::array<int,3>>>&, \
const std::vector<int>&, \
const std::size_t, const bool);
using PolyHedralGrid3D = Dune::PolyhedralGrid<3, 3>;
#if HAVE_DUNE_ALUGRID
Expand Down
2 changes: 2 additions & 0 deletions opm/simulators/linalg/ISTLSolverBda.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,7 @@ struct BdaSolverInfo
void prepare(const Grid& grid,
const Dune::CartesianIndexMapper<Grid>& cartMapper,
const std::vector<Well>& wellsForConn,
const std::unordered_map<std::string, std::set<std::array<int,3>>>& possibleFutureConnections,
const std::vector<int>& cellPartition,
const std::size_t nonzeroes,
const bool useWellConn);
Expand Down Expand Up @@ -207,6 +208,7 @@ class ISTLSolverBda : public ISTLSolver<TypeTag>
bdaBridge_->prepare(this->simulator_.vanguard().grid(),
this->simulator_.vanguard().cartesianIndexMapper(),
this->simulator_.vanguard().schedule().getWellsatEnd(),
this->simulator_.vanguard().schedule().getPossibleFutureConnections(),
this->simulator_.vanguard().cellPartition(),
this->getMatrix().nonzeroes(), this->useWellConn_);
}
Expand Down
4 changes: 2 additions & 2 deletions opm/simulators/linalg/findOverlapRowsAndColumns.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ namespace detail
/// \param useWellConn Boolean that is true when UseWellContribusion is true
/// \param wellGraph Cell IDs of well cells stored in a graph.
template<class Grid, class CartMapper, class W>
void setWellConnections(const Grid& grid, const CartMapper& cartMapper, const W& wells, bool useWellConn, std::vector<std::set<int>>& wellGraph, int numJacobiBlocks)
void setWellConnections(const Grid& grid, const CartMapper& cartMapper, const W& wells, const std::unordered_map<std::string, std::set<std::array<int,3>>>& possibleFutureConnections, bool useWellConn, std::vector<std::set<int>>& wellGraph, int numJacobiBlocks)
{
if ( grid.comm().size() > 1 || numJacobiBlocks > 1)
{
Expand All @@ -62,7 +62,7 @@ namespace detail
cart[ cartMapper.cartesianIndex( i ) ] = i;

Dune::cpgrid::WellConnections well_indices;
well_indices.init(wells, cpgdim, cart);
well_indices.init(wells, &possibleFutureConnections, cpgdim, cart);

for (auto& well : well_indices)
{
Expand Down
24 changes: 24 additions & 0 deletions parallelTests.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -212,3 +212,27 @@ add_test_compare_parallel_simulation(CASENAME rxft
REL_TOL 1.0e-3
DIR rxft_smry
TEST_ARGS --enable-tuning=true --linear-solver-reduction=1e-7 --tolerance-cnv=5e-6 --tolerance-mb=1e-8 --enable-drift-compensation=false)

opm_set_test_driver(${PROJECT_SOURCE_DIR}/tests/run-comparison.sh "")

add_test_compareSeparateECLFiles(CASENAME actionx_compdat_1_proc
DIR1 actionx
FILENAME1 COMPDAT_SHORT
DIR2 actionx
FILENAME2 ACTIONX_COMPDAT_SHORT
SIMULATOR flow
ABS_TOL ${abs_tol}
REL_TOL ${rel_tol}
IGNORE_EXTRA_KW BOTH
MPI_PROCS 1)

add_test_compareSeparateECLFiles(CASENAME actionx_compdat_8_procs
DIR1 actionx
FILENAME1 COMPDAT_SHORT
DIR2 actionx
FILENAME2 ACTIONX_COMPDAT_SHORT
SIMULATOR flow
ABS_TOL ${abs_tol}
REL_TOL ${rel_tol}
IGNORE_EXTRA_KW BOTH
MPI_PROCS 8)
Loading