From debb7cb036c336be1a16c8156ce663dfa982a031 Mon Sep 17 00:00:00 2001
From: Lisa Julia Nebel
Date: Tue, 19 Nov 2024 15:17:26 +0100
Subject: [PATCH 1/6] Add option of comparing only the SMRY file to run-parallel-regressionTest.sh via the flag -s

---
 compareECLFiles.cmake                | 19 +++++++++++++++++--
 tests/run-parallel-regressionTest.sh | 22 +++++++++++++++-------
 2 files changed, 32 insertions(+), 9 deletions(-)

diff --git a/compareECLFiles.cmake b/compareECLFiles.cmake
index d1bc24b0375..38d4e4277df 100644
--- a/compareECLFiles.cmake
+++ b/compareECLFiles.cmake
@@ -194,7 +194,7 @@ endfunction()
 # - This test class compares the output from a parallel simulation
 #   to the output from the serial instance of the same model.
 function(add_test_compare_parallel_simulation)
-  set(oneValueArgs CASENAME FILENAME SIMULATOR ABS_TOL REL_TOL DIR MPI_PROCS)
+  set(oneValueArgs CASENAME FILENAME SIMULATOR ABS_TOL REL_TOL DIR MPI_PROCS ONLY_SMRY)
   set(multiValueArgs TEST_ARGS)
   cmake_parse_arguments(PARAM "$" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} )
@@ -210,7 +210,22 @@ function(add_test_compare_parallel_simulation)
   set(RESULT_PATH ${BASE_RESULT_PATH}/parallel/${PARAM_SIMULATOR}+${PARAM_CASENAME})
   set(TEST_ARGS ${OPM_TESTS_ROOT}/${PARAM_DIR}/${PARAM_FILENAME} ${PARAM_TEST_ARGS})
-  set(DRIVER_ARGS -i ${OPM_TESTS_ROOT}/${PARAM_DIR}
+
+  # Handle ONLY_SMRY flag (defaults to 0 if not provided)
+  if(PARAM_ONLY_SMRY)
+    if(${PARAM_ONLY_SMRY} EQUAL 1)
+      set(DRIVER_ARGS -s)
+    elseif(${PARAM_ONLY_SMRY} EQUAL 0)
+      set(DRIVER_ARGS "")
+    else()
+      message(FATAL_ERROR "ONLY_SMRY must be either 0 or 1.")
+    endif()
+  else()
+    set(DRIVER_ARGS "")
+  endif()
+
+  set(DRIVER_ARGS ${DRIVER_ARGS}
+                  -i ${OPM_TESTS_ROOT}/${PARAM_DIR}
                   -r ${RESULT_PATH}
                   -b ${PROJECT_BINARY_DIR}/bin
                   -f ${PARAM_FILENAME}
diff --git a/tests/run-parallel-regressionTest.sh b/tests/run-parallel-regressionTest.sh
index d3b3fa74806..5febd8c5d4f 100755
--- a/tests/run-parallel-regressionTest.sh
+++ b/tests/run-parallel-regressionTest.sh
@@ -18,12 +18,15 @@ then
   echo -e "\t\t -e Simulator binary to use"
   echo -e "\tOptional options:"
   echo -e "\t\t -n Number of MPI processes to use"
+  echo -e "\t\t -s If given, compare only the SMRY file and skip comparison of the UNRST file."
   exit 1
 fi

 MPI_PROCS=4
 OPTIND=1
-while getopts "i:r:b:f:a:t:c:e:n:" OPT
+ONLY_SUMMARY=false
+
+while getopts "i:r:b:f:a:t:c:e:n:s" OPT
 do
   case "${OPT}" in
     i) INPUT_DATA_PATH=${OPTARG} ;;
@@ -35,6 +38,7 @@ do
     c) COMPARE_ECL_COMMAND=${OPTARG} ;;
     e) EXE_NAME=${OPTARG} ;;
     n) MPI_PROCS=${OPTARG} ;;
+    s) ONLY_SUMMARY=true ;;
   esac
 done
 shift $(($OPTIND-1))
@@ -61,12 +65,16 @@ then
   ${COMPARE_ECL_COMMAND} -t SMRY -a -R ${RESULT_PATH}/${FILENAME} ${RESULT_PATH}/mpi/${FILENAME} ${ABS_TOL} ${REL_TOL}
 fi

-echo "=== Executing comparison for restart file ==="
-${COMPARE_ECL_COMMAND} -l -t UNRST ${RESULT_PATH}/${FILENAME} ${RESULT_PATH}/mpi/${FILENAME} ${ABS_TOL} ${REL_TOL}
-if [ $? -ne 0 ]
-then
-  ecode=1
-  ${COMPARE_ECL_COMMAND} -a -l -t UNRST ${RESULT_PATH}/${FILENAME} ${RESULT_PATH}/mpi/${FILENAME} ${ABS_TOL} ${REL_TOL}
+if [ "$ONLY_SUMMARY" = false ]; then
+  echo "=== Executing comparison for restart file ==="
+  ${COMPARE_ECL_COMMAND} -l -t UNRST ${RESULT_PATH}/${FILENAME} ${RESULT_PATH}/mpi/${FILENAME} ${ABS_TOL} ${REL_TOL}
+  if [ $? -ne 0 ]
+  then
+    ecode=1
+    ${COMPARE_ECL_COMMAND} -a -l -t UNRST ${RESULT_PATH}/${FILENAME} ${RESULT_PATH}/mpi/${FILENAME} ${ABS_TOL} ${REL_TOL}
+  fi
+else
+  echo "=== Skipping comparison for restart file due to -s flag ==="
 fi

 exit $ecode
From 6bdb80126fd85ccd0d90a78f5cb85ece99fb23fd Mon Sep 17 00:00:00 2001
From: Lisa Julia Nebel
Date: Fri, 22 Nov 2024 10:54:00 +0100
Subject: [PATCH 2/6] Add parameter enableDistributedWells to the well state and, when set to true, do not throw when initializing distributed multi-segment wells in WellState.cpp

---
 opm/simulators/flow/FlowGenericVanguard.cpp     |  9 ++++-----
 opm/simulators/wells/BlackoilWellModel_impl.hpp |  2 +-
 opm/simulators/wells/WellState.cpp              | 10 ++++++----
 opm/simulators/wells/WellState.hpp              |  5 ++++-
 tests/test_wellstate.cpp                        |  3 ++-
 5 files changed, 17 insertions(+), 12 deletions(-)

diff --git a/opm/simulators/flow/FlowGenericVanguard.cpp b/opm/simulators/flow/FlowGenericVanguard.cpp
index 28e19182497..c9ced125299 100644
--- a/opm/simulators/flow/FlowGenericVanguard.cpp
+++ b/opm/simulators/flow/FlowGenericVanguard.cpp
@@ -322,14 +322,13 @@ void FlowGenericVanguard::init()
             if (comm.rank() == 0) {
                 std::string message =
-                    std::string("Option --allow-distributed-wells=true is only allowed if model\n")
-                    + "only has only standard wells. You need to provide option \n"
-                    + " with --enable-multisegement-wells=false to treat existing \n"
+                    std::string("Option --allow-distributed-wells=true in a model with\n")
+                    + "multisegment wells. This feature is still experimental. You can\n"
+                    + "set --enable-multisegment-wells=false to treat the existing \n"
                     + "multisegment wells as standard wells.";
-                OpmLog::error(message);
+                OpmLog::info(message);
             }
             comm.barrier();
-            OPM_THROW(std::invalid_argument, "All wells need to be standard wells!");
         }
     }
 }
diff --git a/opm/simulators/wells/BlackoilWellModel_impl.hpp b/opm/simulators/wells/BlackoilWellModel_impl.hpp
index 32f69dff5a4..31dc52c0a44 100644
--- a/opm/simulators/wells/BlackoilWellModel_impl.hpp
+++ b/opm/simulators/wells/BlackoilWellModel_impl.hpp
@@ -882,7 +882,7 @@ namespace Opm {
         this->wellState().init(cellPressures, cellTemperatures, this->schedule(), this->wells_ecl_,
                                this->local_parallel_well_info_, timeStepIdx,
                               &this->prevWellState(), this->well_perf_data_,
-                               this->summaryState());
+                               this->summaryState(), simulator_.vanguard().enableDistributedWells());
     }
diff --git a/opm/simulators/wells/WellState.cpp b/opm/simulators/wells/WellState.cpp
index 595095f75d4..88bea8af3c7 100644
--- a/opm/simulators/wells/WellState.cpp
+++ b/opm/simulators/wells/WellState.cpp
@@ -264,11 +264,13 @@ void WellState::init(const std::vector& cellPressures,
                      const int report_step,
                      const WellState* prevState,
                      const std::vector>>& well_perf_data,
-                     const SummaryState& summary_state)
+                     const SummaryState& summary_state,
+                     const bool enableDistributedWells)
 {
     // call init on base class
     this->base_init(cellPressures, cellTemperatures, wells_ecl, parallel_well_info,
                     well_perf_data, summary_state);
+    this->enableDistributedWells_ = enableDistributedWells;
     this->global_well_info = std::make_optional(schedule,
                                                 report_step,
                                                 wells_ecl);
@@ -439,7 +441,7 @@ void WellState::resize(const std::vector& wells_ecl,
                        const SummaryState& summary_state)
 {
     const std::vector tmp(numCells, 0.0); // <- UGLY HACK to pass the size
-    init(tmp, tmp, schedule, wells_ecl, parallel_well_info, 0, nullptr, well_perf_data, summary_state);
+    init(tmp, tmp, schedule, wells_ecl, parallel_well_info, 0, nullptr, well_perf_data, summary_state, this->enableDistributedWells_);

     if (handle_ms_well) {
         initWellStateMSWell(wells_ecl, nullptr);
@@ -728,8 +730,8 @@ void WellState::initWellStateMSWell(const std::vector& wells_ecl,
                 n_activeperf++;
             }
         }
-
-        if (static_cast(ws.perf_data.size()) != n_activeperf)
+
+        if (!this->enableDistributedWells_ && static_cast(ws.perf_data.size()) != n_activeperf)
             throw std::logic_error("Distributed multi-segment wells cannot be initialized properly yet.");
diff --git a/opm/simulators/wells/WellState.hpp b/opm/simulators/wells/WellState.hpp
index 7047b28845b..0056e7359b6 100644
--- a/opm/simulators/wells/WellState.hpp
+++ b/opm/simulators/wells/WellState.hpp
@@ -104,7 +104,8 @@ class WellState
               const int report_step,
               const WellState* prevState,
               const std::vector>>& well_perf_data,
-              const SummaryState& summary_state);
+              const SummaryState& summary_state,
+              const bool enableDistributedWells);

     void resize(const std::vector& wells_ecl,
                 const std::vector>>& parallel_well_info,
@@ -353,6 +354,8 @@ class WellState
     }

 private:
+    bool enableDistributedWells_ = false;
+
     bool is_permanently_inactive_well(const std::string& wname) const
     {
         return std::find(this->permanently_inactive_well_names_.begin(), this->permanently_inactive_well_names_.end(), wname) != this->permanently_inactive_well_names_.end();
     }
diff --git a/tests/test_wellstate.cpp b/tests/test_wellstate.cpp
index e2bdf947df5..fefd6d8cad5 100644
--- a/tests/test_wellstate.cpp
+++ b/tests/test_wellstate.cpp
@@ -179,7 +179,8 @@ namespace {
     state.init(cpress, ctemp, setup.sched,
                wells, ppinfos,
-               timeStep, nullptr, setup.well_perf_data, setup.st);
+               timeStep, nullptr, setup.well_perf_data, setup.st,
+               false /*enableDistributedWells*/);

     state.initWellStateMSWell(setup.sched.getWells(timeStep), nullptr);
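Note on the check relaxed in initWellStateMSWell above: n_activeperf counts the active perforations of the whole multi-segment well as given in the deck, while ws.perf_data only stores the perforations owned by the local rank. Once such a well is distributed across processes the two counts can legitimately differ, which is why the comparison is now skipped when enableDistributedWells is set. A minimal stand-alone sketch of that mismatch (plain C++; the counts are invented for illustration and this is not OPM code):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    int main()
    {
        const std::size_t n_activeperf = 6;      // active perforations of the well in the deck

        std::vector<int> serial_perf_data(6);    // serial run: one process owns every perforation
        assert(serial_perf_data.size() == n_activeperf);

        // Distributed run: the same well split over two ranks.
        std::vector<int> rank0_perf_data(4);
        std::vector<int> rank1_perf_data(2);
        assert(rank0_perf_data.size() + rank1_perf_data.size() == n_activeperf);
        assert(rank0_perf_data.size() != n_activeperf);  // the unguarded check would throw here
        return 0;
    }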
From 6784db2d1d03521c4a234b5c886a26dd6c1cace9 Mon Sep 17 00:00:00 2001
From: Lisa Julia Nebel
Date: Tue, 19 Nov 2024 12:36:41 +0100
Subject: [PATCH 3/6] Add tests for parallel multisegment wells

---
 parallelTests.cmake | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/parallelTests.cmake b/parallelTests.cmake
index fd8a94a79f2..422666957b8 100644
--- a/parallelTests.cmake
+++ b/parallelTests.cmake
@@ -36,6 +36,27 @@ add_test_compare_parallel_simulation(CASENAME spe9_dist_z
                                      REL_TOL ${rel_tol_parallel}
                                      TEST_ARGS --linear-solver-reduction=1e-7 --tolerance-cnv=5e-6 --tolerance-mb=1e-8 --enable-drift-compensation=false)

+# A test for distributed multisegment wells. We distribute the load only along the z-axis
+add_test_compare_parallel_simulation(CASENAME msw-simple
+                                     FILENAME MSW-SIMPLE # this file contains one Multisegment well without branches that is distributed across several processes
+                                     DIR msw
+                                     SIMULATOR flow_distribute_z
+                                     ONLY_SMRY 1
+                                     ABS_TOL 1e4 # the absolute tolerance is pretty high here, yet in this case, we are only interested in the relative tolerance
+                                     REL_TOL 1e-5
+                                     MPI_PROCS 4
+                                     TEST_ARGS --solver-max-time-step-in-days=10 --allow-distributed-wells=true)
+
+add_test_compare_parallel_simulation(CASENAME msw-3d
+                                     FILENAME MSW-3D # this file contains one Multisegment well with branches that is distributed across several processes
+                                     DIR msw
+                                     SIMULATOR flow_distribute_z
+                                     ONLY_SMRY 1
+                                     ABS_TOL 1e4 # the absolute tolerance is pretty high here, yet in this case, we are only interested in the relative tolerance
+                                     REL_TOL 1e-4
+                                     MPI_PROCS 4
+                                     TEST_ARGS --allow-distributed-wells=true)
+
 add_test_compare_parallel_simulation(CASENAME spe9group
                                      FILENAME SPE9_CP_GROUP
                                      SIMULATOR flow
From 81a5da8b637eb07534045c35abc753effdf875b4 Mon Sep 17 00:00:00 2001
From: Lisa Julia Nebel
Date: Wed, 16 Oct 2024 14:44:11 +0200
Subject: [PATCH 4/6] Add communication when multiplying with the matrix duneB_ in apply

Here we go from cells to segments, and everything concerning segments is stored globally.
---
 opm/simulators/wells/MultisegmentWellEquations.cpp | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/opm/simulators/wells/MultisegmentWellEquations.cpp b/opm/simulators/wells/MultisegmentWellEquations.cpp
index d229c719117..1eb1875f677 100644
--- a/opm/simulators/wells/MultisegmentWellEquations.cpp
+++ b/opm/simulators/wells/MultisegmentWellEquations.cpp
@@ -152,6 +152,11 @@ apply(const BVector& x, BVector& Ax) const

     duneB_.mv(x, Bx);

+    if (this->pw_info_.communication().size() > 1) {
+        // We need to communicate here to get the contributions from all segments
+        this->pw_info_.communication().sum(Bx.data(), Bx.size());
+    }
+
     // invDBx = duneD^-1 * Bx_
     const BVectorWell invDBx = mswellhelpers::applyUMFPack(*duneDSolver_, Bx);
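The communication added above follows a common reduction pattern: duneB_ maps cell-indexed data to segment-indexed data, each rank multiplies only with the part belonging to its local cells, and because everything concerning segments is stored globally on every rank, a communicator-wide sum of the local partial products reproduces the full B*x everywhere. A minimal stand-alone MPI sketch of the same pattern (not OPM code; the vector length and values are invented):

    #include <mpi.h>
    #include <cstdio>
    #include <vector>

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);
        int rank = 0, size = 1;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);

        const int n_segments = 4;                 // segment-indexed data, replicated on all ranks
        std::vector<double> Bx(n_segments, 0.0);  // local contribution to B*x

        // Stand-in for duneB_.mv(x, Bx): each rank only adds what its local
        // cells contribute to every segment.
        for (int seg = 0; seg < n_segments; ++seg)
            Bx[seg] = 0.25 * (rank + 1);

        // Stand-in for pw_info_.communication().sum(Bx.data(), Bx.size()):
        // after the in-place sum every rank holds the complete product.
        MPI_Allreduce(MPI_IN_PLACE, Bx.data(), n_segments, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

        if (rank == 0)
            std::printf("Bx[0] summed over %d ranks: %g\n", size, Bx[0]);

        MPI_Finalize();
        return 0;
    }

Guarding the sum with communication().size() > 1 keeps serial runs free of the collective call; the next patch applies the same idea in recoverSolutionWell, where the single-process branch can keep using the fused duneB_.mmv.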
From 627b9c98baaf705263799fd318a83f38e64eeb54 Mon Sep 17 00:00:00 2001
From: Lisa Julia Nebel
Date: Thu, 17 Oct 2024 14:01:55 +0200
Subject: [PATCH 5/6] Add communication when multiplying with the matrix duneB_ in recoverSolutionWell

Here we go from cells to segments, and everything concerning segments is stored globally.
---
 .../wells/MultisegmentWellEquations.cpp | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/opm/simulators/wells/MultisegmentWellEquations.cpp b/opm/simulators/wells/MultisegmentWellEquations.cpp
index 1eb1875f677..0116d1cda4f 100644
--- a/opm/simulators/wells/MultisegmentWellEquations.cpp
+++ b/opm/simulators/wells/MultisegmentWellEquations.cpp
@@ -212,9 +212,20 @@ template void MultisegmentWellEquations::
 recoverSolutionWell(const BVector& x, BVectorWell& xw) const
 {
-    BVectorWell resWell = resWell_;
     // resWell = resWell - B * x
-    duneB_.mmv(x, resWell);
+    BVectorWell resWell = resWell_;
+    if (this->pw_info_.communication().size() == 1) {
+        duneB_.mmv(x, resWell);
+    } else {
+        BVectorWell Bx(duneB_.N());
+        duneB_.mv(x, Bx);
+
+        // We need to communicate here to get the contributions from all segments
+        this->pw_info_.communication().sum(Bx.data(), Bx.size());
+
+        resWell -= Bx;
+    }
+
     // xw = D^-1 * resWell
     xw = mswellhelpers::applyUMFPack(*duneDSolver_, resWell);
 }
From 4801a17703a90e85088fafe8927e2d374b4c383b Mon Sep 17 00:00:00 2001
From: Lisa Julia Nebel
Date: Tue, 8 Oct 2024 15:02:27 +0200
Subject: [PATCH 6/6] Add comments at the spots where we multiply with D^(-1) - we actually can do this on all processes

---
 .../wells/MultisegmentWellEquations.cpp | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/opm/simulators/wells/MultisegmentWellEquations.cpp b/opm/simulators/wells/MultisegmentWellEquations.cpp
index 0116d1cda4f..35972da2da7 100644
--- a/opm/simulators/wells/MultisegmentWellEquations.cpp
+++ b/opm/simulators/wells/MultisegmentWellEquations.cpp
@@ -157,6 +157,9 @@ apply(const BVector& x, BVector& Ax) const
         this->pw_info_.communication().sum(Bx.data(), Bx.size());
     }

+    // It is ok to do this on each process instead of only on one,
+    // because the other processes would remain idle while waiting for
+    // the single process to complete the computation.
     // invDBx = duneD^-1 * Bx_
     const BVectorWell invDBx = mswellhelpers::applyUMFPack(*duneDSolver_, Bx);
@@ -168,6 +171,9 @@ template void MultisegmentWellEquations::
 apply(BVector& r) const
 {
+    // It is ok to do this on each process instead of only on one,
+    // because the other processes would remain idle while waiting for
+    // the single process to complete the computation.
     // invDrw_ = duneD^-1 * resWell_
     const BVectorWell invDrw = mswellhelpers::applyUMFPack(*duneDSolver_, resWell_);
     // r = r - duneC_^T * invDrw
@@ -198,6 +204,9 @@ template typename MultisegmentWellEquations::BVectorWell
 MultisegmentWellEquations::solve() const
 {
+    // It is ok to do this on each process instead of only on one,
+    // because the other processes would remain idle while waiting for
+    // the single process to complete the computation.
     return mswellhelpers::applyUMFPack(*duneDSolver_, resWell_);
 }
@@ -205,6 +214,9 @@ template typename MultisegmentWellEquations::BVectorWell
 MultisegmentWellEquations::solve(const BVectorWell& rhs) const
 {
+    // It is ok to do this on each process instead of only on one,
+    // because the other processes would remain idle while waiting for
+    // the single process to complete the computation.
     return mswellhelpers::applyUMFPack(*duneDSolver_, rhs);
 }
@@ -227,6 +239,9 @@ recoverSolutionWell(const BVector& x, BVectorWell& xw) const
     }

     // xw = D^-1 * resWell
+    // It is ok to do this on each process instead of only on one,
+    // because the other processes would remain idle while waiting for
+    // the single process to complete the computation.
     xw = mswellhelpers::applyUMFPack(*duneDSolver_, resWell);
 }
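The comments added in this final patch all make the same point: since everything concerning segments is stored globally (see patches 4 and 5), duneD_ and resWell_ are held identically on every rank of the well's communicator, so applying D^-1 redundantly on each process yields the same result everywhere and needs no communication; solving on a single rank would only leave the other processes idle. A small stand-alone MPI sketch of that argument (not OPM code; the 2x2 system is invented and Cramer's rule stands in for the UMFPack solve):

    #include <mpi.h>
    #include <array>
    #include <cmath>
    #include <cstdio>

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);
        int rank = 0;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        // The same replicated segment system D * xw = resWell on every rank.
        const std::array<double, 4> D = {4.0, 1.0, 1.0, 3.0};   // row-major 2x2 matrix
        const std::array<double, 2> resWell = {1.0, 2.0};

        // Stand-in for applyUMFPack: a direct 2x2 solve via Cramer's rule.
        const double det = D[0] * D[3] - D[1] * D[2];
        const std::array<double, 2> xw = {(resWell[0] * D[3] - D[1] * resWell[1]) / det,
                                          (D[0] * resWell[1] - resWell[0] * D[2]) / det};

        // Every rank computed the identical xw, so broadcasting rank 0's result
        // changes nothing; solving on one rank only would make the others wait.
        std::array<double, 2> fromRoot = xw;
        MPI_Bcast(fromRoot.data(), 2, MPI_DOUBLE, 0, MPI_COMM_WORLD);
        std::printf("rank %d: difference to rank 0 = %g\n", rank,
                    std::fabs(fromRoot[0] - xw[0]) + std::fabs(fromRoot[1] - xw[1]));

        MPI_Finalize();
        return 0;
    }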