[dist-rg] review updates:
- split tests into core and backend part
- fix formatting
- fix openmpi pre 4.1.x macro

Co-authored-by: Pratik Nayak <[email protected]>
Co-authored-by: Yu-Hsiang M. Tsai <[email protected]>
Signed-off-by: Marcel Koch <[email protected]>
3 people committed Dec 18, 2024
1 parent 52bd3be commit a52ba0d
Showing 11 changed files with 252 additions and 195 deletions.
12 changes: 6 additions & 6 deletions core/distributed/row_gatherer.cpp
@@ -17,10 +17,10 @@ namespace experimental {
namespace distributed {


#if GINKGO_HAVE_OPENMPI_POST_4_1_X
using DefaultCollComm = mpi::NeighborhoodCommunicator;
#else
#if GINKGO_HAVE_OPENMPI_PRE_4_1_X
using DefaultCollComm = mpi::DenseCommunicator;
#else
using DefaultCollComm = mpi::NeighborhoodCommunicator;
#endif
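
The commit message's "fix openmpi pre 4.1.x macro" item refers to the guard above: with OpenMPI older than 4.1.x the DenseCommunicator is selected as the default collective communicator, otherwise the NeighborhoodCommunicator. Purely as an illustration (this is not Ginkgo's actual definition), such a switch could be derived from OpenMPI's own version macros:

```c++
// Illustrative sketch only: deriving a "pre 4.1.x OpenMPI" switch from
// OpenMPI's version macros. Ginkgo's real definition of
// GINKGO_HAVE_OPENMPI_PRE_4_1_X lives elsewhere and may differ.
#include <mpi.h>

#if defined(OPEN_MPI) && OPEN_MPI &&  \
    (OMPI_MAJOR_VERSION < 4 ||        \
     (OMPI_MAJOR_VERSION == 4 && OMPI_MINOR_VERSION < 1))
#define GINKGO_HAVE_OPENMPI_PRE_4_1_X 1
#else
#define GINKGO_HAVE_OPENMPI_PRE_4_1_X 0
#endif
```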


@@ -85,9 +85,9 @@ mpi::request RowGatherer<LocalIndexType>::apply_async(
!use_host_buffer || mpi_exec->memory_accessible(
x_local->get_executor()),
"The receive buffer uses device memory, but MPI "
"support of device memory is not available. Please "
"provide a host buffer or enable MPI support for "
"device memory.");
"support of device memory is not available or host "
"buffer were explicitly requested. Please provide a "
"host buffer or enable MPI support for device memory.");

auto b_local = b_global->get_local_vector();

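The updated assertion message spells out the two ways to satisfy the check: enable device-aware MPI, or hand apply_async a receive buffer that lives in host memory. A minimal sketch of the latter, assuming a row gatherer `rg` and a distributed vector `b` constructed as in the tests below (the helper name and concrete types are illustrative, not part of the commit):

```c++
#include <ginkgo/ginkgo.hpp>

// Sketch: gather into a host-side buffer so that no device-aware MPI support
// is required. `rg` and `b` are assumed to be set up elsewhere, e.g. as in
// the row_gatherer tests.
void gather_into_host_buffer(
    std::shared_ptr<gko::experimental::distributed::RowGatherer<gko::int32>> rg,
    std::shared_ptr<gko::experimental::distributed::Vector<double>> b)
{
    auto host = gko::ReferenceExecutor::create();
    // Allocating the receive buffer on a host executor satisfies the
    // assertion even when MPI cannot access device memory.
    auto x = gko::matrix::Dense<double>::create(
        host, gko::dim<2>{rg->get_size()[0], b->get_size()[1]});
    rg->apply_async(b, x).wait();
    // x now holds the gathered rows; copy it to a device executor if needed.
}
```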
178 changes: 1 addition & 177 deletions core/test/mpi/distributed/row_gatherer.cpp
@@ -23,21 +23,6 @@ class RowGatherer : public ::testing::Test {
using row_gatherer_type =
gko::experimental::distributed::RowGatherer<index_type>;

RowGatherer()
{
int rank = this->comm.rank();
auto part = gko::share(part_type::build_from_global_size_uniform(
this->ref, this->comm.size(), this->comm.size() * 3));
auto recv_connections =
this->template create_recv_connections<long>()[rank];
auto imap =
map_type{this->ref, part, this->comm.rank(), recv_connections};
auto coll_comm =
std::make_shared<gko::experimental::mpi::NeighborhoodCommunicator>(
this->comm, imap);
rg = row_gatherer_type::create(ref, coll_comm, imap);
}

void SetUp() override { ASSERT_EQ(comm.size(), 6); }

template <typename T>
@@ -53,11 +38,11 @@ class RowGatherer : public ::testing::Test {

std::shared_ptr<gko::Executor> ref = gko::ReferenceExecutor::create();
gko::experimental::mpi::communicator comm = MPI_COMM_WORLD;
std::shared_ptr<row_gatherer_type> rg;
};

TYPED_TEST_SUITE(RowGatherer, gko::test::IndexTypes, TypenameNameGenerator);


TYPED_TEST(RowGatherer, CanDefaultConstruct)
{
using RowGatherer = typename TestFixture::row_gatherer_type;
@@ -103,164 +88,3 @@ TYPED_TEST(RowGatherer, CanConstructFromCollectiveCommAndIndexMap)
gko::dim<2> size{recv_connections.get_size(), 18};
GKO_ASSERT_EQUAL_DIMENSIONS(rg, size);
}


TYPED_TEST(RowGatherer, CanApply)
{
using Dense = gko::matrix::Dense<double>;
using Vector = gko::experimental::distributed::Vector<double>;
int rank = this->comm.rank();
auto offset = static_cast<double>(rank * 3);
auto b = Vector::create(
this->ref, this->comm, gko::dim<2>{18, 1},
gko::initialize<Dense>({offset, offset + 1, offset + 2}, this->ref));
auto x = Dense::create(this->ref, gko::dim<2>{this->rg->get_size()[0], 1});

this->rg->apply(b, x);

auto expected = this->template create_recv_connections<double>()[rank];
auto expected_vec = Dense::create(
this->ref, gko::dim<2>{expected.get_size(), 1}, expected, 1);
GKO_ASSERT_MTX_NEAR(x, expected_vec, 0.0);
}


TYPED_TEST(RowGatherer, CanApplyAsync)
{
using Dense = gko::matrix::Dense<double>;
using Vector = gko::experimental::distributed::Vector<double>;
int rank = this->comm.rank();
auto offset = static_cast<double>(rank * 3);
auto b = Vector::create(
this->ref, this->comm, gko::dim<2>{18, 1},
gko::initialize<Dense>({offset, offset + 1, offset + 2}, this->ref));
auto x = Dense::create(this->ref, gko::dim<2>{this->rg->get_size()[0], 1});

auto req = this->rg->apply_async(b, x);
req.wait();

auto expected = this->template create_recv_connections<double>()[rank];
auto expected_vec = Dense::create(
this->ref, gko::dim<2>{expected.get_size(), 1}, expected, 1);
GKO_ASSERT_MTX_NEAR(x, expected_vec, 0.0);
}


TYPED_TEST(RowGatherer, CanApplyAsyncConsecutively)
{
using Dense = gko::matrix::Dense<double>;
using Vector = gko::experimental::distributed::Vector<double>;
int rank = this->comm.rank();
auto offset = static_cast<double>(rank * 3);
auto b = Vector::create(
this->ref, this->comm, gko::dim<2>{18, 1},
gko::initialize<Dense>({offset, offset + 1, offset + 2}, this->ref));
auto x = Dense::create(this->ref, gko::dim<2>{this->rg->get_size()[0], 1});

this->rg->apply_async(b, x).wait();
this->rg->apply_async(b, x).wait();

auto expected = this->template create_recv_connections<double>()[rank];
auto expected_vec = Dense::create(
this->ref, gko::dim<2>{expected.get_size(), 1}, expected, 1);
GKO_ASSERT_MTX_NEAR(x, expected_vec, 0.0);
}


TYPED_TEST(RowGatherer, CanApplyAsyncWithWorkspace)
{
using Dense = gko::matrix::Dense<double>;
using Vector = gko::experimental::distributed::Vector<double>;
int rank = this->comm.rank();
auto offset = static_cast<double>(rank * 3);
auto b = Vector::create(
this->ref, this->comm, gko::dim<2>{18, 1},
gko::initialize<Dense>({offset, offset + 1, offset + 2}, this->ref));
auto x = Dense::create(this->ref, gko::dim<2>{this->rg->get_size()[0], 1});
gko::array<char> workspace(this->ref);

auto req = this->rg->apply_async(b, x, workspace);
req.wait();

auto expected = this->template create_recv_connections<double>()[rank];
auto expected_vec = Dense::create(
this->ref, gko::dim<2>{expected.get_size(), 1}, expected, 1);
GKO_ASSERT_MTX_NEAR(x, expected_vec, 0.0);
}


TYPED_TEST(RowGatherer, CanApplyAsyncMultipleTimesWithWorkspace)
{
using Dense = gko::matrix::Dense<double>;
using Vector = gko::experimental::distributed::Vector<double>;
int rank = this->comm.rank();
auto offset = static_cast<double>(rank * 3);
auto b1 = Vector::create(
this->ref, this->comm, gko::dim<2>{18, 1},
gko::initialize<Dense>({offset, offset + 1, offset + 2}, this->ref));
auto b2 = gko::clone(b1);
b2->scale(gko::initialize<Dense>({-1}, this->ref));
auto x1 = Dense::create(this->ref, gko::dim<2>{this->rg->get_size()[0], 1});
auto x2 = gko::clone(x1);
gko::array<char> workspace1(this->ref);
gko::array<char> workspace2(this->ref);

auto req1 = this->rg->apply_async(b1, x1, workspace1);
auto req2 = this->rg->apply_async(b2, x2, workspace2);
req1.wait();
req2.wait();

auto expected = this->template create_recv_connections<double>()[rank];
auto expected_vec1 = Dense::create(
this->ref, gko::dim<2>{expected.get_size(), 1}, expected, 1);
auto expected_vec2 = gko::clone(expected_vec1);
expected_vec2->scale(gko::initialize<Dense>({-1}, this->ref));
GKO_ASSERT_MTX_NEAR(x1, expected_vec1, 0.0);
GKO_ASSERT_MTX_NEAR(x2, expected_vec2, 0.0);
}


TYPED_TEST(RowGatherer, CanApplyAsyncWithMultipleColumns)
{
using Dense = gko::matrix::Dense<double>;
using Vector = gko::experimental::distributed::Vector<double>;
int rank = this->comm.rank();
auto offset = static_cast<double>(rank * 3);
auto b = Vector::create(
this->ref, this->comm, gko::dim<2>{18, 2},
gko::initialize<Dense>({{offset, offset * offset},
{offset + 1, offset * offset + 1},
{offset + 2, offset * offset + 2}},
this->ref));
auto x = Dense::create(this->ref, gko::dim<2>{this->rg->get_size()[0], 2});

this->rg->apply_async(b, x).wait();

gko::array<double> expected[] = {
gko::array<double>{this->ref, {3, 9, 5, 11, 10, 82, 11, 83}},
gko::array<double>{this->ref, {0, 0, 1, 1, 7, 37, 12, 144, 13, 145}},
gko::array<double>{this->ref, {3, 9, 4, 10, 17, 227}},
gko::array<double>{this->ref, {1, 1, 2, 2, 12, 144, 14, 146}},
gko::array<double>{this->ref,
{4, 10, 5, 11, 9, 81, 10, 82, 15, 225, 16, 226}},
gko::array<double>{this->ref, {8, 38, 12, 144, 13, 145, 14, 146}}};
auto expected_vec =
Dense::create(this->ref, gko::dim<2>{expected[rank].get_size() / 2, 2},
expected[rank], 2);
GKO_ASSERT_MTX_NEAR(x, expected_vec, 0.0);
}


TYPED_TEST(RowGatherer, ThrowsOnAdvancedApply)
{
using RowGatherer = typename TestFixture::row_gatherer_type;
using Dense = gko::matrix::Dense<double>;
using Vector = gko::experimental::distributed::Vector<double>;
auto rg = RowGatherer::create(this->ref, this->comm);
auto b = Vector::create(this->ref, this->comm);
auto x = Dense::create(this->ref);
auto alpha = Dense::create(this->ref, gko::dim<2>{1, 1});
auto beta = Dense::create(this->ref, gko::dim<2>{1, 1});

ASSERT_THROW(rg->apply(alpha, b, beta, x), gko::NotImplemented);
}
1 change: 0 additions & 1 deletion include/ginkgo/core/base/mpi.hpp
@@ -368,7 +368,6 @@ class request {
return status;
}


private:
MPI_Request req_;
};
11 changes: 5 additions & 6 deletions include/ginkgo/core/distributed/row_gatherer.hpp
@@ -32,19 +32,18 @@ namespace distributed {
* Example usage:
* ```c++
* auto coll_comm = std::make_shared<mpi::neighborhood_communicator>(comm,
* imap); auto rg = distributed::RowGatherer<int32>::create(exec, coll_comm,
* imap);
* imap);
* auto rg = distributed::RowGatherer<int32>::create(exec, coll_comm, imap);
*
* auto b = distributed::Vector<double>::create(...);
* auto x = matrix::Dense<double>::create(...);
*
* auto future = rg->apply_async(b, x);
* auto req = rg->apply_async(b, x);
* // do some computation that doesn't modify b, or access x
* future.wait();
* req.wait();
* // x now contains the gathered rows of b
* ```
* Using the apply instead of the apply_async will lead to a blocking
* communication.
* Using apply instead of apply_async will lead to a blocking communication.
*
* @note Objects of this class are only available as shared_ptr, since the class
* is derived from std::enable_shared_from_this.
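The updated example ends with the blocking alternative; for overlapping several gathers, the tests above use apply_async with one workspace per in-flight request. A condensed sketch of that pattern, with placeholder names and template parameters:

```c++
#include <ginkgo/ginkgo.hpp>

// Sketch of overlapping two gathers, mirroring the
// CanApplyAsyncMultipleTimesWithWorkspace test: each pending request needs
// its own workspace and its own output buffer.
template <typename RowGatherer, typename VectorB, typename VectorX>
void overlapped_gather(std::shared_ptr<RowGatherer> rg,
                       std::shared_ptr<VectorB> b1, std::shared_ptr<VectorB> b2,
                       std::shared_ptr<VectorX> x1, std::shared_ptr<VectorX> x2)
{
    gko::array<char> workspace1(rg->get_executor());
    gko::array<char> workspace2(rg->get_executor());

    auto req1 = rg->apply_async(b1, x1, workspace1);
    auto req2 = rg->apply_async(b2, x2, workspace2);
    // ... do work that touches neither b1/b2 nor x1/x2 ...
    req1.wait();
    req2.wait();
}
```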
6 changes: 1 addition & 5 deletions test/mpi/CMakeLists.txt
@@ -1,8 +1,4 @@
ginkgo_create_common_and_reference_test(assembly MPI_SIZE 3)
ginkgo_create_common_and_reference_test(matrix MPI_SIZE 3)
ginkgo_create_common_and_reference_test(partition_helpers MPI_SIZE 3)
ginkgo_create_common_and_reference_test(vector MPI_SIZE 3)

add_subdirectory(distributed)
add_subdirectory(preconditioner)
add_subdirectory(solver)
add_subdirectory(multigrid)
5 changes: 5 additions & 0 deletions test/mpi/distributed/CMakeLists.txt
@@ -0,0 +1,5 @@
ginkgo_create_common_and_reference_test(assembly MPI_SIZE 3)
ginkgo_create_common_and_reference_test(matrix MPI_SIZE 3)
ginkgo_create_common_and_reference_test(partition_helpers MPI_SIZE 3)
ginkgo_create_common_and_reference_test(vector MPI_SIZE 3)
ginkgo_create_common_and_reference_test(row_gatherer MPI_SIZE 6)
File renamed without changes.
File renamed without changes.
File renamed without changes.