
Commit 1434d31
bugfix: change MPI buffers from int to double
1 parent 8d0d7e1

1 file changed, 15 insertions(+), 12 deletions(-)


core/test/HaloExchange_test.cpp

@@ -1,7 +1,7 @@
 /*!
  * @file ModelMetadata_test.cpp
  *
- * @date 17 Jan 2025
+ * @date 20 Jan 2025
  * @author Tom Meltzer <tdm39@cam.ac.uk>
  */
 
@@ -102,13 +102,13 @@ void setDGVecValue(
         for (size_t j = 0; j < smesh.ny; ++j) {
             for (size_t i = 0; i < smesh.nx; ++i) {
                 size_t pos = indexer(meshDims, std::vector<size_t>({ i, j }));
-                dgvec(pos, k) = -1.;
+                dgvec(pos, k) = value;
             }
         }
     }
 }
 
-void updateDGVec(DGVector<DG>& dgvec, std::vector<int>& recv, ParametricMesh& smesh,
+void updateDGVec(DGVector<DG>& dgvec, std::vector<double>& recv, ParametricMesh& smesh,
     std::array<size_t, 4>& edgeLengths, ModelMetadata::Edge edge)
 {
     SliceIter::MultiDim meshDims = { smesh.nx, smesh.ny };
@@ -225,8 +225,8 @@ MPI_TEST_CASE("test halo exchange on 3 proc grid", 3)
     // create a send buffer the size of the perimeter of the domain
     // each process will populate the send buffer with their perimeter cells
     const size_t perimeterLength = 2 * localNx + 2 * localNy;
-    std::vector<int> send = std::vector<int>(perimeterLength, 0);
-    std::vector<int> recv = std::vector<int>(perimeterLength, 0);
+    std::vector<double> send = std::vector<double>(perimeterLength, 0.0);
+    std::vector<double> recv = std::vector<double>(perimeterLength, 0.0);
 
     for (auto edge : edges) {
         size_t offset = std::accumulate(edgeLengths.begin(), edgeLengths.begin() + edge, 0);
@@ -236,7 +236,7 @@ MPI_TEST_CASE("test halo exchange on 3 proc grid", 3)
     // create a RMA memory window which all process will be able to access
     MPI_Win win;
     MPI_Win_create(
-        &send[0], perimeterLength * sizeof(int), sizeof(int), MPI_INFO_NULL, test_comm, &win);
+        &send[0], perimeterLength * sizeof(double), sizeof(double), MPI_INFO_NULL, test_comm, &win);
 
     MPI_Win_fence(MPI_MODE_NOPRECEDE, win); // Fence, no preceding RMA calls
 
@@ -254,7 +254,8 @@ MPI_TEST_CASE("test halo exchange on 3 proc grid", 3)
                 size_t count = metadata.neighbourExtents[edge][i];
                 size_t disp = metadata.neighbourHaloSend[edge][i];
                 size_t recvOffset = metadata.neighbourHaloRecv[edge][i];
-                MPI_Get(&recv[recvOffset], count, MPI_INT, fromRank, disp, count, MPI_INT, win);
+                MPI_Get(
+                    &recv[recvOffset], count, MPI_DOUBLE, fromRank, disp, count, MPI_DOUBLE, win);
             }
         }
     }
@@ -355,8 +356,8 @@ MPI_TEST_CASE("test halo exchange on 3 proc grid with periodic boundary conditio
     // create a send buffer the size of the perimeter of the domain
     // each process will populate the send buffer with their perimeter cells
     const size_t perimeterLength = 2 * localNx + 2 * localNy;
-    std::vector<int> send = std::vector<int>(perimeterLength, 0);
-    std::vector<int> recv = std::vector<int>(perimeterLength, 0);
+    std::vector<double> send = std::vector<double>(perimeterLength, 0.0);
+    std::vector<double> recv = std::vector<double>(perimeterLength, 0.0);
 
     for (auto edge : edges) {
         size_t offset = std::accumulate(edgeLengths.begin(), edgeLengths.begin() + edge, 0);
@@ -366,7 +367,7 @@ MPI_TEST_CASE("test halo exchange on 3 proc grid with periodic boundary conditio
     // create a RMA memory window which all process will be able to access
     MPI_Win win;
     MPI_Win_create(
-        &send[0], perimeterLength * sizeof(int), sizeof(int), MPI_INFO_NULL, test_comm, &win);
+        &send[0], perimeterLength * sizeof(double), sizeof(double), MPI_INFO_NULL, test_comm, &win);
 
     MPI_Win_fence(MPI_MODE_NOPRECEDE, win); // Fence, no preceding RMA calls
 
@@ -384,7 +385,8 @@ MPI_TEST_CASE("test halo exchange on 3 proc grid with periodic boundary conditio
                 size_t count = metadata.neighbourExtents[edge][i];
                 size_t disp = metadata.neighbourHaloSend[edge][i];
                 size_t recvOffset = metadata.neighbourHaloRecv[edge][i];
-                MPI_Get(&recv[recvOffset], count, MPI_INT, fromRank, disp, count, MPI_INT, win);
+                MPI_Get(
+                    &recv[recvOffset], count, MPI_DOUBLE, fromRank, disp, count, MPI_DOUBLE, win);
             }
         }
     }
@@ -399,7 +401,8 @@ MPI_TEST_CASE("test halo exchange on 3 proc grid with periodic boundary conditio
                 size_t count = metadata.neighbourExtentsPeriodic[edge][i];
                 size_t disp = metadata.neighbourHaloSendPeriodic[edge][i];
                 size_t recvOffset = metadata.neighbourHaloRecvPeriodic[edge][i];
-                MPI_Get(&recv[recvOffset], count, MPI_INT, fromRank, disp, count, MPI_INT, win);
+                MPI_Get(
+                    &recv[recvOffset], count, MPI_DOUBLE, fromRank, disp, count, MPI_DOUBLE, win);
             }
         }
     }
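For context, the point of the fix is that three things must agree in MPI one-sided communication: the buffer element type, the window's displacement unit, and the MPI datatype passed to MPI_Get. With std::vector<double> buffers, the old sizeof(int) displacement unit addressed the wrong bytes and MPI_INT reinterpreted them. Below is a minimal standalone sketch of the corrected pattern, not the test itself: it assumes MPI_COMM_WORLD instead of test_comm, a simple ring of neighbours instead of the grid metadata, and an arbitrary buffer length n instead of perimeterLength.

// rma_double_demo.cpp -- minimal sketch of the corrected RMA pattern.
// Hypothetical example; names (n, fromRank) are illustrative only.
#include <cstdio>
#include <vector>
#include <mpi.h>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);

    int rank = 0, size = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    const int n = 4;
    // Each rank exposes a send buffer of doubles through an RMA window.
    std::vector<double> send(n, rank + 0.5);
    std::vector<double> recv(n, 0.0);

    MPI_Win win;
    // The displacement unit is sizeof(double); with sizeof(int) here,
    // remote displacements would address the wrong bytes (the old bug).
    MPI_Win_create(send.data(), n * sizeof(double), sizeof(double),
        MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    MPI_Win_fence(MPI_MODE_NOPRECEDE, win); // Fence, no preceding RMA calls

    // Read the neighbour's buffer; MPI_DOUBLE on both the origin and
    // target side matches the double buffers, so no reinterpretation.
    int fromRank = (rank + 1) % size;
    MPI_Get(recv.data(), n, MPI_DOUBLE, fromRank, 0, n, MPI_DOUBLE, win);

    MPI_Win_fence(MPI_MODE_NOSUCCEED, win); // Fence, no RMA calls follow

    std::printf("rank %d received %.1f from rank %d\n", rank, recv[0], fromRank);

    MPI_Win_free(&win);
    MPI_Finalize();
    return 0;
}

Built with mpic++ and run under mpirun -np 3, each rank prints its neighbour's value; with the old int/MPI_INT mismatch, the bytes of each double would instead be misread as integers.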
