MAIA bb96820c
Multiphysics at AIA
maia::mpi Namespace Reference

Functions

template<typename U >
void exchangeBuffer (const MInt noExDomains, const MInt *const exDomainId, const MInt *const recvSize, const MInt *const sendSize, const MPI_Comm comm, U *const receiveBuffer, const U *const sendBuffer, const MInt noDat=1)
 Generic exchange of data. More...
 
template<typename U >
MInt exchangeBufferAllToAll (const MInt sendSize, const MPI_Comm comm, U *const receiveBuffer, const U *const sendBuffer, const MInt noDat=1)
 Generic all-to-all exchange of data. More...
 
template<typename U >
void exchangeBuffer (const std::vector< MInt > &exDomains, const std::vector< std::vector< MInt > > &noValuesToRecv, const std::vector< std::vector< MInt > > &noValuesToSend, const MPI_Comm comm, const U *const sendBuffer, U *const receiveBuffer, const MInt noDat=1)
 Generic exchange of data. More...
 
template<typename U >
void exchangeBuffer (const std::vector< MInt > &exDomains, std::vector< MInt > &noValuesToRecv, std::vector< MInt > &noValuesToSend, const MPI_Comm comm, const U *const sendBuffer, U *const receiveBuffer, const MInt noDat=1)
 Generic exchange of data (std::vector version) More...
 
template<typename U >
void exchangeBuffer (const std::vector< MInt > &exDomains, const MInt *const noValuesToSend, const MInt *const noValuesToRecv, const MPI_Comm comm, const U *const sendBuffer, U *const receiveBuffer, const MInt noDat=1)
 Generic exchange of data (std::vector version) More...
 
template<typename U >
void exchangeBuffer (const std::vector< MInt > &exDomains, std::vector< MInt > &noValues, const MPI_Comm comm, const U *const sendBuffer, U *const receiveBuffer, const MInt noDat=1)
 Generic exchange of data (std::vector version) More...
 
template<typename U >
void exchangeValues (const std::vector< MInt > &exDomains, MInt noValues, const MPI_Comm comm, const U *const sendBuffer, U *const receiveBuffer, const MInt noDat=1)
 Generic exchange of data (std::vector version) More...
 
template<typename U >
void exchangeData (const MInt noNghbrDomains, const MInt *const nghbrDomains, const MInt *const noHaloCells, const MInt **const, const MInt *const noWindowCells, const MInt **const windowCells, const MPI_Comm comm, const U *const data, U *const haloBuffer, const MInt noDat=1)
 Generic exchange of data. More...
 
template<typename U >
void exchangeData (const std::vector< MInt > &nghbrDomains, const MInt *const noHaloCells, MInt **const haloCells, MInt *const noWindowCells, MInt **const windowCells, const MPI_Comm comm, const U *const data, U *const haloBuffer, const MInt noDat=1)
 Generic exchange of data. More...
 
template<typename U >
void exchangeData (const MInt noNghbrDomains, const MInt *const nghbrDomains, const MInt *const noHaloCells, const MInt **const haloCells, const MInt *const noWindowCells, const MInt **const windowCells, const MPI_Comm comm, U *const data, const MInt noDat=1)
 Generic exchange of data. More...
 
template<typename U >
void exchangeData (const std::vector< MInt > &nghbrDomains, const MInt *const noHaloCells, MInt **const haloCells, MInt *const noWindowCells, MInt **const windowCells, const MPI_Comm comm, U *const data, const MInt noDat=1)
 Generic exchange of data. More...
 
template<typename U >
void reverseExchangeData (const MInt noNghbrDomains, const MInt *const nghbrDomains, const MInt *const noHaloCells, const MInt **const haloCells, const MInt *const noWindowCells, const MInt **const, const MPI_Comm comm, const U *const data, U *const windowBuffer, const MInt noDat=1)
 Generic reverse exchange of data. More...
 
template<typename U >
void exchangeData (const MInt noNghbrDomains, const MInt *const nghbrDomains, const std::vector< std::vector< MInt > > &haloCellVec, const std::vector< std::vector< MInt > > &windowCellVec, const MPI_Comm comm, U *const data, const MInt noDat=1)
 Generic exchange of data. More...
 
template<typename U >
void exchangeData (const MInt noNghbrDomains, const MInt *const nghbrDomains, const std::vector< std::vector< MInt > > &haloCellVec, const std::vector< std::vector< MInt > > &windowCellVec, const MPI_Comm comm, U *const data, U *const haloBuffer, const MInt noDat=1)
 Generic exchange of data. More...
 
template<typename U >
void exchangeData (const std::vector< MInt > &nghbrDomains, const std::vector< std::vector< MInt > > &haloCellVec, const std::vector< std::vector< MInt > > &windowCellVec, const MPI_Comm comm, U *const data, const MInt noDat=1)
 Generic exchange of data. Note: this is the overload called by exchangeData in the solvers. More...
 
template<std::size_t N>
void exchangeBitset (const std::vector< MInt > &nghbrDomains, const std::vector< std::vector< MInt > > &haloCellVec, const std::vector< std::vector< MInt > > &windowCellVec, const MPI_Comm comm, std::bitset< N > *const data, const MInt noCells, const MInt noDat=1)
 Generic exchange of data. More...
 
template<typename U >
void exchangeData (const std::vector< MInt > &nghbrDomains, std::vector< std::vector< MInt > > &haloCellVec, std::vector< std::vector< MInt > > &windowCellVec, const MPI_Comm comm, U *const data, U *const haloBuffer, const MInt noDat=1)
 Generic exchange of data. More...
 
template<typename U >
void reverseExchangeData (const MInt noNghbrDomains, const MInt *const nghbrDomains, const std::vector< std::vector< MInt > > &haloCellVec, const std::vector< std::vector< MInt > > &windowCellVec, const MPI_Comm comm, const U *const data, U *const windowBuffer, const MInt noDat=1)
 Generic reverse exchange of data. More...
 
template<typename U >
void reverseExchangeData (const std::vector< MInt > &nghbrDomains, const std::vector< std::vector< MInt > > &haloCellVec, const std::vector< std::vector< MInt > > &windowCellVec, const MPI_Comm comm, const U *const data, U *const windowBuffer, const MInt noDat=1)
 Generic reverse exchange of data. More...
 
template<typename U >
void exchangeScattered (const std::vector< MInt > &nghbrDomains, std::vector< MInt > &sendDomainIndices, std::vector< U > &sendData, const MPI_Comm comm, std::vector< MInt > &recvOffsets, std::vector< U > &recvBuffer, const MInt noDat=1)
 Generic exchange of scattered data. More...
 
MUint getBufferSize (const std::vector< std::vector< MInt > > &exchangeCells)
 Compute the total buffer size, i.e. the summed number of exchange cells. More...
 
template<class dataType >
void exchangeData (const dataType *const sendBuffer, const MInt domainId, const MInt noDomains, const MPI_Comm mpiComm, const MInt dataBlockSize, const MInt *const dataSizeSend, const MInt *const dataSizeRecv, dataType *const recvBuffer)
 Communicate variable amounts of data from each domain to all other domains. Data destined for the same domain is copied locally. More...
 
template<class dataType >
void exchangeData (const dataType *const sendBuffer, const MInt domainId, const MInt noNghbrDomains, const MInt *const nghbrDomainIds, const MPI_Comm mpiComm, const MInt dataBlockSize, const MInt *const dataSizeSend, const MInt *const dataSizeRecv, dataType *const recvBuffer)
 Communicate variable amounts of data from each domain to all neighboring domains. More...
 
template<typename DataType >
void assembleDataBuffer (const MInt noCells, const MInt dataBlockSize, const DataType *const data, const MInt *const sortedId, DataType *const buffer)
 Assemble data buffer according to given sorting order. More...
 
template<typename DataType >
void communicateData (const DataType *const data, const MInt noCells, const MInt *const sortedCellId, const MInt noDomains, const MInt domainId, const MPI_Comm mpiComm, const MInt *const noCellsToSendByDomain, const MInt *const noCellsToReceiveByDomain, const MInt dataBlockSize, DataType *const buffer)
 Assemble given data in send buffer and communicate. More...
 
template<std::size_t N>
void communicateBitsetData (const std::bitset< N > *const data, const MInt noCells, const MInt *const sortedCellId, const MInt noDomains, const MInt domainId, const MPI_Comm mpiComm, const MInt *const noCellsToSendByDomain, const MInt *const noCellsToReceiveByDomain, const MInt dataBlockSize, std::bitset< N > *const buffer)
 
template<typename DataType >
void communicateGlobalyOrderedData (DataType const *const sendData, const MInt noLocIds, const MInt noGlobalIds, const MInt nRows, const MInt nCols, const MInt noDomains, const MInt domainId, const MPI_Comm mpiComm, const MIntScratchSpace &localToGlobal, const MIntScratchSpace &dataOffsets, ScratchSpace< DataType > &recvData)
 Communicate (optionally) solver-structured data sorted by a global Id. More...
 
template<class T >
std::vector< T > mpiExchangePointToPoint (const T *const sendBuffer, const MInt *const snghbrs, const MInt nosnghbrs, const MInt *const sendcounts, const MInt *const rnghbrs, const MInt nornghbrs, const MPI_Comm &mpi_comm, const MInt domainId, const MInt dataBlockSize, MInt *const recvcounts_=nullptr, MInt *const rdispls_=nullptr)
 
template<typename U >
void reverseExchangeAddData (const std::vector< MInt > &nghbrDomains, const std::vector< std::vector< MInt > > &haloCellVec, const std::vector< std::vector< MInt > > &windowCellVec, const MPI_Comm comm, U **const data, const MInt noDat=1)
 Generic reverse exchange from halo to window cells; the halo-cell values are added to the window-cell values. More...
 
template<typename U >
void reverseExchangeAddData (const std::vector< MInt > &nghbrDomains, const std::vector< std::vector< MInt > > &haloCellVec, const std::vector< std::vector< MInt > > &windowCellVec, const MPI_Comm comm, U *const data, const MInt noDat=1)
 Generic reverse exchange from halo to window cells; the halo-cell values are added to the window-cell values (plain pointer version). More...
 

Function Documentation

◆ assembleDataBuffer()

template<typename DataType >
void maia::mpi::assembleDataBuffer ( const MInt  noCells,
const MInt  dataBlockSize,
const DataType *const  data,
const MInt *const  sortedId,
DataType *const  buffer 
)

Definition at line 841 of file mpiexchange.h.

842 {
843 TRACE();
844
845 for(MInt cellId = 0; cellId < noCells; cellId++) {
846 // Skip cells without valid index
847 if(sortedId[cellId] < 0) {
848 continue;
849 }
850
851 std::copy_n(&data[dataBlockSize * cellId], dataBlockSize, &buffer[dataBlockSize * sortedId[cellId]]);
852 }
853}
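
A minimal usage sketch (illustrative values, not from the MAIA sources; assumes maiatypes.h for MInt): each dataBlockSize-sized block is copied from cell position cellId to block position sortedId[cellId], and cells with a negative sortedId are skipped.

  const MInt noCells = 4;
  const MInt dataBlockSize = 2;
  const MInt data[8] = {10, 11, 20, 21, 30, 31, 40, 41};
  const MInt sortedId[4] = {2, 0, -1, 1};  // cell 2 has no valid target and is skipped
  MInt buffer[6] = {0};                    // 3 valid cells * dataBlockSize entries
  maia::mpi::assembleDataBuffer(noCells, dataBlockSize, data, sortedId, buffer);
  // buffer now holds {20, 21, 40, 41, 10, 11}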

◆ communicateBitsetData()

template<std::size_t N>
void maia::mpi::communicateBitsetData ( const std::bitset< N > *const  data,
const MInt  noCells,
const MInt *const  sortedCellId,
const MInt  noDomains,
const MInt  domainId,
const MPI_Comm  mpiComm,
const MInt *const  noCellsToSendByDomain,
const MInt *const  noCellsToReceiveByDomain,
const MInt  dataBlockSize,
std::bitset< N > *const  buffer 
)

Definition at line 873 of file mpiexchange.h.

876 {
877 TRACE();
878
879 ScratchSpace<MUlong> data2(dataBlockSize * noCells, FUN_, "data2");
880 ScratchSpace<MUlong> buffer2(dataBlockSize * noCellsToReceiveByDomain[noDomains], FUN_, "buffer2");
881
882 for(MInt i = 0; i < (signed)data2.size(); i++) {
883 data2[i] = data[i].to_ulong();
884 }
885
886 communicateData(data2.data(), noCells, sortedCellId, noDomains, domainId, mpiComm, noCellsToSendByDomain,
887 noCellsToReceiveByDomain, dataBlockSize, buffer2.data());
888
889 for(MInt i = 0; i < (signed)buffer2.size(); i++) {
890 buffer[i] = std::bitset<N>(buffer2[i]);
891 }
892}

◆ communicateData()

template<typename DataType >
void maia::mpi::communicateData ( const DataType *const  data,
const MInt  noCells,
const MInt *const  sortedCellId,
const MInt  noDomains,
const MInt  domainId,
const MPI_Comm  mpiComm,
const MInt *const  noCellsToSendByDomain,
const MInt *const  noCellsToReceiveByDomain,
const MInt  dataBlockSize,
DataType *const  buffer 
)

Definition at line 858 of file mpiexchange.h.

861 {
862 TRACE();
863
864 ScratchSpace<DataType> sendBuffer(dataBlockSize * std::max(noCellsToSendByDomain[noDomains], 1), FUN_, "sendBuffer");
865
866 maia::mpi::assembleDataBuffer(noCells, dataBlockSize, data, sortedCellId, &sendBuffer[0]);
867 maia::mpi::exchangeData(&sendBuffer[0], domainId, noDomains, mpiComm, dataBlockSize, noCellsToSendByDomain,
868 noCellsToReceiveByDomain, buffer);
869}
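
A usage sketch (hypothetical redistribution step; MFloat is assumed to be MAIA's floating-point type). Note from the listing above that noCellsToSendByDomain and noCellsToReceiveByDomain carry one entry per domain plus a final entry holding the total, which is used to size the internal send buffer.

  // Reorder local cell data into rank order and exchange it in one call.
  void redistributeCells(const MFloat* const cellData, const MInt noCells,
                         const MInt* const sortedCellId, const MInt noDomains,
                         const MInt domainId, const MPI_Comm comm,
                         const MInt* const noCellsToSendByDomain,    // noDomains + 1 entries
                         const MInt* const noCellsToReceiveByDomain, // noDomains + 1 entries
                         const MInt noVars, MFloat* const recvBuffer) {
    maia::mpi::communicateData(cellData, noCells, sortedCellId, noDomains, domainId, comm,
                               noCellsToSendByDomain, noCellsToReceiveByDomain, noVars,
                               recvBuffer);
  }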

◆ communicateGlobalyOrderedData()

template<typename DataType >
void maia::mpi::communicateGlobalyOrderedData ( DataType const *const  sendData,
const MInt  noLocIds,
const MInt  noGlobalIds,
const MInt  nRows,
const MInt  nCols,
const MInt  noDomains,
const MInt  domainId,
const MPI_Comm  mpiComm,
const MIntScratchSpace localToGlobal,
const MIntScratchSpace dataOffsets,
ScratchSpace< DataType > &  recvData 
)
Author
Konstantin
Date
2018-09-28
Parameters
[in]  sendData       Pointer to globally and locally unsorted (optionally solver-structured) data
[in]  noLocIds       Number of local data points
[in]  noGlobalIds    Number of global data points
[in]  nRows          Number of rows in the solver-structured send data
[in]  nCols          Number of columns in the solver-structured send data
[in]  localToGlobal  Mapping of local ids to global ids
[in]  dataOffsets    Offsets of the global data points after communication
[out] recvData       Globally ordered (solver-structured) data array

Definition at line 919 of file mpiexchange.h.

922 {
923 TRACE();
924
925 ScratchSpace<std::vector<MInt>> sendIds(noDomains, AT_, "sendIds");
926 ScratchSpace<std::vector<MInt>> recvIds(noDomains, AT_, "recvIds");
 927 MIntScratchSpace globalToLocal(noGlobalIds, AT_, "globalToLocal");
928 globalToLocal.fill(-1);
929 for(MInt localId = 0; localId < noLocIds; localId++) {
930 MInt globalId = localToGlobal(localId);
931 globalToLocal(globalId) = localId;
932 for(MLong dom = 0; dom < noDomains; dom++) {
933 if(globalId < dataOffsets(dom + 1) && globalId >= dataOffsets(dom)) {
934 sendIds[dom].push_back(globalId);
935 }
936 }
937 }
938 MIntScratchSpace sendCount(noDomains, AT_, "sendCount");
939 MIntScratchSpace recvCount(noDomains, AT_, "recvCount");
940 for(MLong dom = 0; dom < noDomains; dom++) {
941 sendCount[dom] = sendIds[dom].size();
942 sendIds[dom].shrink_to_fit();
943 }
944 MPI_Alltoall(&sendCount[0], 1, MPI_INT, &recvCount[0], 1, MPI_INT, mpiComm, AT_, "sendCount[0]", "recvCount[0]");
945 for(MInt dom = 0; dom < noDomains; dom++) {
946 recvIds[dom].resize(recvCount[dom]);
947 recvIds[dom].shrink_to_fit();
948 }
949 MInt mpiCount = 0;
950 ScratchSpace<MPI_Request> sendReq(noDomains, AT_, "sendReq");
951 sendReq.fill(MPI_REQUEST_NULL);
952 for(MInt dom = 0; dom < noDomains; dom++) {
953 if(sendCount[dom] == 0) continue;
954 MPI_Issend(&sendIds[dom].front(), sendCount[dom], MPI_INT, dom, 78, mpiComm, &sendReq[mpiCount++], AT_,
955 "sendIds[dom].front()");
956 }
957 for(MInt dom = 0; dom < noDomains; dom++) {
958 if(recvCount[dom] == 0) continue;
959 MPI_Recv(&recvIds[dom].front(), recvCount[dom], MPI_INT, dom, 78, mpiComm, MPI_STATUS_IGNORE, AT_,
960 "recvIds[dom].front()");
961 }
962 if(mpiCount > 0) MPI_Waitall(mpiCount, &sendReq[0], MPI_STATUSES_IGNORE, AT_);
963
964 ScratchSpace<std::vector<DataType>> sendDataBuf(noDomains, AT_, "sendDataBuf");
965 for(MInt dom = 0; dom < noDomains; dom++) {
966 sendDataBuf[dom].resize(nRows * nCols * sendCount[dom]);
967 sendDataBuf[dom].shrink_to_fit();
968 for(MInt cnt = 0; cnt < sendCount[dom]; cnt++) {
969 MInt globalId = sendIds[dom][cnt];
970 MInt localId = globalToLocal(globalId);
971 if(localId < 0) {
972 std::cerr << "localId " << localId << " globalId " << globalId << " cnt " << cnt << " sendIds[dom].size() "
973 << sendIds[dom].size() << " sendCount[dom] " << sendCount[dom] << std::endl;
974 mTerm(1, AT_, "localId not found -> communication failed.");
975 }
976 for(MInt row = 0; row < nRows; row++) {
977 for(MInt col = 0; col < nCols; col++) {
978 sendDataBuf[dom][cnt * nRows * nCols + nCols * row + col] =
979 sendData[localId * nRows * nCols + nCols * row + col];
980 }
981 }
982 }
983 }
984
985 MIntScratchSpace recvDispl(noDomains + 1, AT_, "recvDispl");
986 recvDispl.fill(0);
987 recvCount[0] = recvIds[0].size() * nRows * nCols;
988 sendCount[0] = sendIds[0].size() * nRows * nCols;
989 for(MLong dom = 1; dom < noDomains; dom++) {
990 recvCount[dom] = recvIds[dom].size() * nRows * nCols;
991 sendCount[dom] = sendIds[dom].size() * nRows * nCols;
992 recvDispl[dom] = recvDispl[dom - 1] + recvCount[dom - 1];
993 }
994 recvDispl[noDomains] = recvDispl[noDomains - 1] + recvCount[noDomains - 1];
995
996 sendReq.fill(MPI_REQUEST_NULL);
997 mpiCount = 0;
998 ScratchSpace<DataType> tmpData(recvDispl[noDomains], AT_, "tmpData");
999 for(MInt dom = 0; dom < noDomains; dom++) {
1000 if(sendCount[dom] == 0) continue;
1001 MPI_Issend(&sendDataBuf[dom].front(), sendCount[dom], type_traits<DataType>::mpiType(), dom, 82, mpiComm,
1002 &sendReq[mpiCount++], AT_, "sendDataBuf[dom].front()");
1003 }
1004 for(MInt dom = 0; dom < noDomains; dom++) {
1005 if(recvCount[dom] == 0) continue;
1006 MPI_Recv(&tmpData[recvDispl[dom]], recvCount[dom], type_traits<DataType>::mpiType(), dom, 82, mpiComm,
1007 MPI_STATUS_IGNORE, AT_, "tmpData[recvDispl[dom]]");
1008 }
1009 if(mpiCount > 0) MPI_Waitall(mpiCount, &sendReq[0], MPI_STATUSES_IGNORE, AT_);
1010
1011 recvDispl.fill(0);
1012 for(MLong dom = 1; dom < noDomains; dom++) {
1013 recvDispl[dom] = recvDispl[dom - 1] + recvIds[dom - 1].size();
1014 }
1015
1016 for(MInt dom = 0; dom < noDomains; dom++) {
1017 for(MUint cnt = 0; cnt < recvIds[dom].size(); cnt++) {
1018 MInt globalId = recvIds[dom][cnt];
1019 MInt recvId = globalId - dataOffsets(domainId);
1020 if(recvId < 0 || recvId >= (dataOffsets(domainId + 1) - dataOffsets(domainId))) {
1021 std::cerr << "recvId " << recvId << " globalId " << globalId << "dataOffsets(domainId) "
1022 << dataOffsets(domainId) << std::endl;
1023 mTerm(1, AT_, "recvId exceeds array dimensions.");
1024 }
1025 for(MInt row = 0; row < nRows; row++) {
1026 for(MInt col = 0; col < nCols; col++) {
1027 recvData[recvId * nCols * nRows + nCols * row + col] =
1028 tmpData[(recvDispl[dom] + cnt) * nCols * nRows + nCols * row + col];
1029 }
1030 }
1031 }
1032 }
1033}
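
The dataOffsets array partitions the global id range so that domain d owns global ids in [dataOffsets(d), dataOffsets(d+1)). A sketch of building such offsets from per-domain counts (noIdsPerDomain is a hypothetical input):

  // Exclusive prefix sum; dataOffsets(noDomains) holds the global total.
  MIntScratchSpace dataOffsets(noDomains + 1, AT_, "dataOffsets");
  dataOffsets(0) = 0;
  for(MInt d = 0; d < noDomains; d++) {
    dataOffsets(d + 1) = dataOffsets(d) + noIdsPerDomain[d];
  }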

◆ exchangeBitset()

template<std::size_t N>
void maia::mpi::exchangeBitset ( const std::vector< MInt > &  nghbrDomains,
const std::vector< std::vector< MInt > > &  haloCellVec,
const std::vector< std::vector< MInt > > &  windowCellVec,
const MPI_Comm  comm,
std::bitset< N > *const  data,
const MInt  noCells,
const MInt  noDat = 1 
)
Author
Lennart Schneiders
Date
October 2017

Definition at line 516 of file mpiexchange.h.

518 {
519 TRACE();
520 static_assert(N <= 64, "conversion to ulong not appropriate, change to ullong!");
521 const auto noNghbrDomains = (signed)nghbrDomains.size();
522 ScratchSpace<MUlong> tmp_data(noCells, AT_, "tmp_data");
523 for(MUint i = 0; i < nghbrDomains.size(); i++) {
524 for(MInt windowCellId : windowCellVec[i]) {
525 tmp_data[windowCellId] = data[windowCellId].to_ulong();
526 }
527 }
528 exchangeData(noNghbrDomains, nghbrDomains.data(), haloCellVec, windowCellVec, comm, tmp_data.data(), noDat);
529 for(MUint i = 0; i < nghbrDomains.size(); i++) {
530 for(MInt haloCellId : haloCellVec[i]) {
531 data[haloCellId] = std::bitset<N>(tmp_data[haloCellId]);
532 }
533 }
534}

◆ exchangeBuffer() [1/5]

template<typename U >
void maia::mpi::exchangeBuffer ( const MInt  noExDomains,
const MInt *const  exDomainId,
const MInt *const  recvSize,
const MInt *const  sendSize,
const MPI_Comm  comm,
U *const  receiveBuffer,
const U *const  sendBuffer,
const MInt  noDat = 1 
)
inline
Author
Lennart Schneiders
Date
October 2017
Template Parameters
U              Datatype to be exchanged
Parameters
noExDomains    Number of exchanging domains
exDomainId     Ranks of the exchanging domains
recvSize       Number of values to be received from each exchange partner
sendSize       Number of values to be sent to each exchange partner
comm           MPI communicator to be used
receiveBuffer  Buffer used to receive data
sendBuffer     Buffer used to send data
noDat          Number of data elements to be sent

Definition at line 44 of file mpiexchange.h.

46 {
47 TRACE();
48
49 // 0. prepare
50 const MPI_Datatype DTYPE = type_traits<U>::mpiType();
51 MInt tag = 612;
52 ASSERT(tag < MPI_TAG_UB, "");
53 ASSERT(noExDomains > 0, "");
54 ScratchSpace<MPI_Request> sendRequests(noExDomains, AT_, "sendRequests");
55 ScratchSpace<MPI_Request> recvRequests(noExDomains, AT_, "recvRequests");
56 sendRequests.fill(MPI_REQUEST_NULL);
57 recvRequests.fill(MPI_REQUEST_NULL);
58
59 // Post all receive requests
60 for(MInt i = 0, receiveCount = 0; i < noExDomains; i++) {
61 MPI_Irecv(&(receiveBuffer[receiveCount]), noDat * recvSize[i], DTYPE, exDomainId[i], tag, comm, &recvRequests[i],
62 AT_, "receiveBuffer");
63 receiveCount += noDat * recvSize[i];
64 }
65
66 // Start sending
67 for(MInt i = 0, sendCount = 0; i < noExDomains; i++) {
68#if defined(HOST_Klogin)
69 MPI_Issend(const_cast<U*>(&sendBuffer[sendCount]), noDat * sendSize[i], DTYPE, exDomainId[i], tag, comm,
70 &sendRequests[i]);
71#else
72 MPI_Isend(&sendBuffer[sendCount], noDat * sendSize[i], DTYPE, exDomainId[i], tag, comm, &sendRequests[i], AT_,
73 "&sendBuffer[sendCount]");
74#endif
75 sendCount += noDat * sendSize[i];
76 }
77
78 // Wait for all send and receive requests to finish
79 MPI_Waitall(noExDomains, &recvRequests[0], MPI_STATUSES_IGNORE, AT_);
80 MPI_Waitall(noExDomains, &sendRequests[0], MPI_STATUSES_IGNORE, AT_);
81}
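
A minimal usage sketch (hypothetical ranks and sizes, not from the MAIA sources): both buffers are packed contiguously in the order of exDomainId, with noDat * size[i] entries per partner.

  void exampleExchange(const MPI_Comm comm) {
    const MInt noExDomains = 2;
    const MInt exDomainId[2] = {3, 7};       // neighbor ranks (hypothetical)
    const MInt sendSize[2] = {2, 1};         // values sent to each partner
    const MInt recvSize[2] = {1, 2};         // values received from each partner
    const MInt sendBuffer[3] = {40, 41, 50}; // partner 0 data first, then partner 1
    MInt receiveBuffer[3];                   // sized as the sum of recvSize (noDat = 1)
    maia::mpi::exchangeBuffer(noExDomains, exDomainId, recvSize, sendSize, comm,
                              receiveBuffer, sendBuffer);
  }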

◆ exchangeBuffer() [2/5]

template<typename U >
void maia::mpi::exchangeBuffer ( const std::vector< MInt > &  exDomains,
const MInt *const  noValuesToSend,
const MInt *const  noValuesToRecv,
const MPI_Comm  comm,
const U *const  sendBuffer,
U *const  receiveBuffer,
const MInt  noDat = 1 
)
inline
Author
Lennart Schneiders
Date
October 2017
Template Parameters
U
Parameters
exDomains       Domains with which to exchange data
noValuesToSend  Number of values to send
noValuesToRecv  Number of values to receive
comm            MPI communicator to be used
sendBuffer      Buffer used to send data
receiveBuffer   Buffer used to receive data
noDat           Number of values per data entry

Definition at line 212 of file mpiexchange.h.

218 {
219 TRACE();
220
221 // 0. prepare
222 const auto noNghbrDomains = exDomains.size();
223 ASSERT(noNghbrDomains > 0, "");
224
225 // 1. exchange
226 exchangeBuffer(noNghbrDomains, exDomains.data(), &noValuesToRecv[0], &noValuesToSend[0], comm, receiveBuffer,
227 sendBuffer, noDat);
228}

◆ exchangeBuffer() [3/5]

template<typename U >
void maia::mpi::exchangeBuffer ( const std::vector< MInt > &  exDomains,
const std::vector< std::vector< MInt > > &  noValuesToRecv,
const std::vector< std::vector< MInt > > &  noValuesToSend,
const MPI_Comm  comm,
const U *const  sendBuffer,
U *const  receiveBuffer,
const MInt  noDat = 1 
)
inline
Author
Lennart Schneiders
Date
October 2017
Template Parameters
U
Parameters
exDomains       Domains with which to exchange data
noValuesToRecv  Number of values to receive (in this type because of m_haloCells)
noValuesToSend  Number of values to send (in this type because of m_windowCells)
comm            MPI communicator to be used
sendBuffer      Buffer used to send data
receiveBuffer   Buffer used to receive data
noDat           Number of values per data entry

Definition at line 144 of file mpiexchange.h.

150 {
151 TRACE();
152
153 // 0. prepare
154 const auto noNghbrDomains = exDomains.size();
155 ASSERT(noNghbrDomains > 0, "");
 156 ScratchSpace<MInt> noHaloCells(noNghbrDomains, AT_, "noHaloCells");
 157 ScratchSpace<MInt> noWindowCells(noNghbrDomains, AT_, "noWindowCells");
158 for(MUint i = 0; i < noNghbrDomains; i++) {
159 noHaloCells[i] = noValuesToRecv[i].size();
160 noWindowCells[i] = noValuesToSend[i].size();
161 }
162
163 // 1. exchange
164 exchangeBuffer(noNghbrDomains, exDomains.data(), &noHaloCells[0], &noWindowCells[0], comm, receiveBuffer, sendBuffer,
165 noDat);
166}

◆ exchangeBuffer() [4/5]

template<typename U >
void maia::mpi::exchangeBuffer ( const std::vector< MInt > &  exDomains,
std::vector< MInt > &  noValues,
const MPI_Comm  comm,
const U *const  sendBuffer,
U *const  receiveBuffer,
const MInt  noDat = 1 
)
inline
Author
Lennart Schneiders
Date
October 2017
Template Parameters
U
Parameters
exDomains      Domains with which to exchange data
noValues       Number of values to send and receive
comm           MPI communicator to be used
sendBuffer     Buffer used to send data
receiveBuffer  Buffer used to receive data
noDat          Number of values per data entry

Definition at line 242 of file mpiexchange.h.

247 {
248 TRACE();
249
250 // 0. prepare
251 const auto noNghbrDomains = exDomains.size();
252 ASSERT(noNghbrDomains > 0, "");
253
254 // 1. exchange
255 exchangeBuffer(noNghbrDomains, exDomains.data(), &noValues[0], &noValues[0], comm, receiveBuffer, sendBuffer, noDat);
256}

◆ exchangeBuffer() [5/5]

template<typename U >
void maia::mpi::exchangeBuffer ( const std::vector< MInt > &  exDomains,
std::vector< MInt > &  noValuesToRecv,
std::vector< MInt > &  noValuesToSend,
const MPI_Comm  comm,
const U *const  sendBuffer,
U *const  receiveBuffer,
const MInt  noDat = 1 
)
inline
Author
Lennart Schneiders
Date
October 2017
Template Parameters
U
Parameters
exDomains       Domains with which to exchange data
noValuesToSend  Number of values to send
noValuesToRecv  Number of values to receive
comm            MPI communicator to be used
sendBuffer      Buffer used to send data
receiveBuffer   Buffer used to receive data
noDat           Number of values per data entry

Definition at line 181 of file mpiexchange.h.

187 {
188 TRACE();
189
190 // 0. prepare
191 const auto noNghbrDomains = exDomains.size();
192 ASSERT(noNghbrDomains > 0, "");
193
194 // 1. exchange
195 exchangeBuffer(noNghbrDomains, exDomains.data(), &noValuesToRecv[0], &noValuesToSend[0], comm, receiveBuffer,
196 sendBuffer, noDat);
197}

◆ exchangeBufferAllToAll()

template<typename U >
MInt maia::mpi::exchangeBufferAllToAll ( const MInt  sendSize,
const MPI_Comm  comm,
U *const  receiveBuffer,
const U *const  sendBuffer,
const MInt  noDat = 1 
)
inline
Author
Sven Berger
Date
September 2019
Template Parameters
U              Datatype to be exchanged
Parameters
sendSize       Number of values to be sent
comm           MPI communicator to be used
receiveBuffer  Buffer used to receive data
sendBuffer     Buffer used to send data
noDat          Number of data elements to be sent

Definition at line 95 of file mpiexchange.h.

96 {
97 TRACE();
98
99 // 0. prepare
100 const MPI_Datatype DTYPE = type_traits<U>::mpiType();
101 const MInt tag = 612;
102 const MInt noDomains = globalNoDomains();
103 ASSERT(tag < MPI_TAG_UB, "");
104 ScratchSpace<MPI_Request> mpiRequest(noDomains, AT_, "mpiRequest");
105 MIntScratchSpace noToRecv(noDomains, AT_, "noToRecv");
106 mpiRequest.fill(MPI_REQUEST_NULL);
107
108 // 1. gather number of values to be recved
109 MPI_Allgather(&sendSize, 1, MPI_INT, &noToRecv[0], 1, MPI_INT, comm, AT_, "sendSize", "noToRecv");
110 const MInt totalNo = std::accumulate(noToRecv.begin(), noToRecv.end(), -sendSize);
111 ScratchSpace<U> temp(sendSize * noDat + 1, AT_, "temp");
112 std::copy(&sendBuffer[0], &sendBuffer[sendSize * noDat], &temp[0]);
113
114 // 2. send
115 MInt recvCount = 0;
116 for(MInt i = 0; i < noDomains; i++) {
117 if(globalDomainId() == i) {
118 MPI_Ibcast(&temp[0], noDat * sendSize, DTYPE, i, comm, &mpiRequest[i], AT_, "temp");
119 } else {
120 MPI_Ibcast(&receiveBuffer[recvCount], noDat * noToRecv[i], DTYPE, i, comm, &mpiRequest[i], AT_, "receiveBuffer");
121 recvCount += noDat * noToRecv[i];
122 }
123 }
124
125 MPI_Waitall(noDomains, &mpiRequest[0], MPI_STATUSES_IGNORE, AT_);
126 return totalNo;
127}
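
A usage sketch (assumes comm spans the ranks reported by globalNoDomains(), and that maxTotal bounds the data received from the other ranks): the return value is the number of values received, and the rank's own contribution is excluded from the receive buffer.

  void gatherFromAll(const MPI_Comm comm, const std::vector<MInt>& myValues,
                     std::vector<MInt>& fromOthers, const MInt maxTotal) {
    fromOthers.resize(maxTotal);
    const MInt received = maia::mpi::exchangeBufferAllToAll(
        static_cast<MInt>(myValues.size()), comm, fromOthers.data(), myValues.data());
    fromOthers.resize(received); // excludes this rank's own values
  }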

◆ exchangeData() [1/10]

template<class dataType >
void maia::mpi::exchangeData ( const dataType *const  sendBuffer,
const MInt  domainId,
const MInt  noDomains,
const MPI_Comm  mpiComm,
const MInt  dataBlockSize,
const MInt *const  dataSizeSend,
const MInt *const  dataSizeRecv,
dataType *const  recvBuffer 
)
Author
Ansgar Niemoeller (ansgar) a.niemoeller@aia.rwth-aachen.de
Date
2016-12-22
Parameters
[in]  sendBuffer     Pointer to the data send buffer
[in]  dataBlockSize  Data block size, e.g. number of variables per cell
[in]  dataSizeSend   Amount of data to send to each domain, e.g. number of cells
[in]  dataSizeRecv   Amount of data to receive from each domain
[out] recvBuffer     Pointer to the data receive buffer

Definition at line 699 of file mpiexchange.h.

701 {
702 TRACE();
703
704 using namespace maia;
705
706 ScratchSpace<MPI_Request> recvRequests(noDomains, AT_, "recvRequests");
707 ScratchSpace<MPI_Request> sendRequests(noDomains, AT_, "sendRequests");
708
709 MIntScratchSpace sendOffsets(noDomains, AT_, "sendOffsets");
710 MIntScratchSpace recvOffsets(noDomains, AT_, "recvOffsets");
711
712 sendOffsets[0] = 0;
713 recvOffsets[0] = 0;
714 // Determine send/receive data offsets
715 for(MInt i = 1; i < noDomains; i++) {
716 sendOffsets[i] = sendOffsets[i - 1] + dataBlockSize * dataSizeSend[i - 1];
717 recvOffsets[i] = recvOffsets[i - 1] + dataBlockSize * dataSizeRecv[i - 1];
718 }
719
720 // Start receiving ...
721 MInt recvCount = 0;
722 for(MInt i = 0; i < noDomains; i++) {
723 // ... only if there is any data and this is not the current domain
724 if(dataSizeRecv[i] > 0 && i != domainId) {
725 MPI_Irecv(&recvBuffer[recvOffsets[i]], dataBlockSize * dataSizeRecv[i], type_traits<dataType>::mpiType(), i, i,
726 mpiComm, &recvRequests[recvCount], AT_, "recvBuffer[recvOffsets[i]]");
727 recvCount++;
728 }
729 }
730
731 // Start sending ...
732 MInt sendCount = 0;
733 for(MInt i = 0; i < noDomains; i++) {
734 // ... if there is any data
735 if(dataSizeSend[i] > 0) {
736 if(i == domainId) {
737 // If this is the current domain just copy the data to the receive
738 // buffer
739 std::copy_n(&sendBuffer[sendOffsets[i]], dataBlockSize * dataSizeSend[i], &recvBuffer[recvOffsets[i]]);
740 } else {
741#if defined(HOST_Klogin)
742 MPI_Isend(const_cast<dataType*>(&sendBuffer[sendOffsets[i]]), dataBlockSize * dataSizeSend[i],
743 type_traits<dataType>::mpiType(), i, domainId, mpiComm, &sendRequests[sendCount], AT_,
744 "const_cast<dataType*>(&sendBuffer[sendOffsets[i]])");
745#else
746 MPI_Isend(&sendBuffer[sendOffsets[i]], dataBlockSize * dataSizeSend[i], type_traits<dataType>::mpiType(), i,
747 domainId, mpiComm, &sendRequests[sendCount], AT_, "sendBuffer[sendOffsets[i]]");
748#endif
749 sendCount++;
750 }
751 }
752 }
753
754 // Finish receiving
755 if(recvCount > 0) {
756 MPI_Waitall(recvCount, &recvRequests[0], MPI_STATUSES_IGNORE, AT_);
757 }
758 // Finish sending
759 if(sendCount > 0) {
760 MPI_Waitall(sendCount, &sendRequests[0], MPI_STATUSES_IGNORE, AT_);
761 }
762}
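
A buffer-layout sketch (hypothetical counts; MFloat assumed): both buffers are concatenated by rank, with dataBlockSize * dataSizeSend[i] (resp. dataSizeRecv[i]) entries for rank i, and the receive counts must match what the senders post.

  void allToAllVariable(const MInt domainId, const MInt noDomains, const MPI_Comm comm) {
    const MInt dataBlockSize = 2;
    std::vector<MInt> dataSizeSend(noDomains, 1); // one block per destination (example)
    std::vector<MInt> dataSizeRecv(noDomains, 1); // must match the senders' counts
    std::vector<MFloat> sendBuffer(dataBlockSize * noDomains, 1.0);
    std::vector<MFloat> recvBuffer(dataBlockSize * noDomains);
    maia::mpi::exchangeData(sendBuffer.data(), domainId, noDomains, comm, dataBlockSize,
                            dataSizeSend.data(), dataSizeRecv.data(), recvBuffer.data());
  }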

◆ exchangeData() [2/10]

template<class dataType >
void maia::mpi::exchangeData ( const dataType *const  sendBuffer,
const MInt  domainId,
const MInt  noNghbrDomains,
const MInt *const  nghbrDomainIds,
const MPI_Comm  mpiComm,
const MInt  dataBlockSize,
const MInt *const  dataSizeSend,
const MInt *const  dataSizeRecv,
dataType *const  recvBuffer 
)

Definition at line 767 of file mpiexchange.h.

769 {
770 TRACE();
771
772 using namespace maia;
773
774 if(noNghbrDomains == 0) {
775 return;
776 }
777
778 ScratchSpace<MPI_Request> recvRequests(noNghbrDomains, AT_, "recvRequests");
779 ScratchSpace<MPI_Request> sendRequests(noNghbrDomains, AT_, "sendRequests");
780
781 MIntScratchSpace sendOffsets(noNghbrDomains, AT_, "sendOffsets");
782 MIntScratchSpace recvOffsets(noNghbrDomains, AT_, "recvOffsets");
783
784 sendOffsets[0] = 0;
785 recvOffsets[0] = 0;
786 // Determine send/receive data offsets
787 for(MInt i = 1; i < noNghbrDomains; i++) {
788 sendOffsets[i] = sendOffsets[i - 1] + dataBlockSize * dataSizeSend[i - 1];
789 recvOffsets[i] = recvOffsets[i - 1] + dataBlockSize * dataSizeRecv[i - 1];
790 }
791
792 // Start receiving ...
793 MInt recvCount = 0;
794 for(MInt i = 0; i < noNghbrDomains; i++) {
795 const MInt nghbrDomainId = nghbrDomainIds[i];
796 // ... only if there is any data and this is not the current domain
797 if(dataSizeRecv[i] > 0 && nghbrDomainId != domainId) {
798 MPI_Irecv(&recvBuffer[recvOffsets[i]], dataBlockSize * dataSizeRecv[i], type_traits<dataType>::mpiType(),
799 nghbrDomainId, nghbrDomainId, mpiComm, &recvRequests[recvCount], AT_, "recvBuffer[recvOffsets[i]]");
800 recvCount++;
801 }
802 }
803
804 // Start sending ...
805 MInt sendCount = 0;
806 for(MInt i = 0; i < noNghbrDomains; i++) {
807 const MInt nghbrDomainId = nghbrDomainIds[i];
808 // ... if there is any data
809 if(dataSizeSend[i] > 0) {
810 if(nghbrDomainId == domainId) {
811 // If this is the current domain just copy the data to the receive
812 // buffer
813 std::copy_n(&sendBuffer[sendOffsets[i]], dataBlockSize * dataSizeSend[i], &recvBuffer[recvOffsets[i]]);
814 } else {
815#if defined(HOST_Klogin)
816 MPI_Isend(const_cast<dataType*>(&sendBuffer[sendOffsets[i]]), dataBlockSize * dataSizeSend[i],
817 type_traits<dataType>::mpiType(), nghbrDomainId, domainId, mpiComm, &sendRequests[sendCount], AT_,
818 "const_cast<dataType*>(&sendBuffer[sendOffsets[i]])");
819#else
820 MPI_Isend(&sendBuffer[sendOffsets[i]], dataBlockSize * dataSizeSend[i], type_traits<dataType>::mpiType(),
821 nghbrDomainId, domainId, mpiComm, &sendRequests[sendCount], AT_, "sendBuffer[sendOffsets[i]]");
822#endif
823 sendCount++;
824 }
825 }
826 }
827
828 // Finish receiving
829 if(recvCount > 0) {
830 MPI_Waitall(recvCount, &recvRequests[0], MPI_STATUSES_IGNORE, AT_);
831 }
832 // Finish sending
833 if(sendCount > 0) {
834 MPI_Waitall(sendCount, &sendRequests[0], MPI_STATUSES_IGNORE, AT_);
835 }
836}

◆ exchangeData() [3/10]

template<typename U >
void maia::mpi::exchangeData ( const MInt  noNghbrDomains,
const MInt *const  nghbrDomains,
const MInt *const  noHaloCells,
const MInt **const  haloCells,
const MInt *const  noWindowCells,
const MInt **const  windowCells,
const MPI_Comm  comm,
U *const  data,
const MInt  noDat = 1 
)
Author
Lennart Schneiders
Date
October 2017

Definition at line 347 of file mpiexchange.h.

349 {
350 TRACE();
351
352 // 0. prepare
353 MInt receiveCount = std::accumulate(noHaloCells, noHaloCells + noNghbrDomains, 0);
354 ScratchSpace<U> haloBuffer(mMax(1, noDat * receiveCount), AT_, "haloBuffer");
355
356 // 1. exchange
357 exchangeData(noNghbrDomains, nghbrDomains, noHaloCells, haloCells, noWindowCells, windowCells, comm, data,
358 &haloBuffer[0], noDat);
359
360 // 2. scatter
361 receiveCount = 0;
362 for(MInt i = 0; i < noNghbrDomains; i++) {
363 for(MInt j = 0; j < noHaloCells[i]; j++) {
364 for(MInt k = 0; k < noDat; k++) {
365 data[noDat * haloCells[i][j] + k] = haloBuffer[receiveCount];
366 receiveCount++;
367 }
368 }
369 }
370}
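
A sketch of this in-place variant with one neighbor and noDat = 1 (hypothetical cell ids; MFloat assumed): the window-cell value is gathered and sent, and the received value is scattered into the halo cell of data.

  void exampleInPlace(MFloat* const data, const MPI_Comm comm) {
    const MInt noNghbrDomains = 1;
    const MInt nghbrDomains[1] = {1};   // neighbor rank (hypothetical)
    const MInt noHaloCells[1] = {1};
    const MInt noWindowCells[1] = {1};
    const MInt halo0[1] = {9};          // local halo cell id
    const MInt window0[1] = {5};        // local window cell id
    const MInt* haloCells[1] = {halo0};
    const MInt* windowCells[1] = {window0};
    maia::mpi::exchangeData(noNghbrDomains, nghbrDomains, noHaloCells, haloCells,
                            noWindowCells, windowCells, comm, data);
  }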

◆ exchangeData() [4/10]

template<typename U >
void maia::mpi::exchangeData ( const MInt  noNghbrDomains,
const MInt *const  nghbrDomains,
const MInt *const  noHaloCells,
const MInt ** const  ,
const MInt *const  noWindowCells,
const MInt **const  windowCells,
const MPI_Comm  comm,
const U *const  data,
U *const  haloBuffer,
const MInt  noDat = 1 
)
Author
Lennart Schneiders
Date
October 2017

Definition at line 295 of file mpiexchange.h.

297 {
298 TRACE();
299
300 // 0. prepare
301 MInt sendCount = std::accumulate(noWindowCells, noWindowCells + noNghbrDomains, 0);
302 ScratchSpace<U> windowBuffer(mMax(1, noDat * sendCount), AT_, "windowBuffer");
303
304 // 1. gather
305 sendCount = 0;
306 for(MInt i = 0; i < noNghbrDomains; i++) {
307 for(MInt j = 0; j < noWindowCells[i]; j++) {
308 for(MInt k = 0; k < noDat; k++) {
309 windowBuffer[sendCount] = data[noDat * windowCells[i][j] + k];
310 sendCount++;
311 }
312 }
313 }
314
315 // 2. exchange
316 exchangeBuffer(noNghbrDomains, nghbrDomains, noHaloCells, noWindowCells, comm, haloBuffer, &windowBuffer[0], noDat);
317}

◆ exchangeData() [5/10]

template<typename U >
void maia::mpi::exchangeData ( const MInt  noNghbrDomains,
const MInt *const  nghbrDomains,
const std::vector< std::vector< MInt > > &  haloCellVec,
const std::vector< std::vector< MInt > > &  windowCellVec,
const MPI_Comm  comm,
U *const  data,
const MInt  noDat = 1 
)
Author
Lennart Schneiders
Date
October 2017

Definition at line 435 of file mpiexchange.h.

438 {
439 TRACE();
440 ASSERT(noNghbrDomains == (signed)windowCellVec.size() && noNghbrDomains == (signed)haloCellVec.size(), "");
441 ScratchSpace<MInt> noHaloCells(mMax(1, noNghbrDomains), AT_, "noHaloCells");
442 ScratchSpace<MInt> noWindowCells(mMax(1, noNghbrDomains), AT_, "noWindowCells");
443 ScratchSpace<const MInt*> haloCells(mMax(1, noNghbrDomains), AT_, "haloCells");
444 ScratchSpace<const MInt*> windowCells(mMax(1, noNghbrDomains), AT_, "windowCells");
445 for(MInt i = 0; i < noNghbrDomains; i++) {
446 noHaloCells[i] = (signed)haloCellVec[i].size();
447 noWindowCells[i] = (signed)windowCellVec[i].size();
448 haloCells[i] = haloCellVec[i].data();
449 windowCells[i] = windowCellVec[i].data();
450 }
451
452 exchangeData(noNghbrDomains, nghbrDomains, noHaloCells.getPointer(), haloCells.getPointer(),
453 noWindowCells.getPointer(), windowCells.getPointer(), comm, data, noDat);
454}

◆ exchangeData() [6/10]

template<typename U >
void maia::mpi::exchangeData ( const MInt  noNghbrDomains,
const MInt *const  nghbrDomains,
const std::vector< std::vector< MInt > > &  haloCellVec,
const std::vector< std::vector< MInt > > &  windowCellVec,
const MPI_Comm  comm,
U *const  data,
U *const  haloBuffer,
const MInt  noDat = 1 
)
Author
Lennart Schneiders
Date
October 2017

Definition at line 466 of file mpiexchange.h.

469 {
470 TRACE();
471 ASSERT(noNghbrDomains == (signed)windowCellVec.size() && noNghbrDomains == (signed)haloCellVec.size(), "");
472 ScratchSpace<MInt> noHaloCells(mMax(1, noNghbrDomains), AT_, "noHaloCells");
473 ScratchSpace<MInt> noWindowCells(mMax(1, noNghbrDomains), AT_, "noWindowCells");
474 ScratchSpace<const MInt*> haloCells(mMax(1, noNghbrDomains), AT_, "haloCells");
475 ScratchSpace<const MInt*> windowCells(mMax(1, noNghbrDomains), AT_, "windowCells");
476 for(MInt i = 0; i < noNghbrDomains; i++) {
477 noHaloCells[i] = (signed)haloCellVec[i].size();
478 noWindowCells[i] = (signed)windowCellVec[i].size();
479 haloCells[i] = haloCellVec[i].data();
480 windowCells[i] = windowCellVec[i].data();
481 }
482
483 exchangeData(noNghbrDomains, nghbrDomains, noHaloCells.getPointer(), haloCells.getPointer(),
484 noWindowCells.getPointer(), windowCells.getPointer(), comm, data, haloBuffer, noDat);
485}

◆ exchangeData() [7/10]

template<typename U >
void maia::mpi::exchangeData ( const std::vector< MInt > &  nghbrDomains,
const MInt *const  noHaloCells,
MInt **const  haloCells,
MInt *const  noWindowCells,
MInt **const  windowCells,
const MPI_Comm  comm,
const U *const  data,
U *const  haloBuffer,
const MInt  noDat = 1 
)
Author
Lennart Schneiders
Date
October 2017

Definition at line 329 of file mpiexchange.h.

331 {
332 TRACE();
333 exchangeData(nghbrDomains.size(), nghbrDomains.data(), noHaloCells, haloCells, noWindowCells, windowCells, comm, data,
334 haloBuffer, noDat);
335}

◆ exchangeData() [8/10]

template<typename U >
void maia::mpi::exchangeData ( const std::vector< MInt > &  nghbrDomains,
const MInt *const  noHaloCells,
MInt **const  haloCells,
MInt *const  noWindowCells,
MInt **const  windowCells,
const MPI_Comm  comm,
U *const  data,
const MInt  noDat = 1 
)
Author
Lennart Schneiders
Date
October 2017

Definition at line 382 of file mpiexchange.h.

384 {
385 TRACE();
386 exchangeData(nghbrDomains.size(), nghbrDomains.data(), noHaloCells, haloCells, noWindowCells, windowCells, comm, data,
387 noDat);
388}

◆ exchangeData() [9/10]

template<typename U >
void maia::mpi::exchangeData ( const std::vector< MInt > &  nghbrDomains,
const std::vector< std::vector< MInt > > &  haloCellVec,
const std::vector< std::vector< MInt > > &  windowCellVec,
const MPI_Comm  comm,
U *const  data,
const MInt  noDat = 1 
)
Author
Lennart Schneiders
Date
October 2017

Definition at line 498 of file mpiexchange.h.

500 {
501 TRACE();
502 const MInt noNghbrDomains = (signed)nghbrDomains.size();
503 exchangeData(noNghbrDomains, nghbrDomains.data(), haloCellVec, windowCellVec, comm, data, noDat);
504}
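
As noted in the overview, this is the overload called by exchangeData in the solvers. A typical halo-update sketch (assumed layout: noVars consecutive values per cell; MFloat assumed):

  void updateHalos(const std::vector<MInt>& nghbrDomains,
                   const std::vector<std::vector<MInt>>& haloCells,
                   const std::vector<std::vector<MInt>>& windowCells,
                   const MPI_Comm comm, MFloat* const cellVars, const MInt noVars) {
    maia::mpi::exchangeData(nghbrDomains, haloCells, windowCells, comm, cellVars, noVars);
  }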

◆ exchangeData() [10/10]

template<typename U >
void maia::mpi::exchangeData ( const std::vector< MInt > &  nghbrDomains,
std::vector< std::vector< MInt > > &  haloCellVec,
std::vector< std::vector< MInt > > &  windowCellVec,
const MPI_Comm  comm,
U *const  data,
U *const  haloBuffer,
const MInt  noDat = 1 
)
Author
Lennart Schneiders
Date
October 2017

Definition at line 546 of file mpiexchange.h.

548 {
549 TRACE();
550 const auto noNghbrDomains = (signed)nghbrDomains.size();
551 exchangeData(noNghbrDomains, nghbrDomains.data(), haloCellVec, windowCellVec, comm, data, haloBuffer, noDat);
552}

◆ exchangeScattered()

template<typename U >
void maia::mpi::exchangeScattered ( const std::vector< MInt > &  nghbrDomains,
std::vector< MInt > &  sendDomainIndices,
std::vector< U > &  sendData,
const MPI_Comm  comm,
std::vector< MInt > &  recvOffsets,
std::vector< U > &  recvBuffer,
const MInt  noDat = 1 
)
Author
Lennart Schneiders
Date
October 2017

Definition at line 613 of file mpiexchange.h.

615 {
616 TRACE();
617 const auto noNghbrDomains = (signed)nghbrDomains.size();
618 if(sendDomainIndices.size() * ((unsigned)noDat) != sendData.size()) {
619 mTerm(1, AT_, "Invalid exchange buffer sizes.");
620 }
621 ScratchSpace<MInt> recvSize(noNghbrDomains, AT_, "recvSize");
622 ScratchSpace<MInt> sendSize(noNghbrDomains, AT_, "sendSize");
623 ScratchSpace<MInt> unity(noNghbrDomains, AT_, "unity");
624 unity.fill(1);
625 recvSize.fill(0);
626 sendSize.fill(0);
627 for(MInt sendDomainIndice : sendDomainIndices) {
628 ASSERT(sendDomainIndice > -1 && sendDomainIndice < noNghbrDomains, "");
629 sendSize(sendDomainIndice)++;
630 }
631
632 exchangeBuffer(noNghbrDomains, nghbrDomains.data(), &unity[0], &unity[0], comm, &recvSize[0], &sendSize[0]);
633
634 MInt recvCnt = 0;
635 MInt sendCnt = 0;
636 for(MInt i = 0; i < noNghbrDomains; i++) {
637 recvCnt += recvSize[i];
638 sendCnt += sendSize[i];
639 }
640
641 std::vector<MInt> sendOffsets;
642 recvOffsets.clear();
643 sendOffsets.resize(noNghbrDomains + 1);
644 recvOffsets.resize(noNghbrDomains + 1);
645
646 recvOffsets[0] = 0;
647 sendOffsets[0] = 0;
648 for(MInt i = 0; i < noNghbrDomains; i++) {
649 recvOffsets[i + 1] = recvOffsets[i] + recvSize[i];
650 sendOffsets[i + 1] = sendOffsets[i] + sendSize[i];
651 }
652
653 std::vector<U> sendBuffer;
654 recvBuffer.clear();
655 sendBuffer.resize(sendCnt * noDat);
656 recvBuffer.resize(recvCnt * noDat);
657
658 std::fill(&sendSize[0], &sendSize[0] + noNghbrDomains, 0);
659 for(MUint i = 0; i < sendDomainIndices.size(); i++) {
660 MInt idx = sendDomainIndices[i];
661 ASSERT(idx > -1 && idx < noNghbrDomains, "");
662 for(MInt j = 0; j < noDat; j++) {
663 sendBuffer[noDat * (sendOffsets[idx] + sendSize[idx]) + j] = sendData[noDat * i + j];
664 }
665 sendSize[idx]++;
666 }
667
668 exchangeBuffer(noNghbrDomains, nghbrDomains.data(), &recvSize[0], &sendSize[0], comm, recvBuffer.data(),
669 sendBuffer.data(), noDat);
670}
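
A sketch (hypothetical data; MFloat assumed): sendDomainIndices[i] selects the destination of the i-th block of sendData as an index into nghbrDomains, not as a rank. On return, data received from nghbrDomains[i] occupies recvBuffer entries noDat * recvOffsets[i] up to noDat * recvOffsets[i + 1].

  void scatterExample(const std::vector<MInt>& nghbrDomains, const MPI_Comm comm) {
    std::vector<MInt> sendDomainIndices = {0, 1, 0}; // two blocks to neighbor 0, one to neighbor 1
    std::vector<MFloat> sendData = {1.0, 2.0, 3.0};  // one value per block (noDat = 1)
    std::vector<MInt> recvOffsets;                   // filled by the call
    std::vector<MFloat> recvBuffer;                  // filled by the call
    maia::mpi::exchangeScattered(nghbrDomains, sendDomainIndices, sendData, comm,
                                 recvOffsets, recvBuffer);
  }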

◆ exchangeValues()

template<typename U >
void maia::mpi::exchangeValues ( const std::vector< MInt > &  exDomains,
MInt  noValues,
const MPI_Comm  comm,
const U *const  sendBuffer,
U *const  receiveBuffer,
const MInt  noDat = 1 
)
inline
Author
Lennart Schneiders
Date
October 2017
Template Parameters
U
Parameters
exDomains      Domains with which to exchange data
noValues       Number of values to send and receive
comm           MPI communicator to be used
sendBuffer     Buffer used to send data
receiveBuffer  Buffer used to receive data
noDat          Number of values per data entry

Definition at line 270 of file mpiexchange.h.

271 {
272 TRACE();
273
274 // 0. prepare
275 const auto noNghbrDomains = exDomains.size();
276 ASSERT(noNghbrDomains > 0, "");
277
278 ScratchSpace<MInt> noV(noNghbrDomains, AT_, "noV");
279 noV.fill(noValues);
280
281 // 1. exchange
282 exchangeBuffer(noNghbrDomains, exDomains.data(), &noV[0], &noV[0], comm, receiveBuffer, sendBuffer, noDat);
283}

◆ getBufferSize()

MUint maia::mpi::getBufferSize ( const std::vector< std::vector< MInt > > &  exchangeCells)
inline
Author
Lennart Schneiders
Date
October 2017

Definition at line 681 of file mpiexchange.h.

681 {
682 return accumulate(exchangeCells.begin(), exchangeCells.end(), 0u,
683 [](const MInt& a, const std::vector<MInt>& b) { return a + static_cast<MInt>(b.size()); });
684}
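
A typical use is sizing an exchange buffer from per-neighbor cell lists (haloCellVec and noVars are assumed inputs; MFloat assumed):

  std::vector<MFloat> haloBuffer(noVars * maia::mpi::getBufferSize(haloCellVec));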

◆ mpiExchangePointToPoint()

template<class T >
std::vector< T > maia::mpi::mpiExchangePointToPoint ( const T *const  sendBuffer,
const MInt *const  snghbrs,
const MInt  nosnghbrs,
const MInt *const  sendcounts,
const MInt *const  rnghbrs,
const MInt  nornghbrs,
const MPI_Comm &  mpi_comm,
const MInt  domainId,
const MInt  dataBlockSize,
MInt *const  recvcounts_ = nullptr,
MInt *const  rdispls_ = nullptr 
)

Definition at line 1057 of file mpiexchange.h.

1067 {
1068 TRACE();
1069
1070#ifndef NDEBUG
1071 {
1072 // TODO labels:COMM,totest uncomment the next two lines, is it necessary to be sorted?
1073 // if (!std::is_sorted(&snghbrs[0], &snghbrs[nosnghbrs])) TERMM(-1, "");
1074 // if (!std::is_sorted(&rnghbrs[0], &rnghbrs[nornghbrs])) TERMM(-1, "");
1075 // Check for duplicates
1076 std::set<MInt> s1(&snghbrs[0], &snghbrs[0] + nosnghbrs);
1077 ASSERT(s1.size() == static_cast<unsigned>(nosnghbrs), "");
1078 std::set<MInt> s2(&rnghbrs[0], &rnghbrs[0] + nornghbrs);
1079 ASSERT(s2.size() == static_cast<unsigned>(nornghbrs), "");
1080
1081 /*
1082 * Check if send and receive neighbor domainIds are consistent
1083 */
1084
1085 // Determine all neighbor domains of current domain
1086 MInt noDomains;
1087 MPI_Comm_size(mpi_comm, &noDomains);
1088 ScratchSpace<MChar> isReceiveDomain(noDomains, AT_, "isReceiveDomain");
1089 std::fill(isReceiveDomain.begin(), isReceiveDomain.end(), 0);
1090 for(MInt d = 0; d < nosnghbrs; d++) {
1091 isReceiveDomain[snghbrs[d]] = 1;
1092 }
1093
1094 // Exchange with all domains
1095 MPI_Alltoall(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, &isReceiveDomain[0], 1, type_traits<MChar>::mpiType(), mpi_comm,
1096 AT_, "MPI_IN_PLACE", "isReceiveDomain[0]");
1097
1098 // Check if the domains, which will send to the current domain are also included in rnghbrs of current domain
1099 MInt nornghbrs_ = std::accumulate(isReceiveDomain.begin(), isReceiveDomain.end(), 0);
1100 if(nornghbrs != nornghbrs_) {
1101 TERMM(-1, "True number of domains sending to current domain " + std::to_string(domainId)
1102 + " differs from expected number " + std::to_string(nornghbrs));
1103 }
1104 for(MInt d = 0; d < nornghbrs; d++) {
1105 if(!isReceiveDomain[rnghbrs[d]]) {
1106 TERMM(1, "Domain " + std::to_string(domainId) + " has domain " + std::to_string(rnghbrs[d])
1107 + " as a neighbor but domain " + std::to_string(rnghbrs[d])
1108 + " has nothing to send to current domain.");
1109 }
1110 }
1111
1112 MPI_Barrier(mpi_comm, AT_);
1113 }
1114#endif
1115
1116 // Check if I am a useless domain, which has nothing to send or receive
1117 if(nosnghbrs == 0 && nornghbrs == 0) {
1118 std::vector<T> _;
1119 return _;
1120 }
1121
1122 // 1) Determine recvcounts
1123 ScratchSpace<MPI_Request> recvRequests(std::max(1, nornghbrs), AT_, "recvRequests");
1124 ScratchSpace<MInt> recvcounts(std::max(1, nornghbrs), AT_, "recvcounts");
1125 for(MInt d = 0; d < nornghbrs; ++d) {
1126 recvcounts[d] = 0; // is it necessary?
1127 MPI_Irecv(&recvcounts[d], 1, type_traits<MInt>::mpiType(), rnghbrs[d], rnghbrs[d], mpi_comm, &recvRequests[d], AT_,
1128 "recvcounts[d]");
1129 }
1130
1131 ScratchSpace<MPI_Request> sendRequests(std::max(1, nosnghbrs), AT_, "sendRequests");
1132 for(MInt d = 0; d < nosnghbrs; ++d) {
1133 MPI_Isend(&sendcounts[d], 1, type_traits<MInt>::mpiType(), snghbrs[d], domainId, mpi_comm, &sendRequests[d], AT_,
1134 "sendcounts[d]");
1135 }
1136
1137 MPI_Waitall(nornghbrs, &recvRequests[0], MPI_STATUSES_IGNORE, AT_);
1138 MPI_Waitall(nosnghbrs, &sendRequests[0], MPI_STATUSES_IGNORE, AT_);
1139 if(recvcounts_) std::copy_n(&recvcounts[0], nornghbrs, recvcounts_);
1140
1141 /*
1142 std::cout << "MYRANK=" << domainId << ": " << "sendinfo : ";
1143 for (MInt i = 0; i < nosnghbrs; ++i) {
1144 std::cout << sendcounts[i] << "(" << snghbrs[i] << "),";
1145 }
1146 std::cout << std::endl;
1147 std::cout << "MYRANK=" << domainId << ": " << "recvinfo : ";
1148 for (MInt i = 0; i < nornghbrs; ++i) {
1149 std::cout << recvcounts[i] << "(" << rnghbrs[i] << "),";
1150 }
1151 std::cout << " " << std::accumulate(&recvcounts[0], &recvcounts[nornghbrs-1], 0) * dataBlockSize << std::endl <<
1152 std::flush; MPI_Barrier(mpi_comm, AT_);
1153 */
1154
1155
1156 // 2) Alocate memory for receiveBuffer
1157 const MInt totalBufferSize = std::accumulate(&recvcounts[0], &recvcounts[0] + nornghbrs, 0) * dataBlockSize;
1158 std::vector<T> receiveBuffer(totalBufferSize);
1159
1160 // 3) take into account dataBlockSize
1161 std::vector<MInt> sdispls(nosnghbrs);
1162 std::vector<MInt> rdispls(nornghbrs);
1163
1164 // sdispls[0] = 0;
1165 // rdispls[0] = 0;
1166 for(MInt i = 1; i < nosnghbrs; i++) {
1167 sdispls[i] = sdispls[i - 1] + dataBlockSize * sendcounts[i - 1];
1168 }
1169 for(MInt i = 1; i < nornghbrs; i++) {
1170 rdispls[i] = rdispls[i - 1] + dataBlockSize * recvcounts[i - 1];
1171 }
1172 if(rdispls_) std::copy_n(&rdispls[0], nornghbrs, rdispls_);
1173
1174 // 4) Send & receive actual data
1175 std::fill(recvRequests.begin(), recvRequests.end(), MPI_REQUEST_NULL);
1176 for(MInt d = 0; d < nornghbrs; ++d) {
1177 if(recvcounts[d] > 0) {
1178 MPI_Irecv(&receiveBuffer[rdispls[d]], recvcounts[d] * dataBlockSize, type_traits<T>::mpiType(), rnghbrs[d],
1179 rnghbrs[d], mpi_comm, &recvRequests[d], AT_, "receiveBuffer[rdispls[d]]");
1180 }
1181 }
1182
1183 std::fill(sendRequests.begin(), sendRequests.end(), MPI_REQUEST_NULL);
1184 for(MInt d = 0; d < nosnghbrs; ++d) {
1185 if(sendcounts[d] > 0) {
1186 MPI_Isend(&sendBuffer[sdispls[d]], sendcounts[d] * dataBlockSize, type_traits<T>::mpiType(), snghbrs[d], domainId,
1187 mpi_comm, &sendRequests[d], AT_, "sendBuffer[sdispls[d]]");
1188 }
1189 }
1190
1191 MPI_Waitall(nornghbrs, &recvRequests[0], MPI_STATUSES_IGNORE, AT_);
1192 MPI_Waitall(nosnghbrs, &sendRequests[0], MPI_STATUSES_IGNORE, AT_);
1193
1194 return receiveBuffer;
1195}
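
A usage sketch (hypothetical one-neighbor setup; MFloat assumed): only the send-side counts need to be known in advance, since the counts themselves are exchanged first; the optional recvcounts_/rdispls_ arguments return the negotiated receive layout.

  void pointToPointExample(const MInt domainId, const MPI_Comm comm) {
    const MInt snghbrs[1] = {1};    // ranks this domain sends to (hypothetical)
    const MInt sendcounts[1] = {3}; // blocks sent to each of them
    const MInt rnghbrs[1] = {1};    // ranks this domain receives from
    const MInt dataBlockSize = 2;
    const std::vector<MFloat> sendBuffer(sendcounts[0] * dataBlockSize, 1.0);
    const std::vector<MFloat> received = maia::mpi::mpiExchangePointToPoint(
        sendBuffer.data(), snghbrs, 1, sendcounts, rnghbrs, 1, comm, domainId, dataBlockSize);
    // received.size() equals the total number of received blocks * dataBlockSize
  }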

◆ reverseExchangeAddData() [1/2]

template<typename U >
void maia::mpi::reverseExchangeAddData ( const std::vector< MInt > &  nghbrDomains,
const std::vector< std::vector< MInt > > &  haloCellVec,
const std::vector< std::vector< MInt > > &  windowCellVec,
const MPI_Comm  comm,
U **const  data,
const MInt  noDat = 1 
)
Author
Tim Wegmann
Date
March 2021

Definition at line 1205 of file mpiexchange.h.

1207 {
1208 TRACE();
1209
1210 // 0. prepare
1211 const MInt noNghbrDomains = (signed)nghbrDomains.size();
1212
1213 ASSERT(noNghbrDomains == (signed)windowCellVec.size() && noNghbrDomains == (signed)haloCellVec.size(), "");
1214
1215 ScratchSpace<MInt> noHaloCells(mMax(1, noNghbrDomains), AT_, "noHaloCells");
1216 ScratchSpace<MInt> noWindowCells(mMax(1, noNghbrDomains), AT_, "noWindowCells");
1217 ScratchSpace<const MInt*> haloCells(mMax(1, noNghbrDomains), AT_, "haloCells");
1218 ScratchSpace<const MInt*> windowCells(mMax(1, noNghbrDomains), AT_, "windowCells");
1219
1220 for(MInt i = 0; i < noNghbrDomains; i++) {
1221 noHaloCells[i] = (signed)haloCellVec[i].size();
1222 noWindowCells[i] = (signed)windowCellVec[i].size();
1223 haloCells[i] = haloCellVec[i].data();
1224 windowCells[i] = windowCellVec[i].data();
1225 }
1226
1227 MInt receiveCount = std::accumulate(&noHaloCells[0], &noHaloCells[0] + noNghbrDomains, 0);
1228 ScratchSpace<U> haloBuffer(mMax(1, noDat * receiveCount), AT_, "haloBuffer");
1229
1230 MInt receiveSize = std::accumulate(&noWindowCells[0], &noWindowCells[0] + noNghbrDomains, 0);
1231 ScratchSpace<U> windowBuffer(mMax(1, noDat * receiveSize), AT_, "windowBuffer");
1232
1233 // 1. gather
1234 receiveCount = 0;
1235 for(MInt i = 0; i < noNghbrDomains; i++) {
1236 for(MInt j = 0; j < noHaloCells[i]; j++) {
1237 for(MInt k = 0; k < noDat; k++) {
1238 haloBuffer[receiveCount] = data[haloCells[i][j]][k];
1239 receiveCount++;
1240 }
1241 }
1242 }
1243
1244 // 2. exchange
1245 exchangeBuffer(noNghbrDomains, nghbrDomains.data(), noWindowCells.getPointer(), noHaloCells.getPointer(), comm,
1246 &windowBuffer[0], &haloBuffer[0], noDat);
1247
1248 // 3. scatter
1249 receiveSize = 0;
1250 for(MInt i = 0; i < noNghbrDomains; i++) {
1251 for(MInt j = 0; j < noWindowCells[i]; j++) {
1252 for(MInt k = 0; k < noDat; k++) {
1253 data[windowCells[i][j]][k] += windowBuffer[receiveSize];
1254 receiveSize++;
1255 }
1256 }
1257 }
1258}

◆ reverseExchangeAddData() [2/2]

template<typename U >
void maia::mpi::reverseExchangeAddData ( const std::vector< MInt > &  nghbrDomains,
const std::vector< std::vector< MInt > > &  haloCellVec,
const std::vector< std::vector< MInt > > &  windowCellVec,
const MPI_Comm  comm,
U *const  data,
const MInt  noDat = 1 
)
Author
Tim Wegmann
Date
March 2021

Definition at line 1268 of file mpiexchange.h.

1270 {
1271 TRACE();
1272
1273 // 0. prepare
1274 const MInt noNghbrDomains = (signed)nghbrDomains.size();
1275
1276 ASSERT(noNghbrDomains == (signed)windowCellVec.size() && noNghbrDomains == (signed)haloCellVec.size(), "");
1277
1278 ScratchSpace<MInt> noHaloCells(mMax(1, noNghbrDomains), AT_, "noHaloCells");
1279 ScratchSpace<MInt> noWindowCells(mMax(1, noNghbrDomains), AT_, "noWindowCells");
1280 ScratchSpace<const MInt*> haloCells(mMax(1, noNghbrDomains), AT_, "haloCells");
1281 ScratchSpace<const MInt*> windowCells(mMax(1, noNghbrDomains), AT_, "windowCells");
1282
1283 for(MInt i = 0; i < noNghbrDomains; i++) {
1284 noHaloCells[i] = (signed)haloCellVec[i].size();
1285 noWindowCells[i] = (signed)windowCellVec[i].size();
1286 haloCells[i] = haloCellVec[i].data();
1287 windowCells[i] = windowCellVec[i].data();
1288 }
1289
1290 MInt receiveCount = std::accumulate(&noHaloCells[0], &noHaloCells[0] + noNghbrDomains, 0);
1291 ScratchSpace<U> haloBuffer(mMax(1, noDat * receiveCount), AT_, "haloBuffer");
1292
1293 MInt receiveSize = std::accumulate(&noWindowCells[0], &noWindowCells[0] + noNghbrDomains, 0);
1294 ScratchSpace<U> windowBuffer(mMax(1, noDat * receiveSize), AT_, "windowBuffer");
1295
1296 // 1. gather
1297 receiveCount = 0;
1298 for(MInt i = 0; i < noNghbrDomains; i++) {
1299 for(MInt j = 0; j < noHaloCells[i]; j++) {
1300 for(MInt k = 0; k < noDat; k++) {
1301 haloBuffer[receiveCount] = data[noDat * haloCells[i][j] + k];
1302 receiveCount++;
1303 }
1304 }
1305 }
1306
1307 // 2. exchange
1308 exchangeBuffer(noNghbrDomains, nghbrDomains.data(), noWindowCells.getPointer(), noHaloCells.getPointer(), comm,
1309 &windowBuffer[0], &haloBuffer[0], noDat);
1310
1311 // 3. scatter
1312 receiveSize = 0;
1313 for(MInt i = 0; i < noNghbrDomains; i++) {
1314 for(MInt j = 0; j < noWindowCells[i]; j++) {
1315 for(MInt k = 0; k < noDat; k++) {
1316 data[noDat * windowCells[i][j] + k] += windowBuffer[receiveSize];
1317 receiveSize++;
1318 }
1319 }
1320 }
1321}
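
A sketch of the accumulate semantics (assumed layout: noVars consecutive values per cell; MFloat assumed): contributions computed in halo cells are sent back to the owning rank and added onto the corresponding window cells.

  void addHaloContributions(const std::vector<MInt>& nghbrDomains,
                            const std::vector<std::vector<MInt>>& haloCells,
                            const std::vector<std::vector<MInt>>& windowCells,
                            const MPI_Comm comm, MFloat* const residual, const MInt noVars) {
    maia::mpi::reverseExchangeAddData(nghbrDomains, haloCells, windowCells, comm,
                                      residual, noVars);
  }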

◆ reverseExchangeData() [1/3]

template<typename U >
void maia::mpi::reverseExchangeData ( const MInt  noNghbrDomains,
const MInt *const  nghbrDomains,
const MInt *const  noHaloCells,
const MInt **const  haloCells,
const MInt *const  noWindowCells,
const MInt ** const  ,
const MPI_Comm  comm,
const U *const  data,
U *const  windowBuffer,
const MInt  noDat = 1 
)
Author
Lennart Schneiders
Date
October 2017

Definition at line 400 of file mpiexchange.h.

403 {
404 TRACE();
405
406 // 0. prepare
407 MInt receiveCount = std::accumulate(noHaloCells, noHaloCells + noNghbrDomains, 0);
 408 ScratchSpace<U> haloBuffer(mMax(1, noDat * receiveCount), AT_, "haloBuffer");
409
410 // 1. gather
411 receiveCount = 0;
412 for(MInt i = 0; i < noNghbrDomains; i++) {
413 for(MInt j = 0; j < noHaloCells[i]; j++) {
414 for(MInt k = 0; k < noDat; k++) {
415 haloBuffer[receiveCount] = data[noDat * haloCells[i][j] + k];
416 receiveCount++;
417 }
418 }
419 }
420
421 // 2. exchange
422 exchangeBuffer(noNghbrDomains, nghbrDomains, noWindowCells, noHaloCells, comm, windowBuffer, &haloBuffer[0], noDat);
423}

◆ reverseExchangeData() [2/3]

template<typename U >
void maia::mpi::reverseExchangeData ( const MInt  noNghbrDomains,
const MInt *const  nghbrDomains,
const std::vector< std::vector< MInt > > &  haloCellVec,
const std::vector< std::vector< MInt > > &  windowCellVec,
const MPI_Comm  comm,
const U *const  data,
U *const  windowBuffer,
const MInt  noDat = 1 
)
Author
Lennart Schneiders
Date
October 2017

Definition at line 564 of file mpiexchange.h.

567 {
568 TRACE();
569 ASSERT(noNghbrDomains == (signed)windowCellVec.size() && noNghbrDomains == (signed)haloCellVec.size(), "");
570 ScratchSpace<MInt> noHaloCells(mMax(1, noNghbrDomains), AT_, "noHaloCells");
571 ScratchSpace<MInt> noWindowCells(mMax(1, noNghbrDomains), AT_, "noWindowCells");
572 ScratchSpace<const MInt*> haloCells(mMax(1, noNghbrDomains), AT_, "haloCells");
573 ScratchSpace<const MInt*> windowCells(mMax(1, noNghbrDomains), AT_, "windowCells");
574 for(MInt i = 0; i < noNghbrDomains; i++) {
575 noHaloCells[i] = (signed)haloCellVec[i].size();
576 noWindowCells[i] = (signed)windowCellVec[i].size();
577 haloCells[i] = haloCellVec[i].data();
578 windowCells[i] = windowCellVec[i].data();
579 }
580
581 reverseExchangeData(noNghbrDomains, nghbrDomains, noHaloCells.getPointer(), haloCells.getPointer(),
582 noWindowCells.getPointer(), windowCells.getPointer(), comm, data, windowBuffer, noDat);
583}

◆ reverseExchangeData() [3/3]

template<typename U >
void maia::mpi::reverseExchangeData ( const std::vector< MInt > &  nghbrDomains,
const std::vector< std::vector< MInt > > &  haloCellVec,
const std::vector< std::vector< MInt > > &  windowCellVec,
const MPI_Comm  comm,
const U *const  data,
U *const  windowBuffer,
const MInt  noDat = 1 
)
Author
Lennart Schneiders
Date
October 2017

Definition at line 595 of file mpiexchange.h.

597 {
598 TRACE();
599 const auto noNghbrDomains = (signed)nghbrDomains.size();
600 reverseExchangeData(noNghbrDomains, nghbrDomains.data(), haloCellVec, windowCellVec, comm, data, windowBuffer, noDat);
601}