CepsContainerCommunication.hpp
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    This file is part of CEPS.

    CEPS is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    CEPS is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with CEPS (see file LICENSE at root of project).
    If not, see <https://www.gnu.org/licenses/>.


    Copyright 2019-2024 Inria, Universite de Bordeaux

    Authors, in alphabetical order:

    Pierre-Elliott BECUE, Florian CARO, Yves COUDIERE(*), Andjela DAVIDOVIC,
    Charlie DOUANLA-LONTSI, Marc FUENTES, Mehdi JUHOOR, Michael LEGUEBE(*),
    Pauline MIGERDITICHAN, Valentin PANNETIER(*), Nejib ZEMZEMI.
    * : currently active authors

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
#pragma once

// Headers implied by the declarations below; the exact include set of the
// original file may differ slightly.
#include <map>
#include <unordered_map>

#include <mpi.h>
#include <Eigen/Dense>

#include "CepsTypes.hpp"
#include "CepsMap.hpp"
#include "CepsMemory.hpp"

namespace ceps
{

    /// Gathers all the local vectors into the global vector.
    template <typename T>
    int
    allGatherv(const CepsVector<T>& localTab, int localSize,
               CepsVector<T>& globalTab, CepsBool masterOnly = false);

    /// Overload gathering all the local maps into the global map.
    int
    allGatherv(const std::unordered_map<CepsUInt, CepsUInt>& orig,
               std::unordered_map<CepsUInt, CepsUInt>& dest);

    /// Broadcasts a map or multimap from the sender to all processes.
    template<typename _Key, typename _Val,
             template<typename,typename> typename _Container>
    int
    broadcastMap(const CepsUInt& sender, _Container<_Key,_Val>& table);

    /// Sends a map or multimap from one CPU to another.
    template<typename _Key, typename _Val,
             template<typename,typename> typename _Container>
    int
    sendMap(
        const CepsUInt& sender,
        const CepsUInt& receiver,
        _Container<_Key,_Val>& table,
        const CepsUInt& nComms = 1,
        const CepsUInt& commId = 1
    );

    /// Point-to-point communications driven by a communication matrix.
    template<typename T>
    void
    sendRecvWithMatrix(const Eigen::Matrix<CepsUInt, Eigen::Dynamic, Eigen::Dynamic>& commMat,
                       CepsVector<CepsVector<T>>& sendBuffers,
                       CepsVector<CepsVector<T>>& recvBuffers);

} // namespace ceps


template <typename T>
int
ceps::allGatherv(const CepsVector<T>& localTab, int localSize, CepsVector<T>& globalTab, CepsBool masterOnly)
{
    CepsUInt gridSize = ceps::getGridSize();
    MPI_Comm comm     = ceps::getCommunicator();

    CepsVector<int> localSizes(ceps::getGridSize(),0);
    MPI_Allgather(&localSize,1,MPI_INT,localSizes.data(),1,MPI_INT,comm);

    // We do not use ceps::cumSum to avoid a forward declaration
    CepsVector<int> displacements(gridSize+1,0);
    CepsUInt count(0u);
    for (CepsUInt i=0u; i<gridSize; i++)
    {
        count += localSizes[i];
        displacements[i+1] = count;
    }

    T* localNonConst = const_cast<T*>(localTab.data());   // these two casts are needed because the
    void* localVoid  = static_cast<void*>(localNonConst); // first argument of MPI_Allgatherv is void*

    MPI_Datatype mpiType = MpiType<T>::getType();

    if (masterOnly)
    {
        if (ceps::isMaster())
            globalTab.resize(displacements[gridSize]);
        return MPI_Gatherv(localVoid, localSize, mpiType, globalTab.data(), localSizes.data(),
                           displacements.data(), mpiType, CEPS_MASTER_PROC, comm);
    }
    else
    {
        globalTab.resize(displacements[gridSize]);
        return MPI_Allgatherv(localVoid, localSize, mpiType, globalTab.data(), localSizes.data(),
                              displacements.data(), mpiType, comm);
    }
}
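
/*
   Usage sketch (not part of the original header; names are illustrative).
   Gathers the per-rank vector of every process into one vector, assuming MPI
   and the CEPS grid are already initialized and MpiType<CepsUInt> is defined.

     CepsVector<CepsUInt> localIds;                       // data owned by this rank
     // ... fill localIds ...
     CepsVector<CepsUInt> allIds;
     ceps::allGatherv(localIds, (int)localIds.size(), allIds);
     // allIds now holds every rank's localIds, concatenated in rank order.
     // Pass masterOnly = true to gather on CEPS_MASTER_PROC only.
*/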

template<typename _Key, typename _Val,
         template<typename,typename> typename _Container>
int
ceps::broadcastMap(const CepsUInt& sender, _Container<_Key,_Val>& table)
{

    MPI_Comm comm     = ceps::getCommunicator();
    CepsBool isSender = ceps::getRank() == sender;
    CepsUInt nItems   = isSender ? table.size() : 0;

    if (not isSender)
        table.clear();

    MPI_Bcast(&nItems,1,CEPS_MPI_UINT,sender,comm);
    CepsVector<_Key> keys   = ceps::keysOf  (table);
    CepsVector<_Val> values = ceps::valuesOf(table);
    if (not isSender)
    {
        keys  .resize(nItems);
        values.resize(nItems);
    }
    MPI_Bcast(keys  .data(),nItems,MpiType<_Key>::getType(),sender,comm);
    MPI_Bcast(values.data(),nItems,MpiType<_Val>::getType(),sender,comm);
    if (not isSender)
        for (CepsUInt i = 0U; i < keys.size(); ++i)
            table.emplace(keys[i], values[i]);

    return MPI_SUCCESS;

}
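
/*
   Usage sketch (not part of the original header; the sender rank and map
   contents are hypothetical). The container must be a map-like type accepted
   by the _Container template-template parameter and usable with
   ceps::keysOf/valuesOf; here a CepsMap is assumed to qualify, and both key
   and value types must have an MpiType specialization.

     CepsMap<CepsUInt,CepsUInt> oldToNew;
     if (ceps::getRank() == 0)
     {
         // ... fill oldToNew on the sending rank only ...
     }
     ceps::broadcastMap(0, oldToNew);   // every rank now holds the same table
*/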

template<typename _Key, typename _Val,
         template<typename,typename> typename _Container>
int
ceps::sendMap(
    const CepsUInt& sender,
    const CepsUInt& receiver,
    _Container<_Key,_Val>& table,
    const CepsUInt& nComms,
    const CepsUInt& commId
)
{

    CepsBool isSender   = ceps::getRank() == sender;
    CepsBool isReceiver = ceps::getRank() == receiver;
    if (isReceiver or isSender)
    {

        CepsUInt gSize  = ceps::getGridSize();
        MPI_Comm comm   = ceps::getCommunicator();
        CepsUInt nItems = isSender ? table.size() : 0;
        MPI_Status status;

        if (isReceiver)
            table.clear();

        CepsInt tag = commId + nComms*(receiver + gSize*sender);

        if (isSender)
            MPI_Send(&nItems,1,CEPS_MPI_UINT,receiver,3*tag,comm);
        if (isReceiver)
            MPI_Recv(&nItems,1,CEPS_MPI_UINT,sender,3*tag,comm,&status);

        CepsVector<_Key> keys   = ceps::keysOf  (table);
        CepsVector<_Val> values = ceps::valuesOf(table);
        if (isSender)
            MPI_Send(keys.data(),nItems,MpiType<_Key>::getType(),receiver,3*tag+1,comm);
        if (isReceiver)
        {
            keys.resize(nItems);
            MPI_Recv(keys.data(),nItems,MpiType<_Key>::getType(),sender,3*tag+1,comm,&status);
        }

        if (isSender)
        {
            MPI_Send(values.data(),nItems,MpiType<_Val>::getType(),receiver,3*tag+2,comm);
        }
        if (isReceiver)
        {
            values.resize(nItems);
            MPI_Recv(values.data(),nItems,MpiType<_Val>::getType(),sender,3*tag+2,comm,&status);
            for (CepsUInt i = 0U; i < keys.size(); ++i)
                table.emplace(keys[i], values[i]);
        }
    }

    return MPI_SUCCESS;

}
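
/*
   Usage sketch (not part of the original header; ranks and contents are
   hypothetical). Both the sender and the receiver must call sendMap with the
   same arguments so that they build the same MPI tags; every other rank
   returns immediately. nComms/commId only differentiate the tags when several
   such exchanges occur between the same pair of ranks.

     CepsUInt src = 0, dst = 1;
     CepsMap<CepsUInt,CepsUInt> table;   // filled on src, received on dst
     if (ceps::getRank() == src)
     {
         // ... fill table ...
     }
     ceps::sendMap(src, dst, table);
*/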


template <typename T>
void ceps::
sendRecvWithMatrix(const Eigen::Matrix<CepsUInt, Eigen::Dynamic, Eigen::Dynamic>& commMat,
                   CepsVector<CepsVector<T>>& sendBuffers,
                   CepsVector<CepsVector<T>>& recvBuffers)
{
    CepsUInt gridSize = ceps::getGridSize();
    MPI_Comm comm     = ceps::getCommunicator();
    CepsUInt rank     = ceps::getRank();
    CepsUInt ncols    = commMat.cols();
    CepsUInt nrows    = commMat.rows();

    CepsBool badShape = nrows > gridSize or ncols > gridSize;
    CEPS_ABORT_IF(badShape,
        "given communication matrix has wrong shape " << nrows << " x " << ncols
    );

    std::map<CepsUInt, CepsUInt> connectivity;
    for (CepsUInt i = 0U; i < gridSize; ++i)
        connectivity[i] = commMat(rank,i);

    CepsUInt i = 0;
    MPI_Request *sendRequests = ceps::newArray<MPI_Request>(connectivity.size());

    // post all the sends (non-blocking)
    for (auto &[destination, data_size] : connectivity)
        MPI_Isend(sendBuffers[destination].data(), data_size, MpiType<T>::getType(),
                  destination, rank*gridSize+destination, comm, &sendRequests[i++]);

    // do all the receives (blocking)
    connectivity.clear();
    for (CepsUInt j=0U; j<gridSize; ++j)
        connectivity[j] = commMat(j,rank);

    CepsUInt nbReceives = connectivity.size();
    i = 0;
    MPI_Request *receiveRequests = ceps::newArray<MPI_Request>(nbReceives);
    MPI_Status  *receiveStatus   = ceps::newArray<MPI_Status >(nbReceives);

    recvBuffers.resize(gridSize);
    for (auto &[sender, data_size] : connectivity)
    {
        recvBuffers[sender].resize(data_size);
        MPI_Recv(recvBuffers[sender].data(), data_size, MpiType<T>::getType(),
                 sender, sender*gridSize+rank, comm, &receiveStatus[i++]);
    }

    // make sure the non-blocking sends have completed before releasing the
    // request arrays and returning control of sendBuffers to the caller
    MPI_Waitall(gridSize, sendRequests, MPI_STATUSES_IGNORE);

    ceps::destroyTabular(receiveRequests);
    ceps::destroyTabular(receiveStatus  );
    ceps::destroyTabular(sendRequests   );
    return;
}

/// Specialization for vectors of CepsHash3.
template <>
void ceps::
sendRecvWithMatrix(const Eigen::Matrix<CepsUInt, Eigen::Dynamic, Eigen::Dynamic>& commMat,
                   CepsVector<CepsVector<CepsHash3>>& sendBuffers,
                   CepsVector<CepsVector<CepsHash3>>& recvBuffers);
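
/*
   Usage sketch (not part of the original header; names are illustrative).
   commMat(i,j) is the number of items rank i sends to rank j, and is expected
   to be filled identically on every rank. sendBuffers[j] must hold at least
   commMat(rank,j) values before the call; recvBuffers[i] is resized to
   commMat(i,rank) and filled with the data received from rank i.

     CepsUInt gridSize = ceps::getGridSize();
     Eigen::Matrix<CepsUInt,Eigen::Dynamic,Eigen::Dynamic> commMat =
         Eigen::Matrix<CepsUInt,Eigen::Dynamic,Eigen::Dynamic>::Zero(gridSize,gridSize);
     CepsVector<CepsVector<CepsUInt>> sendBuffers(gridSize), recvBuffers;
     // ... fill commMat and the matching sendBuffers ...
     ceps::sendRecvWithMatrix(commMat, sendBuffers, recvBuffers);
*/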