VnodeMultiBalancer.hpp
// -*- C++ -*-
/***************************************************************************
 *
 * The IPPL Framework
 *
 * This program was prepared by PSI.
 * All rights in the program are reserved by PSI.
 * Neither PSI nor the author(s)
 * makes any warranty, express or implied, or assumes any liability or
 * responsibility for the use of this software
 *
 * Visit www.amas.web.psi for more details
 *
 ***************************************************************************/

// -*- C++ -*-
/***************************************************************************
 *
 * The IPPL Framework
 *
 *
 * Visit http://people.web.psi.ch/adelmann/ for more details
 *
 ***************************************************************************/

//-----------------------------------------------------------------------------
// Description:
// Vnode-granularity load balancer, based on input container of bool weight
// BareFields
//-----------------------------------------------------------------------------

// include files
#include "FieldLayout/VnodeMultiBalancer.h"
#include "FieldLayout/BinaryBalancer.h"
#include "FieldLayout/FieldLayout.h"
#include "Field/BareField.h"


// Implementation of VnodeMultiRepartition().
//
// Internally, it constructs a BareField<double,Dim>, called vf, having one
// element per vnode along each direction of the input FieldLayout. Each
// element of vf is assigned a weight equal to the number of Fields in the
// input container (an STL vector of boolean weight BareFields) that have any
// true elements in the corresponding vnode. If a vnode is compressed to
// false, the corresponding vf element is left unchanged; if a vnode is
// uncompressed, there is at least one true element in it, and the vf element
// is incremented by one; if a vnode is compressed to true, all its elements
// are true and the vf element is likewise incremented by one. Then
// BinaryBalancer is invoked on this small Field of weights, vf, to partition
// it elementwise among the processors, one element per PE. Finally, this
// partitioning is mapped back to the input FieldLayout (used by the input
// weight Fields), and the input FieldLayout's vnodes are repartitioned among
// the PE's in the corresponding way.

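// A minimal usage sketch (the names "layout2d", "flags1", and "flags2", and
// the extents/vnode counts, are hypothetical; this assumes the usual IPPL
// FieldLayout and BareField constructors):
//
//   FieldLayout<2> layout2d(Index(64), Index(64), PARALLEL, PARALLEL, 16);
//   BareField<bool,2> flags1(layout2d), flags2(layout2d);
//   flags1 = false;  flags2 = false;
//   // ... set elements to true wherever the real work is concentrated ...
//   std::vector<BareField<bool,2>* > weights;
//   weights.push_back(&flags1);
//   weights.push_back(&flags2);
//   VnodeMultiRepartition(layout2d, weights);  // rebalance vnodes over the PE's
//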

template<unsigned Dim>
void VnodeMultiRepartition(FieldLayout<Dim>& layout,
                           std::vector<BareField<bool,Dim>* >& weights) {


  int npe = Ippl::Comm->getNodes();  // Total number of PE's (pnodes)
  if (npe == 1) return;              // Not much hope of balancing on 1 node, eh?

  // Get numbers of vnodes per direction (vnode "array" extents) and total
  // number of vnodes:
  unsigned vpd[Dim];
  int vnodes = 1;
  for (unsigned int d=0; d<Dim; d++) {
    vpd[d] = layout.getVnodesPerDirection(d);
    vnodes *= vpd[d];
  }
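
  // For example, a 3D layout with vpd = {4, 2, 2} has vnodes == 16, so the
  // Field vf constructed below is a 4x2x2 Field with one element per vnode.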

  // Construct the Field sized as vpd[d] elements along each dimension d:
  NDIndex<Dim> vfndi;
  for (unsigned int d=0; d<Dim; d++) vfndi[d] = Index(vpd[d]);
  // SERIAL/PARALLEL specification must match input FieldLayout layout:
  e_dim_tag edt[Dim];
  for (unsigned int d=0; d<Dim; d++) edt[d] = layout.getDistribution(d);
  // Because "true" is selected for the recurse parameter here, this algorithm
  // will run faster the first time through if the input "layout" was also
  // constructed with recurse=true. Should be correct even if not, though, and
  // subsequent re-calls of VnodeMultiRepartition() should go faster in either
  // case:
  FieldLayout<Dim> l(vfndi, edt, vpd, true, vnodes);
  BareField<double,Dim> vf(l);
  vf = 0.0;


  // Loop through the PE's owned LField's (<==> Vnode's) in each boolean Field
  // of weights. Check each for compression, and increment the value in an
  // array of int's with one element for each vnode; increment by 1 if
  // compressed to true or uncompressed; if compressed to false don't
  // increment:
  int* anyTrueElements = new int[vnodes];
  for (int v=0; v < vnodes; v++) anyTrueElements[v] = 0;

  // Iterate through the BareFields in the container:
  typename std::vector<BareField<bool,Dim>* >::iterator bfi;
  for (bfi = weights.begin(); bfi != weights.end(); ++bfi) {
    BareField<bool,Dim>& weight = *(*bfi);
    typename BareField<bool,Dim>::iterator_if weightItr;
    // Go through the LFields in the BareField:
    for (weightItr = weight.begin_if();
         weightItr != weight.end_if();
         ++weightItr) {
      if ((*weightItr).second->IsCompressed()) {
        if ((*weightItr).second->getCompressedData()) {
          anyTrueElements[(*weightItr).second->getVnode()] += 1;
        }
      } else {
        anyTrueElements[(*weightItr).second->getVnode()] += 1;
      }
    }
  }
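
  // At this point each PE has filled anyTrueElements[] only for the vnodes it
  // owns locally; entries for remotely-owned vnodes are still zero, which is
  // why the global combine below is needed.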

  // Now broadcast so that every PE has a complete copy of the
  // "anyTrueElements" array: send to PE 0, which combines all, then
  // broadcasts back:
  int pe = Ippl::Comm->myNode();  // My processor ID
  Message *msg;
  int partialTag  = Ippl::Comm->next_tag(VNMB_PARTIAL_TAG, VNMB_TAG_CYCLE);
  int completeTag = Ippl::Comm->next_tag(VNMB_COMPLETE_TAG, VNMB_TAG_CYCLE);
  if (pe == 0) {
    // Receive partially-filled arrays from other PE's:
    int* anyTrueElementsPartial = new int[vnodes];
    int notReceived = npe - 1;
    while (notReceived > 0) {
      int otherPE = COMM_ANY_NODE;
      int tag = partialTag;
      Message* msg2 = Ippl::Comm->receive_block(otherPE, tag);
      msg2->get(anyTrueElementsPartial);
      delete msg2;
      // Put values into anyTrueElements:
      for (int v=0; v < vnodes; v++) {
        // Any nonzero element in anyTrueElementsPartial *must* correspond to a
        // zero element in the current status of anyTrueElements on PE 0; check
        // and give an error message otherwise:
        if ((anyTrueElements[v] != 0) && (anyTrueElementsPartial[v] != 0)) {
          ERRORMSG("VnodeMultiRepartition(): anyTrueElements[" << v << "] = "
                   << anyTrueElements[v] << " and anyTrueElementsPartial["
                   << v << "] = " << anyTrueElementsPartial[v]
                   << " ; inconsistent!" << endl);
        }
        anyTrueElements[v] += anyTrueElementsPartial[v];
      }
      notReceived--;
    }
    msg = new Message();
    msg->put(anyTrueElements, anyTrueElements + vnodes);
    // Broadcast fully-filled array to other PE's:
    Ippl::Comm->broadcast_others(msg, completeTag);
    delete [] anyTrueElementsPartial;
  } else {
    // Send my partially-filled array to PE 0:
    msg = new Message();
    msg->put(anyTrueElements, anyTrueElements + vnodes);
    Ippl::Comm->send(msg, 0, partialTag);
    // Receive fully-filled array from PE 0:
    int pe0 = 0;
    msg = Ippl::Comm->receive_block(pe0, completeTag);
    msg->get(anyTrueElements);
  }
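
  // Note: this gather-to-PE-0 plus broadcast is just a hand-rolled global
  // element-wise sum (an all-reduce) of anyTrueElements over all PE's; after
  // it, every PE holds the same fully-summed array.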

  // Loop through the PE's owned LField's (<==> Vnode's) in vf:
  typename BareField<double,Dim>::iterator_if vfItr;
  for (vfItr = vf.begin_if(); vfItr != vf.end_if(); ++vfItr) {

    // Global integer index of this vnode:
    int vnode = (*vfItr).second->getVnode();

    // Assign the integer value from anyTrueElements to the single element in
    // vf's corresponding vnode: The use of the special
    // LField::Compress(double val) function is a trick; we know that the vf
    // Field can be compressed (and probably always is); using this function
    // assigns the CompressedData value directly, or else compresses the LField
    // and sets CompressedData to the requested value if for some reason it
    // wasn't already compressed:
    (*vfItr).second->Compress(anyTrueElements[vnode]);

  }


  // Now invoke BinaryBalancer on the small Field vf:
  BinaryRepartition(l, vf);

  // Try this more rational alternative to handling zero-length vnodes:

  // Find and record all zero-length vnodes in l; each PE can check for this
  // independently, since all have domain size info for both local and remote
  // vnodes:
  int nZeroSizeVnodes = 0;
  // Locals:
  typename FieldLayout<Dim>::iterator_iv lLocals;
  for (lLocals = l.begin_iv(); lLocals != l.end_iv(); ++lLocals) {
    if ((*lLocals).second->getDomain().size() == 0) {nZeroSizeVnodes += 1;}
  }
  // Remotes:
  typename FieldLayout<Dim>::iterator_dv lRemotes;
  for (lRemotes = l.begin_rdv(); lRemotes != l.end_rdv(); ++lRemotes) {
    if ((*lRemotes).second->getDomain().size() == 0) {nZeroSizeVnodes += 1;}
  }

  // For now, punt on handling this and just return without changing the input
  // FieldLayout at all; report this as an error. The stuff ifdef'd by "GROSS"
  // below is a partially-completed (and partially-compiling) kludge to try and
  // make BinaryRepartition do something in this case by tweaking the vf Field
  // values with random numbers.
  if (nZeroSizeVnodes != 0) {
    WARNMSG("VnodeMultiRepartition() was not able to get a successful "
            << "rebalance. So, it is leaving the FieldLayout vnode "
            << "PE-partitioning the way it was when you called it. Sorry about "
            << "that; there just aren't enough noncompressed vnodes to go "
            << "around for this many PEs---at least not enough located in a "
            << "way that the underlying BinaryBalancer algorithm can deal "
            << "with." << endl);
    // Cleanup:
    delete [] anyTrueElements;
    return;
  }

#ifdef GROSS
  // Moved this gross stuff to the bottom of the file; hoist it back in here
  // later if needed. Of course, this module will not compile when GROSS is
  // defined until you do this hoisting, and fix remaining compile errors in
  // the gross stuff.
#endif // GROSS


  // This has changed the FieldLayout l so that it no longer has the same
  // number of vnodes as layout; it has one vnode per pnode.

  // Now must go through each *element* of vf, find which PE owns it now, map
  // the element index back to a vnode-array (and then global vnode ID) index
  // in layout, and reassign ownership of layout's corresponding vnode to that
  // same PE.

  int* peOwner = new int[vnodes];
  // Mask value -1 needed for inter-PE-exchange of array values below:
  for (int v=0; v < vnodes; v++) peOwner[v] = -1;

  // Loop through the elements of the Field vf. We know there is one per vnode
  // in the Field vf:

  // Outer loop over the PE's owned LField's in vf; vfItr constructed already:
  for (vfItr = vf.begin_if(); vfItr != vf.end_if(); ++vfItr) {

    // Inner loop over elements in the LField:
    typename LField<double,Dim>::iterator lfi;
    for (lfi = (*vfItr).second->begin();
         lfi != (*vfItr).second->end(); ++lfi) {
      // Global integer index values of the Field element this refers to:
      int vfIndex[Dim];
      // To compute this from the result of LField::iterator::GetOffset(), must
      // have the global-index-space base index values of this LField's
      // subdomain so you can add them on:
      int lfBase[Dim];
      for (unsigned int d=0; d<Dim; d++) {
        lfBase[d] = (*vfItr).second->getOwned()[d].first();
      }
      for (unsigned int d=0; d<Dim; d++) vfIndex[d] = lfi.GetOffset(d) + lfBase[d];
      // Global integer index of this vnode:
      int vnode = vfIndex[0];
      int multiplier = 1;
      for (unsigned int d=1; d<Dim; d++) {
        multiplier *= vpd[d-1];
        vnode += vfIndex[d]*multiplier;
      }
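      // I.e. the vnode "array" index (vfIndex[0], ..., vfIndex[Dim-1]) is
      // linearized with the first dimension varying fastest:
      //   vnode = vfIndex[0] + vfIndex[1]*vpd[0] + vfIndex[2]*vpd[0]*vpd[1] + ...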
      if (vnode >= vnodes) {
        ERRORMSG("VnodeMultiRepartition(): vnode = " << vnode
                 << " but vnodes is only " << vnodes << " ; inconsistent!"
                 << endl);
        PInsist(vnode < vnodes,
                "VnodeMultiRepartition: exit because of vnode value error.");
      }

      // Record PE ownership. This is SPMD code here. The PE that calculated
      // the value of vnode is the one that owns it, so assign ownership to
      // its PE ID number:
      peOwner[vnode] = pe;
    }
  }


  // Now broadcast so that every PE has a complete copy of the "peOwner" array:
  partialTag  = Ippl::Comm->next_tag(VNMB_PARTIAL_TAG, VNMB_TAG_CYCLE);
  completeTag = Ippl::Comm->next_tag(VNMB_COMPLETE_TAG, VNMB_TAG_CYCLE);
  Message* msg4;
  if (pe == 0) {
    // Receive partially-filled arrays from other PE's:
    int* peOwnerPartial = new int[vnodes];
    int notReceived = npe - 1;
    while (notReceived > 0) {
      int otherPE = COMM_ANY_NODE;
      int tag = partialTag;
      Message* msg2 = Ippl::Comm->receive_block(otherPE, tag);
      msg2->getmsg((void *)peOwnerPartial);
      delete msg2;
      // Put values into peOwner:
      for (int v=0; v < vnodes; v++) {
        if (peOwnerPartial[v] != -1) {
          // Any non-minus-one element in peOwnerPartial *must* correspond to a
          // minus-one element in the current status of peOwner on PE 0; check
          // and give an error message otherwise:
          if (peOwner[v] != -1) {
            ERRORMSG("VnodeMultiRepartition(): peOwner[" << v << "] = "
                     << peOwner[v] << " and peOwnerPartial["
                     << v << "] = " << peOwnerPartial[v]
                     << " ; inconsistent!" << endl);
          }
          peOwner[v] = peOwnerPartial[v];
        }
      }
      notReceived--;
    }
    msg4 = new Message();
    msg4->put(peOwner, peOwner + vnodes);
    // Broadcast fully-filled array to other PE's:
    Ippl::Comm->broadcast_others(msg4, completeTag);
    delete [] peOwnerPartial;
  } else {
    // Send my partially-filled array to PE 0:
    msg4 = new Message();
    msg4->put(peOwner, peOwner + vnodes);
    Ippl::Comm->send(msg4, 0, partialTag);
    // Receive fully-filled array from PE 0:
    int pe0 = 0;
    msg4 = Ippl::Comm->receive_block(pe0, completeTag);
    msg4->get(peOwner);
  }
  delete msg4;


  // Now repartition layout to have the same PE ownership as l; must construct
  // an array of Vnodes before FieldLayout::Repartition can be invoked:

  // Find out how many vnodes I (this PE) own:
  int nVnodesIOwn = 0;
  for (int v=0; v < vnodes; v++) if (peOwner[v] == pe) ++nVnodesIOwn;

  // Array of Vnodes that I own:
  Vnode<Dim>* domains = new Vnode<Dim>[nVnodesIOwn];

  // Get the values of the domains from the original layout:

  // The ones I owned in the original layout (locals):
  typename FieldLayout<Dim>::iterator_iv oldLocals;
  int domain = 0;  // counter
  for (oldLocals = layout.begin_iv(); oldLocals != layout.end_iv();
       ++oldLocals) {
    // Global integer index of this vnode:
    int vnode = (*oldLocals).second->getVnode();
    if (peOwner[vnode] == pe) {
      domains[domain] =
        Vnode<Dim>((*oldLocals).second->getDomain(), pe, vnode);
      ++domain;
    }
  }

  // The ones I didn't own in the original layout (remotes):
  typename FieldLayout<Dim>::iterator_dv oldRemotes;
  for (oldRemotes = layout.begin_rdv(); oldRemotes != layout.end_rdv();
       ++oldRemotes) {
    // Global integer index of this vnode:
    int vnode = (*oldRemotes).second->getVnode();
    if (peOwner[vnode] == pe) {
      domains[domain] =
        Vnode<Dim>((*oldRemotes).second->getDomain(), pe, vnode);
      ++domain;
    }
  }


  // Finally, call FieldLayout::Repartition() to repartition layout:
  layout.Repartition(domains, domains + nVnodesIOwn);
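
  // Note that each PE passed only the vnodes it is to own under the new
  // partitioning (peOwner[vnode] == pe); all PE's call Repartition() here
  // with complementary vnode lists that together cover the whole domain.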

  // Cleanup:
  delete [] anyTrueElements;
  delete [] peOwner;
  delete [] domains;

  return;
}

#ifdef GROSS
  // See comments above in first "GROSS" block about putting this stuff back up there.

  // If none are zero, go on from here. If any are zero, have to go back and
  // find them, and find suitable nonzero ones to divide to get one nonzero one
  // for each PE that owns a zero-size one:
  FieldLayout<Dim> l2(vfndi, edt, vpd, true, vnodes);  // Outside if block for scoping.
  if (nZeroSizeVnodes != 0) {
    // Reconstruct a replacement vf Field, add a random epsilon fudge factor to
    // it, and see if the repartitioner works; if not, *don't* repartition and
    // just return (load balance attempt fails, so leave load as it was):

    // The replacement vf Field:
    //outside if block; see above FieldLayout<Dim> l2(vfndi, edt, vpd, true, vnodes);
    BareField<double,Dim> vf2(l2);
    vf2 = 0.0;

    // -------------------------------------------------------------------------
    // Duplicate the original assignment of the vf Field:

    // Loop through the PE's owned LField's (<==> Vnode's) in each boolean Field
    // of weights. Check each for compression, and increment the value in an
    // array of int's with one element for each vnode; increment by 1 if
    // compressed to true or uncompressed; if compressed to false don't
    // increment:
    for (int v=0; v < vnodes; v++) anyTrueElements[v] = 0;

    // Iterate through the BareFields in the container:
    std::vector<BareField<bool,Dim>* >::iterator bfi2;
    for (bfi2 = weights.begin(); bfi2 != weights.end(); ++bfi2) {
      BareField<bool,Dim>& weight2 = *(*bfi2);
      typename BareField<bool,Dim>::iterator_if weight2Itr;
      // Go through the LFields in the BareField:
      for (weight2Itr = weight2.begin_if();
           weight2Itr != weight2.end_if();
           ++weight2Itr) {
        if ((*weight2Itr).second->IsCompressed()) {
          if ((*weight2Itr).second->getCompressedData()) {
            anyTrueElements[(*weight2Itr).second->getVnode()] += 1;
          }
        } else {
          anyTrueElements[(*weight2Itr).second->getVnode()] += 1;
        }
      }
    }
    // -------------------------------------------------------------------------

    // Now add the epsilon tweak:
    double normfact = sum(Abs(vf2));
    normfact = normfact/vfndi.size();
    double epsilon = 1.0/normfact;
    vf2 += epsilon*IpplRandom;

    // *************************************************************************
    // Once again go through the code up to the attempted BinaryRepartition():
    // This should maybe be a function....

    // Now broadcast so that every PE has a complete copy of the
    // "anyTrueElements" array: send to PE 0, which combines all, then
    // broadcasts back:
    partialTag  = Ippl::Comm->next_tag(VNMB_PARTIAL_TAG, VNMB_TAG_CYCLE);
    completeTag = Ippl::Comm->next_tag(VNMB_COMPLETE_TAG, VNMB_TAG_CYCLE);
    Message *msg3;
    if (pe == 0) {
      // Receive partially-filled arrays from other PE's:
      int* anyTrueElementsPartial = new int[vnodes];
      int notReceived = npe - 1;
      while (notReceived > 0) {
        int otherPE = COMM_ANY_NODE;
        int tag = partialTag;
        Message* msg2 = Ippl::Comm->receive_block(otherPE, tag);
        msg2->get(anyTrueElementsPartial);
        delete msg2;
        // Put values into anyTrueElements:
        for (int v=0; v < vnodes; v++) {
          // Any nonzero element in anyTrueElementsPartial *must* correspond to
          // a zero element in the current status of anyTrueElements on PE 0;
          // check and give an error message otherwise:
          if ((anyTrueElements[v] != 0) && (anyTrueElementsPartial[v] != 0)) {
            ERRORMSG("VnodeMultiRepartition(): anyTrueElements[" << v << "] = "
                     << anyTrueElements[v] << " and anyTrueElementsPartial["
                     << v << "] = " << anyTrueElementsPartial[v]
                     << " ; inconsistent!" << endl);
          }
          anyTrueElements[v] += anyTrueElementsPartial[v];
        }
        notReceived--;
      }
      msg3 = new Message();
      msg3->put(anyTrueElements, anyTrueElements + vnodes);
      // Broadcast fully-filled array to other PE's:
      Ippl::Comm->broadcast_others(msg3, completeTag);
      delete [] anyTrueElementsPartial;
    } else {
      // Send my partially-filled array to PE 0:
      msg3 = new Message();
      msg3->put(anyTrueElements, anyTrueElements + vnodes);
      Ippl::Comm->send(msg3, 0, partialTag);
      // Receive fully-filled array from PE 0:
      int pe0 = 0;
      msg3 = Ippl::Comm->receive_block(pe0, completeTag);
      msg3->get(anyTrueElements);
    }
    delete msg3;

    // Loop through the PE's owned LField's (<==> Vnode's) in vf2:
    typename BareField<double,Dim>::iterator_if vf2Itr;
    for (vf2Itr = vf2.begin_if(); vf2Itr != vf2.end_if(); ++vf2Itr) {

      // Global integer index of this vnode:
      int vnode = (*vf2Itr).second->getVnode();

      // Assign the integer value from anyTrueElements to the single element in
      // vf2's corresponding vnode: The use of the special
      // LField::Compress(double val) function is a trick; we know that the vf2
      // Field can be compressed (and probably always is); using this function
      // assigns the CompressedData value directly, or else compresses the
      // LField and sets CompressedData to the requested value if for some
      // reason it wasn't already compressed:
      (*vf2Itr).second->Compress(anyTrueElements[vnode]);

    }

    // Now invoke BinaryBalancer on the small Field vf2:
    BinaryRepartition(l2, vf2);

    // *************************************************************************

    // Check once again for boo-boos (zero-length vnodes); if found, abandon balance:
    // Find and record all zero-length vnodes in l2; each PE can check for this
    // independently, since all have domain size info for both local and remote
    // vnodes:
    int nZeroSizeVnodes2 = 0;
    // Locals:
    typename FieldLayout<Dim>::iterator_iv l2Locals;
    for (l2Locals = l2.begin_iv(); l2Locals != l2.end_iv(); ++l2Locals) {
      if ((*l2Locals).second->getDomain().size() == 0) {nZeroSizeVnodes2 += 1;}
    }
    // Remotes:
    typename FieldLayout<Dim>::iterator_dv l2Remotes;
    for (l2Remotes = l2.begin_rdv(); l2Remotes != l2.end_rdv(); ++l2Remotes) {
      if ((*l2Remotes).second->getDomain().size() == 0) {nZeroSizeVnodes2 += 1;}
    }
    // If none are zero, go on from here. If any are zero, have to go back and
    // find them, and find suitable nonzero ones to divide to get one nonzero one
    // for each PE that owns a zero-size one:
    if (nZeroSizeVnodes2 != 0) {
      WARNMSG("VnodeMultiRepartition(): even on a desperate 2nd attempt by adding "
              << "in some random nonzero vnodes, was not able to get a successful "
              << "rebalance. So, leaving the FieldLayout partitioning the way it "
              << "was when you called VnodeMultiRepartition(). Sorry about that."
              << endl);
      return;
    } else {
      // Success! (Of some sort....); repartition vf and assign it to vf2. Do
      // this by setting l equal to l2 and doing l.Repartition():
      l = l2;
      l2.Repartition
      vf = vf2;
    }
  }
#endif // GROSS

/***************************************************************************
 * $RCSfile: VnodeMultiBalancer.cpp,v $   $Author: adelmann $
 * $Revision: 1.1.1.1 $   $Date: 2003/01/23 07:40:27 $
 * IPPL_VERSION_ID: $Id: VnodeMultiBalancer.cpp,v 1.1.1.1 2003/01/23 07:40:27 adelmann Exp $
 ***************************************************************************/