OPAL (Object Oriented Parallel Accelerator Library) 2022.1
OPAL
ParticleSpatialLayout.h
Go to the documentation of this file.
1// -*- C++ -*-
2/***************************************************************************
3 *
4 * The IPPL Framework
5 *
6 ***************************************************************************/
7
8#ifndef PARTICLE_SPATIAL_LAYOUT_H
9#define PARTICLE_SPATIAL_LAYOUT_H
10
11/*
12 * ParticleSpatialLayout - particle layout based on spatial decomposition.
13 *
14 * This is a specialized version of ParticleLayout, which places particles
15 * on processors based on their spatial location relative to a fixed grid.
16 * In particular, this can maintain particles on processors based on a
17 * specified FieldLayout or RegionLayout, so that particles are always on
18 * the same node as the node containing the Field region to which they are
19 * local. This may also be used if there is no associated Field at all,
20 * in which case a grid is selected based on an even distribution of
21 * particles among processors.
22 */
23
24// include files
27#include "Region/RegionLayout.h"
28#include "Message/Message.h"
29#include "Message/Format.h"
30#include "Message/MsgBuffer.h"
33
34#include <cstddef>
35
36#include <functional>
37#include <iostream>
38#include <map>
39#include <memory>
40#include <vector>
41
43
44#include <mpi.h>
45
46// forward declarations
47class UserList;
48template <class T> class ParticleAttrib;
49template <unsigned Dim, class T> class UniformCartesian;
50template <class T, unsigned Dim, class Mesh, class CachingPolicy> class ParticleSpatialLayout;
51template <class T, unsigned Dim, class Mesh, class CachingPolicy>
52std::ostream& operator<<(std::ostream&, const ParticleSpatialLayout<T,Dim,Mesh,CachingPolicy>&);
53
54// ParticleSpatialLayout class definition. Template parameters are the type
55// and dimension of the ParticlePos object used for the particles. The
56// dimension of the position must match the dimension of the FieldLayout
57// object used in this particle layout, if any.
58// Optional template parameter for the mesh type
59template < class T, unsigned Dim, class Mesh=UniformCartesian<Dim,T>, class CachingPolicy=BoxParticleCachingPolicy<T,Dim,Mesh > >
61 public FieldLayoutUser, public CachingPolicy
62{
    /*
      Enable caching of particles. The cache size is
      given in multiples of the mesh size in each dimension.
    */
67
68public:
69 // pair iterator definition ... this layout does not allow for pairlists
70 typedef int pair_t;
75
76 // type of attributes this layout should use for position and ID
80
81public:
82 // constructor: The Field layout to which we match our particle's
83 // locations.
85
86 // constructor: this one also takes a Mesh
88
89 // a similar constructor, but this one takes a RegionLayout.
91
92 // a default constructor ... in this case, no layout will
93 // be assumed by this class. A layout may be given later via the
94 // 'setLayout' method, either as a FieldLayout or as a RegionLayout.
96
97 // destructor
99
100 //
101 // spatial decomposition layout information
102 //
103
104 // retrieve a reference to the FieldLayout object in use. This may be used,
105 // e.g., to construct a Field with the same layout as the Particles. Note
106 // that if this object was constructed by providing a RegionLayout in the
107 // constructor, then this generated FieldLayout will not necessarily match
108 // up with the Region (it will be offset by some amount). But, if this
109 // object was either 1) created with a FieldLayout to begin with, or 2)
110 // created with no layout, and one was generated internally, then the
111 // returned FieldLayout will match and can be used to make new Fields or
112 // Particles.
114 {
115 return RLayout.getFieldLayout();
116 }
117
118 // retrieve a reference to the RegionLayout object in use
120 {
121 return RLayout;
122 }
124 {
125 return RLayout;
126 }
127
128 // get number of particles on a physical node
129 int getNodeCount(unsigned i) const
130 {
131 PAssert_LT(i, (unsigned int) Ippl::getNodes());
132 return NodeCount[i];
133 }
134
135 // get flag for empty node domain
136 bool getEmptyNode(unsigned i) const
137 {
138 PAssert_LT(i, (unsigned int) Ippl::getNodes());
139 return EmptyNode[i];
140 }
141
142 //
143 // Particle swapping/update routines
144 //
145
146 // Update the location and indices of all atoms in the given IpplParticleBase
147 // object. This handles swapping particles among processors if
148 // needed, and handles create and destroy requests. When complete,
149 // all nodes have correct layout information.
151 const ParticleAttrib<char>* canSwap=0);
152
153
154 //
155 // I/O
156 //
157
158 // Print out information for debugging purposes.
159 void printDebug(Inform&);
160
161 //
162 // virtual functions for FieldLayoutUser's (and other UserList users)
163 //
164
165 // Repartition onto a new layout
166 virtual void Repartition(UserList *);
167
168 // Tell this object that an object is being deleted
169 virtual void notifyUserOfDelete(UserList *);
170
171 void enableCaching() { caching = true; }
172 void disableCaching() { caching = false; }
173
174protected:
175 // The RegionLayout which determines where our particles go.
177
178 // The number of particles located on each physical node.
179 size_t *NodeCount;
180
181 // Flag for which nodes have no local domain
183
184 // a list of Message pointers used in swapping particles, and flags
185 // for which nodes expect messages in each dimension
189 std::vector<size_t>* PutList;
190
192
193 // perform common constructor tasks
194 void setup();
195
    // Rebuild the RegionLayout entirely, by recalculating our min and max
    // domains, adding a buffer region, and then giving this new Domain to
    // our internal RegionLayout. When this is done, we must rebuild all
    // our other data structures as well.
    //
    // haveLocal : number of particles this node currently stores locally
    // PData     : particle container; only its position attribute PData.R
    //             is read here.
    //
    // NOTE(review): the message tags 'tag' and 'btag' used below are
    // declared on lines not visible in this chunk — confirm against the
    // full source that they come from the communicator's tag pool.
    template < class PB >
    void rebuild_layout(size_t haveLocal, PB& PData)
    {
        size_t i;
        unsigned d; // loop variables

        //~ Inform dbgmsg("SpatialLayout::rebuild_layout", INFORM_ALL_NODES);
        //~ dbgmsg << "rebuild..." << endl;
        SingleParticlePos_t minpos = 0;
        SingleParticlePos_t maxpos = 0;

        // if we have local particles, then find the min and max positions
        // (component-wise bounding box over all local particle positions)
        if (haveLocal > 0)
        {
            minpos = PData.R[0];
            maxpos = PData.R[0];
            for (i=1; i < haveLocal; ++i)
            {
                for (d=0; d < Dim; ++d)
                {
                    if (PData.R[i][d] < minpos[d])
                        minpos[d] = PData.R[i][d];
                    if (PData.R[i][d] > maxpos[d])
                        maxpos[d] = PData.R[i][d];
                }
            }
        }

        // if we're not on node 0, send data to node 0
        if (Ippl::myNode() != 0)
        {
            Message *msg = new Message;
            msg->put(haveLocal);
            if (haveLocal > 0)
            {
                minpos.putMessage(*msg);
                maxpos.putMessage(*msg);
            }
            Ippl::Comm->send(msg, 0, tag);

            // now receive back min and max range as provided by the master node.
            // These will include some buffer region, and will be integral values,
            // so we can make a FieldLayout and use it to initialize the RegionLayout.
            int node = 0;
            msg = Ippl::Comm->receive_block(node, btag);
            minpos.getMessage(*msg);
            maxpos.getMessage(*msg);
            delete msg;

        }
        else // on node 0, collect data and compute region
        {
            SingleParticlePos_t tmpminpos;
            SingleParticlePos_t tmpmaxpos;
            size_t tmphaveLocal = 0;
            unsigned unreceived = Ippl::getNodes() - 1;

            // collect data from other nodes and fold their bounding boxes
            // into the global minpos/maxpos
            while (unreceived > 0)
            {
                int node = COMM_ANY_NODE;
                Message *msg = Ippl::Comm->receive_block(node, tag);
                msg->get(tmphaveLocal);
                if (tmphaveLocal > 0)
                {
                    tmpminpos.getMessage(*msg);
                    tmpmaxpos.getMessage(*msg);
                    for (i=0; i < Dim; ++i)
                    {
                        if (tmpminpos[i] < minpos[i])
                            minpos[i] = tmpminpos[i];
                        if (tmpmaxpos[i] > maxpos[i])
                            maxpos[i] = tmpmaxpos[i];
                    }
                }
                delete msg;
                unreceived--;
            }

            // adjust min and max to include a buffer region and fall on integral
            // values (the buffer is 12.5% of the extent added on each side)
            SingleParticlePos_t extrapos = (maxpos - minpos) * ((T)0.125);
            maxpos += extrapos;
            minpos -= extrapos;
            for (i=0; i < Dim; ++i)
            {
                // round minpos down and maxpos up to whole-number coordinates
                if (minpos[i] >= 0.0)
                    minpos[i] = (int)(minpos[i]);
                else
                    minpos[i] = (int)(minpos[i] - 1);
                maxpos[i] = (int)(maxpos[i] + 1);
            }

            // send these values out to the other nodes
            if (Ippl::getNodes() > 1)
            {
                Message *bmsg = new Message;
                minpos.putMessage(*bmsg);
                maxpos.putMessage(*bmsg);
                Ippl::Comm->broadcast_others(bmsg, btag);
            }
        }

        // determine the size of the new domain, and the number of blocks into
        // which it should be broken (keep the current vnode count if the
        // layout was already initialized; -1 lets the layout choose)
        NDIndex<Dim> range;
        for (i=0; i < Dim; ++i)
            range[i] = Index((int)(minpos[i]), (int)(maxpos[i]));
        int vn = -1;
        if (RLayout.initialized())
            vn = RLayout.size_iv() + RLayout.size_rdv();

        // ask the RegionLayout to change the partitioning to match this size
        // and block count. This will eventually end up by calling Repartition
        // here, which will lead to rebuilding the neighbor data, etc., so we
        // are done.
        RLayout.changeDomain(range, vn);
    }
321
    // swap particles to neighboring nodes if they have moved too far
    // PB is the type of IpplParticleBase which should have it's layout rebuilt.
    //mwerks template<class PB>
    //mwerks unsigned swap_particles(unsigned, PB&);
    // go through all our local particles, and send particles which must
    // be swapped to another node to that node.
    //
    // LocalNum : number of particles currently stored on this node
    // PData    : particle container (positions PData.R, IDs PData.ID)
    // returns  : the new local particle count after all sends/receives
    //
    // NOTE(review): the message tags 'etag' and 'tag' used below are
    // obtained on lines not visible in this chunk — confirm against the
    // full source.
    template < class PB >
    size_t swap_particles(size_t LocalNum, PB& PData)
    {

        //~ Inform dbgmsg("SpatialLayout::swap_particles", INFORM_ALL_NODES);
        //~ dbgmsg << "swap..." << endl;

        Inform msg("ParticleSpatialLayout ERROR ", INFORM_ALL_NODES);

        unsigned d, i, j; // loop variables
        size_t ip;
        unsigned N = Ippl::getNodes();
        unsigned myN = Ippl::myNode();

        // iterators used to search local domains
        typename RegionLayout<T,Dim,Mesh>::iterator_iv localV, localEnd = RLayout.end_iv();

        // iterators used to search remote domains
        typename RegionLayout<T,Dim,Mesh>::iterator_dv remoteV; // remoteEnd = RLayout.end_rdv();

        // JCC: This "nudge factor" stuff was added when we were experiencing
        // problems with particles getting lost in between PRegions on
        // neighboring nodes. This problem has since been resolved by
        // fixing the way in which PRegion boundaries are computed, so I am
        // commenting this out for now. We can bring it back later if the
        // need arises.

        /*

        // Calculate a 'nudge factor', an amount that can get added to a
        // particle position to determine where it should be located. The nudge
        // factor equals 1/100th the smallest width of the rnodes in each dimension.
        // When we try to find where a particle is located, we check what vnode
        // contains this particle 'nudge region', a box around the particle's pos
        // of the size of the nudge factor.
        T pNudge[Dim];
        for (d=0; d < Dim; ++d) {
            // initialize to the first rnode's width
            T minval = (*(RLayout.begin_iv())).second->getDomain()[d].length();

            // check the local rnodes
            for (localV = RLayout.begin_iv(); localV != localEnd; ++localV) {
                T checkval = (*localV).second->getDomain()[d].length();
                if (checkval < minval)
                    minval = checkval;
            }

            // check the remote rnodes
            for (remoteV = RLayout.begin_rdv(); remoteV != remoteEnd; ++remoteV) {
                T checkval = (*remoteV).second->getDomain()[d].length();
                if (checkval < minval)
                    minval = checkval;
            }

            // now rescale the minval, and save it
            pNudge[d] = 0.00001 * minval;
        }

        */

        // An NDRegion object used to store a particle position.
        NDRegion<T,Dim> pLoc;

        // get new message tag for particle exchange with empty domains

        if (!getEmptyNode(myN))
        {

            // Particles are swapped in multiple passes, one for each dimension.
            // The tasks completed here for each dimension are the following:
            // 1. For each local Vnode, find the remote Vnodes which exist along
            // same axis as the current axis (i.e. all Vnodes along the x-axis).
            // 2. From this list, determine which nodes we send messages to.
            // 3. Go through all the particles, finding those which have moved to
            // an off-processor vnode, and store index in an array for that node
            // 4. Send off the particles to the nodes (if no particles are
            // going to a node, send them a message with 0 in it)
            // 5. Delete the send particles from our local list
            // 6. Receive particles sent to us by other nodes (some messages may
            // say that we're receiving 0 particles from that node).

            // Initialize NDRegion with a position inside the first Vnode.
            // We can skip dim 0, since it will be filled below.
            for (d = 1; d < Dim; ++d)
            {
                T first = (*(RLayout.begin_iv())).second->getDomain()[d].first();
                T last = (*(RLayout.begin_iv())).second->getDomain()[d].last();
                T mid = first + 0.5 * (last - first);
                pLoc[d] = PRegion<T>(mid, mid);
            }

            for (d = 0; d < Dim; ++d)
            {

                // get new message tag for particle exchange along this dimension

                // we only need to do the rest if there are other nodes in this dim
                if (NeighborNodes[d] > 0)
                {
                    // create new messages to send to our neighbors
                    for (i = 0; i < N; i++)
                        if (SwapNodeList[d][i])
                            SwapMsgList[i] = new Message;

                    // Go through the particles and find those moving in the current dir.
                    // When one is found, copy it into outgoing message and delete it.
                    for (ip=0; ip<LocalNum; ++ip)
                    {
                        // get the position of particle ip, and find the closest grid pnt
                        // for just the dimensions 0 ... d
                        for (j = 0; j <= d; j++)
                            pLoc[j] = PRegion<T>(PData.R[ip][j], PData.R[ip][j]);

                        // first check local domains (in this dimension)
                        bool foundit = false;
                        // JCC: int nudged = 0;
                        while (!foundit)
                        {
                            for (localV = RLayout.begin_iv();
                                    localV != localEnd && !foundit; ++localV)
                            {
                                foundit= (((*localV).second)->getDomain())[d].touches(pLoc[d]);
                            }

                            // if not found, it might be remote
                            if (!foundit)
                            {
                                // see which Vnode this position is in
                                typename RegionLayout<T,Dim,Mesh>::touch_range_dv touchingVN =
                                    RLayout.touch_range_rdv(pLoc);

                                // make sure we have a vnode to send it to; an empty
                                // touch range means the particle left the global domain
                                if (touchingVN.first == touchingVN.second)
                                {
                                    // JCC: if (nudged >= Dim) {
                                    ERRORMSG("Local particle " << ip << " with ID=");
                                    ERRORMSG(PData.ID[ip] << " at ");
                                    ERRORMSG(PData.R[ip] << " is outside of global domain ");
                                    ERRORMSG(RLayout.getDomain() << endl);
                                    ERRORMSG("This occurred when searching for point " << pLoc);
                                    ERRORMSG(" in RegionLayout = " << RLayout << endl);
                                    Ippl::abort();
                                }
                                else
                                {

                                    // the node has been found - add index to put list
                                    unsigned node = (*(touchingVN.first)).second->getNode();
                                    PAssert_EQ(SwapNodeList[d][node], true);
                                    PutList[node].push_back(ip);

                                    // .. and then add to DestroyList
                                    PData.destroy(1, ip);

                                    // indicate we found it to quit this check
                                    foundit = true;
                                }
                            }
                        }
                    }

                    // send the particles to their destination nodes
                    for (i = 0; i < N; i++)
                    {
                        if (SwapNodeList[d][i])
                        {
                            // put data for particles on this put list into message
                            PData.putMessage( *(SwapMsgList[i]), PutList[i] );

                            // add a final 'zero' number of particles to indicate the end
                            PData.putMessage(*(SwapMsgList[i]), (size_t) 0, (size_t) 0);

                            // send the message
                            // Inform dbgmsg("SpatialLayout", INFORM_ALL_NODES);
                            //dbgmsg << "Swapping "<<PutList[i].size() << " particles to node ";
                            //dbgmsg << i<<" with tag " << tag << " (" << 'x' + d << ")" << endl;
                            //dbgmsg << " ... msg = " << *(SwapMsgList[i]) << endl;
                            int node = i;
                            Ippl::Comm->send(SwapMsgList[i], node, tag);

                            // clear the list
                            PutList[i].erase(PutList[i].begin(), PutList[i].end());
                        }
                    }

                    LocalNum -= PData.getDestroyNum(); // update local num
                    ADDIPPLSTAT(incParticlesSwapped, PData.getDestroyNum());
                    PData.performDestroy();

                    // receive particles from neighbor nodes, and add them to our list
                    unsigned sendnum = NeighborNodes[d];
                    while (sendnum-- > 0)
                    {
                        // NOTE(review): 'node' is declared on a line not visible in
                        // this chunk (presumably COMM_ANY_NODE) — confirm.
                        Message *recmsg = Ippl::Comm->receive_block(node, tag);
                        size_t recvd;
                        while ((recvd = PData.getMessage(*recmsg)) > 0)
                            LocalNum += recvd;
                        delete recmsg;
                    }
                } // end if (NeighborNodes[d] > 0)

                if (d == 0)
                {
                    // receive messages from any empty nodes (they only send,
                    // and only during the first dimension's pass)
                    for (i = 0; i < N; ++i)
                    {
                        if (getEmptyNode(i))
                        {
                            int node = i;
                            Message *recmsg = Ippl::Comm->receive_block(node, etag);
                            size_t recvd;
                            while ((recvd = PData.getMessage(*recmsg)) > 0)
                                LocalNum += recvd;
                            delete recmsg;
                        }
                    }
                }

            } // end for (d=0; d<Dim; ++d)

        }
        else // empty node sends, but does not receive
        {
            msg << "case getEmptyNode(myN) " << endl;
            // create new messages to send to our neighbors along dim 0
            for (i = 0; i < N; i++)
                if (SwapNodeList[0][i])
                    SwapMsgList[i] = new Message;

            // Go through the particles and find those moving to other nodes.
            // When one is found, copy it into outgoing message and delete it.
            for (ip=0; ip<LocalNum; ++ip)
            {
                // get the position of particle ip, and find the closest grid pnt
                for (j = 0; j < Dim; j++)
                    pLoc[j] = PRegion<T>(PData.R[ip][j], PData.R[ip][j]);

                // see which remote Vnode this position is in
                typename RegionLayout<T,Dim,Mesh>::touch_range_dv touchingVN =
                    RLayout.touch_range_rdv(pLoc);

                // make sure we have a vnode to send it to
                if (touchingVN.first == touchingVN.second)
                {
                    ERRORMSG("Local particle " << ip << " with ID=");
                    ERRORMSG(PData.ID[ip] << " at ");
                    ERRORMSG(PData.R[ip] << " is outside of global domain ");
                    ERRORMSG(RLayout.getDomain() << endl);
                    ERRORMSG("This occurred when searching for point " << pLoc);
                    ERRORMSG(" in RegionLayout = " << RLayout << endl);
                    Ippl::abort();
                }
                else
                {
                    // the node has been found - add index to put list
                    unsigned node = (*(touchingVN.first)).second->getNode();
                    PAssert_EQ(SwapNodeList[0][node], true);
                    PutList[node].push_back(ip);

                    // .. and then add to DestroyList
                    PData.destroy(1, ip);
                }
            }

            // send the particles to their destination nodes
            for (i = 0; i < N; i++)
            {
                if (SwapNodeList[0][i])
                {
                    // put data for particles on this put list into message
                    PData.putMessage( *(SwapMsgList[i]), PutList[i] );

                    // add a final 'zero' number of particles to indicate the end
                    PData.putMessage(*(SwapMsgList[i]), (size_t) 0, (size_t) 0);

                    // send the message
                    int node = i;
                    Ippl::Comm->send(SwapMsgList[i], node, etag);

                    // clear the list
                    PutList[i].erase(PutList[i].begin(), PutList[i].end());
                }
            }

            LocalNum -= PData.getDestroyNum(); // update local num
            ADDIPPLSTAT(incParticlesSwapped, PData.getDestroyNum());
            PData.performDestroy();

        }

        // return how many particles we have now
        return LocalNum;
    }
625
626
/*
 * Simplified version for testing purposes.
 */

    // Like swap_particles() above, but without the empty-node handling and
    // without the out-of-domain error check: every particle not found in a
    // local vnode is assumed to touch a remote vnode.
    //
    // LocalNum : current local particle count
    // PData    : particle container (positions PData.R)
    // returns  : new local particle count after swapping
    template < class PB >
    size_t short_swap_particles(size_t LocalNum, PB& PData)
    {
        // running count of particles this node has sent away (debug/stat aid)
        static int sent = 0;


        unsigned d, i, j; // loop variables
        size_t ip;
        unsigned N = Ippl::getNodes();

        // iterators used to search local domains
        typename RegionLayout<T,Dim,Mesh>::iterator_iv localV, localEnd = RLayout.end_iv();

        // iterators used to search remote domains
        typename RegionLayout<T,Dim,Mesh>::iterator_dv remoteV; // remoteEnd = RLayout.end_rdv();


        // An NDRegion object used to store a particle position.
        NDRegion<T,Dim> pLoc;

        // Initialize NDRegion with a position inside the first Vnode.
        // We can skip dim 0, since it will be filled below.
        for (d = 1; d < Dim; ++d)
        {
            T first = (*(RLayout.begin_iv())).second->getDomain()[d].first();
            T last = (*(RLayout.begin_iv())).second->getDomain()[d].last();
            T mid = first + 0.5 * (last - first);
            pLoc[d] = PRegion<T>(mid, mid);
        }

        for (d = 0; d < Dim; ++d)
        {

            // get new message tag for particle exchange along this dimension
            // NOTE(review): the tag declaration is on a line not visible in
            // this chunk; 'tag' is used below — confirm against full source.

            // we only need to do the rest if there are other nodes in this dim
            if (NeighborNodes[d] > 0)
            {
                // create new messages to send to our neighbors
                for (i = 0; i < N; i++)
                    if (SwapNodeList[d][i])
                        SwapMsgList[i] = new Message;

                // Go through the particles and find those moving in the current dir.
                // When one is found, copy it into outgoing message and delete it.
                for (ip=0; ip<LocalNum; ++ip)
                {
                    // get the position of particle ip, and find the closest grid pnt
                    // for just the dimensions 0 ... d
                    for (j = 0; j <= d; j++)
                        pLoc[j] = PRegion<T>(PData.R[ip][j], PData.R[ip][j]);

                    // first check local domains (in this dimension)
                    bool foundit = false;

                    for (localV = RLayout.begin_iv();
                            localV != localEnd && !foundit; ++localV)
                    {
                        foundit= (((*localV).second)->getDomain())[d].touches(pLoc[d]);
                    }

                    // if not found, it might be remote
                    if (!foundit)
                    {
                        // see which Vnode this position is in
                        typename RegionLayout<T,Dim,Mesh>::touch_range_dv touchingVN =
                            RLayout.touch_range_rdv(pLoc);


                        // the node has been found - add index to put list
                        // NOTE(review): no empty-range check here (unlike
                        // swap_particles); an out-of-domain particle would
                        // dereference an end iterator.
                        unsigned node = (*(touchingVN.first)).second->getNode();
                        PAssert_EQ(SwapNodeList[d][node], true);
                        PutList[node].push_back(ip);

                        // .. and then add to DestroyList
                        PData.destroy(1, ip);

                        // indicate we found it to quit this check
                        foundit = true;
                        sent++;
                    }
                }

                // NOTE(review): declared but never used in the visible code.
                std::vector<MPI_Request> requests;
                std::vector<MsgBuffer*> buffers;

                // send the particles to their destination nodes
                for (i = 0; i < N; i++)
                {
                    if (SwapNodeList[d][i])
                    {

                        // put data for particles on this put list into message
                        PData.putMessage( *(SwapMsgList[i]), PutList[i] );

                        // add a final 'zero' number of particles to indicate the end
                        PData.putMessage(*(SwapMsgList[i]), (size_t) 0, (size_t) 0);

                        int node = i;
                        Ippl::Comm->send(SwapMsgList[i], node, tag);

                        // clear the list
                        PutList[i].erase(PutList[i].begin(), PutList[i].end());



                    }
                }

                LocalNum -= PData.getDestroyNum(); // update local num
                ADDIPPLSTAT(incParticlesSwapped, PData.getDestroyNum());
                PData.performDestroy();

                // receive particles from neighbor nodes, and add them to our list
                unsigned sendnum = NeighborNodes[d];
                while (sendnum-- > 0)
                {
                    // NOTE(review): 'node' is declared on a line not visible
                    // in this chunk (presumably COMM_ANY_NODE) — confirm.
                    Message *recmsg = Ippl::Comm->receive_block(node, tag);
                    size_t recvd;
                    while ((recvd = PData.getMessage(*recmsg)) > 0)
                        LocalNum += recvd;
                    delete recmsg;
                }

            } // end if (NeighborNodes[d] > 0)

        } // end for (d=0; d<Dim; ++d)


        // return how many particles we have now
        return LocalNum;
    }
765
766
767
768
769
    // PB is the type of IpplParticleBase which should have it's layout rebuilt.
    //mwerks template<class PB>
    //mwerks unsigned swap_particles(unsigned, PB&, const ParticleAttrib<char>&);
    // go through all our local particles, and send particles which must
    // be swapped to another node to that node.
    //
    // Overload of swap_particles that additionally takes a per-particle
    // 'canSwap' flag attribute: particles whose flag is false are left on
    // this node and skipped entirely.
    //
    // LocalNum : number of particles currently stored on this node
    // PData    : particle container (positions PData.R, IDs PData.ID)
    // canSwap  : per-particle char flags; nonzero means the particle may
    //            be moved to another node
    // returns  : the new local particle count after all sends/receives
    //
    // NOTE(review): the message tags 'etag' and 'tag' used below are
    // obtained on lines not visible in this chunk — confirm against the
    // full source.
    template < class PB >
    size_t swap_particles(size_t LocalNum, PB& PData,
                          const ParticleAttrib<char>& canSwap)
    {

        unsigned d, i, j; // loop variables
        size_t ip;
        unsigned N = Ippl::getNodes();
        unsigned myN = Ippl::myNode();

        // iterators used to search local domains
        typename RegionLayout<T,Dim,Mesh>::iterator_iv localV, localEnd = RLayout.end_iv();

        // iterators used to search remote domains
        typename RegionLayout<T,Dim,Mesh>::iterator_dv remoteV; // remoteEnd = RLayout.end_rdv();

        // JCC: This "nudge factor" stuff was added when we were experiencing
        // problems with particles getting lost in between PRegions on
        // neighboring nodes. This problem has since been resolved by
        // fixing the way in which PRegion boundaries are computed, so I am
        // commenting this out for now. We can bring it back later if the
        // need arises.

        /*

        // Calculate a 'nudge factor', an amount that can get added to a
        // particle position to determine where it should be located. The nudge
        // factor equals 1/100th the smallest width of the rnodes in each dimension.
        // When we try to find where a particle is located, we check what vnode
        // contains this particle 'nudge region', a box around the particle's pos
        // of the size of the nudge factor.
        T pNudge[Dim];
        for (d=0; d < Dim; ++d) {
            // initialize to the first rnode's width
            T minval = (*(RLayout.begin_iv())).second->getDomain()[d].length();

            // check the local rnodes
            for (localV = RLayout.begin_iv(); localV != localEnd; ++localV) {
                T checkval = (*localV).second->getDomain()[d].length();
                if (checkval < minval)
                    minval = checkval;
            }

            // check the remote rnodes
            for (remoteV = RLayout.begin_rdv(); remoteV != remoteEnd; ++remoteV) {
                T checkval = (*remoteV).second->getDomain()[d].length();
                if (checkval < minval)
                    minval = checkval;
            }

            // now rescale the minval, and save it
            pNudge[d] = 0.00001 * minval;
        }

        */

        // An NDRegion object used to store a particle position.
        NDRegion<T,Dim> pLoc;

        // get new message tag for particle exchange with empty domains

        if (!getEmptyNode(myN))
        {

            // Particles are swapped in multiple passes, one for each dimension.
            // The tasks completed here for each dimension are the following:
            // 1. For each local Vnode, find the remote Vnodes which exist along
            // same axis as the current axis (i.e. all Vnodes along the x-axis).
            // 2. From this list, determine which nodes we send messages to.
            // 3. Go through all the particles, finding those which have moved to
            // an off-processor vnode, and store index in an array for that node
            // 4. Send off the particles to the nodes (if no particles are
            // going to a node, send them a message with 0 in it)
            // 5. Delete the send particles from our local list
            // 6. Receive particles sent to us by other nodes (some messages may
            // say that we're receiving 0 particles from that node).

            // Initialize NDRegion with a position inside the first Vnode.
            // We can skip dim 0, since it will be filled below.
            for (d = 1; d < Dim; ++d)
            {
                T first = (*(RLayout.begin_iv())).second->getDomain()[d].first();
                T last = (*(RLayout.begin_iv())).second->getDomain()[d].last();
                T mid = first + 0.5 * (last - first);
                pLoc[d] = PRegion<T>(mid, mid);
            }

            for (d = 0; d < Dim; ++d)
            {

                // get new message tag for particle exchange along this dimension

                // we only need to do the rest if there are other nodes in this dim
                if (NeighborNodes[d] > 0)
                {
                    // create new messages to send to our neighbors
                    for (i = 0; i < N; i++)
                        if (SwapNodeList[d][i])
                            SwapMsgList[i] = new Message;

                    // Go through the particles and find those moving in the current dir.
                    // When one is found, copy it into outgoing message and delete it.
                    for (ip=0; ip<LocalNum; ++ip)
                    {
                        if (!bool(canSwap[ip])) continue; // skip if can't swap
                        // get the position of particle ip, and find the closest grid pnt
                        // for just the dimensions 0 ... d
                        for (j = 0; j <= d; j++)
                            pLoc[j] = PRegion<T>(PData.R[ip][j], PData.R[ip][j]);

                        // first check local domains (in this dimension)
                        bool foundit = false;
                        // JCC: int nudged = 0;
                        while (!foundit)
                        {
                            for (localV = RLayout.begin_iv();
                                    localV != localEnd && !foundit; ++localV)
                            {
                                foundit= (((*localV).second)->getDomain())[d].touches(pLoc[d]);
                            }

                            // if not found, it might be remote
                            if (!foundit)
                            {
                                // see which Vnode this position is in
                                typename RegionLayout<T,Dim,Mesh>::touch_range_dv touchingVN =
                                    RLayout.touch_range_rdv(pLoc);

                                // make sure we have a vnode to send it to; an empty
                                // touch range means the particle left the global domain
                                if (touchingVN.first == touchingVN.second)
                                {
                                    // JCC: if (nudged >= Dim) {
                                    ERRORMSG("Local particle " << ip << " with ID=");
                                    ERRORMSG(PData.ID[ip] << " at ");
                                    ERRORMSG(PData.R[ip] << " is outside of global domain ");
                                    ERRORMSG(RLayout.getDomain() << endl);
                                    ERRORMSG("This occurred when searching for point " << pLoc);
                                    ERRORMSG(" in RegionLayout = " << RLayout << endl);
                                    Ippl::abort();
                                }
                                else
                                {
                                    // the node has been found - add index to put list
                                    unsigned node = (*(touchingVN.first)).second->getNode();
                                    PAssert_EQ(SwapNodeList[d][node], true);
                                    PutList[node].push_back(ip);

                                    // .. and then add to DestroyList
                                    PData.destroy(1, ip);

                                    // indicate we found it to quit this check
                                    foundit = true;
                                }
                            }
                        }
                    }

                    // send the particles to their destination nodes
                    for (i = 0; i < N; i++)
                    {
                        if (SwapNodeList[d][i])
                        {
                            // put data for particles on this put list into message
                            PData.putMessage( *(SwapMsgList[i]), PutList[i] );

                            // add a final 'zero' number of particles to indicate the end
                            PData.putMessage(*(SwapMsgList[i]), (size_t) 0, (size_t) 0);

                            // send the message
                            //Inform dbgmsg("SpatialLayout", INFORM_ALL_NODES);
                            //dbgmsg << "Swapping "<<PutList[i].size() << " particles to node ";
                            //dbgmsg << i<<" with tag " << tag << " (" << 'x' + d << ")" << endl;
                            //dbgmsg << " ... msg = " << *(SwapMsgList[i]) << endl;
                            int node = i;
                            Ippl::Comm->send(SwapMsgList[i], node, tag);

                            // clear the list
                            PutList[i].erase(PutList[i].begin(), PutList[i].end());
                        }
                    }

                    LocalNum -= PData.getDestroyNum(); // update local num
                    ADDIPPLSTAT(incParticlesSwapped, PData.getDestroyNum());
                    PData.performDestroy();

                    // receive particles from neighbor nodes, and add them to our list
                    unsigned sendnum = NeighborNodes[d];
                    while (sendnum-- > 0)
                    {
                        // NOTE(review): 'node' is declared on a line not visible in
                        // this chunk (presumably COMM_ANY_NODE) — confirm.
                        Message *recmsg = Ippl::Comm->receive_block(node, tag);
                        size_t recvd;
                        while ((recvd = PData.getMessage(*recmsg)) > 0)
                            LocalNum += recvd;
                        delete recmsg;
                    }
                } // end if (NeighborNodes[d] > 0)

                if (d == 0)
                {
                    // receive messages from any empty nodes (they only send,
                    // and only during the first dimension's pass)
                    for (i = 0; i < N; ++i)
                    {
                        if (getEmptyNode(i))
                        {
                            int node = i;
                            Message *recmsg = Ippl::Comm->receive_block(node, etag);
                            size_t recvd;
                            while ((recvd = PData.getMessage(*recmsg)) > 0)
                                LocalNum += recvd;
                            delete recmsg;
                        }
                    }
                }

            } // end for (d=0; d<Dim; ++d)

        }
        else // empty node sends, but does not receive
        {
            // create new messages to send to our neighbors along dim 0
            for (i = 0; i < N; i++)
                if (SwapNodeList[0][i])
                    SwapMsgList[i] = new Message;

            // Go through the particles and find those moving to other nodes.
            // When one is found, copy it into outgoing message and delete it.
            for (ip=0; ip<LocalNum; ++ip)
            {
                if (!bool(canSwap[ip])) continue; // skip if can't swap
                // get the position of particle ip, and find the closest grid pnt
                for (j = 0; j < Dim; j++)
                    pLoc[j] = PRegion<T>(PData.R[ip][j], PData.R[ip][j]);

                // see which remote Vnode this position is in
                typename RegionLayout<T,Dim,Mesh>::touch_range_dv touchingVN =
                    RLayout.touch_range_rdv(pLoc);

                // make sure we have a vnode to send it to
                if (touchingVN.first == touchingVN.second)
                {
                    ERRORMSG("Local particle " << ip << " with ID=");
                    ERRORMSG(PData.ID[ip] << " at ");
                    ERRORMSG(PData.R[ip] << " is outside of global domain ");
                    ERRORMSG(RLayout.getDomain() << endl);
                    ERRORMSG("This occurred when searching for point " << pLoc);
                    ERRORMSG(" in RegionLayout = " << RLayout << endl);
                    Ippl::abort();
                }
                else
                {
                    // the node has been found - add index to put list
                    unsigned node = (*(touchingVN.first)).second->getNode();
                    PAssert_EQ(SwapNodeList[0][node], true);
                    PutList[node].push_back(ip);

                    // .. and then add to DestroyList
                    PData.destroy(1, ip);
                }
            }

            // send the particles to their destination nodes
            for (i = 0; i < N; i++)
            {
                if (SwapNodeList[0][i])
                {
                    // put data for particles on this put list into message
                    PData.putMessage( *(SwapMsgList[i]), PutList[i] );

                    // add a final 'zero' number of particles to indicate the end
                    PData.putMessage(*(SwapMsgList[i]), (size_t) 0, (size_t) 0);

                    // send the message
                    int node = i;
                    Ippl::Comm->send(SwapMsgList[i], node, etag);

                    // clear the list
                    PutList[i].erase(PutList[i].begin(), PutList[i].end());
                }
            }

            LocalNum -= PData.getDestroyNum(); // update local num
            ADDIPPLSTAT(incParticlesSwapped, PData.getDestroyNum());
            PData.performDestroy();

        }

        // return how many particles we have now
        return LocalNum;
    }
1068
1069
1070
1071
1072/*
1073 * Newer (cleaner) version of swap particles that uses less bandwidth
1074 * and drastically lowers message counts for real cases.
1075 */
1076 template < class PB >
1077 size_t new_swap_particles(size_t LocalNum, PB& PData)
1078 {
1080 static int sent = 0;
1081
1082 unsigned N = Ippl::getNodes();
1083 unsigned myN = Ippl::myNode();
1084
1085 typename RegionLayout<T,Dim,Mesh>::iterator_iv localV, localEnd = RLayout.end_iv();
1087
1088 std::vector<int> msgsend(N, 0);
1089 std::vector<int> msgrecv(N, 0);
1090
1091 NDRegion<T,Dim> pLoc;
1092
1093 std::multimap<unsigned, unsigned> p2n; //<node ID, particle ID>
1094
1095 int particlesLeft = LocalNum;
1096 bool responsibleNodeNotFound = false;
1097 for (unsigned int ip=0; ip<LocalNum; ++ip)
1098 {
1099 for (unsigned int j = 0; j < Dim; j++)
1100 pLoc[j] = PRegion<T>(PData.R[ip][j], PData.R[ip][j]);
1101
1102 unsigned destination = myN;
1103 bool found = false;
1104 for (localV = RLayout.begin_iv(); localV != localEnd && !found; ++localV)
1105 {
1106 if ((((*localV).second)->getDomain()).touches(pLoc))
1107 found = true; // particle is local and doesn't need to be sent anywhere
1108 }
1109
1110 if (found)
1111 continue;
1112
1113 typename RegionLayout<T,Dim,Mesh>::touch_range_dv touchingVN = RLayout.touch_range_rdv(pLoc);
1114
1115 //external location
1116 if (touchingVN.first == touchingVN.second) {
1117 responsibleNodeNotFound = true;
1118 break;
1119 }
1120 destination = (*(touchingVN.first)).second->getNode();
1121
1122 msgsend[destination] = 1;
1123
1124 p2n.insert(std::pair<unsigned, unsigned>(destination, ip));
1125 sent++;
1126 particlesLeft--;
1127 }
1128
1129 allreduce(&responsibleNodeNotFound,
1130 1,
1131 std::logical_or<bool>());
1132
1133 if (responsibleNodeNotFound) {
1134 throw IpplException("ParticleSpatialLayout::new_swap_particles",
1135 "could not find node responsible for particle");
1136 }
1137
1138 //reduce message count so every node knows how many messages to receive
1139 allreduce(msgsend.data(), msgrecv.data(), N, std::plus<int>());
1140
1142
1143 typename std::multimap<unsigned, unsigned>::iterator i = p2n.begin();
1144
1145 std::unique_ptr<Format> format(PData.getFormat());
1146
1147
1148 std::vector<MPI_Request> requests;
1149 std::vector<std::shared_ptr<MsgBuffer> > buffers;
1150
1151 while (i!=p2n.end())
1152 {
1153 unsigned cur_destination = i->first;
1154
1155 std::shared_ptr<MsgBuffer> msgbuf(new MsgBuffer(format.get(), p2n.count(i->first)));
1156
1157 for (; i!=p2n.end() && i->first == cur_destination; ++i)
1158 {
1159 Message msg;
1160 PData.putMessage(msg, i->second);
1161 PData.destroy(1, i->second);
1162 msgbuf->add(&msg);
1163 }
1164
1165 MPI_Request request = Ippl::Comm->raw_isend( msgbuf->getBuffer(), msgbuf->getSize(), cur_destination, tag);
1166
1167 //remember request and buffer so we can delete them later
1168 requests.push_back(request);
1169 buffers.push_back(msgbuf);
1170 }
1171
1172 LocalNum -= PData.getDestroyNum(); // update local num
1173 PData.performDestroy();
1174
1175 //receive new particles
1176 for (int k = 0; k<msgrecv[myN]; ++k)
1177 {
1178 int node = Communicate::COMM_ANY_NODE;
1179 char *buffer = 0;
1180 int bufsize = Ippl::Comm->raw_probe_receive(buffer, node, tag);
1181 MsgBuffer recvbuf(format.get(), buffer, bufsize);
1182
1183 Message *msg = recvbuf.get();
1184 while (msg != 0)
1185 {
1186 LocalNum += PData.getSingleMessage(*msg);
1187 delete msg;
1188 msg = recvbuf.get();
1189 }
1190
1191
1192 }
1193
1194 //wait for communication to finish and clean up buffers
1195 MPI_Request* requests_ptr = requests.empty()? static_cast<MPI_Request*>(0): &(requests[0]);
1196 MPI_Waitall(requests.size(), requests_ptr, MPI_STATUSES_IGNORE);
1197
1198 return LocalNum;
1199 }
1200
1201 template < class PB >
1202 size_t new_swap_particles(size_t LocalNum, PB& PData,
1203 const ParticleAttrib<char>& canSwap)
1204 {
1206 static int sent = 0;
1207
1208 unsigned N = Ippl::getNodes();
1209 unsigned myN = Ippl::myNode();
1210
1211 typename RegionLayout<T,Dim,Mesh>::iterator_iv localV, localEnd = RLayout.end_iv();
1213
1214 std::vector<int> msgsend(N, 0);
1215 std::vector<int> msgrecv(N, 0);
1216
1217 NDRegion<T,Dim> pLoc;
1218
1219 std::multimap<unsigned, unsigned> p2n; //<node ID, particle ID>
1220
1221 int particlesLeft = LocalNum;
1222 bool responsibleNodeNotFound = false;
1223 for (unsigned int ip=0; ip<LocalNum; ++ip)
1224 {
1225 if (!bool(canSwap[ip]))//skip if it can't be swapped
1226 continue;
1227
1228 for (unsigned int j = 0; j < Dim; j++)
1229 pLoc[j] = PRegion<T>(PData.R[ip][j], PData.R[ip][j]);
1230
1231 unsigned destination = myN;
1232 bool found = false;
1233 for (localV = RLayout.begin_iv(); localV != localEnd && !found; ++localV)
1234 {
1235 if ((((*localV).second)->getDomain()).touches(pLoc))
1236 found = true; // particle is local and doesn't need to be sent anywhere
1237 }
1238
1239 if (found)
1240 continue;
1241
1242 typename RegionLayout<T,Dim,Mesh>::touch_range_dv touchingVN = RLayout.touch_range_rdv(pLoc);
1243
1244 //external location
1245 if (touchingVN.first == touchingVN.second) {
1246 responsibleNodeNotFound = true;
1247 break;
1248 }
1249 destination = (*(touchingVN.first)).second->getNode();
1250
1251 msgsend[destination] = 1;
1252
1253 p2n.insert(std::pair<unsigned, unsigned>(destination, ip));
1254 sent++;
1255 particlesLeft--;
1256 }
1257
1258 allreduce(&responsibleNodeNotFound,
1259 1,
1260 std::logical_or<bool>());
1261
1262 if (responsibleNodeNotFound) {
1263 throw IpplException("ParticleSpatialLayout::new_swap_particles",
1264 "could not find node responsible for particle");
1265 }
1266
1267 //reduce message count so every node knows how many messages to receive
1268 allreduce(msgsend.data(), msgrecv.data(), N, std::plus<int>());
1269
1271
1272 typename std::multimap<unsigned, unsigned>::iterator i = p2n.begin();
1273
1274 std::unique_ptr<Format> format(PData.getFormat());
1275
1276 std::vector<MPI_Request> requests;
1277 std::vector<std::shared_ptr<MsgBuffer> > buffers;
1278
1279 while (i!=p2n.end())
1280 {
1281 unsigned cur_destination = i->first;
1282
1283 std::shared_ptr<MsgBuffer> msgbuf(new MsgBuffer(format.get(), p2n.count(i->first)));
1284
1285 for (; i!=p2n.end() && i->first == cur_destination; ++i)
1286 {
1287 Message msg;
1288 PData.putMessage(msg, i->second);
1289 PData.destroy(1, i->second);
1290 msgbuf->add(&msg);
1291 }
1292
1293 MPI_Request request = Ippl::Comm->raw_isend( msgbuf->getBuffer(), msgbuf->getSize(), cur_destination, tag);
1294
1295 //remember request and buffer so we can delete them later
1296 requests.push_back(request);
1297 buffers.push_back(msgbuf);
1298 }
1299
1300 LocalNum -= PData.getDestroyNum(); // update local num
1301 PData.performDestroy();
1302
1303 //receive new particles
1304 for (int k = 0; k<msgrecv[myN]; ++k)
1305 {
1306 int node = Communicate::COMM_ANY_NODE;
1307 char *buffer = 0;
1308 int bufsize = Ippl::Comm->raw_probe_receive(buffer, node, tag);
1309 MsgBuffer recvbuf(format.get(), buffer, bufsize);
1310
1311 Message *msg = recvbuf.get();
1312 while (msg != 0)
1313 {
1314 LocalNum += PData.getSingleMessage(*msg);
1315 delete msg;
1316 msg = recvbuf.get();
1317 }
1318 }
1319
1320 //wait for communication to finish and clean up buffers
1321 MPI_Request* requests_ptr = requests.empty()? static_cast<MPI_Request*>(0): &(requests[0]);
1322 MPI_Waitall(requests.size(), requests_ptr, 0);
1323
1324 return LocalNum;
1325 }
1326
1327};
1328
1330
1331#endif // PARTICLE_SPATIAL_LAYOUT_H
PartBunchBase< T, Dim >::ConstIterator end(PartBunchBase< T, Dim > const &bunch)
PartBunchBase< T, Dim >::ConstIterator begin(PartBunchBase< T, Dim > const &bunch)
const unsigned Dim
void allreduce(const T *input, T *output, int count, Op op)
Definition: GlobalComm.hpp:510
const int COMM_ANY_NODE
Definition: Communicate.h:40
#define P_SPATIAL_TRANSFER_TAG
Definition: Tags.h:82
#define P_SPATIAL_RANGE_TAG
Definition: Tags.h:84
#define P_LAYOUT_CYCLE
Definition: Tags.h:86
#define P_SPATIAL_RETURN_TAG
Definition: Tags.h:81
std::ostream & operator<<(std::ostream &, const ParticleSpatialLayout< T, Dim, Mesh, CachingPolicy > &)
Inform & endl(Inform &inf)
Definition: Inform.cpp:42
#define INFORM_ALL_NODES
Definition: Inform.h:39
#define PAssert_LT(a, b)
Definition: PAssert.h:106
#define PAssert_EQ(a, b)
Definition: PAssert.h:104
#define ADDIPPLSTAT(stat, amount)
Definition: IpplStats.h:237
#define ERRORMSG(msg)
Definition: IpplInfo.h:350
std::string::iterator iterator
Definition: MSLang.h:16
Message & getMessage(Message &m)
Definition: Vektor.h:180
Message & putMessage(Message &m) const
Definition: Vektor.h:174
Definition: Index.h:237
Definition: Mesh.h:35
virtual MPI_Request raw_isend(void *, int, int, int)
Definition: Communicate.h:196
bool send(Message *, int node, int tag, bool delmsg=true)
virtual int raw_probe_receive(char *&, int &, int &)
Definition: Communicate.h:208
virtual int broadcast_others(Message *, int, bool delmsg=true)
Message * receive_block(int &node, int &tag)
void barrier(void)
Message & put(const T &val)
Definition: Message.h:406
bool empty() const
Definition: Message.h:300
Message & get(const T &cval)
Definition: Message.h:476
Message * get()
Definition: MsgBuffer.cpp:71
int next_tag(int t, int s=1000)
Definition: TagMaker.h:39
int getNodeCount(unsigned i) const
void rebuild_layout(size_t haveLocal, PB &PData)
ParticleAttrib< Index_t > ParticleIndex_t
ParticleAttrib< SingleParticlePos_t > ParticlePos_t
const RegionLayout< T, Dim, Mesh > & getLayout() const
bool getEmptyNode(unsigned i) const
virtual void Repartition(UserList *)
RegionLayout< T, Dim, Mesh > & getLayout()
virtual void notifyUserOfDelete(UserList *)
ParticleLayout< T, Dim >::SingleParticlePos_t SingleParticlePos_t
RegionLayout< T, Dim, Mesh > RLayout
void update(IpplParticleBase< ParticleSpatialLayout< T, Dim, Mesh, CachingPolicy > > &p, const ParticleAttrib< char > *canSwap=0)
size_t new_swap_particles(size_t LocalNum, PB &PData, const ParticleAttrib< char > &canSwap)
ParticleLayout< T, Dim >::Index_t Index_t
size_t new_swap_particles(size_t LocalNum, PB &PData)
std::vector< size_t > * PutList
size_t swap_particles(size_t LocalNum, PB &PData)
size_t short_swap_particles(size_t LocalNum, PB &PData)
FieldLayout< Dim > & getFieldLayout()
RegionLayout< T, Dim, Mesh > RegionLayout_t
size_t swap_particles(size_t LocalNum, PB &PData, const ParticleAttrib< char > &canSwap)
unsigned Index_t
ac_id_vnodes::iterator iterator_iv
Definition: RegionLayout.h:66
std::pair< touch_iterator_dv, touch_iterator_dv > touch_range_dv
Definition: RegionLayout.h:71
Definition: Inform.h:42
static void abort(const char *=0)
Definition: IpplInfo.cpp:616
static int getNodes()
Definition: IpplInfo.cpp:670
static int myNode()
Definition: IpplInfo.cpp:691
static Communicate * Comm
Definition: IpplInfo.h:84