OPAL (Object Oriented Parallel Accelerator Library)  2021.1.99
AssignGeneralBF.hpp
// -*- C++ -*-
/***************************************************************************
 *
 * The IPPL Framework
 *
 * This program was prepared by PSI.
 * All rights in the program are reserved by PSI.
 * Neither PSI nor the author(s)
 * makes any warranty, express or implied, or assumes any liability or
 * responsibility for the use of this software
 *
 * Visit http://www.amas.web.psi.ch/ for more details
 *
 ***************************************************************************/

//
// This file contains the version of assign() that works with
// general BareField = BareField expressions. It will perform
// general communication to exchange data if the LHS and RHS layouts
// do not match.
//

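//
// Illustrative sketch (not part of the original source; the layout and
// field names are hypothetical). A cross-layout BareField = BareField
// assignment like the one below ends up in this routine, which computes
// the overlaps between the two decompositions and exchanges the data:
//
//   Index I(16), J(16);
//   FieldLayout<2> layoutA(I, J, PARALLEL, PARALLEL);
//   FieldLayout<2> layoutB(I, J, SERIAL, PARALLEL);
//   BareField<double,2> a(layoutA), b(layoutB);
//   a = b;   // layouts differ -> general communication path below
//
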
// include files
#include "Field/Assign.h"
#include "Field/AssignDefs.h"
#include "Field/BareField.h"
#include "Field/BrickExpression.h"
#include "Field/IndexedBareField.h"
#include "Field/LField.h"
#include "Message/Communicate.h"
#include "Message/Message.h"
#include "Utility/PAssert.h"
#include "Utility/IpplInfo.h"
#include "Utility/IpplStats.h"

#include "PETE/IpplExpressions.h"

#include <map>
#include <vector>
#include <functional>
#include <utility>
#include <iostream>
#include <typeinfo>

//
// Assign one BareField to another.
// Unlike the above, this works even if the two BareFields are
// on different layouts.
//

template<class T1, unsigned Dim, class RHS, class Op>
void
assign(const BareField<T1,Dim>& clhs, RHS rhsp, Op op, ExprTag<false>)
{

  // debugging output macros. these are only enabled if DEBUG_ASSIGN is
  // defined.
  ASSIGNMSG(Inform msg("NEW assign BF(f)", INFORM_ALL_NODES));
  ASSIGNMSG(msg << "Computing general assignment to BF[" << clhs.getDomain());
  ASSIGNMSG(msg << "] ..." << endl);

  // cast away const here for lhs ... unfortunate but necessary.
  // also, retrieve the BareField from the rhs iterator. We know we can
  // do this since this is the ExprTag<false> specialization, only called if
  // the rhs is also a BareField.
  BareField<T1,Dim>& lhs = (BareField<T1,Dim>&) clhs;
  typedef typename RHS::PETE_Return_t T2;
  const BareField<T2,Dim>& rhs( rhsp.GetBareField() );

  // Build iterators within local fields on the left and right hand side.
  typedef typename LField<T1,Dim>::iterator LFI;
  typedef typename LField<T2,Dim>::iterator RFI;
  T1 lhs_compressed_data;
  T2 rhs_compressed_data;
  LFI lhs_i(lhs_compressed_data);
  RFI rhs_i(rhs_compressed_data);

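  // (Note, not in the original source: an LField iterator constructed from
  // a bare value like this acts as a "compressed" iterator -- it stands in
  // for a block whose elements all hold that single value. This is what
  // lets compressed LFields be packed, shipped, and assigned below without
  // ever expanding their storage.)
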
  // Build an iterator for the local fields on the left and right hand side.
  typename BareField<T1,Dim>::iterator_if lf_i, lf_e = lhs.end_if();
  typename BareField<T2,Dim>::const_iterator_if rf_i, rf_e = rhs.end_if();

  // Get message tag
  int tag = Ippl::Comm->next_tag(F_GEN_ASSIGN_TAG, F_TAG_CYCLE);
  int remaining = 0;  // counter for messages to receive

  // Build a map of the messages we expect to receive.
  typedef std::multimap< NDIndex<Dim> , LField<T1,Dim>* , std::less<NDIndex<Dim> > >
    ac_recv_type;
  ac_recv_type recv_ac;

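  // (Note, not in the original source: recv_ac maps each intersection
  // domain we expect to receive onto the local LField it is destined for.
  // The send loop below fills it in while scanning the lhs; the receive
  // loop at the end of this function looks entries up with find() and
  // retires them with erase().)
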
  // ----------------------------------------
  // First the send loop.
  // Loop over all the local nodes of the right hand side and
  // send data to the remote ones they overlap on the left hand side.
  int nprocs = Ippl::getNodes();
  ASSIGNMSG(msg << "Processing send-loop for " << nprocs << " nodes." << endl);
  if (nprocs > 1) {

    // set up messages to be sent
    Message** mess = new Message*[nprocs];
    bool* recvmsg = new bool[nprocs];   // receive msg from this node?
    int iproc;
    for (iproc=0; iproc<nprocs; ++iproc) {
      mess[iproc] = 0;
      recvmsg[iproc] = false;
    }

    // now loop over LFields of lhs, building receive list
    for (lf_i = lhs.begin_if(); lf_i != lf_e; ++lf_i) {
      // Cache some information about this LField.
      LField<T1,Dim> &lf = *((*lf_i).second);
      const NDIndex<Dim> &la = lf.getAllocated();

      // Find the remote ones that have owned cells that touch this.
      typename FieldLayout<Dim>::touch_range_dv
        range( rhs.getLayout().touch_range_rdv(la) );
      typename FieldLayout<Dim>::touch_iterator_dv rv_i;
      for (rv_i = range.first; rv_i != range.second; ++rv_i) {
        // Save the intersection and the LField it is for.
        // It is a multimap so we can't use operator[].
        NDIndex<Dim> sub = la.intersect((*rv_i).first);
        typedef typename ac_recv_type::value_type value_type;
        recv_ac.insert( value_type(sub,&lf) );

        // Note who will be sending this data
        int rnode = (*rv_i).second->getNode();
        recvmsg[rnode] = true;
      }
    }
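
    // (Hypothetical 1-D illustration, not in the original source: if this
    // lhs LField has allocated domain [0:8] -- its owned cells plus one
    // guard layer -- and a remote rhs vnode owns [8:15], the intersection
    // [8:8] is recorded in recv_ac and that vnode's node is flagged in
    // recvmsg as a sender.)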

    // now loop over LFields of rhs, packing overlaps into proper messages
    for (rf_i = rhs.begin_if(); rf_i != rf_e; ++rf_i) {
      // Cache some information about this local array.
      LField<T2,Dim> &rf = *((*rf_i).second);
      const NDIndex<Dim>& ro = rf.getOwned();

      // Loop over the local ones that have allocated cells that this
      // remote one touches.
      typename FieldLayout<Dim>::touch_range_dv
        range( lhs.getLayout().touch_range_rdv(ro,lhs.getGuardCellSizes()) );
      typename FieldLayout<Dim>::touch_iterator_dv remote_i;
      for (remote_i = range.first; remote_i != range.second; ++remote_i) {
        // Find the intersection.
        NDIndex<Dim> intersection = ro.intersect( (*remote_i).first );

        // Find out who owns this domain
        int rnode = (*remote_i).second->getNode();

        // Construct an iterator for use in sending out the data
        rhs_i = rf.begin(intersection, rhs_compressed_data);
        rhs_i.TryCompress();

        // Put intersection domain and field data into message
        if (mess[rnode] == 0)
          mess[rnode] = new Message;
        intersection.putMessage(*mess[rnode]);
        rhs_i.putMessage(*mess[rnode]);
      }
    }
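
    // (Note, not in the original source: each outgoing message is thus a
    // flat sequence of (intersection domain, field data) records bound for
    // one node, where a record's data part may be a single compressed
    // value when TryCompress() succeeded. The receive loop below recovers
    // the record count from the total message size.)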

    // tally number of messages to receive
    for (iproc=0; iproc<nprocs; ++iproc)
      if (recvmsg[iproc]) ++remaining;
    delete [] recvmsg;

    // send the messages
    for (iproc=0; iproc<nprocs; ++iproc) {
      if (mess[iproc] != 0)
        Ippl::Comm->send(mess[iproc],iproc,tag);
    }
    delete [] mess;

  }

  // ----------------------------------------
  // Handle the local fills.
  // Loop over all the local Fields of the lhs and all the local
  // fields in the rhs.
  // This is an N*N operation, but the expectation is that there won't
  // be TOO many Vnodes on a given processor.
  ASSIGNMSG(msg << "Doing local fills for " << lhs.size_if());
  ASSIGNMSG(msg << " local lhs blocks and ");
  ASSIGNMSG(msg << rhs.size_if() << " local rhs blocks." << endl);

  for (lf_i = lhs.begin_if(); lf_i != lf_e; ++lf_i) {
    // Cache some information about this LField.
    LField<T1,Dim> &lf = *(*lf_i).second;
    const NDIndex<Dim> &lo = lf.getOwned();
    const NDIndex<Dim> &la = lf.getAllocated();

    ASSIGNMSG(msg << "----------------" << endl);
    ASSIGNMSG(msg << "Assigning to local LField with owned = " << lo);
    ASSIGNMSG(msg << ", allocated = " << la << endl);

    // Loop over the ones it touches on the rhs.
    for (rf_i = rhs.begin_if(); rf_i != rf_e; ++rf_i) {
      // Cache some info about this LField.
      LField<T2,Dim> &rf = *(*rf_i).second;
      const NDIndex<Dim> &ro = rf.getOwned();

      // If this rhs block has info we want, then get it.
      if (la.touches(ro)) {
        ASSIGNMSG(msg << "Computing assignment of portion of rhs " << ro);
        ASSIGNMSG(msg << " to lhs " << la << endl);

        // Can we compress the left?
        // End point "contains" works here since ro has unit stride.
        bool c1 = rf.IsCompressed();
        bool c2 = lf.IsCompressed();
        bool c3 = ro.contains(lo);
        ASSIGNMSG(msg << "Checking for possible compressed-assign:");
        ASSIGNMSG(msg << "\n  rf.IsCompressed = " << c1);
        ASSIGNMSG(msg << "\n  lf.IsCompressed = " << c2);
        ASSIGNMSG(msg << "\n  ro.contains(lo) = " << c3);
        ASSIGNMSG(msg << endl);

        // If these are compressed we might not have to do any work.
        if (c1 && c2 && c3) {
          ASSIGNMSG(msg << "LHS, RHS both compressed, and rhs contains lhs, ");
          ASSIGNMSG(msg << "compress." << endl);
          PETE_apply(op,*lf.begin(),*rf.begin());
          ASSIGNMSG(msg << "Now " << *lf.begin() << " == " << *rf.begin());
          ASSIGNMSG(msg << endl);
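          // (Sketch, not in the original source: with Op = OpAssign and an
          // rhs compressed to the single value 2.0, this one PETE_apply
          // sets the lhs LField's compressed value to 2.0; no per-element
          // loop is needed.)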
        } else {
          // Find the intersection.
          NDIndex<Dim> intersection = la.intersect(ro);
          ASSIGNMSG(msg << "Intersection is " << intersection << endl);

          // Build an iterator for the rhs.
          RFI rhs_i2 = rf.begin(intersection);

          // Could we compress that rhs iterator, and if so,
          // are we assigning the whole LField on the left?
          // If both of these are true, we can compress the whole thing.
          // Otherwise, we have to uncompress the LHS and do a full assign.
          if (rhs_i2.CanCompress(*rf.begin(intersection)) &&
              lhs.compressible() && intersection.containsAllPoints(la) &&
              OperatorTraits<Op>::IsAssign) {

            // Compress the whole LField to the value on the right:
            ASSIGNMSG(msg << "LHS BF is compressible, rhs_i2 compressed, ");
            ASSIGNMSG(msg << "intersection contains ");
            ASSIGNMSG(msg << la << ", assignment ==> compress assign.");
            ASSIGNMSG(msg << endl);
            lf.Compress((T1)(*rf.begin(intersection)));
            ASSIGNMSG(msg << "Now " << *lf.begin() << " == ");
            ASSIGNMSG(msg << *rf.begin(intersection) << endl);

          } else {
            // Assigning only part of LField on the left.
            // Must uncompress lhs, if not already uncompressed.
            // If the argument is true, we are not assigning to the whole
            // allocated domain, and thus must fill in the uncompressed
            // storage with the compressed value. If it is false, then
            // we're assigning to the whole allocated domain, so we don't
            // have to fill (it would just all get overwritten in the
            // BrickExpression::apply).
            ASSIGNMSG(msg << "Cannot do compressed assign, so do loop." << endl);
            ASSIGNMSG(msg << "First uncompress LHS LF ..." << endl);
            lf.Uncompress(!intersection.containsAllPoints(la));

            // Get the iterator for it.
            ASSIGNMSG(msg << "Get iterator for LHS ..." << endl);
            LFI lhs_i2 = lf.begin(intersection);

            // And do the assignment.
            ASSIGNMSG(msg << "And do expression evaluation." << endl);
            BrickExpression<Dim,LFI,RFI,Op>(lhs_i2,rhs_i2,op).apply();
          }
        }
      }
    }
  }

  // ----------------------------------------
  // Receive all the messages.
  ASSIGNMSG(msg << "Processing receive-loop for " << nprocs << " nodes." << endl);
  if (nprocs > 1) {

    while (remaining > 0) {
      // Receive the next message.
      int any_node = COMM_ANY_NODE;
      Message *rmess = Ippl::Comm->receive_block(any_node,tag);
      PAssert(rmess != 0);
      --remaining;

      // Determine the number of domains being sent
      int ndoms = rmess->size() / (Dim + 3);
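      // (Note, not in the original source: this arithmetic assumes each
      // (domain, data) record packed by the send loop occupies Dim + 3
      // message items -- presumably Dim for the NDIndex plus three for
      // the iterator payload.)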
      for (int idom=0; idom<ndoms; ++idom) {
        // extract the next domain from the message
        NDIndex<Dim> intersection;
        intersection.getMessage(*rmess);

        // Extract the rhs iterator from it.
        T2 rhs_compressed_data2;
        RFI rhs_i2(rhs_compressed_data2);
        rhs_i2.getMessage(*rmess);

        // Find the LField it is destined for.
        typename ac_recv_type::iterator hit = recv_ac.find( intersection );
        PAssert( hit != recv_ac.end() );

        // Build the lhs brick iterator.
        LField<T1,Dim> &lf = *(*hit).second;
        const NDIndex<Dim> &lo = lf.getOwned();

        // Check and see if we really have to do this.
        if ( !(rhs_i2.IsCompressed() && lf.IsCompressed() &&
               (*rhs_i2 == *lf.begin())) )
        {
          // Yep. gotta do it.
          // Only fill in the data if you have to.
          bool c2 = intersection.containsAllPoints(lo);
          bool c3 = OperatorTraits<Op>::IsAssign;
          lf.Uncompress( !(c2 && c3) );
          LFI lhs_i2 = lf.begin(intersection);

          // Do the assignment.
          BrickExpression<Dim,LFI,RFI,Op>(lhs_i2,rhs_i2,op).apply();
        }

        // Take that entry out of the receive list.
        recv_ac.erase( hit );
      }
      delete rmess;
    }

  }

  // Update the guard cells.
  ASSIGNMSG(msg << "Filling GC's at end if necessary ..." << endl);
  lhs.setDirtyFlag();
  lhs.fillGuardCellsIfNotDirty();

  // Compress the LHS.
  ASSIGNMSG(msg << "Trying to compress BareField at end ..." << endl);
  lhs.Compress();

  //INCIPPLSTAT(incExpressions);
  //INCIPPLSTAT(incBFEqualsBF);
}

/***************************************************************************
 * $RCSfile: AssignGeneralBF.cpp,v $   $Author: adelmann $
 * $Revision: 1.1.1.1 $   $Date: 2003/01/23 07:40:26 $
 * IPPL_VERSION_ID: $Id: AssignGeneralBF.cpp,v 1.1.1.1 2003/01/23 07:40:26 adelmann Exp $
 ***************************************************************************/