ConejoBalancer.h
// -*- C++ -*-
/***************************************************************************
 *
 * The IPPL Framework
 *
 *
 * Visit http://people.web.psi.ch/adelmann/ for more details
 *
 ***************************************************************************/

// Class ConejoBalancer

#ifndef CONEJO_BALANCER_H
#define CONEJO_BALANCER_H

//
// Description:
// A load balancer designed for the Conejo code.
// It distributes the vnodes so that each of the materials is
// simultaneously load balanced.
//

// include files
#include "Field/BareField.h"

#include <vector>

// forward declarations
class MultiBalancer;
template<unsigned int D> class FieldLayout;
template<unsigned int D> class NDIndex;

/*

 ConejoBalancer is an interface class for MultiBalancer.

 **************************************************
 GENERAL DESCRIPTION
 **************************************************

 ConejoBalancer does the following things:

 1. Inputs a series of BareFields with weights for each
    grid location for each material.

 2. Sums the weights to a single value per vnode.

 3. Uses MultiBalancer to find the new distribution of vnodes.

 4. Rebalances one or more FieldLayouts for that new distribution.

 */
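
/*
 A minimal usage sketch (illustrative only: the surrounding Field setup is
 assumed, and "weights1", "weights2", and "layout" stand for a user's own
 BareField and FieldLayout objects):

   ConejoBalancer balancer;
   balancer.addMaterial(weights1, false); // weights for the first material
   balancer.addMaterial(weights2, true);  // compressed vnodes count as one element
   balancer.redistribute(layout);         // repartition the FieldLayout in place

 Each addMaterial() call records one material's weights; redistribute()
 then moves vnodes between processors so that every material is balanced
 simultaneously.
 */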

class ConejoBalancer
{
public:

  // Construct the ConejoBalancer.
  // This needs no arguments since it gets its information
  // from member functions below.
  ConejoBalancer();

  // Destroy the balancer.
  ~ConejoBalancer();

  // Add a new set of weights for a new material.
  // If dropCompressed is true, compressed vnodes in weights
  // are considered to have the weight of just one element.
  //mwerks template<class T, unsigned int D>
  //mwerks void addMaterial(BareField<T,D>& weights, bool dropCompressed=false);

  //
  // ConejoBalancer::addMaterial
  //
  // Input:
  //   BareField with weights for this material.
  //   bool dropCompressed, which is true if compressed vnodes in weights
  //   should have the weight of a single element instead of that weight
  //   multiplied by the number of elements.
  // Output: The state of the ConejoBalancer includes the new weights.
  //
  // Extracting the weights goes through several phases:
  // 1. If this is the first time this has been called,
  //    initialize the MultiBalancer.
  // 2. Calculate the weights for the local part of the BareField.
  // 3a. If this is not processor zero, send those weights to processor zero.
  // 3b. If it is processor zero, collect the weights.
  //

  template<class T, unsigned int D>
  void addMaterial(BareField<T,D>& weights, bool dropCompressed)
  {
    // Initialize (or check consistency).
    setupVnodes(weights.size_if(), weights.getLayout().size_rdv());

    // A container to hold the reduced weights for the local vnodes.
    std::vector<double> vnodeWeights;
    vnodeWeights.reserve(m_localVnodes);

    // Get the local weights.
    reduceLocalWeights(weights, vnodeWeights, dropCompressed);

    // Get a message tag.
    int tag = Ippl::Comm->next_tag(F_CONEJO_BALANCER_TAG, F_TAG_CYCLE);

    // Everybody sends their data to processor zero.
    sendWeights(vnodeWeights, tag);

    // If we are processor zero, process messages.
    if ( m_myProc == 0 )
      receiveWeights(vnodeWeights, tag);
  }
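
  // For reference, the per-vnode reduction that reduceLocalWeights performs
  // amounts to the following (a sketch only, under the assumption that a
  // vnode's elements can be iterated; the actual implementation lives
  // elsewhere):
  //
  //   double w = 0;
  //   if ( dropCompressed && vnodeIsCompressed )
  //     w = weightOfFirstElement;       // one element stands in for all
  //   else
  //     w = sumOfAllElementWeights;     // every element contributes
  //   vnodeWeights.push_back(w);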

  // Redistribute a FieldLayout using the stored weights.
  //mwerks template<unsigned int D>
  //mwerks void redistribute(FieldLayout<D>& layout);

  //
  // ConejoBalancer::redistribute
  //
  // Redistribute a FieldLayout using the stored weights.
  //

  template<unsigned int D>
  void redistribute(FieldLayout<D>& layout)
  {
    // Get message tags.
    int bcasttag = Ippl::Comm->next_tag(F_CB_BCAST_TAG, F_TAG_CYCLE);
    int domaintag = Ippl::Comm->next_tag(F_CB_DOMAIN_TAG, F_TAG_CYCLE);

    // On proc 0, figure things out and send them.
    if ( m_myProc == 0 )
    {
      // Tell the MultiBalancer to figure out the new distribution.
      m_balancer->distribute();

      // Broadcast the vnode ids that each processor will have to send.
      broadcastVnodesToSend(bcasttag);
    }

    // Every processor receives the ids of the vnodes it will send.
    std::vector<int> vnodeDestinations;
    receiveVnodesToSend(vnodeDestinations, bcasttag);

    // Send the domains for the vnodes to their new homes.
    sendVnodeDomains(vnodeDestinations, layout, domaintag);

    // Receive the domains for the vnodes that will live here.
    std::vector< NDIndex<D> > vnodeDomains;
    receiveVnodeDomains(vnodeDomains, domaintag);

    // Redistribute the FieldLayout using the new local domains
    // (null pointer if there are none).
    NDIndex<D> *p = vnodeDomains.empty() ? 0 : &*(vnodeDomains.begin());
    layout.Repartition( p, p + vnodeDomains.size() );
  }
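
  // To summarize the message flow above: processor 0 alone runs the
  // balancing algorithm and broadcasts each vnode's destination; every
  // processor then ships the NDIndex domains of its outgoing vnodes
  // point-to-point, collects the domains it will own, and finally the
  // Repartition() call moves the Field data itself.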

private:

  // Keep a pointer to the object that encapsulates the algorithm.
  MultiBalancer *m_balancer;

  // Remember the number of local vnodes.
  int m_localVnodes;

  // Remember the total number of vnodes.
  int m_totalVnodes;

  // Remember the number of processors.
  int m_procs;

  // Remember my processor.
  int m_myProc;

  // Record the number of vnodes on each processor.
  std::vector<int> m_vnodeCounts;

  // Support functions for internal use.
  void sendWeights(std::vector<double>& vnodeWeights, int tag);
  void receiveWeights(std::vector<double>& vnodeWeights, int tag);
  void setupVnodes(int localVnodes, int remoteVnodes);
  void recordVnodeCount(int count, int proc);
  void broadcastVnodesToSend(int tag);
  void receiveVnodesToSend(std::vector<int>& vnodeDestinations, int tag);

  //mwerks template<unsigned int D>
  //mwerks void sendVnodeDomains(vector<int>& vnodeDests,
  //mwerks                       FieldLayout<D>& layout,
  //mwerks                       int tag);
  //
  // ConejoBalancer::sendVnodeDomains
  //
  // Send to all the other processors the domains for the vnodes
  // that will be sent.
  // Here just the NDIndexes are being sent.
  // The contents of the Fields on those domains will be sent later.
  //

  template<unsigned int D>
  void sendVnodeDomains(std::vector<int>& vnodeDests,
                        FieldLayout<D>& layout,
                        int tag)
  {
    // A buffer for writing down the domains to be sent.
    std::vector< NDIndex<D> > send;

    // Loop over processors, sending a message to each.
    for ( int proc = 0; proc < m_procs; ++proc )
    {
      // Loop over the local vnodes, figuring out where each should go.
      std::vector<int>::iterator vp = vnodeDests.begin();
      typename FieldLayout<D>::iterator_iv fp = layout.begin_iv();
      for ( ; vp != vnodeDests.end(); ++vp, ++fp)
      {
        // If this vnode is going to processor proc,
        if ( *vp == proc )
          // record the domain for sending.
          send.push_back( (*fp).second->getDomain() );
      }

      // Build the message to be sent.
      Message *msg = new Message;

      // Add the domains to the message (null pointer if there are none).
      NDIndex<D> *p = send.empty() ? 0 : &*(send.begin());
      msg->put(send.size());
      putMessage(*msg, p, p + send.size() );

      // Send the message.
      Ippl::Comm->send(msg, proc, tag);

      // Clear the send container.
      send.clear();
    }
  }
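
  // Note on the protocol: a message is sent to every processor, even when
  // there are no domains for it, so that receiveVnodeDomains() below can
  // expect exactly one message from each processor.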

  //mwerks template<unsigned int D>
  //mwerks void receiveVnodeDomains(vector< NDIndex<D> >& vnodeDomains, int tag);
  //
  // ConejoBalancer::receiveVnodeDomains
  //
  // Each processor receives from all the other processors
  // the domains it will have after the redistribution.
  //
  template<unsigned int D>
  void receiveVnodeDomains(std::vector< NDIndex<D> >& vnodeDomains,
                           int tag)
  {
    // Loop over all the processors, receiving a message from each.
    for (int proc = 0; proc < m_procs; ++proc)
    {
      // Receive a message from any processor.
      int any_proc = -1;
      Message *msg = Ippl::Comm->receive_block(any_proc, tag);

      // Get the number of NDIndexes in this message.
      long int s;
      msg->get(s);

      // Make sure the size isn't negative.
      PAssert_GE(s, 0);

      // If there are any there, unpack them.
      if ( s != 0 )
      {
        // Add room to the container.
        vnodeDomains.resize( vnodeDomains.size() + s );

        // Unpack onto the end of the container.
        getMessage_iter(*msg, vnodeDomains.end() - s );
      }

      // Delete the message.
      delete msg;
    }
  }
};

#endif // CONEJO_BALANCER_H

/***************************************************************************
 * $RCSfile: ConejoBalancer.h,v $   $Author: adelmann $
 * $Revision: 1.1.1.1 $   $Date: 2003/01/23 07:40:27 $
 * IPPL_VERSION_ID: $Id: ConejoBalancer.h,v 1.1.1.1 2003/01/23 07:40:27 adelmann Exp $
 ***************************************************************************/