src/FieldLayout/ConejoBalancer.h

// -*- C++ -*-
/***************************************************************************
 *
 * The IPPL Framework
 *
 * Visit http://people.web.psi.ch/adelmann/ for more details
 *
 ***************************************************************************/

// Class ConejoBalancer

#ifndef CONEJO_BALANCER_H
#define CONEJO_BALANCER_H

//
// Description:
// A load balancer designed for the Conejo code.
// It distributes the vnodes so that each of the materials is
// simultaneously load balanced.
//

// include files
#include "Field/BareField.h"
#include "FieldLayout/MultiBalancer.h"

#ifdef IPPL_STDSTL
#include <vector>
using std::vector;
#else
#include <vector.h>
#endif

// forward declarations
class MultiBalancer;
template<unsigned int D> class FieldLayout;
template<unsigned int D> class NDIndex;

/*

  ConejoBalancer is an interface class for MultiBalancer.

  **************************************************
  GENERAL DESCRIPTION
  **************************************************

  ConejoBalancer does the following things:

  1. Inputs a series of BareFields with weights for each
     grid location for each material.

  2. Sums the weights to a single value per vnode.

  3. Uses MultiBalancer to find the new distribution of vnodes.

  4. Rebalances one or more FieldLayouts for that new distribution.

 */

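/*
  Typical usage, as a minimal sketch (the weight fields and the layout below
  are hypothetical names, not part of this header):

    ConejoBalancer balancer;
    balancer.addMaterial(weightsA, false);   // one call per material
    balancer.addMaterial(weightsB, true);    // dropCompressed = true
    balancer.redistribute(layout);           // repartition the FieldLayout
 */
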
class ConejoBalancer
{
public:

  // Construct the ConejoBalancer.
  // This needs no arguments since it gets its information
  // from member functions below.
  ConejoBalancer();

  // Destroy the balancer.
  ~ConejoBalancer();

  // Add a new set of weights for a new material.
  // If dropCompressed is true, compressed vnodes in weights
  // are considered to have the weight of just one element.
  //mwerks  template<class T, unsigned int D>
  //mwerks  void addMaterial(BareField<T,D>& weights, bool dropCompressed=false);

  //
  // ConejoBalancer::addMaterial
  //
  // Input:
  //   BareField with weights for this material.
  //   bool dropCompressed, which is true if compressed vnodes in weights
  //        should have the weight of a single element instead of that weight
  //        multiplied by the number of elements.
  // Output: The state of the ConejoBalancer includes the new weights.
  //
  // Extracting the weights goes through several phases:
  // 1. If this is the first time this has been called,
  //    initialize the MultiBalancer.
  // 2. Calculate the weights for the local part of the BareField.
  // 3a. If this is not processor zero, send those weights to processor zero.
  // 3b. If it is processor zero, collect the weights.
  //

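  // A worked example of dropCompressed (the numbers are illustrative only,
  // not taken from this file): a compressed vnode covering 100 elements that
  // all hold weight 0.5 contributes 0.5 to its vnode total when
  // dropCompressed is true, and 100 * 0.5 = 50.0 when it is false.
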
  template<class T, unsigned int D>
  void addMaterial(BareField<T,D>& weights, bool dropCompressed)
  {
    // Initialize (or check consistency).
    setupVnodes(weights.size_if(), weights.getLayout().size_rdv());

    // A container to hold the reduced weights for the local vnodes.
    vector<double> vnodeWeights;
    vnodeWeights.reserve(m_localVnodes);

    // Get the local weights.
    reduceLocalWeights(weights,vnodeWeights,dropCompressed);

    // Get a message tag.
    int tag = Ippl::Comm->next_tag(F_CONEJO_BALANCER_TAG, F_TAG_CYCLE);

    // Everybody sends their data to processor zero.
    sendWeights(vnodeWeights,tag);

    // If we are processor zero, process messages.
    if ( m_myProc == 0 )
      receiveWeights(vnodeWeights,tag);
  }

  // Redistribute a FieldLayout using the stored weights.
  //mwerks  template<unsigned int D>
  //mwerks  void redistribute(FieldLayout<D>& layout);

  //
  // ConejoBalancer::redistribute
  //
  // Redistribute a FieldLayout using the stored weights.
  //

  template<unsigned int D>
  void redistribute(FieldLayout<D>& layout)
  {
    // Get message tags.
    int bcasttag = Ippl::Comm->next_tag(F_CB_BCAST_TAG, F_TAG_CYCLE);
    int domaintag = Ippl::Comm->next_tag(F_CB_DOMAIN_TAG, F_TAG_CYCLE);

    // On proc 0, figure things out and send them.
    if ( m_myProc == 0 )
      {
        // Tell the MultiBalancer to figure out the new distribution.
        m_balancer->distribute();

        // Broadcast the vnode ids that each processor will have to send.
        broadcastVnodesToSend(bcasttag);
      }

    // Every processor receives the ids of the vnodes it will send.
    vector<int> vnodeDestinations;
    receiveVnodesToSend(vnodeDestinations,bcasttag);

    // Send the domains for the vnodes to their new homes.
    sendVnodeDomains(vnodeDestinations,layout,domaintag);

    // Receive the domains for the vnodes that will live here.
    vector< NDIndex<D> > vnodeDomains;
    receiveVnodeDomains(vnodeDomains,domaintag);

    // Redistribute the FieldLayout using the new local domains.
    NDIndex<D> *p = &*(vnodeDomains.begin());
    layout.Repartition( p , p + vnodeDomains.size() );
  }
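
  // Illustration of the final step above (hypothetical values): if this
  // processor ends up owning two vnodes with domains {[0:7],[0:15]} and
  // {[8:15],[0:15]}, then vnodeDomains holds exactly those two NDIndexes
  // and Repartition() is handed the half-open range [p, p + 2).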

private:

  // Keep a pointer to the object that encapsulates the algorithm.
  MultiBalancer *m_balancer;

  // Remember the number of local vnodes.
  int m_localVnodes;

  // Remember the total number of vnodes.
  int m_totalVnodes;

  // Remember the number of processors.
  int m_procs;

  // Remember my processor.
  int m_myProc;

  // Record the number of vnodes on each processor.
  vector<int> m_vnodeCounts;

  // Support functions for internal use.
  void sendWeights(vector<double>& vnodeWeights, int tag);
  void receiveWeights(vector<double>& vnodeWeights, int tag);
  void setupVnodes(int localVnodes, int remoteVnodes);
  void recordVnodeCount(int count, int proc);
  void broadcastVnodesToSend(int tag);
  void receiveVnodesToSend(vector<int>& vnodeDestinations, int tag);

  //mwerks  template<unsigned int D>
  //mwerks  void sendVnodeDomains(vector<int>& vnodeDests,
  //mwerks                      FieldLayout<D>& layout,
  //mwerks                      int tag);
  //
  // ConejoBalancer::sendVnodeDomains
  //
  // Send to all the other processors the domains for the vnodes
  // that will be sent.
  // Here just the NDIndexes are being sent.
  // The contents of the Fields on those domains will be sent later.
  //

  template<unsigned int D>
  void sendVnodeDomains(vector<int>& vnodeDests,
                        FieldLayout<D>& layout,
                        int tag)
  {
    // A buffer for writing down the domains to be sent.
    vector< NDIndex<D> > send;

    // Loop over processors, sending a message to each.
    for ( int proc = 0; proc < m_procs; ++proc )
      {
        // Loop over the local vnodes, figuring out where each should go.
        vector<int>::iterator vp = vnodeDests.begin();
        typename FieldLayout<D>::iterator_iv fp = layout.begin_iv();
        for ( ; vp!=vnodeDests.end(); ++vp, ++fp)
          {
            // If this vnode is going to processor proc,
            // record the domain for sending.
            if ( *vp == proc )
              send.push_back( (*fp).second->getDomain() );
          }

        // Build the message to be sent.
        Message *msg = new Message;

        // Add the domains to the message.
        NDIndex<D> *p = &*(send.begin());
        msg->put(send.size());
        putMessage(*msg, p , p + send.size() );

        // Send the message.
        Ippl::Comm->send(msg,proc,tag);

        // Clear the send container.
        send.clear();
      }
  }
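
  // The message layout used above: first the number of domains
  // (send.size()), then that many NDIndexes; receiveVnodeDomains() below
  // unpacks each message in the same order.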

  //mwerks  template<unsigned int D>
  //mwerks  void receiveVnodeDomains(vector< NDIndex<D> >& vnodeDomains, int tag);

  //
  // ConejoBalancer::receiveVnodeDomains
  //
  // Each processor receives from all the other processors
  // the domains it will have after the redistribution.
  //
  template<unsigned int D>
  void receiveVnodeDomains(vector< NDIndex<D> >& vnodeDomains,
                           int tag)
  {
    // Loop over all the processors, receiving a message from each.
    for (int proc=0; proc < m_procs; ++proc)
      {
        // Receive a message from any processor.
        int any_proc = -1;
        Message *msg = Ippl::Comm->receive_block(any_proc,tag);

        // Get the number of NDIndexes in this message.
        long int s;
        msg->get(s);

        // Make sure the size isn't negative.
        PAssert(s>=0);

        // If there are any there, unpack them.
        if ( s != 0 )
          {
            // Add room to the container.
            vnodeDomains.resize( vnodeDomains.size() + s );

            // Unpack onto the end of the container.
            getMessage_iter(*msg, vnodeDomains.end()-s );
          }

        // Delete the message.
        delete msg;
      }
  }
};

#include "FieldLayout/ConejoBalancer.cpp"

#endif // CONEJO_BALANCER_H

/***************************************************************************
 * $RCSfile: ConejoBalancer.h,v $   $Author: adelmann $
 * $Revision: 1.1.1.1 $   $Date: 2003/01/23 07:40:27 $
 * IPPL_VERSION_ID: $Id: ConejoBalancer.h,v 1.1.1.1 2003/01/23 07:40:27 adelmann Exp $
 ***************************************************************************/
