src/Utility/DiscField.h

00001 // -*- C++ -*-
00002 /***************************************************************************
00003  *
00004  * The IPPL Framework
00005  * 
00006  *
00007  * Visit http://people.web.psi.ch/adelmann/ for more details
00008  *
00009  ***************************************************************************/
00010 
00011 #ifndef DISC_FIELD_H
00012 #define DISC_FIELD_H
00013 
00014 // debugging macros
00015 #ifdef IPPL_PRINTDEBUG
00016 #define DFDBG(x) x
00017 #define CDFDBG(x) x
00018 #else
00019 #define DFDBG(x)
00020 #define CDFDBG(x)
00021 #endif
00022 
00023 // include files
00024 #include "Index/NDIndex.h"
00025 #include "Field/BrickExpression.h"
00026 #include "Field/Field.h"
00027 #include "Utility/DiscBuffer.h"
00028 #include "Utility/DiscConfig.h"
00029 #include "Utility/Pstring.h"
00030 #include "Utility/Inform.h"
00031 #include "Utility/vmap.h"
00032 #include "Utility/IpplTimings.h"
00033 #include <stdio.h>
00034 #include <stdlib.h>
00035 #include <unistd.h>
00036 #include <fcntl.h>
00037 #ifdef __MWERKS__
00038 #include <stat.h>
00039 //#include <types.h>
00040 #else
00041 #include <sys/stat.h>
00042 #include <sys/types.h>
00043 #endif // __MWERKS__
00044 
00045 #ifdef IPPL_STDSTL
00046 #include <vector>
00047 using std::vector;
00048 #else
00049 #include <vector.h>
00050 #endif // IPPL_STDSTL
00051 
00052 #ifdef IPPL_USE_STANDARD_HEADERS
00053 #include <iostream>
00054 using namespace std;
00055 #else
00056 #include <iostream.h>
00057 #endif
00058 
00059 // forward declarations
00060 template<unsigned Dim, class T> class UniformCartesian;
00061 template<class T, unsigned Dim, class M, class C> class Field;
00062 template<unsigned Dim>                            class FieldLayout;
00063 
00064 
00065 // This helper class is used to represent a record for I/O of the .offset file.
00066 // It is only used for reads and writes.  See the notes below for
00067 // why vnodedata is a set of ints instead of an NDIndex.
00068 template <unsigned Dim, class T>
00069 struct DFOffsetData {
00070   int           vnodedata[6*Dim];
00071   bool          isCompressed;
00072 #if defined(IPPL_LONGLONG)
00073   long long     offset;
00074 #else
00075   long          offset;
00076 #endif
00077   T             compressedVal;
00078 };
00079 
00080 
00081 template <unsigned Dim>
00082 class DiscField {
00083 
00084 public:
00085   // Constructor: make a DiscField for writing only
00086   // fname = name of file (without extensions)
00087   // config = name of configuration file
00088   // numFields = number of Fields which will be written to the file
00089   // typestr = string describing the 'type' of the Field to be written (this
00090   //           is ignored if the Field is being read).  The string should be
00091   //           the same as the statement used to declare the Field object
00092   //           e.g., for a field of the form Field<double,2> the string
00093   //           should be "Field<double,2>".  The string should be such that
00094   //           if read later into a variable F, you can use the string in
00095   //           a source code line as  'F A' to create an instance of the
00096   //           same type of data as is stored in the file.
00097   DiscField(const char* fname, const char* config, unsigned int numFields,
00098             const char* typestr = 0);
00099 
00100   // Constructor: same as above, but without a config file specified.  The
00101   // default config file entry that will be used is "*  .", which means, for
00102   // each SMP machine, assume the directory to put data files in is "."
00103   DiscField(const char* fname, unsigned int numFields,
00104             const char* typestr = 0);
00105 
00106   // Constructor: make a DiscField for reading only.
00107   // fname = name of file (without extensions)
00108   // config = name of configuration file
00109   DiscField(const char* fname, const char* config);
00110 
00111   // Constructor: same as above, but without a config file specified.  The
00112   // default config file entry that will be used is "*  .", which means, for
00113   // each SMP machine, assume the directory to put data files in is "."
00114   DiscField(const char* fname);
00115 
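  // A minimal usage sketch (illustrative only; the base name "dumpdata",
  // the config file "io.config", and the element type are hypothetical):
  //
  //   // open a 2-field file set for writing, recording the element type
  //   DiscField<2> out("dumpdata", "io.config", 2, "Field<double,2>");
  //
  //   // later, open the same file set read-only with the default config
  //   // entry "*  ." (data files are assumed to live in ".")
  //   DiscField<2> in("dumpdata");
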
00116   // Destructor.
00117   ~DiscField();
00118 
00119   //
00120   // accessor functions
00121   //
00122 
00123   // Obtain all the information about the file, including the number
00124   // of records, fields, and number of vnodes stored in each record.
00125   void query(int& numRecords, int& numFields, vector<int>& size) const;
00126 
00127   // Query for the number of records (e.g., timesteps) in the file.
00128   unsigned int get_NumRecords() const { return NumRecords; }
00129 
00130   // Query for the number of Fields stored in the file.
00131   unsigned int get_NumFields() const { return NumFields; }
00132 
00133   // Query for the total domain of the system.
00134   NDIndex<Dim> get_Domain() const { return Size; }
00135 
00136   // Query for the dimension of the data in the file.  This is useful
00137   // mainly if you are checking for dimension by constructing a DiscField
00138   // and then trying to check its dimension later.  If the dimension is
00139   // not correctly matched with the Dim template parameter, you will get
00140   // an error if you try to read or write.
00141   unsigned int get_Dimension() const { return DataDimension; }
00142 
00143   // Query for the type string
00144   const char *get_TypeString() { return TypeString.c_str(); }
00145 
00146   // Query for the disctype string
00147   const char *get_DiscType() {
00148     if (DiscType.length() > 0)
00149       return DiscType.c_str();
00150     return 0;
00151   }
00152 
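  // An illustrative sketch of the query interface (variable names are
  // hypothetical): inspect a file set before deciding what to read.
  //
  //   DiscField<3> df("dumpdata");
  //   int nrec, nfld;
  //   vector<int> vnodesPerRecord;
  //   df.query(nrec, nfld, vnodesPerRecord);   // records, fields, vnode counts
  //   if (df.get_Dimension() != 3) {
  //     // dimension mismatch: a later read or write would report an error
  //   }
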
00153   //
00154   // read/write methods
00155   //
00156 
00157   // read the selected record in the file into the given Field object.
00158   // readDomain = the portion of the field on disk that should actually be 
00159   //              read in, and placed in the same location in the
00160   //              provided field.  All of readDomain must be contained
00161   //              within the data on disk, and in the field in memory,
00162   //              although the domain on disk and in memory do not themselves
00163   //              have to be the same.
00164   // varID = index for which field is being read ... this should be from
00165   //         0 ... (numFields-1), for the case where the file contains
00166   //         more than one Field.
00167   // record = which record to read.  DiscField does not keep a 'current
00168   //          file position' pointer, instead you explicitly request which
00169   //          record you wish to read.
00170   // Return success of operation.
00171 
00172   template <class T, class M, class C>
00173   bool read(Field<T,Dim,M,C>& f, const NDIndex<Dim> &readDomain,
00174             unsigned int varID, unsigned int record) {
00175 
00176     // sanity checking for input arguments and state of this object
00177     bool canread = false;
00178     if (!ConfigOK) {
00179       ERRORMSG("Cannot read in DiscField::read - config file error." << endl);
00180     } else if (DataDimension != Dim) {
00181       ERRORMSG("Bad dimension "<< DataDimension <<" in DiscField::read"<<endl);
00182       ERRORMSG("(" << DataDimension << " != " << Dim << ")" << endl);
00183     } else if (WritingFile) {
00184       ERRORMSG("DiscField::read called for DiscField opened for write."<<endl);
00185     } else if (varID >= NumFields) {
00186       ERRORMSG(varID << " is a bad Field ID in DiscField::read." << endl);
00187       ERRORMSG("(" << varID << " is >= " << NumFields << ")" << endl);
00188     } else if (record >= NumRecords) {
00189       ERRORMSG(record << " is a bad record number in DiscField::read."<<endl);
00190       ERRORMSG("(" << record << " is >= " << NumRecords << ")" << endl);
00191     } else if (!(f.getLayout().getDomain().contains(readDomain))) {
00192       ERRORMSG("DiscField::read - the total field domain ");
00193       ERRORMSG(f.getLayout().getDomain() << " must contain the requested ");
00194       ERRORMSG("read domain " << readDomain << endl);
00195     } else if (!(get_Domain().contains(readDomain))) {
00196       ERRORMSG("DiscField::read - the DiscField domain ");
00197       ERRORMSG(get_Domain() << " must contain the requested ");
00198       ERRORMSG("read domain " << readDomain << endl);
00199     } else {
00200       canread = true;
00201     }
00202 
00203     // If there was an error, we will abort
00204     if (!canread) {
00205       Ippl::abort("Exiting due to DiscField error.");
00206       return false;
00207     }
00208 
00209     // A typedef used later
00210     typedef typename LField<T,Dim>::iterator LFI;
00211     typedef BrickExpression<Dim,LFI,LFI,OpAssign> Expr_t;
00212 
00213     // Start timer for just the read portion
00214     static IpplTimings::TimerRef readtimer =
00215       IpplTimings::getTimer("DiscField read");
00216     IpplTimings::startTimer(readtimer);
00217 
00218     DFDBG(string dbgmsgname("DF:read:"));
00219     DFDBG(dbgmsgname += Config->getConfigFile());
00220     DFDBG(Inform dbgmsg(dbgmsgname.c_str(), INFORM_ALL_NODES));
00221     DFDBG(dbgmsg << "At start of read: Field layout=" << f.getLayout()<<endl);
00222     DFDBG(dbgmsg << "At start of read: Read domain =" << readDomain << endl);
00223 
00224     // Get a new tag value for this read operation, used for all sends
00225     // to other nodes with data.
00226     int tag = Ippl::Comm->next_tag(DF_READ_TAG, DF_TAG_CYCLE);
00227 
00228     // At the start of a new record, determine how many elements of the
00229     // Field should be stored into this node's vnodes.
00230     int expected = compute_expected(f.getLayout(), readDomain);
00231 
00232     // On all nodes, loop through all the file sets, and:
00233     //   1. box0: Get the number of vnodes stored there, from the layout file
00234     //   2. box0: Get offset information for all the vnodes, and
00235     //      assign selected vnodes to other nodes on the same SMP to read.
00236     //   3. For each vnode assigned to a processor to read:
00237     //       - read data (if necessary)
00238     //       - distribute data to interested parties, or yourself
00239     // On all nodes, when you get some data from another node:
00240     //       - copy it into the relevant vnode
00241     //       - decrement your expected value.
00242     // When expected hits zero, we're done with reading on that node.
00243 
00244     DFDBG(dbgmsg << "Reading data from " << numFiles()<<" filesets:"<<endl);
00245 
00246     for (int sf=0; sf < numFiles(); ++sf) {
00247 
00248       // Create the data file handle, but don't yet open it ... only open
00249       // it if we need to later (if we run into any uncompressed blocks).
00250       int outputDatafd = (-1);
00251 
00252       // offset data read in from file or obtained from the box0 node
00253       vector<DFOffsetData<Dim,T> > offdata;
00254 
00255       // the number of vnodes we'll be working on on this node
00256       int vnodes = 0;
00257 
00258       // the maximum number of elements we'll need to have buffer space for
00259       int maxsize = 0;
00260 
00261       // on box0 nodes, read in the layout and offset info
00262       if (Ippl::myNode() == myBox0()) {
00263 
00264         // Get the number of vnodes in this file.
00265         vnodes = read_layout(record, sf);
00266 
00267         // Get the offset data for this field and record.
00268         read_offset(varID, record, sf, offdata, vnodes);
00269       }
00270 
00271       // On all nodes, either send out or receive in offset information.
00272       // Some nodes will not get any, and will not have to do any reading.
00273       // But those that do, will read in data for the vnodes they are
00274       // assigned.  'vnodes' will be set to the number of vnodes assigned
00275       // for reading from this node, and  'maxsize' will be set
00276       // to the maximum size of the vnodes in this file, for use in
00277       // preparing the buffer that will be used to read those vnodes.
00278       distribute_offsets(offdata, vnodes, maxsize, readDomain);
00279 
00280       DFDBG(dbgmsg << "After reading and distributing offset data: ");
00281       DFDBG(dbgmsg << "Node " << Ippl::myNode() << " will read ");
00282       DFDBG(dbgmsg << vnodes << " vnodes, with maxsize = " << maxsize);
00283       DFDBG(dbgmsg << endl);
00284 
00285       // Loop through all the vnodes now; they will appear in any
00286       // order, which is fine; we just read them and see where they
00287       // go.  The info in the offset struct includes the domain for that
00288       // block and whether it was written compressed or not.
00289 
00290       for (int vn=0; vn < vnodes; ++vn) {
00291         // Create an NDIndex object storing the vnode domain for this vnode.
00292         NDIndex<Dim> vnodeblock;
00293         offset_data_to_domain(offdata[vn], vnodeblock);
00294 
00295         // If there is no intersection of this vnode and the read-domain,
00296         // we can just skip it entirely.
00297         if (! vnodeblock.touches(readDomain)) {
00298           DFDBG(dbgmsg << "Skipping vnode " << vn << ", no intersection ");
00299           DFDBG(dbgmsg << "between " << vnodeblock << " and ");
00300           DFDBG(dbgmsg << readDomain << endl);
00301 
00302           continue;
00303         }
00304 
00305         // Compute the size of a block to add to the base of this block,
00306         // based on the chunk size.  If the data is compressed, this won't
00307         // matter.
00308         int msdim = (Dim-1);    // this will be zero-based
00309         int chunkelems = Ippl::chunkSize() / sizeof(T);
00310         NDIndex<Dim> chunkblock = chunk_domain(vnodeblock, chunkelems, msdim,
00311                                                offdata[vn].isCompressed);
00312  
00313         DFDBG(dbgmsg << "Reading in chunks in blocks of size " << chunkblock);
00314         DFDBG(dbgmsg << " and max buffer elems = " << maxsize);
00315         DFDBG(dbgmsg << " in vnode " << vn << " with total domain ");
00316         DFDBG(dbgmsg << vnodeblock << endl);
00317 
00318         // Initialize the NDIndex we'll use to indicate what portion of the
00319         // domain we're reading and processing.
00320         NDIndex<Dim> currblock = vnodeblock;
00321         currblock[msdim] = Index(vnodeblock[msdim].first() - 1,
00322                                  vnodeblock[msdim].first() - 1);
00323         for (int md = (msdim+1); md < Dim; ++md)
00324           currblock[md] = Index(vnodeblock[md].first(),vnodeblock[md].first());
00325 
00326         // Initialize the offset value for this vnode.  The seek position
00327         // is stored as a byte offset, although it is read from disk as
00328         // a number of elements offset from the beginning.
00329         Offset_t seekpos = (-1);
00330 
00331         // Loop through the chunks, reading and processing each one.
00332         int unread = vnodeblock.size();
00333         while (unread > 0) {
00334           // Compute the domain of the chunk we'll work on now, and store
00335           // this in currblock.
00336 
00337           // First determine if we're at the end of our current incr dimension,
00338           // and determine new bounds
00339           bool incrhigher=(currblock[msdim].last()==vnodeblock[msdim].last());
00340           int a = (incrhigher ?
00341                    vnodeblock[msdim].first() :
00342                    currblock[msdim].last() + 1);
00343           int b = a + chunkblock[msdim].length() - 1;
00344           if (b > vnodeblock[msdim].last())
00345             b = vnodeblock[msdim].last();
00346 
00347           // Increment this dimension
00348           currblock[msdim] = Index(a, b);
00349 
00350           // Increment higher dimensions, if necessary
00351           if (incrhigher) {
00352             for (int cd = (msdim+1); cd < Dim; ++cd) {
00353               if (currblock[cd].last() < vnodeblock[cd].last()) {
00354                 // This dim is not at end, so just inc by 1
00355                 currblock[cd] = Index(currblock[cd].first() + 1,
00356                                       currblock[cd].last() + 1);
00357                 break;
00358               } else {
00359                 // Move this dimension back to start, and go on to next one
00360                 currblock[cd] = Index(vnodeblock[cd].first(),
00361                                       vnodeblock[cd].first());
00362               }
00363             }
00364           }
00365 
00366           // Decrement our unread count, since we'll process this block
00367           // either by actually reading it or getting its compressed value
00368           // from the offset file, if we have to read it at all.
00369           int nelems = currblock.size();
00370           unread -= nelems;
00371 
00372           DFDBG(dbgmsg << "Starting processing of chunk with domain ");
00373           DFDBG(dbgmsg << currblock << " in vnode " << vn);
00374           DFDBG(dbgmsg << " at offset = " << offdata[vn].offset << endl);
00375           DFDBG(dbgmsg << "After this, still have " << unread << " unread.");
00376           DFDBG(dbgmsg << endl);
00377 
00378           // Set the seek position now, if necessary
00379           if (!offdata[vn].isCompressed && seekpos < 0) {
00380             seekpos = offdata[vn].offset * sizeof(T);
00381             DFDBG(dbgmsg << "Set seek position = " << seekpos << endl);
00382           }
00383 
00384           // At this point, we might be able to skip a lot of work if this
00385           // particular chunk does not intersect with our read domain at all.
00386           if (! currblock.touches(readDomain)) {
00387             DFDBG(dbgmsg << "Skipping sub-vnode chunk " << currblock);
00388             DFDBG(dbgmsg << ", no intersection with readDomain ");
00389             DFDBG(dbgmsg << readDomain << endl);
00390 
00391             // Before we skip the rest, we must update the offset
00392             Offset_t readbytes  = nelems * sizeof(T);
00393             seekpos += readbytes;
00394             DFDBG(dbgmsg << "Updating offset at end of skip operation to ");
00395             DFDBG(dbgmsg << seekpos << endl);
00396 
00397             // Then, we're done with this chunk, move on to the next.
00398             continue;
00399           }
00400 
00401           // Put the intersecting domain in readDomainSection.
00402           NDIndex<Dim> readDomainSection = currblock.intersect(readDomain);
00403           DFDBG(dbgmsg << "Intersection of chunk " << currblock);
00404           DFDBG(dbgmsg << " and read domain " << readDomain << " = ");
00405           DFDBG(dbgmsg << readDomainSection << endl);
00406 
00407           // if it is not compressed, read in the data.  If it is,
00408           // just keep the buffer pointer at zero.
00409           T *buffer = 0;
00410           if (!offdata[vn].isCompressed) {
00411             // If we have not yet done so, open the data file.
00412             if (outputDatafd < 0) {
00413               DFDBG(dbgmsg << "Opening input data file ...");
00414               DFDBG(dbgmsg << endl);
00415               outputDatafd = open_df_file_fd(Config->getFilename(sf), ".data",
00416                                              O_RDONLY);
00417             }
00418 
00419             // Resize the read buffer in case it is not large enough.
00420             // We use the max size for all the vnodes here, to avoid doing
00421             // this more than once per file set.  This also returns the
00422             // pointer to the buffer to use, as a void *, which we cast
00423             // to the proper type.  For direct-io, we might need to make
00424             // this a little bigger to match the device block size.
00425 
00426             long nbytes = maxsize*sizeof(T);
00427 #ifdef IPPL_DIRECTIO
00428             if (openedDirectIO) {
00429               nbytes += dioinfo.d_miniosz; // extra in case offset is wrong
00430               size_t ndiff  = nbytes % dioinfo.d_miniosz;
00431               if (ndiff > 0)
00432                 nbytes += (dioinfo.d_miniosz - ndiff);
00433             }
00434 #endif
00435             buffer = static_cast<T *>(DiscBuffer::resize(nbytes));
00436             DFDBG(dbgmsg << "On box0: resized buf to " << DiscBuffer::size());
00437             DFDBG(dbgmsg << " bytes ... current block will need ");
00438             DFDBG(dbgmsg << nelems * sizeof(T) << " bytes." << endl);
00439 
00440             // Create some initial values for what and where to read.
00441             // We might adjust these if we're doing direct-io.
00442             T *      readbuffer = buffer;
00443             Offset_t readbytes  = nelems * sizeof(T);
00444             Offset_t readoffset = seekpos;
00445 
00446             // seekpos was only used to set readoffset, so we can update
00447             // seekpos now.  Add in the extra amount we'll be reading.
00448             seekpos += readbytes;
00449 
00450 #ifdef IPPL_DIRECTIO
00451             // If we're doing direct-io, we will need to adjust the start
00452             // and end of our buffers and offsets ...
00453             if (openedDirectIO) {
00454               // Find out how much our offset is off from multipple of
00455               // Find out how much our offset is off from a multiple of the
00456               // will read in extra data and our storage will be offset to
00457               // start at the place where the new data is actually located.
00458 
00459               PAssert(readoffset >= 0);
00460               Offset_t extra = readoffset % dioinfo.d_miniosz;
00461               readoffset -= extra;
00462               DFDBG(dbgmsg << "DIO: Moving read offset back by " << extra);
00463               DFDBG(dbgmsg << " bytes, to readoffset = " << readoffset<<endl);
00464 
00465               // Compute the number of elements to read.  We might also need
00466               // to extend the read size to get the total read size to be a
00467               // multiple of the device block size.
00468 
00469               readbytes += extra;
00470               size_t ndiff = readbytes % dioinfo.d_miniosz;
00471               if (ndiff > 0)
00472                 readbytes += (dioinfo.d_miniosz - ndiff);
00473               PAssert(nbytes >= readbytes);
00474               DFDBG(dbgmsg << "DIO: Adjusted readbytes from ");
00475               DFDBG(dbgmsg << (nelems * sizeof(T)) << " to " << readbytes);
00476               DFDBG(dbgmsg << endl);
00477 
00478               // Point the buffer at the real first element, adjusted to
00479               // counteract our moving the offset location back to a
00480               // block-size multiple.
00481               PAssert(extra % sizeof(T) == 0);
00482               buffer += (extra / sizeof(T));
00483               DFDBG(dbgmsg << "DIO: Adjusted buffer pointer forward ");
00484               DFDBG(dbgmsg << (extra / sizeof(T)) << " elements." << endl);
00485             }
00486 #endif
00487 
00488             // Read data in a way that might do direct-io
00489             DFDBG(dbgmsg << "Calling read_data with readbytes=" << readbytes);
00490             DFDBG(dbgmsg << ", readoffset=" << readoffset << endl);
00491             read_data(outputDatafd, readbuffer, readbytes, readoffset);
00492           }
00493 
00494           // we have the data block now; find out where the data should
00495           // go, and either send the destination node a message, or copy
00496           // the data into the destination lfield.
00497 
00498           DFDBG(dbgmsg << "Finding destination nodes for block with ");
00499           DFDBG(dbgmsg << "domain = " << currblock << ", compressed = ");
00500           DFDBG(dbgmsg << offdata[vn].isCompressed << " ..." << endl);
00501           DFDBG(dbgmsg << "We will use the portion " << readDomainSection);
00502           DFDBG(dbgmsg << " from this block." << endl);
00503 
00504           // Set up to loop over the touching remote vnodes, and send out
00505           // messages
00506           typename FieldLayout<Dim>::touch_iterator_dv rv_i;
00507           //      int remaining = nelems;
00508           int remaining = readDomainSection.size();
00509 
00510           // compute what remote vnodes touch this block's domain, and
00511           // iterate over them.
00512           //      typename FieldLayout<Dim>::touch_range_dv
00513           //        range(f.getLayout().touch_range_rdv(currblock));
00514           typename FieldLayout<Dim>::touch_range_dv
00515             range(f.getLayout().touch_range_rdv(readDomainSection));
00516           for (rv_i = range.first; rv_i != range.second; ++rv_i) {
00517             // Compute the intersection of our domain and the remote vnode
00518             //      NDIndex<Dim> ri = currblock.intersect((*rv_i).first);
00519             NDIndex<Dim> ri = readDomainSection.intersect((*rv_i).first);
00520             DFDBG(dbgmsg << "Block intersects with remote domain ");
00521             DFDBG(dbgmsg << (*rv_i).first << " = " << ri << endl);
00522 
00523             // Find out who will be sending this data
00524             int rnode = (*rv_i).second->getNode();
00525 
00526             // Send this data to that remote node, by preparing a
00527             // CompressedBrickIterator and putting in the proper data.
00528             Message *msg = new Message;
00529             ri.putMessage(*msg);
00530             LFI cbi(buffer, ri, currblock, offdata[vn].compressedVal);
00531             cbi.TryCompress();
00532             cbi.putMessage(*msg, false);  // 'false' = avoid copy if possible
00533             DFDBG(dbgmsg << "Sending subblock " << ri << " from block ");
00534             DFDBG(dbgmsg << currblock << " to node " << rnode);
00535             DFDBG(dbgmsg << " with tag " << tag << endl);
00536             Ippl::Comm->send(msg, rnode, tag);
00537 
00538             // Decrement the remaining count
00539             remaining -= ri.size();
00540             DFDBG(dbgmsg << "After send, remaining = " << remaining << endl);
00541           }
00542 
00543           // loop over touching local vnodes, and copy in data, if there
00544           // is anything left
00545           typename BareField<T,Dim>::iterator_if lf_i = f.begin_if();
00546           for (; remaining > 0 && lf_i != f.end_if(); ++lf_i) {
00547             // Get the current LField and LField domain, and make an alias
00548             // for the domain of the block we've read from disk
00549             LField<T,Dim> &lf = *(*lf_i).second;
00550             const NDIndex<Dim>& lo = lf.getOwned();
00551             //      const NDIndex<Dim>& ro = currblock;
00552             const NDIndex<Dim>& ro = readDomainSection;
00553 
00554             // See if it touches the domain of the recently read block.
00555             if (lo.touches(ro)) {
00556               // Find the intersection.
00557               NDIndex<Dim> ri = lo.intersect(ro);
00558 
00559               DFDBG(dbgmsg << "Doing local copy of domain " << ri);
00560               DFDBG(dbgmsg << " into LField with domain " << lo << endl);
00561               
00562               // If these are compressed we might not have to do any work.
00563               if (lf.IsCompressed() &&
00564                   offdata[vn].isCompressed &&
00565                   ro.contains(lo)) {
00566                 DFDBG(dbgmsg << "  Doing comp-comp assign." << endl);
00567                 PETE_apply(OpAssign(),*lf.begin(),offdata[vn].compressedVal);
00568               } else {
00569                 // Build an iterator for the read-data block
00570                 // LFI rhs_i(buffer, ri, ro, offdata[vn].compressedVal);
00571                 LFI rhs_i(buffer, ri, currblock, offdata[vn].compressedVal);
00572 
00573                 // Could we compress that rhs iterator?
00574                 if (rhs_i.CanCompress(*rhs_i) && f.compressible() &&
00575                     ri.contains(lf.getAllocated())) {
00576                   // Compress the whole LField to the value on the right
00577                   DFDBG(dbgmsg << "  Doing lfield-comp assign." << endl);
00578                   lf.Compress(*rhs_i);
00579                 } else { // Assigning only part of LField on the left
00580                   // Must uncompress lhs, if not already uncompressed
00581                   lf.Uncompress(true);
00582 
00583                   // Get the iterator for it.
00584                   LFI lhs_i = lf.begin(ri);
00585 
00586                   // And do the assignment.
00587                   DFDBG(dbgmsg << "  Doing uncomp-uncomp assign." << endl);
00588                   Expr_t(lhs_i,rhs_i).apply();
00589                 }
00590               }
00591 
00592               // Decrement the expected count and the remaining count.
00593               // Remaining is how many cells are left of the current block.
00594               // Expected is how many cells this node expects to get copied
00595               // into its blocks.
00596               int bsize = ri.size();
00597               remaining -= bsize;
00598               expected -= bsize;
00599 
00600               DFDBG(dbgmsg << "Finished copying in local data, now ");
00601               DFDBG(dbgmsg << "expecting " << expected << " elems with ");
00602               DFDBG(dbgmsg << remaining << " elems remaining." << endl);
00603             }
00604           }
00605 
00606           // If we're here and still have remaining elements, we're screwed.
00607           if (remaining > 0)
00608             Ippl::abort("remaining > 0 at end of box0 vnode read!!!");
00609         }
00610       }
00611 
00612       // Close the data file now
00613       
00614       if (outputDatafd >= 0)
00615         close(outputDatafd);
00616     }
00617 
00618     // On all nodes, now, keep receiving messages until our expected count
00619     // goes to zero.
00620     while (expected > 0) {
00621       // Receive the next message from any node with the current read tag
00622       int node = COMM_ANY_TAG;
00623       DFDBG(dbgmsg << "Waiting for DF data, still expecting " << expected);
00624       DFDBG(dbgmsg << " elements ..." << endl);
00625       Message *msg = Ippl::Comm->receive_block(node, tag);
00626 
00627       // Extract the domain from the message
00628       NDIndex<Dim> ro;
00629       ro.getMessage(*msg);
00630       DFDBG(dbgmsg << "Received DF data from node " << node << " with tag ");
00631       DFDBG(dbgmsg << tag << ", with domain = " << ro << endl);
00632 
00633       // Extract the data from the message
00634       T rhs_compressed_data;
00635       LFI rhs_i(rhs_compressed_data);
00636       rhs_i.getMessage(*msg);
00637 
00638       // Find what local LField contains this domain
00639       typename BareField<T,Dim>::iterator_if lf_i = f.begin_if();
00640       bool foundlf = false;
00641       for (; lf_i != f.end_if(); ++lf_i) {
00642         // Get the current LField and LField domain
00643         LField<T,Dim> &lf = *(*lf_i).second;
00644         const NDIndex<Dim>& lo = lf.getOwned();
00645 
00646         // See if it contains the domain of the recently received block.
00647         // If so, assign the block to this LField
00648         if (lo.contains(ro)) {
00649           DFDBG(dbgmsg << "Found local lfield with domain " << lo);
00650           DFDBG(dbgmsg << " that contains received domain " << ro << endl);
00651 
00652           // Check and see if we really have to do this.
00653           if ( !(rhs_i.IsCompressed() && lf.IsCompressed() &&
00654                  (*rhs_i == *lf.begin())) ) {
00655             // Yep. gotta do it, since something is uncompressed or
00656             // the values are different.
00657 
00658             // Uncompress the LField first, if necessary.  It's necessary
00659             // if the received block size is smaller than the LField's.
00660             lf.Uncompress(!ro.contains(lo));
00661 
00662             DFDBG(dbgmsg << "Assigning value: lhs compressed = ");
00663             DFDBG(dbgmsg << lf.IsCompressed() << ", rhs compressed = ");
00664             DFDBG(dbgmsg << rhs_i.IsCompressed() << endl);
00665 
00666             // Make an iterator over the received block's portion of the
00667             // LField
00668             LFI lhs_i = lf.begin(ro);
00669 
00670             // Do the assignment.
00671             Expr_t(lhs_i,rhs_i).apply();
00672           } else {
00673             DFDBG(dbgmsg << "Local LField is compressed and has same value ");
00674             DFDBG(dbgmsg << "as received data." << endl);
00675           }
00676 
00677           // Update our expected value
00678           expected -= ro.size();
00679 
00680           // Indicate we're done, since the block we received is
00681           // guaranteed to be within only one of our LFields.
00682           foundlf = true;
00683           break;
00684         }
00685       }
00686 
00687       // Make sure we found which vnode this message is for; if we don't,
00688       // we're screwed
00689       if (!foundlf) {
00690         ERRORMSG("Did not find destination local vnode for received domain ");
00691         ERRORMSG(ro << " from node " << node << endl);
00692         Ippl::abort("DID NOT FIND DESTINATION LOCAL VNODE IN DISCFIELD::READ");
00693       }
00694 
00695       // Now we are done with the message
00696       delete msg;
00697     }
00698 
00699     // We're all done reading, so clean up
00700     IpplTimings::stopTimer(readtimer);
00701 
00702     // This is just like an assign, so set dirty flags, fill guard cells,
00703     // and try to compress the result.
00704 
00705     DFDBG(dbgmsg << "Finished with read.  Updating field GC's." << endl);
00706     f.setDirtyFlag();
00707     f.fillGuardCellsIfNotDirty();
00708     f.Compress();
00709 
00710     // Let everything catch up
00711     Ippl::Comm->barrier();
00712 
00713     // print out malloc info at end of read
00714     //     Inform memmsg("DiscField::read::mallinfo");
00715     //     struct mallinfo mdata;
00716     //     mdata = mallinfo();
00717     //     memmsg << "After read, new malloc info:" << endl;
00718     //     memmsg << "----------------------------" << endl;
00719     //     memmsg << "  total arena space = " << mdata.arena << endl;
00720     //     memmsg << "    ordinary blocks = " << mdata.ordblks << endl;
00721     //     memmsg << "       small blocks = " << mdata.smblks << endl;
00722     //     memmsg << "    user-held space = " << mdata.usmblks+mdata.uordblks;
00723     //     memmsg << endl;
00724     //     memmsg << "         free space = " << mdata.fsmblks+mdata.fordblks;
00725     //     memmsg << endl;
00726 
00727     return true;
00728   }
00729 
00730   // versions of read that provide default values for the arguments
00731   template <class T, class M, class C>
00732   bool read(Field<T,Dim,M,C>& f, unsigned int varID, unsigned int record) {
00733     return read(f, f.getLayout().getDomain(), varID, record);
00734   }
00735 
00736   template <class T, class M, class C>
00737   bool read(Field<T,Dim,M,C>& f, const NDIndex<Dim> &readDomain,
00738             unsigned int varID) {
00739     return read(f, readDomain, varID, 0);
00740   }
00741 
00742   template <class T, class M, class C>
00743   bool read(Field<T,Dim,M,C>& f, unsigned int varID) {
00744     return read(f, f.getLayout().getDomain(), varID, 0);
00745   }
00746 
00747   template <class T, class M, class C>
00748   bool read(Field<T,Dim,M,C>& f, const NDIndex<Dim> &readDomain) {
00749     return read(f, readDomain, 0, 0);
00750   }
00751 
00752   template <class T, class M, class C>
00753   bool read(Field<T,Dim,M,C>& f) {
00754     return read(f, f.getLayout().getDomain(), 0, 0);
00755   }
00756 
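  // An illustrative read sketch (names are hypothetical): read field 0 of
  // record 0 into a Field, then read only a sub-domain of a later record.
  //
  //   Field<double,2> A(layout);            // layout is some FieldLayout<2>
  //   DiscField<2> in("dumpdata");
  //   in.read(A);                           // field 0, record 0
  //   NDIndex<2> sub(Index(0,15), Index(0,15));
  //   in.read(A, sub, 0, 1);                // only 'sub' from record 1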
00757 
00759   // write the data from the given Field into the file.  This can be used
00760   // to either append new records, or to overwrite an existing record.
00761   // varID = index for which field is being written ... this should be from
00762   //         0 ... (numFields-1), for the case where the file contains
00763   //         more than one Field.  Writing continues for the current record
00764   //         until all numField fields have been written; then during the
00765   //         next write, the record number is incremented.
00766   //--------------------------------------------------------------------
00767   // notes for documentation:
00768   // - for now, you cannot overwrite Field data on successive writes
00769   //   (this can easily be added when needed - some restrictions will apply)
00770   // - when writing, all the Fields in a record must have the same layout
00771   // - layouts (and vnodes) can vary between records, however
00772   // - separate DiscField objects need to be opened for reading and writing
00773   //--------------------------------------------------------------------
00774   template<class T, class M, class C>
00775   bool write(Field<T,Dim,M,C>& f, unsigned int varID) {
00776     TAU_TYPE_STRING(taustr, "bool (" + CT(f) + ", unsigned int)" );
00777     TAU_PROFILE("DiscField::write()", taustr,
00778                 TAU_UTILITY | TAU_FIELD | TAU_IO);
00779 
00780     // sanity checking for input arguments and state of this object
00781     if (!ConfigOK) {
00782       ERRORMSG("Cannot write in DiscField::write - config file error."<<endl);
00783       Ippl::abort("Exiting due to DiscField error.");
00784       return false;
00785     }
00786     else if (!WritingFile) {
00787       ERRORMSG("DiscField::write called for DiscField opened for read."<<endl);
00788       Ippl::abort("Exiting due to DiscField error.");
00789       return false;
00790     }
00791     else if (varID >= NumFields) {
00792       ERRORMSG(varID << " is a bad variable ID in DiscField::write." << endl);
00793       Ippl::abort("Exiting due to DiscField error.");
00794       return false;
00795     }
00796     else if (NeedStartRecord == 0 && ValidField[varID]) {
00797       ERRORMSG("DiscField:write - attempt to overwrite Field " << varID);
00798       ERRORMSG(" at record " << NumRecords - 1 << endl);
00799       Ippl::abort("Exiting due to DiscField error.");
00800       return false;
00801     }
00802 
00803     DFDBG(string dbgmsgname("DF:write:"));
00804     DFDBG(dbgmsgname += Config->getConfigFile());
00805     DFDBG(Inform dbgmsg(dbgmsgname.c_str(), INFORM_ALL_NODES));
00806     DFDBG(dbgmsg << "At start of write: Field layout=" << f.getLayout()<<endl);
00807 
00808     INCIPPLSTAT(incDiscWrites);
00809 
00810     // useful typedefs for later
00811     typedef typename LField<T,Dim>::iterator LFI;
00812 
00813     // Get a new tag value for this write operation, used for all sends
00814     // to other nodes with data.
00815     int tag = Ippl::Comm->next_tag(FB_WRITE_TAG, FB_TAG_CYCLE);
00816 
00817     // Get the layout reference, and set up an iterator over lfields
00818     FieldLayout<Dim>& layout = f.getLayout();
00819     typename Field<T,Dim,M,C>::iterator_if local;
00820 
00821     // do we need to start a new record?  If so, extend data structures and
00822     // file storage
00823     if (NeedStartRecord != 0) {
00824       // convert the layout information for the field into internal storage,
00825       // represented as a map from NDIndex --> owner node
00826       if (!make_globalID(layout)) {
00827         ERRORMSG("DiscField::write - all Fields must have the same ");
00828         ERRORMSG("global domain in a single DiscField.\n");
00829         ERRORMSG("The original domain is " << get_Domain() << ", ");
00830         ERRORMSG("the attempted new domain is " << layout.getDomain() << endl);
00831         Ippl::abort("Exiting due to DiscField error.");
00832       }
00833 
00834       // update vnode and valid field information for new record
00835       if (numFiles() > 0 && myBox0() == Ippl::myNode()) {
00836         int nvtally = 0;
00837         NumVnodes[0].push_back(globalID.size());
00838         if (NumRecords > 0)
00839           nvtally = VnodeTally[0][NumRecords-1] + NumVnodes[0][NumRecords-1];
00840         VnodeTally[0].push_back(nvtally);
00841       }
00842 
00843       // indicate we have not written out data for any fields for this record
00844       for (unsigned int i=0; i < NumFields; ++i)
00845         ValidField[i] = false;
00846 
00847       // increment total record number
00848       NumRecords++;
00849       NumWritten = 0;
00850 
00851       // update necessary data files at the start of a new record
00852       if (Ippl::myNode() == myBox0()) {
00853         // Update the meta information ... this can be changed to be only
00854         // written out during destruction.
00855         DFDBG(dbgmsg << "Writing meta file ..." << endl);
00856         if (!write_meta()) {
00857           ERRORMSG("Could not write .meta file on node " << Ippl::myNode());
00858           ERRORMSG(endl);
00859           Ippl::abort("Exiting due to DiscField error.");
00860           return false;
00861         }
00862 
00863         // write out the NDIndex objects from the FieldLayout to the
00864         // .layout file
00865         DFDBG(dbgmsg << "Writing layout file ..." << endl);
00866         if (!write_layout()) {
00867           ERRORMSG("Could not update .layout file on node "<<Ippl::myNode());
00868           ERRORMSG(endl);
00869           Ippl::abort("Exiting due to DiscField error.");
00870           return false;
00871         }
00872       }
00873     }
00874 
00875     // On box0 nodes, do most of the work ... other nodes forward data
00876     // to box0 nodes.
00877     if (Ippl::myNode() == myBox0()) {
00878 
00879       // Open the offset data file, and write the Field number.
00880       // This precedes all the OffsetData structs for vnodes in the Field,
00881       // which are in random order for the given Field.  The OffsetData
00882       // structs contains a field 'vnode' which tells which vnode the data is
00883       // struct contains a field 'vnode' which tells which vnode the data is
00884       FILE *outputOffset = open_df_file(Config->getFilename(0),
00885                                         ".offset", string("a"));
00886       int wVarID = (int)varID;
00887       DFDBG(dbgmsg << "Writing Field ID = " << wVarID<<" to offset file ...");
00888       DFDBG(dbgmsg << endl);
00889       if (fwrite(&wVarID, sizeof(int), 1, outputOffset) != 1) {
00890         ERRORMSG("DiscField::write - cannot write field number to .offset ");
00891         ERRORMSG("file" << endl);
00892         Ippl::abort("Exiting due to DiscField error.");
00893         fclose(outputOffset);
00894         return false;
00895       }
00896 
00897       // Initialize output file handle ... we might never write anything to
00898       // it if the field is completely compressed.
00899       DFDBG(dbgmsg << "Trying to open output data file ..." << endl);
00900       int outputDatafd = open_df_file_fd(Config->getFilename(0), ".data",
00901                                          O_RDWR|O_CREAT);
00902       DFDBG(dbgmsg << "Opened out file, fd=" << outputDatafd << endl);
00903 
00904       // Later we will receive messages from other nodes.  This is the
00905       // number of blocks we should receive.  We'll decrease this by
00906       // the number of vnodes we already have on this processor, however.
00907       DFDBG(dbgmsg << "This box0 expected to receive " << globalID.size());
00908       DFDBG(dbgmsg << " blocks from other nodes, minus the ");
00909       DFDBG(dbgmsg << layout.size_iv() << " local blocks." << endl);
00910       int unreceived = globalID.size();
00911       int fromothers = unreceived - layout.size_iv();
00912 
00913       // Now we start processing blocks.  We have 'unreceived' total to
00914       // write, either from ourselves or others.  We first check for
00915       // messages, if there are any to receive, get one and process it,
00916       // otherwise write out one of our local blocks.
00917 
00918       local = f.begin_if();
00919       while (unreceived > 0) {
00920         // Keep processing remote blocks until we don't see one available
00921         // or we've received them all
00922         bool checkremote = (fromothers > 0);
00923         while (checkremote) {
00924           // Get a message
00925           int any_node = COMM_ANY_NODE;
00926           Message *msg = Ippl::Comm->receive(any_node, tag);
00927 
00928           // If we found one, process it
00929           if (msg != 0) {
00930             // Extract the domain from the message
00931             NDIndex<Dim> ro;
00932             ro.getMessage(*msg);
00933             DFDBG(dbgmsg << "Received an LField msg from node ");
00934             DFDBG(dbgmsg << any_node << " with tag " << tag << ", domain = ");
00935             DFDBG(dbgmsg << ro << endl);
00936             
00937             // Extract the data from the message
00938             T rhs_compressed_data;
00939             LFI cbi(rhs_compressed_data);
00940             cbi.getMessage(*msg);
00941 
00942             // Write this data out
00943             write_offset_and_data(outputOffset, outputDatafd, cbi, ro);
00944 
00945             // finish with this message
00946             delete msg;
00947 
00948             // Update counters
00949             unreceived -= 1;
00950             fromothers -= 1;
00951           } else {
00952             // We didn't see one, so stop checking for now
00953             checkremote = false;
00954           }
00955         }
00956 
00957         // Process a local block if any are left
00958         if (local != f.end_if()) {
00959           // Cache some information about this local field.
00960           LField<T,Dim>& l = *(*local).second.get();
00961           LFI cbi = l.begin();
00962           
00963           // Write this data out
00964           write_offset_and_data(outputOffset, outputDatafd, cbi, l.getOwned());
00965 
00966           // Update counters
00967           ++local;
00968           unreceived -= 1;
00969         }
00970       }
00971 
00972       // Close the output data file
00973       if (outputDatafd >= 0)
00974         close(outputDatafd);
00975 
00976       // Close the output offset file
00977       if (outputOffset != 0)
00978         fclose(outputOffset);
00979 
00980     } else {
00981       // On other nodes, just send out our LField blocks.
00982       for (local = f.begin_if(); local != f.end_if(); ++local) {
00983         // Cache some information about this local field.
00984         LField<T,Dim>& l = *(*local).second.get();
00985         const NDIndex<Dim> &ro = l.getOwned();
00986         LFI cbi = l.begin();
00987 
00988         // Create a message to send to box0
00989         Message *msg = new Message;
00990 
00991         // Put in the domain and data
00992         ro.putMessage(*msg);
00993         cbi.putMessage(*msg, false);  // 'false' = avoid copy if possible
00994 
00995         // Send this message to the box0 node.
00996         int node = myBox0();
00997         DFDBG(dbgmsg << "Sending local block " << ro << " to node " << node);
00998         DFDBG(dbgmsg << " with tag " << tag << endl);
00999         Ippl::Comm->send(msg, node, tag);
01000       }
01001     }
01002 
01003     // indicate we've written one more field
01004     ValidField[varID] = true;
01005     NeedStartRecord = (++NumWritten == NumFields);
01006 
01007     // Let everything catch up
01008     Ippl::Comm->barrier();
01009 
01010     // if we actually make it down to here, we were successful in writing
01011     return true;
01012   }
01013 
01014   // version of write that provides default value for varID
01015   template<class T, class M, class C>
01016   bool write(Field<T,Dim,M,C>& f) {
01017     return write(f, 0);
01018   }
01019 
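  // An illustrative write sketch (names are hypothetical).  With
  // numFields == 2, a record is complete only after both fields have been
  // written; the next write after that starts a new record.
  //
  //   DiscField<2> out("dumpdata", "io.config", 2, "Field<double,2>");
  //   for (int step = 0; step < nsteps; ++step) {
  //     out.write(density, 0);              // field 0 of the current record
  //     out.write(pressure, 1);             // field 1 completes the record
  //   }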
01020 
01021   //
01022   // console printing methods
01023   //
01024 
01025   // print out debugging info to the given stream
01026   void printDebug(ostream&);
01027   void printDebug();
01028 
01029 private:
01030   // private typedefs
01031   typedef vmap<NDIndex<Dim>, int>  GlobalIDList_t;
01032 #if defined(IPPL_LONGLONG)
01033   typedef long long  Offset_t; 
01034 #else
01035   typedef long  Offset_t; 
01036 #endif
01037 
01038   //
01039   // meta data (static info for the file which does not change)
01040   //
01041 
01042   // the configuration file mechanism
01043   DiscConfig *Config;
01044   bool ConfigOK;
01045 
01046   // flag which is true if we're writing, false if reading
01047   bool WritingFile;
01048 
01049   // the base name for the output file, and the descriptive type string
01050   string BaseFile;
01051   string TypeString;
01052   string DiscType;
01053 
01054   // dimension of data in file ... this may not match the template dimension.
01055   unsigned int DataDimension;
01056 
01057   //
01058   // dynamic data (varies as records are written or read)
01059   //
01060 
01061   // do we need to start a new record during the next write?  Or, if reading,
01062   // which record have we currently read into our Size and globalID variables?
01063   // If this is < 0, we have not read in any yet.
01064   int NeedStartRecord;
01065 
01066   // the number of fields and records in the file, and the number of Fields
01067   // written to the current record
01068   unsigned int NumFields;
01069   unsigned int NumRecords;
01070   unsigned int NumWritten;
01071 
01072   // this keeps track of where in the .data file writing is occurring
01073   // it is correlated with a given Field and record through the .offset file
01074   Offset_t CurrentOffset;
01075 
01076   // the global domain of the Fields in this DiscField object
01077   NDIndex<Dim> Size;
01078 
01079   // keep track of which Fields have been written to the current record
01080   vector<bool> ValidField;
01081 
01082   // the running tally of vnodes ON THIS SMP for each record, for each file.
01083   // VnodeTally[n] = number of vnodes written out total in prev records.
01084   // NOTE: this is not the number of vnodes TOTAL, it is the number of
01085   // vnodes on all the processors which are on this same SMP machine.
01086   vector<int> *VnodeTally;
01087 
01088   // the number of vnodes ON THIS SMP in each record, for each file.
01089   // NumVnodes[n] = number of vnodes in record n.
01090   // NOTE: this is not the number of vnodes TOTAL, it is the number of
01091   // vnodes on all the processors which are on this same SMP machine.
01092   vector<int> *NumVnodes;
01093 
01094   // store a mapping from an NDIndex to the physical node it resides on.
01095   // These values are stored only for those vnodes on processors which are
01096   // on our same SMP.  This must be remade when the layout changes
01097   // (e.g., every time we write a record for a Field,
01098   // since each Field can have a different layout and each Field's layout
01099   // can change from record to record).
01100   // key: local NDIndex, value: node
01101   GlobalIDList_t globalID;
01102 
01103   // Direct-IO info, required if we are using the DIRECTIO option
01104 #ifdef IPPL_DIRECTIO
01105   struct dioattr dioinfo;
01106   bool openedDirectIO;
01107 #endif
01108 
01109   //
01110   // functions used to build/query information about the processors, etc.
01111   //
01112 
01113   // perform initialization based on the constructor arguments
01114   void initialize(const char* base, const char* config,
01115                   const char* typestr, unsigned int numFields);
01116 
01117   // open a file in the given mode.  If an error occurs, print a message (but
01118   // only if the last argument is true).
01119   // fnm = complete name of file (can include a path)
01120   // mode = open method ("r" == read, "rw" == read/write, etc.)
01121   FILE *open_df_file(const string& fnm, const string& mode);
01122   FILE *open_df_file(const string& fnm, const string& suffix,
01123                      const string& mode);
01124 
01125   // Open a file using direct IO, if possible, otherwise without.  This
01126   // returns a file descriptor, not a FILE handle.  If the file was opened
01127   // using direct-io, also initialize the dioinfo member data.
01128   // The last argument indicates whether to init (create)
01129   // the file or just open for general read-write.
01130   int open_df_file_fd(const string& fnm, const string& suf, int flags);
01131 
01132   // create the data files used to store Field data.  Return success.
01133   bool create_files();
01134 
01135   // return the total number of SMP's in the system
01136   unsigned int numSMPs() const {
01137     return Config->numSMPs();
01138   }
01139 
01140   // return the total number of files which are being read or written
01141   unsigned int fileSMPs() const {
01142     return Config->fileSMPs();
01143   }
01144 
01145   // return the index of our SMP
01146   unsigned int mySMP() const {
01147     return Config->mySMP();
01148   }
01149 
01150   // return the Box0 node for this SMP
01151   unsigned int myBox0() const {
01152     return Config->getSMPBox0();
01153   }
01154 
01155   // return the number of files on our SMP (if no arg given) or the
01156   // given SMP
01157   unsigned int numFiles() const {
01158     return Config->getNumFiles();
01159   }
01160   unsigned int numFiles(unsigned int s) const {
01161     return Config->getNumFiles(s);
01162   }
01163 
01164   // compute how many physical nodes there are on the same SMP as the given
01165   // pnode.
01166   unsigned int pNodesPerSMP(unsigned int node) const {
01167     return Config->pNodesPerSMP(node);
01168   }
01169 
01170   // parse the IO configuration file and store the information.
01171   // arguments = name of config file, and if we're writing a file (true) or
01172   // reading (false)
01173   bool parse_config(const char *, bool);
01174 
01175   // Compute how many elements we should expect to store into the local
01176   // node for the given FieldLayout.  Just loop through the local vnodes
01177   // and accumulate the sizes of the owned domains.  This is modified
01178   // by the second "read domain" argument, which might be a subset of
01179   // the total domain.
01180   int compute_expected(const FieldLayout<Dim> &, const NDIndex<Dim> &);
01181 
01182   // Since the layout can be different every time write
01183   // is called, the globalID container needs to be recalculated.  The total
01184   // domain of the Field should not change, though, just the layout.  Return
01185   // success.
01186   bool make_globalID(FieldLayout<Dim> &);
01187 
01188   // Compute the size of a domain, zero-based, that has a total
01189   // size <= chunkelems and has evenly chunked slices.
01190   NDIndex<Dim> chunk_domain(const NDIndex<Dim> &currblock,
01191                             int chunkelems,
01192                             int &msdim,
01193                             bool iscompressed);
01194 
01195   //
01196   //
01197   // read/write functions for individual components
01198   //
01199 
01200   // read or write .meta data file information.  Return success.
01201   bool write_meta();
01202   bool read_meta();
01203 
01204   // read or write NDIndex values for a file.  Return success.
01205   bool read_NDIndex(FILE *, NDIndex<Dim> &);
01206   bool write_NDIndex(FILE *, const NDIndex<Dim> &);
01207 
01208   // Write .layout data file information.  Return success.
01209   bool write_layout();
01210 
01211   // Read layout info for one file set in the given record.
01212   int read_layout(int record, int sf);
01213 
01215   // Write out the data in a provided brick iterator with given owned
01216   // domain, updating both the offset file and the data file.  The .offset file
01217   // contains info  on where in the main data file each vnode's data can be
01218   // found.  The .offset file's structure looks like this:
01219   //   |--Record n----------------------------|
01220   //   | Field ID a                           |
01221   //   | VNa | VNb | VNc | VNd | .... | VNx   |
01222   //   | Field ID b                           |
01223   //   | VNa | VNb | VNc | VNd | .... | VNx   |
01224   //   | Field ID c                           |
01225   //   | VNa | VNb | VNc | VNd | .... | VNx   |
01226   //   |--------------------------------------|
01227   // where
01228   //   VNn is the data for a single Offset struct.  The sets of Offset structs
01229   //   for the Fields can appear in any order in a record, and the Offsets
01230   //   for the Fields can appear in any order in a record, and the Offset
01231   template<class T>
01232   void write_offset_and_data(FILE *outputOffset, int outputDatafd,
01233                              CompressedBrickIterator<T,Dim> &cbi,
01234                              const NDIndex<Dim> &owned) {
01235     TAU_TYPE_STRING(taustr, "void (FILE *, int, " + CT(cbi) + ", NDIndex )");
01236     TAU_PROFILE("DiscField::write_offset_and_data()", taustr,
01237                 TAU_UTILITY | TAU_FIELD | TAU_IO);
01238 
01239     DFDBG(string dbgmsgname("DF:write_offset_and_data"));
01240     DFDBG(Inform dbgmsg(dbgmsgname.c_str(), INFORM_ALL_NODES));
01241 
01242     // Create an offset output file struct, and initialize what we can.
01243     // We must take care to first zero out the offset struct.
01244     DFOffsetData<Dim,T> offset;
01245     memset(static_cast<void *>(&offset), 0, sizeof(DFOffsetData<Dim,T>));
01246 
01247     domain_to_offset_data(owned, offset);
01248     offset.isCompressed = cbi.IsCompressed();
01249     offset.offset       = CurrentOffset;
01250 
01251     // Set the compressed or uncompressed data in the offset struct
01252     if (offset.isCompressed) {
01253       // For compressed data, we just need to write out the entry to the
01254       // offset file ... that will contain the single compressed value.
01255       offset.compressedVal = *cbi;
01256       DFDBG(dbgmsg << "   Writing compressed vnode " << owned << " w/value = ");
01257       DFDBG(dbgmsg << offset.compressedVal << endl);
01258 
01259     } else {
01260       // This is not compressed, so we must write to the data file.  The
01261       // main question now is whether we can write directly from the
01262       // iterator's own buffer, or must copy the data out in chunks.
01263 
01264       // First, calculate how many elements to write out at a time.  The
01265       // 'chunkbytes' might be adjusted to match the maximum amount of data
01266       // that can be written out in a single direct-io call.  This is
01267       // true only if we are actually using direct-io, of course.
01268 
01269       long elems = owned.size();
01270       long chunkbytes = Ippl::chunkSize();
01271 #ifdef IPPL_DIRECTIO
01272       if (openedDirectIO) {
01273         // For direct-io, make sure we write out blocks whose size is
01274         // a multiple of the minimum io size
01275         PAssert(dioinfo.d_miniosz % sizeof(T) == 0);
01276         if (chunkbytes == 0 || chunkbytes > dioinfo.d_maxiosz)
01277           chunkbytes = dioinfo.d_maxiosz;
01278         else if (chunkbytes < dioinfo.d_miniosz)
01279           chunkbytes = dioinfo.d_miniosz;
01280         else if (chunkbytes % dioinfo.d_miniosz > 0)
01281           chunkbytes -= (chunkbytes % dioinfo.d_miniosz);
01282       }
01283 #endif
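      // (Worked example with hypothetical numbers: for sizeof(T) == 8,
      //  d_miniosz == 512 and d_maxiosz == 1048576, a requested chunk size
      //  of 100000 bytes is rounded down to 99840 bytes (195 * 512), which
      //  gives chunksize = 12480 elements below.)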
01284       long chunksize = chunkbytes / sizeof(T);
01285       if (chunksize < 1 || chunksize > elems)
01286         chunksize = elems;
01287 
01288       DFDBG(dbgmsg << "Total elems = " << elems << endl);
01289       DFDBG(dbgmsg << "Bytes in each chunk = " << chunkbytes);
01290       DFDBG(dbgmsg << " (orig chunkbytes = " << Ippl::chunkSize()<<")"<<endl);
01291       DFDBG(dbgmsg << "Elems in each chunk = " << chunksize << endl);
01292 
01293       // If cbi is iterating over its whole domain, we can just use the block
01294       // there as-is to write out data.  So if cbiptr is set to a non-zero
01295       // address, we'll just use that for the data, otherwise we'll have
01296       // to copy to a buffer.
01297 
01298       T *cbiptr = 0;
01299       if (cbi.whole())
01300         cbiptr = &(*cbi);
01301 
01302       // Loop through the data, writing out chunks.
01303 
01304       DFDBG(dbgmsg << "  Writing vnode " << owned << " in ");
01305       DFDBG(dbgmsg << "chunks of " << chunksize << " elements for ");
01306       DFDBG(dbgmsg << elems << " total elements ..." << endl);
01307 
01308       int needwrite = elems;
01309       while (needwrite > 0) {
01310         // Find out how many elements we'll write this time.
01311         int amount = chunksize;
01312         if (amount > needwrite)
01313           amount = needwrite;
01314 
01315         // Find a buffer size at least large enough for this chunk.  We
01316         // might need a slightly larger buffer if we are using direct-io,
01317         // where data must be written out in blocks with sizes that
01318         // match the device block size.
01319         size_t nbytes = amount*sizeof(T);
01320 #ifdef IPPL_DIRECTIO
01321         if (openedDirectIO) {
01322           size_t ndiff = nbytes % dioinfo.d_miniosz;
01323           if (ndiff > 0)
01324             nbytes += (dioinfo.d_miniosz - ndiff);
01325         }
01326 #endif
01327         DFDBG(dbgmsg << "    Will write total nbytes = " << nbytes);
01328         DFDBG(dbgmsg << ", this has extra " << (nbytes - amount*sizeof(T)));
01329         DFDBG(dbgmsg << " bytes." << endl);
01330 
01331         // Get a pointer to the next data, or copy more data into a buffer
01332         // Initially start with the vnode pointer
01333         T *buffer = cbiptr;
01334 
01335         // If necessary, make a copy of the data
01336         if (buffer == 0) {
01337           DFDBG(dbgmsg << "    Getting copy buffer of total size ");
01338           DFDBG(dbgmsg << nbytes << " bytes ..." << endl);
01339           buffer = static_cast<T *>(DiscBuffer::resize(nbytes));
01340 
01341           // Copy data into this buffer from the iterator.
01342           DFDBG(dbgmsg << "    Copying data into buffer ..." << endl);
01343           T *bufptr = buffer;
01344           T *bufend = buffer + amount;
01345           for ( ; bufptr != bufend; ++bufptr, ++cbi)
01346             new (bufptr) T(*cbi);
01347         }
01348 
01349         // Write the data now
01350         DFDBG(dbgmsg << "    About to write " << nbytes << " to fd = ");
01351         DFDBG(dbgmsg << outputDatafd << endl);
01352 
01353         off_t seekoffset = CurrentOffset * sizeof(T);
01354         bool seekok = true;
01355         Timer wtimer;
01356         wtimer.clear();
01357         wtimer.start();
01358 
01359 #ifdef IPPL_DIRECTIO
01360         long nout = ::pwrite(outputDatafd, buffer, nbytes, seekoffset);
01361 #else
01362         long nout = 0;
01363         if (::lseek(outputDatafd, seekoffset, SEEK_SET) == seekoffset) {
01364           char *wbuf = (char *)buffer;
01365           nout = ::write(outputDatafd, wbuf, nbytes);
01366         } else {
01367           seekok = false;
01368         }
01369 #endif
01370 
01371         wtimer.stop();
01372         DiscBuffer::writetime += wtimer.clock_time();
01373         DiscBuffer::writebytes += nbytes;
01374 
01375         if (!seekok) {
01376           ERRORMSG("Seek error in DiscField::write_offset_and_data" << endl);
01377           ERRORMSG("Could not seek to position " << seekoffset << endl);
01378           Ippl::abort("Exiting due to DiscField error.");
01379         }
01380 
01381         if (nout != nbytes) {
01382           ERRORMSG("Write error in DiscField::write_offset_and_data" << endl);
01383           ERRORMSG("Could not write " << nbytes << " bytes." << endl);
01384           Ippl::abort("Exiting due to DiscField error.");
01385         }
01386 
01387         DFDBG(dbgmsg << "    Finished writing " << nout << " bytes in ");
01388         DFDBG(dbgmsg << wtimer.clock_time() << " seconds." << endl);
01389 
01390         // Update pointers and counts
01391         needwrite -= amount;
01392         if (cbiptr != 0)
01393           cbiptr += amount;
01394 
01395         // update the offset and stats
01396 
01397         CurrentOffset += (nbytes / sizeof(T));
01398         ADDIPPLSTAT(incDiscBytesWritten, nbytes);
01399 
01400         DFDBG(dbgmsg << "    Finishing writing chunk, still " << needwrite);
01401         DFDBG(dbgmsg << " elements to write out from this block." << endl);
01402       }
01403     }
01404 
01405     // write to offset file now
01406     DFDBG(dbgmsg << "Writing offset data to file, iscompressed = ");
01407     DFDBG(dbgmsg << offset.isCompressed);
01408     DFDBG(dbgmsg << ", offset = " << offset.offset << endl);
01409     if (fwrite(&offset, sizeof(DFOffsetData<Dim,T>), 1, outputOffset) != 1) {
01410       ERRORMSG("Write error in DiscField::write_offset_and_data" << endl);
01411       Ippl::abort("Exiting due to DiscField error.");
01412     }
01413   }
01414 
01416   // seek to the beginning of the vnode data for field 'varID' in record
01417   // 'record', for file 'sf'.  If not found, close the file and return false.
01418   // If it is found, read in all the offset records, and return them
01419   // in the provided vector.
01420   template <class T>
01421   bool read_offset(unsigned int varID,
01422                    unsigned int record,
01423                    unsigned int sf,
01424                    vector<DFOffsetData<Dim,T> > &offdata,
01425                    int vnodes) {
01426     TAU_TYPE_STRING(taustr, CT(*this) +  
01427                  " bool (unsigned int, unsigned int, unsigned int)");
01428     TAU_PROFILE("DiscField::read_offset()", taustr, 
01429                 TAU_UTILITY | TAU_FIELD | TAU_IO);
01430 
01431     // Open the offset file
01432     FILE *outputOffset = open_df_file(Config->getFilename(sf),
01433                                       ".offset", string("r"));
01434 
01435     // seek to the start of this record
01436     Offset_t seekpos = NumFields * (record * sizeof(int) +
01437                                     VnodeTally[sf][record] *
01438                                     sizeof(DFOffsetData<Dim,T>));
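    // (Layout note: each record stores, for every field, an int field ID
    //  followed by one DFOffsetData entry per vnode, so the expression
    //  above is the byte offset of the start of record 'record';
    //  VnodeTally[sf][record] appears to hold the cumulative per-field
    //  vnode count of all earlier records in this file set.)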
01439     if (fseek(outputOffset, seekpos, SEEK_SET) != 0) {
01440       ERRORMSG("Error seeking to position " << static_cast<long>(seekpos));
01441       ERRORMSG(" in .offset file " << endl);
01442       Ippl::abort("Exiting due to DiscField error.");
01443       fclose(outputOffset);
01444       return false;
01445     }
01446 
01447     // now keep looking at the Field ID in this record until we find the one
01448     // we want
01449     unsigned int checked = 0;
01450     while (checked < NumFields) {
01451       // read the next field ID number
01452       int rVarID;
01453       if (fread(&rVarID, sizeof(int), 1, outputOffset) != 1) {
01454         ERRORMSG("Error reading field ID from .offset file" << endl);
01455         Ippl::abort("Exiting due to DiscField error.");
01456         fclose(outputOffset);
01457         return false;
01458       }
01459 
01460       // is it what we want?
01461       if (rVarID == varID) {
01462         // Yes it is, so read in the offset record data.  First resize
01463         // the offset data vector.
01464         offdata.resize(vnodes);
01465         int result = fread(&(offdata[0]), sizeof(DFOffsetData<Dim,T>),
01466                            offdata.size(), outputOffset);
01467         if (result != offdata.size()) {
01468           ERRORMSG("Read error in DiscField::read_offset" << endl);
01469           ERRORMSG("Result is " << result << ", should be ");
01470           ERRORMSG(offdata.size() << endl);
01471           ERRORMSG("outputOffset is " << (void *)outputOffset << endl);
01472           Ippl::abort("Exiting due to DiscField error.");
01473           fclose(outputOffset);
01474           return false;
01475         }
01476 
01477         // And return success.
01478         fclose(outputOffset);
01479         return true;
01480       }
01481 
01482       // it was not, so move on to the next one
01483       checked++;
01484       seekpos += (NumVnodes[sf][record] * sizeof(DFOffsetData<Dim,T>) +
01485                   sizeof(int));
01486       if (fseek(outputOffset, seekpos, SEEK_SET) != 0) {
01487         ERRORMSG("Error seeking to position " << static_cast<long>(seekpos));
01488         ERRORMSG(" in .offset file " << endl);
01489         Ippl::abort("Exiting due to DiscField error.");
01490         fclose(outputOffset);
01491         return false;
01492       }
01493     }
01494 
01495     // if we're here, we did not find the Field ID anywhere in the .offset file
01496     ERRORMSG("Could not find data for field " << varID << " of record ");
01497     ERRORMSG(record << " in .offset file." << endl);
01498     Ippl::abort("Exiting due to DiscField error.");
01499     fclose(outputOffset);
01500     return false;
01501   }
01502 
01504   // On all nodes, either send out or receive offset information.
01505   // Some nodes will not get any, and will not have to do any reading.
01506   // But those that do, will read in data for the vnodes they are
01507   // assigned.  'vnodes' will be set to the number of vnodes assigned
01508   // for reading from this node, and  'maxsize' will be set
01509   // to the maximum size of the vnodes in this file, for use in
01510   // preparing the buffer that will be used to read those vnodes.
01511   template <class T>
01512   void distribute_offsets(vector<DFOffsetData<Dim,T> > &offdata,
01513                           int &vnodes, int &maxsize,
01514                           const NDIndex<Dim> &readDomain) {
01515 
01516     DFDBG(Inform dbgmsg("DiscField::distribute_offsets", INFORM_ALL_NODES));
01517 
01518     // Initialize the vnode and maxsize values.
01519     vnodes = 0;
01520     maxsize = 0;
01521 
01522     // If parallel reads are turned off, just box0 nodes will read
01523     if (!Ippl::perSMPParallelIO()) {
01524       DFDBG(dbgmsg << "Per-SMP parallel IO is disabled, so only box0 nodes ");
01525       DFDBG(dbgmsg << "will read data." << endl);
01526 
01527       if (Ippl::myNode() == myBox0())
01528         vnodes = offdata.size();
01529 
01530     } else {
01531 
01532       // Generate the tag to use
01533       int tag = Ippl::Comm->next_tag(DF_OFFSET_TAG, DF_TAG_CYCLE);
01534 
01535       // Nodes that do not have their box0 process on the same SMP should
01536       // not receive anything
01537       if (Config->getNodeSMPIndex(myBox0()) != mySMP()) {
01538         DFDBG(dbgmsg << "Node " << Ippl::myNode() << " has box0 = ");
01539         DFDBG(dbgmsg << myBox0() << " on a different SMP.  Return from ");
01540         DFDBG(dbgmsg << "distribute_offsets with zero vnodes." << endl);
01541         return;
01542       }
01543 
01544       // All box0 nodes will (possibly) send out offset data.  Others will
01545       // receive it, even if it just says "you don't get any vnodes."
01546       if (Ippl::myNode() == myBox0()) {
01547         // How many offset blocks per processor
01548         int pernode = offdata.size() / pNodesPerSMP(myBox0());
01549 
01550         // Extra vnodes we might have to give to others
01551         int extra = offdata.size() % pNodesPerSMP(myBox0());
01552 
01553         DFDBG(dbgmsg << "Assigning " << pernode << " vnodes to each node, ");
01554         DFDBG(dbgmsg << "with " << extra << " extra." << endl);
01555 
01556         // The next vnode to assign; box0 will always get an extra one if
01557         // necessary.
01558         int nextvnode = pernode;
01559         if (extra > 0) {
01560           nextvnode += 1;
01561           extra -= 1;
01562         }
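        // (Example with hypothetical numbers: 10 vnodes spread over 4 nodes
        //  on this SMP gives pernode = 2 and extra = 2, so box0 and the
        //  first other node each get 3 vnodes and the remaining two nodes
        //  get 2 each.)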
01563         DFDBG(dbgmsg << "This box0 node will get the first " << nextvnode);
01564         DFDBG(dbgmsg << " vnodes." << endl);
01565 
01566         // box0 nodes get the first 'nextvnode' vnodes.
01567         vnodes = nextvnode;
01568 
01569         // Loop through the nodes on this SMP; nodes other than box0 will
01570         // get sent a message.
01571         for (int n=0; n < Config->getNumSMPNodes(); ++n) {
01572           int node = Config->getSMPNode(mySMP(), n);
01573           if (node != Ippl::myNode()) {
01574             // How many vnodes to assign?
01575             int numvnodes = pernode;
01576             if (extra > 0) {
01577               numvnodes += 1;
01578               extra -= 1;
01579             }
01580 
01581             // Create a message for this other node, storing:
01582             //   - number of vnodes (int)
01583             //   - list of vnode offset structs (list)
01584             Message *msg = new Message;
01585             msg->put(numvnodes);
01586             if (numvnodes > 0) {
01587               msg->setCopy(false);
01588               msg->setDelete(false);
01589               msg->putmsg(static_cast<void *>(&(offdata[nextvnode])),
01590                           sizeof(DFOffsetData<Dim,T>),
01591                           numvnodes);
01592             }
01593 
01594             // Send this message to the other node
01595             DFDBG(dbgmsg << "Sending offset info for " << numvnodes);
01596             DFDBG(dbgmsg << " vnodes to node " << node << " with tag " << tag);
01597             DFDBG(dbgmsg << ", starting from box0 " << nextvnode << endl);
01598             Ippl::Comm->send(msg, node, tag);
01599 
01600             // Update what the next vnode info to send is
01601             nextvnode += numvnodes;
01602           }
01603         }
01604 
01605         // At the end, we should have no vnodes left to send
01606         if (nextvnode != static_cast<int>(offdata.size()))
01607           Ippl::abort("ERROR: Could not give away all my vnodes!");
01608 
01609       } else {
01610         // On non-box0 nodes, receive offset info
01611         int node = myBox0();
01612         DFDBG(dbgmsg << "Waiting for offset data from node " << node << endl);
01613         Message *msg = Ippl::Comm->receive_block(node, tag);
01614 
01615         // Get the number of vnodes to store here
01616         msg->get(vnodes);
01617         DFDBG(dbgmsg << "Received offset info for " << vnodes << " vnodes ");
01618         DFDBG(dbgmsg << "from node " << node << endl);
01619 
01620         // If this is > 0, copy out vnode info
01621         if (vnodes > 0) {
01622           // resize the vector to make some storage
01623           offdata.resize(vnodes);
01624 
01625           // get offset data from the message
01626           ::getMessage_iter(*msg, &(offdata[0]));
01627         }
01628 
01629         // Done with the message now.
01630         delete msg;
01631       }
01632     }
01633 
01634     // Now, finally, on all nodes we scan the vnodes to find out the maximum
01635     // size of the buffer needed to read this data in.
01636     DFDBG(dbgmsg << "Scanning offset data for maxsize value ..." << endl);
01637     for (int v=0; v < vnodes; ++v) {
01638       // Convert data to NDIndex
01639       NDIndex<Dim> dom;
01640       offset_data_to_domain(offdata[v], dom);
01641       if (dom.touches(readDomain)) {
01642         // Compute chunk block size
01643         int msdim = (Dim-1);    // this will be zero-based
01644         int chunkelems = Ippl::chunkSize() / sizeof(T);
01645         NDIndex<Dim> chunkblock = chunk_domain(dom, chunkelems, msdim,
01646                                                offdata[v].isCompressed);
01647 
01648         DFDBG(dbgmsg << "Checking size of vnode " << v << " on node ");
01649         DFDBG(dbgmsg << Ippl::myNode() << " with domain " << dom);
01650         DFDBG(dbgmsg << ", compressed = " << offdata[v].isCompressed);
01651         DFDBG(dbgmsg << ", chunkblock = " << chunkblock << endl);
01652 
01653         // Now compare the size
01654         int dsize = chunkblock.size();
01655         if (dsize > maxsize) {
01656           maxsize = dsize;
01657           DFDBG(dbgmsg << "  New maxsize = " << maxsize << endl);
01658         }
01659       } else {
01660         DFDBG(dbgmsg << "Skipping vnode " << v << " since it does not ");
01661         DFDBG(dbgmsg << "intersect with readDomain = " << readDomain);
01662         DFDBG(dbgmsg << "; keeping maxsize = " << maxsize << endl);
01663       }
01664     }
01665   }
01666 
01668   // read the data for a block of values of type T from the given data file.
01669   // Return success of read.
01670   // The size and seekpos values are in bytes.
01671   template <class T>
01672   bool read_data(int outputDatafd, T* buffer, Offset_t readsize,
01673                  Offset_t seekpos) {
01674     TAU_TYPE_STRING(taustr, CT(*this) + " bool (int, " + CT(buffer)
01675                     + ", Offset_t, Offset_t )" );
01676     TAU_PROFILE("DiscField::read_data()", taustr, 
01677                 TAU_UTILITY | TAU_FIELD | TAU_IO);
01678 
01679     DFDBG(Inform dbgmsg("DiscField::read_data", INFORM_ALL_NODES));
01680     DFDBG(dbgmsg << "readsize=" << readsize << ", seekpos=" << seekpos);
01681     DFDBG(dbgmsg <<", sizeof(T)=" << sizeof(T) << endl);
01682 
01683     PAssert(seekpos >= 0);
01684     PAssert(readsize > 0);
01685     PAssert(buffer != 0);
01686     PAssert(outputDatafd >= 0);
01687     PAssert(readsize % sizeof(T) == 0);
01688 
01689 #ifdef IPPL_DIRECTIO
01690     if (openedDirectIO) {
01691       PAssert(readsize % dioinfo.d_miniosz == 0);
01692       PAssert(seekpos  % dioinfo.d_miniosz == 0);
01693     }
01694 #endif
01695 
01696     // Now read the block of data
01697     off_t seekoffset = seekpos;
01698     size_t nbytes = readsize;
01699     bool seekok = true;
01700 
01701     Timer rtimer;
01702     rtimer.clear();
01703     rtimer.start();
01704 
01705 #ifdef IPPL_DIRECTIO
01706     long nout = ::pread(outputDatafd, buffer, nbytes, seekoffset);
01707 #else
01708     long nout = 0;
01709     if (::lseek(outputDatafd, seekoffset, SEEK_SET) == seekoffset) {
01710       char *rbuf = (char *)buffer;
01711       nout = ::read(outputDatafd, rbuf, nbytes);
01712     } else {
01713       seekok = false;
01714     }
01715 #endif
01716 
01717     rtimer.stop();
01718     DiscBuffer::readtime += rtimer.clock_time();
01719     DiscBuffer::readbytes += readsize;
01720 
01721     if (!seekok) {
01722       ERRORMSG("Seek error in DiscField::read_data" << endl);
01723       ERRORMSG("Could not seek to position " << seekoffset << endl);
01724       Ippl::abort("Exiting due to DiscField error.");
01725     }
01726 
01727     if (nout != nbytes) {
01728       ERRORMSG("Read error in DiscField::read_data" << endl);
01729       ERRORMSG("Could not read " << nbytes << " bytes." << endl);
01730       Ippl::abort("Exiting due to DiscField error.");
01731     }
01732 
01733     DFDBG(size_t nelem = readsize / sizeof(T));
01734     DFDBG(dbgmsg << "Read in block of " << nelem << " elements in ");
01735     DFDBG(dbgmsg << rtimer.clock_time() << " seconds:" << endl);
01736     DFDBG(for (unsigned int i=0; i < nelem && i < 10; ++i))
01737       DFDBG(  dbgmsg << "  buffer[" << i << "] = " << buffer[i] << endl);
01738       
01739     ADDIPPLSTAT(incDiscBytesRead, readsize);
01740     return true;
01741   }
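  // (Usage sketch with hypothetical values: the offsets recorded in the
  //  .offset file are element counts, so to read a vnode of 12480 doubles
  //  whose DFOffsetData entry holds element offset 'off' from descriptor
  //  'fd', one would call
  //
  //    read_data(fd, buf, 12480 * sizeof(double), off * sizeof(double));
  //
  //  since read_data expects both the read size and the seek position in
  //  bytes.)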
01742 
01744   // Convert data in an offset data struct to an NDIndex, and return it
01745   // in the second argument.
01746   template<class T>
01747   void offset_data_to_domain(DFOffsetData<Dim,T> &offdata,
01748                              NDIndex<Dim> &domain) {
01749     int *dptr = offdata.vnodedata + 1;
01750     for (unsigned int i=0; i < Dim; ++i) {
01751       int first = *dptr;
01752       int stride = *(dptr + 1);
01753       int length = *(dptr + 2);
01754       domain[i] = Index(first, first + (length - 1)*stride, stride);
01755       dptr += 6;
01756     }
01757   }
01758 
01760   // Convert domain data to offset I/O struct data
01761   template<class T>
01762   void domain_to_offset_data(const NDIndex<Dim> &domain,
01763                              DFOffsetData<Dim,T> &offdata) {
01764     int *dptr = offdata.vnodedata;
01765     for (unsigned int i=0; i < Dim; ++i) {
01766       *dptr++ = 0;
01767       *dptr++ = domain[i].first();
01768       *dptr++ = domain[i].stride();
01769       *dptr++ = domain[i].length();
01770       *dptr++ = 0;
01771       *dptr++ = 0;
01772     }
01773   }
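  // (Round-trip example with a hypothetical 2D domain [0:15:1][8:23:1]:
  //  domain_to_offset_data above fills vnodedata with
  //    { 0, 0, 1, 16, 0, 0,   0, 8, 1, 16, 0, 0 }
  //  i.e. per dimension {unused, first, stride, length, unused, unused},
  //  and offset_data_to_domain reconstructs Index(0,15,1) and Index(8,23,1)
  //  from the first/stride/length triples.)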
01774 
01775   //
01776   // don't allow copy or assign ... these are declared but never defined;
01777   // if something tries to use them it will generate a missing-symbol error
01778   //
01779 
01780   DiscField(const DiscField<Dim>&);
01781   DiscField& operator=(const DiscField<Dim>&);
01782 };
01783 
01784 #include "Utility/DiscField.cpp"
01785 
01786 #endif // DISC_FIELD_H
01787 
01788 /***************************************************************************
01789  * $RCSfile: DiscField.h,v $   $Author: adelmann $
01790  * $Revision: 1.1.1.1 $   $Date: 2003/01/23 07:40:33 $
01791  * IPPL_VERSION_ID: $Id: DiscField.h,v 1.1.1.1 2003/01/23 07:40:33 adelmann Exp $ 
01792  ***************************************************************************/
