OPAL (Object Oriented Parallel Accelerator Library)  2.2.0
OPAL
DiscField.h
Go to the documentation of this file.
1 // -*- C++ -*-
2 /***************************************************************************
3  *
4  * The IPPL Framework
5  *
6  *
7  * Visit http://people.web.psi.ch/adelmann/ for more details
8  *
9  ***************************************************************************/
10 
11 #ifndef DISC_FIELD_H
12 #define DISC_FIELD_H
13 
14 // debugging macros
15 #ifdef IPPL_PRINTDEBUG
16 #define DFDBG(x) x
17 #define CDFDBG(x) x
18 #else
19 #define DFDBG(x)
20 #define CDFDBG(x)
21 #endif
22 
23 // include files
24 #include "Index/NDIndex.h"
25 #include "Field/BrickExpression.h"
26 #include "Field/Field.h"
27 #include "Utility/DiscBuffer.h"
28 #include "Utility/DiscConfig.h"
29 #include "Utility/Inform.h"
30 #include "Utility/vmap.h"
31 #include "Utility/IpplTimings.h"
32 #include <cstdio>
33 #include <cstdlib>
34 #include <unistd.h>
35 #include <fcntl.h>
36 #include <sys/stat.h>
37 #include <sys/types.h>
38 
39 #include <vector>
40 #include <iostream>
41 
42 // forward declarations
43 template<unsigned Dim, class T> class UniformCartesian;
44 template<class T, unsigned Dim, class M, class C> class Field;
45 template<unsigned Dim> class FieldLayout;
46 
47 
48 // This helper class is used to represent a record for I/O of the .offset file.
49 // It is only used for reads and writes. See the notes below for
50 // the reason the vnodedata is a set of ints instead of an NDIndex.
template <unsigned Dim, class T>
struct DFOffsetData {
  int       vnodedata[6*Dim];   // packed vnode domain info (6 ints per dim)
  bool      isCompressed;       // true if this vnode block was written compressed
  long long offset;             // element offset of the block in the .data file
  T         compressedVal;      // the single block value, valid when isCompressed
};
58 
59 
60 template <unsigned Dim>
61 class DiscField {
62 
63 public:
64  // Constructor: make a DiscField for writing only
65  // fname = name of file (without extensions
66  // config = name of configuration file
67  // numFields = number of Fields which will be written to the file
68  // typestr = string describing the 'type' of the Field to be written (this
69  // is ignored if the Field is being read). The string should be
70  // the same as the statement used to declare the Field object
71  // e.g., for a field of the form Field<double,2> the string
72  // should be "Field<double,2>". The string should be such that
73  // if read later into a variable F, you can use the string in
74  // a source code line as 'F A' to create an instance of the
75  // same type of data as is stored in the file.
76  DiscField(const char* fname, const char* config, unsigned int numFields,
77  const char* typestr = 0);
78 
79  // Constructor: same as above, but without a config file specified. The
80  // default config file entry that will be used is "* .", which means, for
81  // each SMP machine, assume the directory to put data files in is "."
82  DiscField(const char* fname, unsigned int numFields,
83  const char* typestr = 0);
84 
85  // Constructor: make a DiscField for reading only.
86  // fname = name of file (without extensions
87  // config = name of configuration file
88  DiscField(const char* fname, const char* config);
89 
90  // Constructor: same as above, but without a config file specified. The
91  // default config file entry that will be used is "* .", which means, for
92  // each SMP machine, assume the directory to put data files in is "."
93  DiscField(const char* fname);
94 
95  // Destructor.
96  ~DiscField();
97 
98  //
99  // accessor functions
100  //
101 
102  // Obtain all the information about the file, including the number
103  // of records, fields, and number of vnodes stored in each record.
104  void query(int& numRecords, int& numFields, std::vector<int>& size) const;
105 
106  // Query for the number of records (e.g., timesteps) in the file.
107  unsigned int get_NumRecords() const { return NumRecords; }
108 
109  // Query for the number of Fields stored in the file.
110  unsigned int get_NumFields() const { return NumFields; }
111 
112  // Query for the total domain of the system.
113  NDIndex<Dim> get_Domain() const { return Size; }
114 
115  // Query for the dimension of the data in the file. This is useful
116  // mainly if you are checking for dimension by contructing a DiscField
117  // and then trying to check it's dimension later. If the dimension is
118  // not correctly matched with the Dim template parameter, you will get
119  // an error if you try to read or write.
120  unsigned int get_Dimension() const { return DataDimension; }
121 
122  // Query for the type string
123  const char *get_TypeString() { return TypeString.c_str(); }
124 
125  // Query for the disctype string
126  const char *get_DiscType() {
127  if (DiscType.length() > 0)
128  return DiscType.c_str();
129  return 0;
130  }
131 
132  //
133  // read/write methods
134  //
135 
136  // read the selected record in the file into the given Field object.
137  // readDomain = the portion of the field on disk that should actually be
138  // read in, and placed in the same location in the
139  // provided field. All of readDomain must be contained
140  // within the data on disk, and in the field in memory,
141  // although the domain on disk and in memory do not themselves
142  // have to be the same.
143  // varID = index for which field is being read ... this should be from
144  // 0 ... (numFields-1), for the case where the file contains
145  // more than one Field.
146  // record = which record to read. DiscField does not keep a 'current
147  // file position' pointer, instead you explicitly request which
148  // record you wish to read.
149  // Return success of operation.
150 
151  template <class T, class M, class C>
152  bool read(Field<T,Dim,M,C>& f, const NDIndex<Dim> &readDomain,
153  unsigned int varID, unsigned int record) {
154 
155  // sanity checking for input arguments and state of this object
156  bool canread = false;
157  if (!ConfigOK) {
158  ERRORMSG("Cannot read in DiscField::read - config file error." << endl);
159  } else if (DataDimension != Dim) {
160  ERRORMSG("Bad dimension "<< DataDimension <<" in DiscField::read"<<endl);
161  ERRORMSG("(" << DataDimension << " != " << Dim << ")" << endl);
162  } else if (WritingFile) {
163  ERRORMSG("DiscField::read called for DiscField opened for write."<<endl);
164  } else if (varID >= NumFields) {
165  ERRORMSG(varID << " is a bad Field ID in DiscField::read." << endl);
166  ERRORMSG("(" << varID << " is >= " << NumFields << ")" << endl);
167  } else if (record >= NumRecords) {
168  ERRORMSG(record << " is a bad record number in DiscField::read."<<endl);
169  ERRORMSG("(" << record << " is >= " << NumRecords << ")" << endl);
170  } else if (!(f.getLayout().getDomain().contains(readDomain))) {
171  ERRORMSG("DiscField::read - the total field domain ");
172  ERRORMSG(f.getLayout().getDomain() << " must contain the requested ");
173  ERRORMSG("read domain " << readDomain << endl);
174  } else if (!(get_Domain().contains(readDomain))) {
175  ERRORMSG("DiscField::read - the DiscField domain ");
176  ERRORMSG(get_Domain() << " must contain the requested ");
177  ERRORMSG("read domain " << readDomain << endl);
178  } else {
179  canread = true;
180  }
181 
182  // If there was an error, we will abort
183  if (!canread) {
184  Ippl::abort("Exiting due to DiscField error.");
185  return false;
186  }
187 
188  // A typedef used later
189  typedef typename LField<T,Dim>::iterator LFI;
191 
192  // Start timer for just the read portion
193  static IpplTimings::TimerRef readtimer =
194  IpplTimings::getTimer("DiscField read");
195  IpplTimings::startTimer(readtimer);
196 
197  DFDBG(std::string dbgmsgname("DF:read:"));
198  DFDBG(dbgmsgname += Config->getConfigFile());
199  DFDBG(Inform dbgmsg(dbgmsgname.c_str(), INFORM_ALL_NODES));
200  DFDBG(dbgmsg << "At start of read: Field layout=" << f.getLayout()<<endl);
201  DFDBG(dbgmsg << "At start of read: Read domain =" << readDomain << endl);
202 
203  // Get a new tag value for this read operation, used for all sends
204  // to other nodes with data.
206 
207  // At the start of a new record, determine how many elements of the
208  // Field should be stored into this node's vnodes.
209  int expected = compute_expected(f.getLayout(), readDomain);
210 
211  // On all nodes, loop through all the file sets, and:
212  // 1. box0: Get the number of vnodes stored there, from the layout file
213  // 2. box0: Get offset information for all the vnodes, and
214  // assign other nodes on the same SMP selected vnodes to read.
215  // 2. For each vnode assigned to a processor to read:
216  // - read data (if necessary)
217  // - distribute data to interested parties, or yourself
218  // On all nodes, when you get some data from another node:
219  // - copy it into the relevant vnode
220  // - decrement your expected value.
221  // When expected hits zero, we're done with reading on that node.
222 
223  DFDBG(dbgmsg << "Reading data from " << numFiles()<<" filesets:"<<endl);
224 
225  for (unsigned int sf=0; sf < numFiles(); ++sf) {
226 
227  // Create the data file handle, but don't yet open it ... only open
228  // it if we need to later (if we run into any uncompressed blocks).
229  int outputDatafd = (-1);
230 
231  // offset data read in from file or obtained from the box0 node
232  std::vector<DFOffsetData<Dim,T> > offdata;
233 
234  // the number of vnodes we'll be working on on this node
235  int vnodes = 0;
236 
237  // the maximum number of elements we'll need to have buffer space for
238  int maxsize = 0;
239 
240  // on box0 nodes, read in the layout and offest info
241  if ((unsigned int) Ippl::myNode() == myBox0()) {
242 
243  // Get the number of vnodes in this file.
244  vnodes = read_layout(record, sf);
245 
246  // Get the offset data for this field and record.
247  read_offset(varID, record, sf, offdata, vnodes);
248  }
249 
250  // On all nodes, either send out or receive in offset information.
251  // Some nodes will not get any, and will not have to do any reading.
252  // But those that do, will read in data for the vnodes they are
253  // assigned. 'vnodes' will be set to the number of vnodes assigned
254  // for reading from this node, and 'maxsize' will be set
255  // to the maximum size of the vnodes in this file, for use in
256  // preparing the buffer that will be used to read those vnodes.
257  distribute_offsets(offdata, vnodes, maxsize, readDomain);
258 
259  DFDBG(dbgmsg << "After reading and distributing offset data: ");
260  DFDBG(dbgmsg << "Node " << Ippl::myNode() << " will read ");
261  DFDBG(dbgmsg << vnodes << " vnodes, with maxsize = " << maxsize);
262  DFDBG(dbgmsg << endl);
263 
264  // Loop through all the vnodes now; they will appear in any
265  // order, which is fine, we just read them and and see where they
266  // go. The info in the offset struct includes the domain for that
267  // block and whether it was written compressed or not.
268 
269  for (int vn=0; vn < vnodes; ++vn) {
270  // Create an NDIndex object storing the vnode domain for this vnode.
271  NDIndex<Dim> vnodeblock;
272  offset_data_to_domain(offdata[vn], vnodeblock);
273 
274  // If there is no intersection of this vnode and the read-domain,
275  // we can just skip it entirely.
276  if (! vnodeblock.touches(readDomain)) {
277  DFDBG(dbgmsg << "Skipping vnode " << vn << ", no intersection ");
278  DFDBG(dbgmsg << "between " << vnodeblock << " and ");
279  DFDBG(dbgmsg << readDomain << endl);
280 
281  continue;
282  }
283 
284  // Compute the size of a block to add to the base of this block,
285  // based on the chunk size. If the data is compressed, this won't
286  // matter.
287  int msdim = (Dim-1); // this will be zero-based
288  int chunkelems = Ippl::chunkSize() / sizeof(T);
289  NDIndex<Dim> chunkblock = chunk_domain(vnodeblock, chunkelems, msdim,
290  offdata[vn].isCompressed);
291 
292  DFDBG(dbgmsg << "Reading in chunks in blocks of size " << chunkblock);
293  DFDBG(dbgmsg << " and max buffer elems = " << maxsize);
294  DFDBG(dbgmsg << " in vnode " << vn << " with total domain ");
295  DFDBG(dbgmsg << vnodeblock << endl);
296 
297  // Initialize the NDIndex we'll use to indicate what portion of the
298  // domain we're reading and processing.
299  NDIndex<Dim> currblock = vnodeblock;
300  currblock[msdim] = Index(vnodeblock[msdim].first() - 1,
301  vnodeblock[msdim].first() - 1);
302  for (unsigned int md = (msdim+1); md < Dim; ++md)
303  currblock[md] = Index(vnodeblock[md].first(),vnodeblock[md].first());
304 
305  // Initialize the offset value for this vnode. The seek position
306  // is stored as a byte offset, although it is read from disk as
307  // a number of elements offset from the beginning.
308  Offset_t seekpos = (-1);
309 
310  // Loop through the chunks, reading and processing each one.
311  int unread = vnodeblock.size();
312  while (unread > 0) {
313  // Compute the domain of the chunk we'll work on now, and store
314  // this in currblock.
315 
316  // First determine if we're at the end of our current incr dimension,
317  // and determine new bounds
318  bool incrhigher=(currblock[msdim].last()==vnodeblock[msdim].last());
319  int a = (incrhigher ?
320  vnodeblock[msdim].first() :
321  currblock[msdim].last() + 1);
322  int b = a + chunkblock[msdim].length() - 1;
323  if (b > vnodeblock[msdim].last())
324  b = vnodeblock[msdim].last();
325 
326  // Increment this dimension
327  currblock[msdim] = Index(a, b);
328 
329  // Increment higher dimensions, if necessary
330  if (incrhigher) {
331  for (unsigned int cd = (msdim+1); cd < Dim; ++cd) {
332  if (currblock[cd].last() < vnodeblock[cd].last()) {
333  // This dim is not at end, so just inc by 1
334  currblock[cd] = Index(currblock[cd].first() + 1,
335  currblock[cd].last() + 1);
336  break;
337  } else {
338  // Move this dimension back to start, and go on to next one
339  currblock[cd] = Index(vnodeblock[cd].first(),
340  vnodeblock[cd].first());
341  }
342  }
343  }
344 
345  // Decrement our unread count, since we'll process this block
346  // either by actually reading it or getting its compressed value
347  // from the offset file, if we have to read it at all.
348  int nelems = currblock.size();
349  unread -= nelems;
350 
351  DFDBG(dbgmsg << "Starting processing of chunk with domain ");
352  DFDBG(dbgmsg << currblock << " in vnode " << vn);
353  DFDBG(dbgmsg << " at offset = " << offdata[vn].offset << endl);
354  DFDBG(dbgmsg << "After this, still have " << unread << " unread.");
355  DFDBG(dbgmsg << endl);
356 
357  // Set the seek position now, if necessary
358  if (!offdata[vn].isCompressed && seekpos < 0) {
359  seekpos = offdata[vn].offset * sizeof(T);
360  DFDBG(dbgmsg << "Set seek position = " << seekpos << endl);
361  }
362 
363  // At this point, we might be able to skip a lot of work if this
364  // particular chunk does not intersect with our read domain any.
365  if (! currblock.touches(readDomain)) {
366  DFDBG(dbgmsg << "Skipping sub-vnode chunk " << currblock);
367  DFDBG(dbgmsg << ", no intersection with readDomain ");
368  DFDBG(dbgmsg << readDomain << endl);
369 
370  // Before we skip the rest, we must update the offset
371  Offset_t readbytes = nelems * sizeof(T);
372  seekpos += readbytes;
373  DFDBG(dbgmsg << "Updating offset at end of skip operation to ");
374  DFDBG(dbgmsg << seekpos << endl);
375 
376  // Then, we're done with this chunk, move on to the next.
377  continue;
378  }
379 
380  // Put the intersecting domain in readDomainSection.
381  NDIndex<Dim> readDomainSection = currblock.intersect(readDomain);
382  DFDBG(dbgmsg << "Intersection of chunk " << currblock);
383  DFDBG(dbgmsg << " and read domain " << readDomain << " = ");
384  DFDBG(dbgmsg << readDomainSection << endl);
385 
386  // if it is not compressed, read in the data. If it is,
387  // just keep the buffer pointer at zero.
388  T *buffer = 0;
389  if (!offdata[vn].isCompressed) {
390  // If we have not yet done so, open the data file.
391  if (outputDatafd < 0) {
392  DFDBG(dbgmsg << "Opening input data file ...");
393  DFDBG(dbgmsg << endl);
394  outputDatafd = open_df_file_fd(Config->getFilename(sf), ".data",
395  O_RDONLY);
396  }
397 
398  // Resize the read buffer in case it is not large enough.
399  // We use the max size for all the vnodes here, to avoid doing
400  // this more than once per file set. This also returns the
401  // pointer to the buffer to use, as a void *, which we cast
402  // to the proper type. For direct-io, we might need to make
403  // this a little bigger to match the device block size.
404 
405  long nbytes = maxsize*sizeof(T);
406 #ifdef IPPL_DIRECTIO
407  if (openedDirectIO) {
408  nbytes += dioinfo.d_miniosz; // extra in case offset is wrong
409  size_t ndiff = nbytes % dioinfo.d_miniosz;
410  if (ndiff > 0)
411  nbytes += (dioinfo.d_miniosz - ndiff);
412  }
413 #endif
414  buffer = static_cast<T *>(DiscBuffer::resize(nbytes));
415  DFDBG(dbgmsg << "On box0: resized buf to " << DiscBuffer::size());
416  DFDBG(dbgmsg << " bytes ... current block will need ");
417  DFDBG(dbgmsg << nelems * sizeof(T) << " bytes." << endl);
418 
419  // Create some initial values for what and where to read.
420  // We might adjust these if we're doing direct-io.
421  T * readbuffer = buffer;
422  Offset_t readbytes = nelems * sizeof(T);
423  Offset_t readoffset = seekpos;
424 
425  // seekpos was only used to set readoffset, so we can update
426  // seekpos now. Add in the extra amount we'll be reading.
427  seekpos += readbytes;
428 
429 #ifdef IPPL_DIRECTIO
430  // If we're doing direct-io, we will need to adjust the start
431  // and end of our buffers and offsets ...
432  if (openedDirectIO) {
433  // Find out how much our offset is off from multipple of
434  // block size, and move it back by the difference. Then we
435  // will read in extra data and our storage will be offset to
436  // start at the place where the new data is actually located.
437 
438  PAssert_GE(readoffset, 0);
439  Offset_t extra = readoffset % dioinfo.d_miniosz;
440  readoffset -= extra;
441  DFDBG(dbgmsg << "DIO: Moving read offset back by " << extra);
442  DFDBG(dbgmsg << " bytes, to readoffset = " << readoffset<<endl);
443 
444  // Compute the number of elements to read. We might also need
445  // to extend the read size to get the total read size to be a
446  // multipple of the device block size.
447 
448  readbytes += extra;
449  size_t ndiff = readbytes % dioinfo.d_miniosz;
450  if (ndiff > 0)
451  readbytes += (dioinfo.d_miniosz - ndiff);
452  PAssert_GE(nbytes, readbytes);
453  DFDBG(dbgmsg << "DIO: Adjusted readbytes from ");
454  DFDBG(dbgmsg << (nelems * sizeof(T)) << " to " << readbytes);
455  DFDBG(dbgmsg << endl);
456 
457  // Point the buffer at the real first element, adjusted to
458  // counteract our moving the offset location back to a
459  // block-size multipple.
460  PAssert_EQ(extra % sizeof(T), 0);
461  buffer += (extra / sizeof(T));
462  DFDBG(dbgmsg << "DIO: Adjusted buffer pointer forward ");
463  DFDBG(dbgmsg << (extra / sizeof(T)) << " elements." << endl);
464  }
465 #endif
466 
467  // Read data in a way that might do direct-io
468  DFDBG(dbgmsg << "Calling read_data with readbytes=" << readbytes);
469  DFDBG(dbgmsg << ", readoffset=" << readoffset << endl);
470  read_data(outputDatafd, readbuffer, readbytes, readoffset);
471  }
472 
473  // we have the data block now; find out where the data should
474  // go, and either send the destination node a message, or copy
475  // the data into the destination lfield.
476 
477  DFDBG(dbgmsg << "Finding destination nodes for block with ");
478  DFDBG(dbgmsg << "domain = " << currblock << ", compressed = ");
479  DFDBG(dbgmsg << offdata[vn].isCompressed << " ..." << endl);
480  DFDBG(dbgmsg << "We will use the portion " << readDomainSection);
481  DFDBG(dbgmsg << " from this block." << endl);
482 
483  // Set up to loop over the touching remote vnodes, and send out
484  // messages
486  // int remaining = nelems;
487  int remaining = readDomainSection.size();
488 
489  // compute what remote vnodes touch this block's domain, and
490  // iterate over them.
491  // typename FieldLayout<Dim>::touch_range_dv
492  // range(f.getLayout().touch_range_rdv(currblock));
494  range(f.getLayout().touch_range_rdv(readDomainSection));
495  for (rv_i = range.first; rv_i != range.second; ++rv_i) {
496  // Compute the intersection of our domain and the remote vnode
497  // NDIndex<Dim> ri = currblock.intersect((*rv_i).first);
498  NDIndex<Dim> ri = readDomainSection.intersect((*rv_i).first);
499  DFDBG(dbgmsg << "Block intersects with remote domain ");
500  DFDBG(dbgmsg << (*rv_i).first << " = " << ri << endl);
501 
502  // Find out who will be sending this data
503  int rnode = (*rv_i).second->getNode();
504 
505  // Send this data to that remote node, by preparing a
506  // CompressedBrickIterator and putting in the proper data.
507  Message *msg = new Message;
508  ri.putMessage(*msg);
509  LFI cbi(buffer, ri, currblock, offdata[vn].compressedVal);
510  cbi.TryCompress();
511  cbi.putMessage(*msg, false); // 'false' = avoid copy if possible
512  DFDBG(dbgmsg << "Sending subblock " << ri << " from block ");
513  DFDBG(dbgmsg << currblock << " to node " << rnode);
514  DFDBG(dbgmsg << " with tag " << tag << endl);
515  Ippl::Comm->send(msg, rnode, tag);
516 
517  // Decrement the remaining count
518  remaining -= ri.size();
519  DFDBG(dbgmsg << "After send, remaining = " << remaining << endl);
520  }
521 
522  // loop over touching local vnodes, and copy in data, if there
523  // is anything left
524  typename BareField<T,Dim>::iterator_if lf_i = f.begin_if();
525  for (; remaining > 0 && lf_i != f.end_if(); ++lf_i) {
526  // Get the current LField and LField domain, and make an alias
527  // for the domain of the block we've read from disk
528  LField<T,Dim> &lf = *(*lf_i).second;
529  const NDIndex<Dim>& lo = lf.getOwned();
530  // const NDIndex<Dim>& ro = currblock;
531  const NDIndex<Dim>& ro = readDomainSection;
532 
533  // See if it touches the domain of the recently read block.
534  if (lo.touches(ro)) {
535  // Find the intersection.
536  NDIndex<Dim> ri = lo.intersect(ro);
537 
538  DFDBG(dbgmsg << "Doing local copy of domain " << ri);
539  DFDBG(dbgmsg << " into LField with domain " << lo << endl);
540 
541  // If these are compressed we might not have to do any work.
542  if (lf.IsCompressed() &&
543  offdata[vn].isCompressed &&
544  ro.contains(lo)) {
545  DFDBG(dbgmsg << " Doing comp-comp assign." << endl);
546  PETE_apply(OpAssign(),*lf.begin(),offdata[vn].compressedVal);
547  } else {
548  // Build an iterator for the read-data block
549  // LFI rhs_i(buffer, ri, ro, offdata[vn].compressedVal);
550  LFI rhs_i(buffer, ri, currblock, offdata[vn].compressedVal);
551 
552  // Could we compress that rhs iterator?
553  if (rhs_i.CanCompress(*rhs_i) && f.compressible() &&
554  ri.contains(lf.getAllocated())) {
555  // Compress the whole LField to the value on the right
556  DFDBG(dbgmsg << " Doing lfield-comp assign." << endl);
557  lf.Compress(*rhs_i);
558  } else { // Assigning only part of LField on the left
559  // Must uncompress lhs, if not already uncompressed
560  lf.Uncompress(true);
561 
562  // Get the iterator for it.
563  LFI lhs_i = lf.begin(ri);
564 
565  // And do the assignment.
566  DFDBG(dbgmsg << " Doing uncomp-uncomp assign." << endl);
567  Expr_t(lhs_i,rhs_i).apply();
568  }
569  }
570 
571  // Decrement the expected count and the remaining count.
572  // Remaining is how many cells are left of the current block.
573  // Expected is how many cells this node expects to get copied
574  // into its blocks.
575  int bsize = ri.size();
576  remaining -= bsize;
577  expected -= bsize;
578 
579  DFDBG(dbgmsg << "Finished copying in local data, now ");
580  DFDBG(dbgmsg << "expecting " << expected << " elems with ");
581  DFDBG(dbgmsg << remaining << " elems remaining." << endl);
582  }
583  }
584 
585  // If we're here and still have remaining elements, we're screwed.
586  if (remaining > 0)
587  Ippl::abort("remaining > 0 at end of box0 vnode read!!!");
588  }
589  }
590 
591  // Close the data file now
592 
593  if (outputDatafd >= 0)
594  close(outputDatafd);
595  }
596 
597  // On all nodes, now, keep receiving messages until our expected count
598  // goes to zero.
599  while (expected > 0) {
600  // Receive the next message from any node with the current read tag
601  int node = COMM_ANY_TAG;
602  DFDBG(dbgmsg << "Waiting for DF data, still expecting " << expected);
603  DFDBG(dbgmsg << " elements ..." << endl);
604  Message *msg = Ippl::Comm->receive_block(node, tag);
605 
606  // Extract the domain from the message
607  NDIndex<Dim> ro;
608  ro.getMessage(*msg);
609  DFDBG(dbgmsg << "Received DF data from node " << node << " with tag ");
610  DFDBG(dbgmsg << tag << ", with domain = " << ro << endl);
611 
612  // Extract the data from the message
613  T rhs_compressed_data;
614  LFI rhs_i(rhs_compressed_data);
615  rhs_i.getMessage(*msg);
616 
617  // Find what local LField contains this domain
618  typename BareField<T,Dim>::iterator_if lf_i = f.begin_if();
619  bool foundlf = false;
620  for (; lf_i != f.end_if(); ++lf_i) {
621  // Get the current LField and LField domain
622  LField<T,Dim> &lf = *(*lf_i).second;
623  const NDIndex<Dim>& lo = lf.getOwned();
624 
625  // See if it contains the domain of the recently received block.
626  // If so, assign the block to this LField
627  if (lo.contains(ro)) {
628  DFDBG(dbgmsg << "Found local lfield with domain " << lo);
629  DFDBG(dbgmsg << " that contains received domain " << ro << endl);
630 
631  // Check and see if we really have to do this.
632  if ( !(rhs_i.IsCompressed() && lf.IsCompressed() &&
633  (*rhs_i == *lf.begin())) ) {
634  // Yep. gotta do it, since something is uncompressed or
635  // the values are different.
636 
637  // Uncompress the LField first, if necessary. It's necessary
638  // if the received block size is smaller than the LField's.
639  lf.Uncompress(!ro.contains(lo));
640 
641  DFDBG(dbgmsg << "Assigning value: lhs compressed = ");
642  DFDBG(dbgmsg << lf.IsCompressed() << ", rhs compressed = ");
643  DFDBG(dbgmsg << rhs_i.IsCompressed() << endl);
644 
645  // Make an iterator over the received block's portion of the
646  // LField
647  LFI lhs_i = lf.begin(ro);
648 
649  // Do the assignment.
650  Expr_t(lhs_i,rhs_i).apply();
651  } else {
652  DFDBG(dbgmsg << "Local LField is compressed and has same value ");
653  DFDBG(dbgmsg << "as received data." << endl);
654  }
655 
656  // Update our expected value
657  expected -= ro.size();
658 
659  // Indicate we're done, since the block we received is
660  // guaranteed to be within only one of our LFields.
661  foundlf = true;
662  break;
663  }
664  }
665 
666  // Make sure we found what vnode this message is for; if we don't
667  // we're screwed
668  if (!foundlf) {
669  ERRORMSG("Did not find destination local vnode for received domain ");
670  ERRORMSG(ro << " from node " << node << endl);
671  Ippl::abort("DID NOT FIND DESINATION LOCAL VNODE IN DISCFIELD::READ");
672  }
673 
674  // Now we are done with the message
675  delete msg;
676  }
677 
678  // We're all done reading, so clean up
679  IpplTimings::stopTimer(readtimer);
680 
681  // This is just like an assign, so set dirty flags, fill guard cells,
682  // and try to compress the result.
683 
684  DFDBG(dbgmsg << "Finished with read. Updating field GC's." << endl);
685  f.setDirtyFlag();
687  f.Compress();
688 
689  // Let everything catch up
690  Ippl::Comm->barrier();
691 
692  // print out malloc info at end of read
693  // Inform memmsg("DiscField::read::mallinfo");
694  // struct mallinfo mdata;
695  // mdata = mallinfo();
696  // memmsg << "After read, new malloc info:" << endl;
697  // memmsg << "----------------------------" << endl;
698  // memmsg << " total arena space = " << mdata.arena << endl;
699  // memmsg << " ordinary blocks = " << mdata.ordblks << endl;
700  // memmsg << " small blocks = " << mdata.smblks << endl;
701  // memmsg << " user-held space = " << mdata.usmblks+mdata.uordblks;
702  // memmsg << endl;
703  // memmsg << " free space = " << mdata.fsmblks+mdata.fordblks;
704  // memmsg << endl;
705 
706  return true;
707  }
708 
709  // versions of read that provide default values for the arguments
710  template <class T, class M, class C>
711  bool read(Field<T,Dim,M,C>& f, unsigned int varID, unsigned int record) {
712  return read(f, f.getLayout().getDomain(), varID, record);
713  }
714 
715  template <class T, class M, class C>
716  bool read(Field<T,Dim,M,C>& f, const NDIndex<Dim> &readDomain,
717  unsigned int varID) {
718  return read(f, readDomain, varID, 0);
719  }
720 
721  template <class T, class M, class C>
722  bool read(Field<T,Dim,M,C>& f, unsigned int varID) {
723  return read(f, f.getLayout().getDomain(), varID, 0);
724  }
725 
726  template <class T, class M, class C>
727  bool read(Field<T,Dim,M,C>& f, const NDIndex<Dim> &readDomain) {
728  return read(f, readDomain, 0, 0);
729  }
730 
731  template <class T, class M, class C>
733  return read(f, f.getLayout().getDomain(), 0, 0);
734  }
735 
736 
738  // write the data from the given Field into the file. This can be used
739  // to either append new records, or to overwrite an existing record.
740  // varID = index for which field is being written ... this should be from
741  // 0 ... (numFields-1), for the case where the file contains
742  // more than one Field. Writing continues for the current record
743  // until all numField fields have been written; then during the
744  // next write, the record number is incremented.
745  //--------------------------------------------------------------------
746  // notes for documentation:
747  // - for now, you can not overwrite Field data on succesive writes
748  // (this can easily be added when needed - some restrictions will apply)
749  // - when writing, all the Fields in a record must have the same layout
750  // - layouts (and vnodes) can vary between records, however
751  // - separate DiscField objects need to be opened for reading and writing
752  //--------------------------------------------------------------------
 // Write one Field's data (variable 'varID') into the current record of
 // this DiscField. Box0 nodes collect blocks from all other nodes on their
 // SMP and write the .offset/.data files; non-box0 nodes send their local
 // LField blocks to box0. Returns true on success, aborts (and returns
 // false) on any configuration, state, or file error.
 // NOTE(review): this listing is a doxygen extraction; a few original
 // source lines rendered as hyperlinks were dropped (see notes below).
 753  template<class T, class M, class C>
 754  bool write(Field<T,Dim,M,C>& f, unsigned int varID) {
 755 
 756  // sanity checking for input arguments and state of this object
 757  if (!ConfigOK) {
 758  ERRORMSG("Cannot write in DiscField::write - config file error."<<endl);
 759  Ippl::abort("Exiting due to DiscField error.");
 760  return false;
 761  }
 762  else if (!WritingFile) {
 763  ERRORMSG("DiscField::write called for DiscField opened for read."<<endl);
 764  Ippl::abort("Exiting due to DiscField error.");
 765  return false;
 766  }
 767  else if (varID >= NumFields) {
 768  ERRORMSG(varID << " is a bad variable ID in DiscField::write." << endl);
 769  Ippl::abort("Exiting due to DiscField error.");
 770  return false;
 771  }
 772  else if (NeedStartRecord == 0 && ValidField[varID]) {
 773  ERRORMSG("DiscField:write - attempt to overwrite Field " << varID);
 774  ERRORMSG(" at record " << NumRecords - 1 << endl);
 775  Ippl::abort("Exiting due to DiscField error.");
 776  return false;
 777  }
 778 
 779  DFDBG(std::string dbgmsgname("DF:write:"));
 780  DFDBG(dbgmsgname += Config->getConfigFile());
 781  DFDBG(Inform dbgmsg(dbgmsgname.c_str(), INFORM_ALL_NODES));
 782  DFDBG(dbgmsg << "At start of write: Field layout=" << f.getLayout()<<endl);
 783 
 784  // INCIPPLSTAT(incDiscWrites);
 785 
 786  // useful typedefs for later
 787  typedef typename LField<T,Dim>::iterator LFI;
 788 
 789  // Get a new tag value for this write operation, used for all sends
 790  // to other nodes with data.
 // NOTE(review): the extraction dropped source line 791 here, which
 // presumably declared the 'tag' variable used by the sends/receives
 // below — confirm against the original DiscField.h.
 792 
 793  // Get the layout reference, and set up an iterator over lfields
 794  FieldLayout<Dim>& layout = f.getLayout();
 795  typename Field<T,Dim,M,C>::iterator_if local;
 796 
 797  // do we need to start a new record? If so, extend data structures and
 798  // file storage
 799  if (NeedStartRecord != 0) {
 800  // convert the layout information for the field into internal storage,
 801  // represented as a map from NDIndex --> owner node
 802  if (!make_globalID(layout)) {
 803  ERRORMSG("DiscField::write - all Field's must have the same ");
 804  ERRORMSG("global domain in a single DiscField.\n");
 805  ERRORMSG("The original domain is " << get_Domain() << ", ");
 806  ERRORMSG("the attempted new domain is " << layout.getDomain() << endl);
 807  Ippl::abort("Exiting due to DiscField error.");
 808  }
 809 
 810  // update vnode and valid field information for new record
 811  if (numFiles() > 0 && myBox0() == (unsigned int) Ippl::myNode()) {
 812  int nvtally = 0;
 813  NumVnodes[0].push_back(globalID.size());
 814  if (NumRecords > 0)
 815  nvtally = VnodeTally[0][NumRecords-1] + NumVnodes[0][NumRecords-1];
 816  VnodeTally[0].push_back(nvtally);
 817  }
 818 
 819  // indicate we have not written out data for any fields for this record
 820  for (unsigned int i=0; i < NumFields; ++i)
 821  ValidField[i] = false;
 822 
 823  // increment total record number
 824  NumRecords++;
 825  NumWritten = 0;
 826 
 827  // update necessary data files at the start of a new record
 828  if ((unsigned int) Ippl::myNode() == myBox0()) {
 829  // Update the meta information ... this can be changed to be only
 830  // written out during destruction.
 831  DFDBG(dbgmsg << "Writing meta file ..." << endl);
 832  if (!write_meta()) {
 833  ERRORMSG("Could not write .meta file on node " << Ippl::myNode());
 834  ERRORMSG(endl);
 835  Ippl::abort("Exiting due to DiscField error.");
 836  return false;
 837  }
 838 
 839  // write out the NDIndex objects from the FieldLayout to the
 840  // .layout file
 841  DFDBG(dbgmsg << "Writing layout file ..." << endl);
 842  if (!write_layout()) {
 843  ERRORMSG("Could not update .layout file on node "<<Ippl::myNode());
 844  ERRORMSG(endl);
 845  Ippl::abort("Exiting due to DiscField error.");
 846  return false;
 847  }
 848  }
 849  }
 850 
 851  // On box0 nodes, do most of the work ... other nodes forward data
 852  // to box0 nodes.
 853  if ((unsigned int) Ippl::myNode() == myBox0()) {
 854 
 855  // Open the offset data file, and write the Field number.
 856  // This precedes all the OffsetData structs for vnodes in the Field,
 857  // which are in random order for the given Field. The OffsetData
 858  // structs contains a field 'vnode' which tells which vnode the data is
 859  // for (0 ... numVnodes - 1).
 860  FILE *outputOffset = open_df_file(Config->getFilename(0),
 861  ".offset", std::string("a"));
 862  int wVarID = (int)varID;
 863  DFDBG(dbgmsg << "Writing Field ID = " << wVarID<<" to offset file ...");
 864  DFDBG(dbgmsg << endl);
 865  if (fwrite(&wVarID, sizeof(int), 1, outputOffset) != 1) {
 866  ERRORMSG("DiscField::write - cannot write field number to .offset ");
 867  ERRORMSG("file" << endl);
 868  Ippl::abort("Exiting due to DiscField error.");
 869  fclose(outputOffset);
 870  return false;
 871  }
 872 
 873  // Initialize output file handle ... we might never write anything to
 874  // it if the field is completely compressed.
 875  DFDBG(dbgmsg << "Trying to open output data file ..." << endl);
 876  int outputDatafd = open_df_file_fd(Config->getFilename(0), ".data",
 877  O_RDWR|O_CREAT);
 878  DFDBG(dbgmsg << "Opened out file, fd=" << outputDatafd << endl);
 879 
 880  // Later we will receive message from other nodes. This is the
 881  // number of blocks we should receive. We'll decrease this by
 882  // the number of vnodes we already have on this processor, however.
 883  DFDBG(dbgmsg << "This box0 expected to receive " << globalID.size());
 884  DFDBG(dbgmsg << " blocks from other nodes, minus the ");
 885  DFDBG(dbgmsg << layout.size_iv() << " local blocks." << endl);
 886  int unreceived = globalID.size();
 887  int fromothers = unreceived - layout.size_iv();
 888 
 889  // Now we start processing blocks. We have 'unreceived' total to
 890  // write, either from ourselves or others. We first check for
 891  // messages, if there are any to receive, get one and process it,
 892  // otherwise write out one of our local blocks.
 893 
 894  local = f.begin_if();
 895  while (unreceived > 0) {
 896  // Keep processing remote blocks until we don't see one available
 897  // or we've received them all
 898  bool checkremote = (fromothers > 0);
 899  while (checkremote) {
 900  // Get a message
 901  int any_node = COMM_ANY_NODE;
 902  Message *msg = Ippl::Comm->receive(any_node, tag);
 903 
 904  // If we found one, process it
 905  if (msg != 0) {
 906  // Extract the domain from the message
 907  NDIndex<Dim> ro;
 908  ro.getMessage(*msg);
 909  DFDBG(dbgmsg << "Received an LField msg from node ");
 910  DFDBG(dbgmsg << any_node << " with tag " << tag << ", domain = ");
 911  DFDBG(dbgmsg << ro << endl);
 912 
 913  // Extract the data from the message
 914  T rhs_compressed_data;
 915  LFI cbi(rhs_compressed_data);
 916  cbi.getMessage(*msg);
 917 
 918  // Write this data out
 919  write_offset_and_data(outputOffset, outputDatafd, cbi, ro);
 920 
 921  // finish with this message
 922  delete msg;
 923 
 924  // Update counters
 925  unreceived -= 1;
 926  fromothers -= 1;
 927  } else {
 928  // We didn't see one, so stop checking for now
 929  checkremote = false;
 930  }
 931  }
 932 
 933  // Process a local block if any are left
 934  if (local != f.end_if()) {
 935  // Cache some information about this local field.
 936  LField<T,Dim>& l = *(*local).second.get();
 937  LFI cbi = l.begin();
 938 
 939  // Write this data out
 940  write_offset_and_data(outputOffset, outputDatafd, cbi, l.getOwned());
 941 
 942  // Update counters
 943  ++local;
 944  unreceived -= 1;
 945  }
 946  }
 947 
 948  // Close the output data file
 949  if (outputDatafd >= 0)
 950  close(outputDatafd);
 951 
 952  // Close the output offset file
 953  if (outputOffset != 0)
 954  fclose(outputOffset);
 955 
 956  } else {
 957  // On other nodes, just send out our LField blocks.
 958  for (local = f.begin_if(); local != f.end_if(); ++local) {
 959  // Cache some information about this local field.
 960  LField<T,Dim>& l = *(*local).second.get();
 961  const NDIndex<Dim> &ro = l.getOwned();
 962  LFI cbi = l.begin();
 963 
 964  // Create a message to send to box0
 965  Message *msg = new Message;
 966 
 967  // Put in the domain and data
 968  ro.putMessage(*msg);
 969  cbi.putMessage(*msg, false); // 'false' = avoid copy if possible
 970 
 971  // Send this message to the box0 node.
 972  int node = myBox0();
 973  DFDBG(dbgmsg << "Sending local block " << ro << " to node " << node);
 974  DFDBG(dbgmsg << " with tag " << tag << endl);
 975  Ippl::Comm->send(msg, node, tag);
 976  }
 977  }
 978 
 979  // indicate we've written one more field
 980  ValidField[varID] = true;
 // NOTE(review): source line 981 is missing from this listing; it
 // presumably updated NumWritten here — confirm against the original.
 982 
 983  // Let everything catch up
 984  Ippl::Comm->barrier();
 985 
 986  // if we actually make it down to here, we were successful in writing
 987  return true;
 988  }
989 
 990  // version of write that provides default value for varID
 // Convenience overload: forwards to write(f, varID) with varID = 0.
 // NOTE(review): the extraction dropped source line 992 here, which held
 // the signature 'bool write(Field<T,Dim,M,C>& f) {' — confirm.
 991  template<class T, class M, class C>
 993  return write(f, 0);
 994  }
995 
996 
997  //
998  // console printing methods
999  //
1000 
1001  // print out debugging info to the given stream
1002  void printDebug(std::ostream&);
1003  void printDebug();
1004 
1005 private:
1006  // private typedefs
1008  typedef long long Offset_t;
1009 
1010  //
1011  // meta data (static info for the file which does not change)
1012  //
1013 
1014  // the configuration file mechanism
1016  bool ConfigOK;
1017 
1018  // flag which is true if we're writing, false if reading
1020 
1021  // the base name for the output file, and the descriptive type string
1022  std::string BaseFile;
1023  std::string TypeString;
1024  std::string DiscType;
1025 
1026  // dimension of data in file ... this may not match the template dimension.
1027  unsigned int DataDimension;
1028 
1029  //
1030  // dynamic data (varies as records are written or read)
1031  //
1032 
1033  // do we need to start a new record during the next write? Or, if reading,
1034  // which record have we current read into our Size and globalID variables?
1035  // If this is < 0, we have not read in any yet.
1037 
1038  // the number of fields and records in the file, and the number of Fields
1039  // written to the current record
1040  unsigned int NumFields;
1041  unsigned int NumRecords;
1042  unsigned int NumWritten;
1043 
1044  // this keeps track of where in the .data file writing is occuring
1045  // it is correlated with a given Field and record through the .offset file
1047 
1048  // the global domain of the Fields in this DiscField object
1050 
1051  // keep track of which Fields have been written to the current record
1052  std::vector<bool> ValidField;
1053 
1054  // the running tally of vnodes ON THIS SMP for each record, for each file.
1055  // VnodeTally[n] = number of vnodes written out total in prev records.
1056  // NOTE: this is not the number of vnodes TOTAL, it is the number of
1057  // vnodes on all the processors which are on this same SMP machine.
1058  std::vector<int> *VnodeTally;
1059 
1060  // the number of vnodes ON THIS SMP in each record, for each file.
1061  // NumVnodes[n] = number of vnodes in record n.
1062  // NOTE: this is not the number of vnodes TOTAL, it is the number of
1063  // vnodes on all the processors which are on this same SMP machine.
1064  std::vector<int> *NumVnodes;
1065 
1066  // store a mapping from an NDIndex to the physical node it resides on.
1067  // These values are stored only for those vnodes on processors which are
1068  // on our same SMP. This must be remade when the layout changes
1069  // (e.g., every time we write a record for a Field,
1070  // since each Field can have a different layout and each Field's layout
1071  // can change from record to record).
1072  // key: local NDIndex, value: node
1074 
1075  // Direct-IO info, required if we are using the DIRECTIO option
1076 #ifdef IPPL_DIRECTIO
1077  struct dioattr dioinfo;
1078  bool openedDirectIO;
1079 #endif
1080 
1081  //
1082  // functions used to build/query information about the processors, etc.
1083  //
1084 
1085  // perform initialization based on the constuctor arguments
1086  void initialize(const char* base, const char* config,
1087  const char* typestr, unsigned int numFields);
1088 
1089  // open a file in the given mode. If an error occurs, print a message (but
1090  // only if the last argument is true).
1091  // fnm = complete name of file (can include a path)
1092  // mode = open method ("r" == read, "rw" == read/write, etc.
1093  FILE *open_df_file(const std::string& fnm, const std::string& mode);
1094  FILE *open_df_file(const std::string& fnm, const std::string& suffix,
1095  const std::string& mode);
1096 
1097  // Open a file using direct IO, if possible, otherwise without. This
1098  // returns a file descriptor, not a FILE handle. If the file was opened
1099  // using direct-io, also initialize the dioinfo member data.
1100  // The last argument indicates whether to init (create)
1101  // the file or just open for general read-write.
1102  int open_df_file_fd(const std::string& fnm, const std::string& suf, int flags);
1103 
1104  // create the data files used to store Field data. Return success.
1105  bool create_files();
1106 
1107  // return the total number of SMP's in the system
1108  unsigned int numSMPs() const {
1109  return Config->numSMPs();
1110  }
1111 
1112  // return the total number of files which are being read or written
1113  unsigned int fileSMPs() const {
1114  return Config->fileSMPs();
1115  }
1116 
1117  // return the index of our SMP
1118  unsigned int mySMP() const {
1119  return Config->mySMP();
1120  }
1121 
1122  // return the Box0 node for this SMP
1123  unsigned int myBox0() const {
1124  return Config->getSMPBox0();
1125  }
1126 
1127  // return the number of files on our SMP (if no arg given) or the
1128  // given SMP
1129  unsigned int numFiles() const {
1130  return Config->getNumFiles();
1131  }
1132  unsigned int numFiles(unsigned int s) const {
1133  return Config->getNumFiles(s);
1134  }
1135 
1136  // compute how many physical nodes there are on the same SMP as the given
1137  // pnode.
1138  unsigned int pNodesPerSMP(unsigned int node) const {
1139  return Config->pNodesPerSMP(node);
1140  }
1141 
1142  // parse the IO configuration file and store the information.
1143  // arguments = name of config file, and if we're writing a file (true) or
1144  // reading (false)
1145  bool parse_config(const char *, bool);
1146 
1147  // Compute how many elements we should expect to store into the local
1148  // node for the given FieldLayout. Just loop through the local vnodes
1149  // and accumulate the sizes of the owned domains. This is modified
1150  // by the second "read domain" argument, which might be a subset of
1151  // the total domain.
1152  int compute_expected(const FieldLayout<Dim> &, const NDIndex<Dim> &);
1153 
1154  // Since the layout can be different every time write
1155  // is called, the globalID container needs to be recalculated. The total
1156  // domain of the Field should not change, though, just the layout. Return
1157  // success.
1159 
1160  // Compute the size of a domain, zero-based, that has a total
1161  // size <= chunkelems and has evenly chunked slices.
1162  NDIndex<Dim> chunk_domain(const NDIndex<Dim> &currblock,
1163  int chunkelems,
1164  int &msdim,
1165  bool iscompressed);
1166 
1167  //
1168  //
1169  // read/write functions for individual components
1170  //
1171 
1172  // read or write .meta data file information. Return success.
1173  bool write_meta();
1174  bool read_meta();
1175 
1176  // read or write NDIndex values for a file. Return success.
1177  bool read_NDIndex(FILE *, NDIndex<Dim> &);
1178  bool write_NDIndex(FILE *, const NDIndex<Dim> &);
1179 
1180  // Write .layout data file information. Return success.
1181  bool write_layout();
1182 
1183  // Read layout info for one file set in the given record.
1184  int read_layout(int record, int sf);
1185 
1187  // Write out the data in a provided brick iterator with given owned
1188  // domain, updating both the offset file and the data file. The .offset file
1189  // contains info on where in the main data file each vnode's data can be
1190  // found. The .offset file's structure looks like this:
1191  // |--Record n----------------------------|
1192  // | Field ID a |
1193  // | VNa | VNb | VNc | VNd | .... | VNx |
1194  // | Field ID b |
1195  // | VNa | VNb | VNc | VNd | .... | VNx |
1196  // | Field ID c |
1197  // | VNa | VNb | VNc | VNd | .... | VNx |
1198  // |--------------------------------------|
1199  // where
1200  // VNn is the data for a single Offset struct. The sets of Offset structs
1201  // for the Fields can appear in any order in a record, and the Offsets
1202  // structs within a specific Field can also appear in any order.
 // Write one vnode's data: append its DFOffsetData record to the .offset
 // file, and (if not compressed) write the raw elements to the .data file
 // in chunks, advancing CurrentOffset (measured in elements of T).
 // NOTE(review): the extraction dropped source line 1205 here, which held
 // the 'CompressedBrickIterator<T,Dim> &cbi' parameter — confirm against
 // the original DiscField.h.
 1203  template<class T>
 1204  void write_offset_and_data(FILE *outputOffset, int outputDatafd,
 1206  const NDIndex<Dim> &owned) {
 1207 
 1208  DFDBG(std::string dbgmsgname("DF:write_offset_and_data"));
 1209  DFDBG(Inform dbgmsg(dbgmsgname.c_str(), INFORM_ALL_NODES));
 1210 
 1211  // Create an offset output file struct, and initialize what we can.
 1212  // We must take care to first zero out the offset struct.
 1213  DFOffsetData<Dim,T> offset;
 1214  memset(static_cast<void *>(&offset), 0, sizeof(DFOffsetData<Dim,T>));
 1215 
 1216  domain_to_offset_data(owned, offset);
 1217  offset.isCompressed = cbi.IsCompressed();
 1218  offset.offset = CurrentOffset;
 1219 
 1220  // Set the compressed or uncompressed data in the offset struct
 1221  if (offset.isCompressed) {
 1222  // For compressed data, we just need to write out the entry to the
 1223  // offset file ... that will contain the single compressed value.
 1224  offset.compressedVal = *cbi;
 1225  DFDBG(dbgmsg << " Writing compressed vnode " << owned <<" w/value = ");
 1226  DFDBG(dbgmsg << offset.compressedVal << endl);
 1227 
 1228  } else {
 1229  // This is not compressed, so we must write to the data file. The
 1230  // main question now is whether we can use existing buffers, or write
 1231  // things out in chunks, or what.
 1232 
 1233  // First, calculate how many elements to write out at a time. The
 1234  // 'chunkbytes' might be adjusted to match the maximum amount of data
 1235  // that can be written out in a single direct-io call. This is
 1236  // true only if we are actually using direct-io, of course.
 1237 
 1238  long elems = owned.size();
 1239  long chunkbytes = Ippl::chunkSize();
 1240 #ifdef IPPL_DIRECTIO
 1241  if (openedDirectIO) {
 1242  // For direct-io, make sure we write out blocks with size that is
 1243  // a multipple of the minimum io size
 1244  PAssert_EQ(dioinfo.d_miniosz % sizeof(T), 0);
 1245  if (chunkbytes == 0 || chunkbytes > dioinfo.d_maxiosz)
 1246  chunkbytes = dioinfo.d_maxiosz;
 1247  else if (chunkbytes < dioinfo.d_miniosz)
 1248  chunkbytes = dioinfo.d_miniosz;
 1249  else if (chunkbytes % dioinfo.d_miniosz > 0)
 1250  chunkbytes -= (chunkbytes % dioinfo.d_miniosz);
 1251  }
 1252 #endif
 1253  long chunksize = chunkbytes / sizeof(T);
 1254  if (chunksize < 1 || chunksize > elems)
 1255  chunksize = elems;
 1256 
 1257  DFDBG(dbgmsg << "Total elems = " << elems << endl);
 1258  DFDBG(dbgmsg << "Bytes in each chunk = " << chunkbytes);
 1259  DFDBG(dbgmsg << " (orig chunkbytes = " << Ippl::chunkSize()<<")"<<endl);
 1260  DFDBG(dbgmsg << "Elems in each chunk = " << chunksize << endl);
 1261 
 1262  // If cbi is iterating over its whole domain, we can just use the block
 1263  // there as-is to write out data. So if cbiptr is set to an non-zero
 1264  // address, we'll just use that for the data, otherwise we'll have
 1265  // to copy to a buffer.
 1266 
 1267  T *cbiptr = 0;
 1268  if (cbi.whole())
 1269  cbiptr = &(*cbi);
 1270 
 1271  // Loop through the data, writing out chunks.
 1272 
 1273  DFDBG(dbgmsg << " Writing vnode " << owned << " in ");
 1274  DFDBG(dbgmsg << "chunks of " << chunksize << " elements for ");
 1275  DFDBG(dbgmsg << elems << " total elements ..." << endl);
 1276 
 1277  int needwrite = elems;
 1278  while (needwrite > 0) {
 1279  // Find out how many elements we'll write this time.
 1280  int amount = chunksize;
 1281  if (amount > needwrite)
 1282  amount = needwrite;
 1283 
 1284  // Find the size of a buffer of at least large enough size. We
 1285  // might need a slighly larger buffer if we are using direct-io,
 1286  // where data must be written out in blocks with sizes that
 1287  // match the device block size.
 1288  size_t nbytes = amount*sizeof(T);
 1289 #ifdef IPPL_DIRECTIO
 1290  if (openedDirectIO) {
 1291  size_t ndiff = nbytes % dioinfo.d_miniosz;
 1292  if (ndiff > 0)
 1293  nbytes += (dioinfo.d_miniosz - ndiff);
 1294  }
 1295 #endif
 1296  DFDBG(dbgmsg << " Will write total nbytes = " << nbytes);
 1297  DFDBG(dbgmsg << ", this has extra " << (nbytes - amount*sizeof(T)));
 1298  DFDBG(dbgmsg << " bytes." << endl);
 1299 
 1300  // Get a pointer to the next data, or copy more data into a buffer
 1301  // Initially start with the vnode pointer
 1302  T *buffer = cbiptr;
 1303 
 1304  // If necessary, make a copy of the data
 1305  if (buffer == 0) {
 1306  DFDBG(dbgmsg << " Getting copy buffer of total size ");
 1307  DFDBG(dbgmsg << nbytes << " bytes ..." << endl);
 1308  buffer = static_cast<T *>(DiscBuffer::resize(nbytes));
 1309 
 1310  // Copy data into this buffer from the iterator.
 1311  DFDBG(dbgmsg << " Copying data into buffer ..." << endl);
 1312  T *bufptr = buffer;
 1313  T *bufend = buffer + amount;
 1314  for ( ; bufptr != bufend; ++bufptr, ++cbi)
 1315  new (bufptr) T(*cbi);
 1316  }
 1317 
 1318  // Write the data now
 1319  DFDBG(dbgmsg << " About to write " << nbytes << " to fd = ");
 1320  DFDBG(dbgmsg << outputDatafd << endl);
 1321 
 // Seek position is in bytes; CurrentOffset counts elements of T.
 1322  off_t seekoffset = CurrentOffset * sizeof(T);
 1323  bool seekok = true;
 1324  Timer wtimer;
 1325  wtimer.clear();
 1326  wtimer.start();
 1327 
 1328 #ifdef IPPL_DIRECTIO
 1329  size_t nout = ::pwrite(outputDatafd, buffer, nbytes, seekoffset);
 1330 #else
 1331  size_t nout = 0;
 1332  if (::lseek(outputDatafd, seekoffset, SEEK_SET) == seekoffset) {
 1333  char *wbuf = (char *)buffer;
 1334  nout = ::write(outputDatafd, wbuf, nbytes);
 1335  } else {
 1336  seekok = false;
 1337  }
 1338  #endif
 1339 
 1340  wtimer.stop();
 1341  DiscBuffer::writetime += wtimer.clock_time();
 1342  DiscBuffer::writebytes += nbytes;
 1343 
 1344  if (!seekok) {
 1345  ERRORMSG("Seek error in DiscField::write_offset_and_data" << endl);
 1346  ERRORMSG("Could not seek to position " << seekoffset << endl);
 1347  Ippl::abort("Exiting due to DiscField error.");
 1348  }
 1349 
 1350  if (nout != nbytes) {
 1351  ERRORMSG("Write error in DiscField::write_offset_and_data" << endl);
 1352  ERRORMSG("Could not write " << nbytes << " bytes." << endl);
 1353  Ippl::abort("Exiting due to DiscField error.");
 1354  }
 1355 
 1356  DFDBG(dbgmsg << " Finished writing " << nout << " bytes in ");
 1357  DFDBG(dbgmsg << wtimer.clock_time() << " seconds." << endl);
 1358 
 1359  // Update pointers and counts
 1360  needwrite -= amount;
 1361  if (cbiptr != 0)
 1362  cbiptr += amount;
 1363 
 1364  // update the offset and stats
 1365 
 1366  CurrentOffset += (nbytes / sizeof(T));
 1367  // ADDIPPLSTAT(incDiscBytesWritten, nbytes);
 1368 
 1369  DFDBG(dbgmsg << " Finishing writing chunk, still " << needwrite);
 1370  DFDBG(dbgmsg << " elements to write out from this block." << endl);
 1371  }
 1372  }
 1373 
 1374  // write to offset file now
 1375  DFDBG(dbgmsg << "Writing offset data to file, iscompressed = ");
 1376  DFDBG(dbgmsg << offset.isCompressed);
 1377  DFDBG(dbgmsg << ", offset = " << offset.offset << endl);
 1378  if (fwrite(&offset, sizeof(DFOffsetData<Dim,T>), 1, outputOffset) != 1) {
 1379  ERRORMSG("Write error in DiscField::write_offset_and_data" << endl);
 1380  Ippl::abort("Exiting due to DiscField error.");
 1381  }
 1382  }
1383 
1385  // seek to the beginning of the vnode data for field 'varID' in record
1386  // 'record', for file 'sf'. If not found, close the file and return false.
1387  // If it is found, read in all the offset records, and return them
1388  // in the provided vector.
 // Locate field 'varID' inside record 'record' of file set 'sf' in the
 // .offset file, read all of its DFOffsetData entries into 'offdata'
 // (resized to 'vnodes'), and return success. On any seek/read failure,
 // or if the field ID is never found, closes the file and returns false.
 1389  template <class T>
 1390  bool read_offset(unsigned int varID,
 1391  unsigned int record,
 1392  unsigned int sf,
 1393  std::vector<DFOffsetData<Dim,T> > &offdata,
 1394  int vnodes) {
 1395 
 1396  // Open the offset file
 1397  FILE *outputOffset = open_df_file(Config->getFilename(sf),
 1398  ".offset", std::string("r"));
 1399 
 1400  // seek to the start of this record
 // NOTE(review): the whole parenthesized sum is scaled by NumFields,
 // i.e. it assumes every record holds NumFields field-ID ints plus the
 // per-record vnode entries for each field — verify against write().
 1401  Offset_t seekpos = NumFields * (record * sizeof(int) +
 1402  VnodeTally[sf][record] *
 1403  sizeof(DFOffsetData<Dim,T>));
 1404  if (fseek(outputOffset, seekpos, SEEK_SET) != 0) {
 1405  ERRORMSG("Error seeking to position " << static_cast<long>(seekpos));
 1406  ERRORMSG(" in .offset file " << endl);
 1407  Ippl::abort("Exiting due to DiscField error.");
 1408  fclose(outputOffset);
 1409  return false;
 1410  }
 1411 
 1412  // now keep looking at the Field ID in this record until we find the one
 1413  // we want
 1414  unsigned int checked = 0;
 1415  while (checked < NumFields) {
 1416  // read the next field ID number
 1417  int rVarID;
 1418  if (fread(&rVarID, sizeof(int), 1, outputOffset) != 1) {
 1419  ERRORMSG("Error reading field ID from .offset file" << endl);
 1420  Ippl::abort("Exiting due to DiscField error.");
 1421  fclose(outputOffset);
 1422  return false;
 1423  }
 1424 
 1425  // is it what we want?
 1426  if ((unsigned int) rVarID == varID) {
 1427  // Yes it is, so read in the offset record data. First resize
 1428  // the offset data vector.
 1429  offdata.resize(vnodes);
 1430  size_t result = fread(&(offdata[0]), sizeof(DFOffsetData<Dim,T>),
 1431  offdata.size(), outputOffset);
 1432  if (result != offdata.size()) {
 1433  ERRORMSG("Read error in DiscField::find_file_in_offset" << endl);
 1434  ERRORMSG("Results is " << result << ", should be ");
 1435  ERRORMSG(offdata.size() << endl);
 1436  ERRORMSG("outputOffset is " << (void *)outputOffset << endl);
 1437  Ippl::abort("Exiting due to DiscField error.");
 1438  fclose(outputOffset);
 1439  return false;
 1440  }
 1441 
 1442  // And return success.
 1443  fclose(outputOffset);
 1444  return true;
 1445  }
 1446 
 1447  // it was not, so move on to the next one
 1448  checked++;
 // Skip this field's block: its vnode entries plus the next field-ID int.
 1449  seekpos += (NumVnodes[sf][record] * sizeof(DFOffsetData<Dim,T>) +
 1450  sizeof(int));
 1451  if (fseek(outputOffset, seekpos, SEEK_SET) != 0) {
 1452  ERRORMSG("Error seeking to position " << static_cast<long>(seekpos));
 1453  ERRORMSG(" in .offset file " << endl);
 1454  Ippl::abort("Exiting due to DiscField error.");
 1455  fclose(outputOffset);
 1456  return false;
 1457  }
 1458  }
 1459 
 1460  // if we're here, we did not find the Field ID anywhere in the .offset file
 1461  ERRORMSG("Could not find data for field " << varID << " of record ");
 1462  ERRORMSG(record << " in .offset file." << endl);
 1463  Ippl::abort("Exiting due to DiscField error.");
 1464  fclose(outputOffset);
 1465  return false;
 1466  }
1467 
1469  // On all nodes, either send out or receive in offset information.
1470  // Some nodes will not get any, and will not have to do any reading.
1471  // But those that do, will read in data for the vnodes they are
1472  // assigned. 'vnodes' will be set to the number of vnodes assigned
1473  // for reading from this node, and 'maxsize' will be set
1474  // to the maximum size of the vnodes in this file, for use in
1475  // preparing the buffer that will be used to read those vnodes.
 // Distribute the offset records among the nodes of this SMP for parallel
 // reading: box0 sends each non-box0 node its share of 'offdata'; other
 // nodes receive theirs. Sets 'vnodes' to the number of vnodes this node
 // will read and 'maxsize' to the largest chunk-block size (in elements)
 // among vnodes that touch 'readDomain'.
 1476  template <class T>
 1477  void distribute_offsets(std::vector<DFOffsetData<Dim,T> > &offdata,
 1478  int &vnodes, int &maxsize,
 1479  const NDIndex<Dim> &readDomain) {
 1480 
 1481  DFDBG(Inform dbgmsg("DiscField::distribute_offsets", INFORM_ALL_NODES));
 1482 
 1483  // Initialize the vnode and maxsize values.
 1484  vnodes = 0;
 1485  maxsize = 0;
 1486 
 1487  // If parallel reads are turned off, just box0 nodes will read
 1488  if (!Ippl::perSMPParallelIO()) {
 1489  DFDBG(dbgmsg << "Per-SMP parallel IO is disabled, so only box0 nodes ");
 1490  DFDBG(dbgmsg << "will read data." << endl);
 1491 
 1492  if ((unsigned int) Ippl::myNode() == myBox0())
 1493  vnodes = offdata.size();
 1494 
 1495  } else {
 1496 
 1497  // Generate the tag to use
 // NOTE(review): the extraction dropped source line 1498 here, which
 // presumably declared the 'tag' variable used by the send/receive
 // calls below — confirm against the original DiscField.h.
 1499 
 1500  // Nodes that do not have their box0 process on the same SMP should
 1501  // not receive anything
 1502  if (Config->getNodeSMPIndex(myBox0()) != mySMP()) {
 1503  DFDBG(dbgmsg << "Node " << Ippl::myNode() << " has box0 = ");
 1504  DFDBG(dbgmsg << myBox0() << " on a different SMP. Return from ");
 1505  DFDBG(dbgmsg << "distribute_offsets with zero vnodes." << endl);
 1506  return;
 1507  }
 1508 
 1509  // All box0 nodes will (possibly) send out offset data. Others will
 1510  // receive it, even if it just says "you don't get any vnodes."
 1511  if ((unsigned int) Ippl::myNode() == myBox0()) {
 1512  // How many offset blocks per processor
 1513  int pernode = offdata.size() / pNodesPerSMP(myBox0());
 1514 
 1515  // Extra vnodes we might have to give to others
 1516  int extra = offdata.size() % pNodesPerSMP(myBox0());
 1517 
 1518  DFDBG(dbgmsg << "Assigning " << pernode << " vnodes to each node, ");
 1519  DFDBG(dbgmsg << "with " << extra << " extra." << endl);
 1520 
 1521  // The next vnode to assign; box0 will always get an extra one if
 1522  // necessary.
 1523  int nextvnode = pernode;
 1524  if (extra > 0) {
 1525  nextvnode += 1;
 1526  extra -= 1;
 1527  }
 1528  DFDBG(dbgmsg << "This box0 node will get the first " << nextvnode);
 1529  DFDBG(dbgmsg << " vnodes." << endl);
 1530 
 1531  // box0 nodes get the first 'nextvnode' vnodes.
 1532  vnodes = nextvnode;
 1533 
 1534  // Loop through the nodes on this vnode; nodes other than box0 will
 1535  // get sent a message.
 1536  for (unsigned int n=0; n < Config->getNumSMPNodes(); ++n) {
 1537  int node = Config->getSMPNode(mySMP(), n);
 1538  if (node != Ippl::myNode()) {
 1539  // How many vnodes to assign?
 1540  int numvnodes = pernode;
 1541  if (extra > 0) {
 1542  numvnodes += 1;
 1543  extra -= 1;
 1544  }
 1545 
 1546  // Create a message for this other node, storing:
 1547  // - number of vnodes (int)
 1548  // - list of vnode offset structs (list)
 1549  Message *msg = new Message;
 1550  msg->put(numvnodes);
 1551  if (numvnodes > 0) {
 // setCopy/setDelete false: the message points into 'offdata'
 // directly, so no copy is made and the buffer is not freed.
 1552  msg->setCopy(false);
 1553  msg->setDelete(false);
 1554  msg->putmsg(static_cast<void *>(&(offdata[nextvnode])),
 1555  sizeof(DFOffsetData<Dim,T>),
 1556  numvnodes);
 1557  }
 1558 
 1559  // Send this message to the other node
 1560  DFDBG(dbgmsg << "Sending offset info for " << numvnodes);
 1561  DFDBG(dbgmsg << " vnodes to node " << node << " with tag " << tag);
 1562  DFDBG(dbgmsg << ", starting from box0 " << nextvnode << endl);
 1563  Ippl::Comm->send(msg, node, tag);
 1564 
 1565  // Update what the next vnode info to send is
 1566  nextvnode += numvnodes;
 1567  }
 1568  }
 1569 
 1570  // At the end, we should have no vnodes left to send
 1571  if ((unsigned int) nextvnode != offdata.size())
 1572  Ippl::abort("ERROR: Could not give away all my vnodes!");
 1573 
 1574  } else {
 1575  // On non-box0 nodes, receive offset info
 1576  int node = myBox0();
 1577  DFDBG(dbgmsg << "Waiting for offset data from node " << node << endl);
 1578  Message *msg = Ippl::Comm->receive_block(node, tag);
 1579 
 1580  // Get the number of vnodes to store here
 1581  msg->get(vnodes);
 1582  DFDBG(dbgmsg << "Received offset info for " << vnodes << " vnodes ");
 1583  DFDBG(dbgmsg << "from node " << node << endl);
 1584 
 1585  // If this is > 0, copy out vnode info
 1586  if (vnodes > 0) {
 1587  // resize the vector to make some storage
 1588  offdata.resize(vnodes);
 1589 
 1590  // get offset data from the message
 1591  ::getMessage_iter(*msg, &(offdata[0]));
 1592  }
 1593 
 1594  // Done with the message now.
 1595  delete msg;
 1596  }
 1597  }
 1598 
 1599  // Now, finally, on all nodes we scan the vnodes to find out the maximum
 1600  // size of the buffer needed to read this data in.
 1601  DFDBG(dbgmsg << "Scanning offset data for maxsize value ..." << endl);
 1602  for (int v=0; v < vnodes; ++v) {
 1603  // Convert data to NDIndex
 1604  NDIndex<Dim> dom;
 1605  offset_data_to_domain(offdata[v], dom);
 1606  if (dom.touches(readDomain)) {
 1607  // Compute chunk block size
 1608  int msdim = (Dim-1); // this will be zero-based
 1609  int chunkelems = Ippl::chunkSize() / sizeof(T);
 1610  NDIndex<Dim> chunkblock = chunk_domain(dom, chunkelems, msdim,
 1611  offdata[v].isCompressed);
 1612 
 1613  DFDBG(dbgmsg << "Checking size of vnode " << v << " on node ");
 1614  DFDBG(dbgmsg << Ippl::myNode() << " with domain " << dom);
 1615  DFDBG(dbgmsg << ", compressed = " << offdata[v].isCompressed);
 1616  DFDBG(dbgmsg << ", chunkblock = " << chunkblock << endl);
 1617 
 1618  // Now compare the size
 1619  int dsize = chunkblock.size();
 1620  if (dsize > maxsize) {
 1621  maxsize = dsize;
 1622  DFDBG(dbgmsg << " New maxsize = " << maxsize << endl);
 1623  }
 1624  } else {
 1625  DFDBG(dbgmsg << "Skipping vnode " << v << " since it does not ");
 1626  DFDBG(dbgmsg << "intersect with readDomain = " << readDomain);
 1627  DFDBG(dbgmsg << "; keeping maxsize = " << maxsize << endl);
 1628  }
 1629  }
 1630  }
1631 
1633  // read the data for a block of values of type T from the given data file.
1634  // Return success of read.
1635  // The size and seekpos values are in bytes.
 // Read 'readsize' bytes of T-data from the .data file descriptor into
 // 'buffer', starting at byte position 'seekpos'. Uses pread under
 // direct-IO builds, lseek+read otherwise; aborts on seek/short-read
 // errors, and returns true otherwise. Both sizes are byte counts.
 1636  template <class T>
 1637  bool read_data(int outputDatafd, T* buffer, Offset_t readsize,
 1638  Offset_t seekpos) {
 1639 
 1640  DFDBG(Inform dbgmsg("DiscField::read_data", INFORM_ALL_NODES));
 1641  DFDBG(dbgmsg << "readsize=" << readsize << ", seekpos=" << seekpos);
 1642  DFDBG(dbgmsg <<", sizeof(T)=" << sizeof(T) << endl);
 1643 
 // Precondition checks: valid position, nonempty read, real buffer and
 // fd, and a read size that is a whole number of T elements.
 1644  PAssert_GE(seekpos, 0);
 1645  PAssert_GT(readsize, 0);
 1646  PAssert(buffer);
 1647  PAssert_GE(outputDatafd, 0);
 1648  PAssert_EQ(readsize % sizeof(T), 0);
 1649 
 1650 #ifdef IPPL_DIRECTIO
 1651  if (openedDirectIO) {
 1652  PAssert_EQ(readsize % dioinfo.d_miniosz, 0);
 1653  PAssert_EQ(seekpos % dioinfo.d_miniosz, 0);
 1654  }
 1655 #endif
 1656 
 1657  // Now read the block of data
 1658  off_t seekoffset = seekpos;
 1659  size_t nbytes = readsize;
 1660  bool seekok = true;
 1661 
 1662  Timer rtimer;
 1663  rtimer.clear();
 1664  rtimer.start();
 1665 
 1666 #ifdef IPPL_DIRECTIO
 1667  size_t nout = ::pread(outputDatafd, buffer, nbytes, seekoffset);
 1668 #else
 1669  size_t nout = 0;
 1670  if (::lseek(outputDatafd, seekoffset, SEEK_SET) == seekoffset) {
 1671  char *rbuf = (char *)buffer;
 1672  nout = ::read(outputDatafd, rbuf, nbytes);
 1673  } else {
 1674  seekok = false;
 1675  }
 1676 #endif
 1677 
 1678  rtimer.stop();
 1679  DiscBuffer::readtime += rtimer.clock_time();
 1680  DiscBuffer::readbytes += readsize;
 1681 
 1682  if (!seekok) {
 1683  ERRORMSG("Seek error in DiscField::read_data" << endl);
 1684  ERRORMSG("Could not seek to position " << seekoffset << endl);
 1685  Ippl::abort("Exiting due to DiscField error.");
 1686  }
 1687 
 1688  if (nout != nbytes) {
 1689  ERRORMSG("Read error in DiscField::read_data" << endl);
 1690  ERRORMSG("Could not read " << nbytes << " bytes." << endl);
 1691  Ippl::abort("Exiting due to DiscField error.");
 1692  }
 1693 
 1694  DFDBG(size_t nelem = readsize / sizeof(T));
 1695  DFDBG(dbgmsg << "Read in block of " << nelem << " elements in ");
 1696  DFDBG(dbgmsg << rtimer.clock_time() << " second:" << endl);
 1697  DFDBG(for (unsigned int i=0; i < nelem && i < 10; ++i))
 1698  DFDBG( dbgmsg << " buffer[" << i << "] = " << buffer[i] << endl);
 1699 
 1700  // ADDIPPLSTAT(incDiscBytesRead, readsize);
 1701  return true;
 1702  }
1703 
1705  // Convert data in an offset data struct to an NDIndex, and return it
1706  // in the second argument.
 // Rebuild an NDIndex from a DFOffsetData's raw vnodedata array: each
 // dimension occupies 6 ints, with (first, stride, length) stored at
 // slots 1..3 of its group (the inverse of domain_to_offset_data below).
 // NOTE(review): the extraction dropped source line 1708 here, which held
 // the signature start 'void offset_data_to_domain(DFOffsetData<Dim,T>
 // &offdata,' — confirm against the original DiscField.h.
 1707  template<class T>
 1709  NDIndex<Dim> &domain) {
 1710  int *dptr = offdata.vnodedata + 1;
 1711  for (unsigned int i=0; i < Dim; ++i) {
 1712  int first = *dptr;
 1713  int stride = *(dptr + 1);
 1714  int length = *(dptr + 2);
 // Last point = first + (length-1)*stride, inclusive Index range.
 1715  domain[i] = Index(first, first + (length - 1)*stride, stride);
 1716  dptr += 6;
 1717  }
 1718  }
1719 
1721  // Convert domain data to offset I/O struct data
 // Flatten an NDIndex into a DFOffsetData's vnodedata array: 6 ints per
 // dimension, laid out as (0, first, stride, length, 0, 0) — matching
 // what offset_data_to_domain above reads back.
 // NOTE(review): the extraction dropped source line 1723 here, which held
 // the signature start 'void domain_to_offset_data(const NDIndex<Dim>
 // &domain,' — confirm against the original DiscField.h.
 1722  template<class T>
 1724  DFOffsetData<Dim,T> &offdata) {
 1725  int *dptr = offdata.vnodedata;
 1726  for (unsigned int i=0; i < Dim; ++i) {
 1727  *dptr++ = 0;
 1728  *dptr++ = domain[i].first();
 1729  *dptr++ = domain[i].stride();
 1730  *dptr++ = domain[i].length();
 1731  *dptr++ = 0;
 1732  *dptr++ = 0;
 1733  }
 1734  }
1735 
1736  //
1737  // don't allow copy or assign ... these are declared but never defined,
1738  // if something tries to use them it will generate a missing symbol error
1739  //
1740 
1741  DiscField(const DiscField<Dim>&);
1743 };
1744 
1745 #include "Utility/DiscField.hpp"
1746 
1747 #endif // DISC_FIELD_H
1748 
1749 /***************************************************************************
1750  * $RCSfile: DiscField.h,v $ $Author: adelmann $
1751  * $Revision: 1.1.1.1 $ $Date: 2003/01/23 07:40:33 $
1752  * IPPL_VERSION_ID: $Id: DiscField.h,v 1.1.1.1 2003/01/23 07:40:33 adelmann Exp $
1753  ***************************************************************************/
std::vector< bool > ValidField
Definition: DiscField.h:1052
void write_offset_and_data(FILE *outputOffset, int outputDatafd, CompressedBrickIterator< T, Dim > &cbi, const NDIndex< Dim > &owned)
Definition: DiscField.h:1204
void PETE_apply(const OpPeriodic< T > &e, T &a, const T &b)
Definition: BCond.hpp:373
bool read_offset(unsigned int varID, unsigned int record, unsigned int sf, std::vector< DFOffsetData< Dim, T > > &offdata, int vnodes)
Definition: DiscField.h:1390
const std::string & getConfigFile() const
Definition: DiscConfig.h:76
unsigned int mySMP() const
Definition: DiscField.h:1118
std::string DiscType
Definition: DiscField.h:1024
static void abort(const char *=0, int exitcode=(-1))
Definition: IpplInfo.cpp:696
Layout_t & getLayout() const
Definition: BareField.h:130
void printDebug()
Definition: DiscField.hpp:457
ac_id_larray::iterator iterator_if
Definition: BareField.h:91
const char * get_TypeString()
Definition: DiscField.h:123
bool write(Field< T, Dim, M, C > &f)
Definition: DiscField.h:992
Definition: rbendmap.h:8
#define INFORM_ALL_NODES
Definition: Inform.h:38
bool read_NDIndex(FILE *, NDIndex< Dim > &)
Definition: DiscField.hpp:759
int NeedStartRecord
Definition: DiscField.h:1036
bool write(Field< T, Dim, M, C > &f, unsigned int varID)
Definition: DiscField.h:754
ac_id_vnodes::size_type size_iv() const
Definition: FieldLayout.h:702
std::vector< int > * NumVnodes
Definition: DiscField.h:1064
NDIndex< Dim > get_Domain() const
Definition: DiscField.h:113
const NDIndex< Dim > & getOwned() const
Definition: LField.h:93
touch_range_dv touch_range_rdv(const NDIndex< Dim > &domain, const GuardCellSizes< Dim > &gc=gc0()) const
Definition: FieldLayout.h:780
GlobalIDList_t globalID
Definition: DiscField.h:1073
void Compress() const
Definition: BareField.hpp:1304
#define ERRORMSG(msg)
Definition: IpplInfo.h:399
static long readbytes
Definition: DiscBuffer.h:84
bool make_globalID(FieldLayout< Dim > &)
Definition: DiscField.hpp:346
Message & getMessage(Message &m)
Definition: NDIndex.h:147
Offset_t CurrentOffset
Definition: DiscField.h:1046
void initialize(const char *base, const char *config, const char *typestr, unsigned int numFields)
Definition: DiscField.hpp:97
unsigned int fileSMPs() const
Definition: DiscField.h:1113
void barrier(void)
#define DF_READ_TAG
Definition: Tags.h:71
const int COMM_ANY_NODE
Definition: Communicate.h:40
static int myNode()
Definition: IpplInfo.cpp:794
static void * resize(long sz)
Definition: DiscBuffer.cpp:83
unsigned int getNumSMPNodes() const
Definition: DiscConfig.h:101
Definition: FFT.h:31
void Uncompress(bool fill_domain=true)
Definition: LField.h:166
bool read(Field< T, Dim, M, C > &f, unsigned int varID)
Definition: DiscField.h:722
unsigned size() const
bool ConfigOK
Definition: DiscField.h:1016
void getMessage_iter(Message &m, OutputIterator o)
Definition: Message.h:603
#define DFDBG(x)
Definition: DiscField.h:19
static double readtime
Definition: DiscBuffer.h:82
unsigned int fileSMPs() const
Definition: DiscConfig.h:81
unsigned int NumRecords
Definition: DiscField.h:1041
DiscField & operator=(const DiscField< Dim > &)
unsigned int DataDimension
Definition: DiscField.h:1027
int next_tag(int t, int s=1000)
Definition: TagMaker.h:43
unsigned int myBox0() const
Definition: DiscField.h:1123
bool compressible() const
Definition: BareField.h:190
#define DF_OFFSET_TAG
Definition: Tags.h:72
const iterator & begin() const
Definition: LField.h:104
bool write_NDIndex(FILE *, const NDIndex< Dim > &)
Definition: DiscField.hpp:792
#define DF_TAG_CYCLE
Definition: Tags.h:74
bool parse_config(const char *, bool)
Definition: DiscField.hpp:416
int open_df_file_fd(const std::string &fnm, const std::string &suf, int flags)
Definition: DiscField.hpp:201
std::vector< int > * VnodeTally
Definition: DiscField.h:1058
std::string BaseFile
Definition: DiscField.h:1022
Definition: Timer.h:7
DiscConfig * Config
Definition: DiscField.h:1015
void setDirtyFlag()
Definition: BareField.h:116
bool read(Field< T, Dim, M, C > &f)
Definition: DiscField.h:732
iterator_if end_if()
Definition: BareField.h:100
#define PAssert_GE(a, b)
Definition: PAssert.h:124
long long Offset_t
Definition: DiscField.h:1008
#define PAssert_EQ(a, b)
Definition: PAssert.h:119
Definition: Index.h:236
Expression Expr_t
type of an expression
Definition: Expression.h:34
unsigned int pNodesPerSMP(unsigned int node) const
Definition: DiscConfig.cpp:129
unsigned int pNodesPerSMP(unsigned int node) const
Definition: DiscField.h:1138
void query(int &numRecords, int &numFields, std::vector< int > &size) const
Definition: DiscField.hpp:165
void distribute_offsets(std::vector< DFOffsetData< Dim, T > > &offdata, int &vnodes, int &maxsize, const NDIndex< Dim > &readDomain)
Definition: DiscField.h:1477
const int COMM_ANY_TAG
Definition: Communicate.h:41
bool touches(const NDIndex< Dim > &) const
static void startTimer(TimerRef t)
Definition: IpplTimings.h:187
unsigned int NumWritten
Definition: DiscField.h:1042
Message & putmsg(void *, int, int=0)
unsigned int get_NumRecords() const
Definition: DiscField.h:107
const NDIndex< Dim > & getAllocated() const
Definition: LField.h:92
unsigned int numSMPs() const
Definition: DiscConfig.h:80
void fillGuardCellsIfNotDirty() const
Definition: BareField.h:121
unsigned int mySMP() const
Definition: DiscConfig.h:82
bool read_data(int outputDatafd, T *buffer, Offset_t readsize, Offset_t seekpos)
Definition: DiscField.h:1637
static long size()
Definition: DiscBuffer.h:48
bool IsCompressed() const
Definition: LField.h:128
unsigned int getSMPNode(unsigned int n) const
Definition: DiscConfig.h:107
bool WritingFile
Definition: DiscField.h:1019
bool whole() const
Definition: BrickIterator.h:81
bool write_layout()
Definition: DiscField.hpp:840
int compute_expected(const FieldLayout< Dim > &, const NDIndex< Dim > &)
Definition: DiscField.hpp:941
void offset_data_to_domain(DFOffsetData< Dim, T > &offdata, NDIndex< Dim > &domain)
Definition: DiscField.h:1708
Message & get(const T &cval)
Definition: Message.h:484
#define PAssert_GT(a, b)
Definition: PAssert.h:123
Message & put(const T &val)
Definition: Message.h:414
static long writebytes
Definition: DiscBuffer.h:85
FILE * open_df_file(const std::string &fnm, const std::string &mode)
Definition: DiscField.hpp:184
unsigned int numFiles(unsigned int s) const
Definition: DiscField.h:1132
#define FB_TAG_CYCLE
Definition: Tags.h:62
void domain_to_offset_data(const NDIndex< Dim > &domain, DFOffsetData< Dim, T > &offdata)
Definition: DiscField.h:1723
bool read(Field< T, Dim, M, C > &f, const NDIndex< Dim > &readDomain, unsigned int varID)
Definition: DiscField.h:716
void stop()
Definition: Timer.cpp:18
unsigned int get_Dimension() const
Definition: DiscField.h:120
bool contains(const NDIndex< Dim > &a) const
void clear()
Definition: Timer.cpp:8
int vnodedata[6 *Dim]
Definition: DiscField.h:53
long long offset
Definition: DiscField.h:55
void Compress()
Definition: LField.h:155
const std::string & getFilename(unsigned int fn) const
Definition: DiscConfig.h:125
int read_layout(int record, int sf)
Definition: DiscField.hpp:879
NDIndex< Dim > intersect(const NDIndex< Dim > &) const
double clock_time()
Definition: Timer.cpp:29
bool read(Field< T, Dim, M, C > &f, const NDIndex< Dim > &readDomain)
Definition: DiscField.h:727
#define PAssert(c)
Definition: PAssert.h:117
DiscField(const char *fname, const char *config, unsigned int numFields, const char *typestr=0)
Definition: DiscField.hpp:53
size_type size() const
Definition: vmap.h:138
const char * get_DiscType()
Definition: DiscField.h:126
vmap< NDIndex< Dim >, int > GlobalIDList_t
Definition: DiscField.h:1007
Message & putMessage(Message &m) const
Definition: NDIndex.h:139
static int chunkSize()
Definition: IpplInfo.h:239
bool read(Field< T, Dim, M, C > &f, const NDIndex< Dim > &readDomain, unsigned int varID, unsigned int record)
Definition: DiscField.h:152
const unsigned Dim
bool write_meta()
Definition: DiscField.hpp:490
bool create_files()
Definition: DiscField.hpp:300
NDIndex< Dim > Size
Definition: DiscField.h:1049
unsigned int getSMPBox0() const
Definition: DiscConfig.h:113
Timing::TimerRef TimerRef
Definition: IpplTimings.h:176
static TimerRef getTimer(const char *nm)
Definition: IpplTimings.h:182
bool read(Field< T, Dim, M, C > &f, unsigned int varID, unsigned int record)
Definition: DiscField.h:711
NDIndex< Dim > chunk_domain(const NDIndex< Dim > &currblock, int chunkelems, int &msdim, bool iscompressed)
Definition: DiscField.hpp:980
void start()
Definition: Timer.cpp:13
static void stopTimer(TimerRef t)
Definition: IpplTimings.h:192
unsigned int numFiles() const
Definition: DiscField.h:1129
iterator_if begin_if()
Definition: BareField.h:99
Message * receive_block(int &node, int &tag)
unsigned int NumFields
Definition: DiscField.h:1040
bool isCompressed
Definition: DiscField.h:54
Message * receive(int &node, int &tag)
Definition: Inform.h:41
unsigned int get_NumFields() const
Definition: DiscField.h:110
unsigned int getNodeSMPIndex(unsigned int n) const
Definition: DiscConfig.h:157
Message & setDelete(const bool c)
Definition: Message.h:339
static Communicate * Comm
Definition: IpplInfo.h:93
Message & setCopy(const bool c)
Definition: Message.h:327
bool send(Message *, int node, int tag, bool delmsg=true)
unsigned int getNumFiles() const
Definition: DiscConfig.h:119
unsigned int numSMPs() const
Definition: DiscField.h:1108
std::pair< touch_iterator_dv, touch_iterator_dv > touch_range_dv
Definition: FieldLayout.h:77
const NDIndex< Dim > & getDomain() const
Definition: FieldLayout.h:325
#define FB_WRITE_TAG
Definition: Tags.h:60
std::string TypeString
Definition: DiscField.h:1023
Inform & endl(Inform &inf)
Definition: Inform.cpp:42
bool read_meta()
Definition: DiscField.hpp:536
Definition: vmap.h:65
static double writetime
Definition: DiscBuffer.h:83
static bool perSMPParallelIO()
Definition: IpplInfo.h:244