DiscField.h
1// -*- C++ -*-
2/***************************************************************************
3 *
4 * The IPPL Framework
5 *
6 *
7 * Visit http://people.web.psi.ch/adelmann/ for more details
8 *
9 ***************************************************************************/
10
11#ifndef DISC_FIELD_H
12#define DISC_FIELD_H
13
14// include files
15#include "Index/NDIndex.h"
17#include "Utility/DiscBuffer.h"
18#include "Utility/DiscConfig.h"
19#include "Utility/Inform.h"
20#include "Utility/vmap.h"
21#include "Utility/IpplTimings.h"
22#include <cstdio>
23#include <cstdlib>
24#include <unistd.h>
25#include <fcntl.h>
26
27#include <vector>
28#include <iostream>
29
30// forward declarations
31template<unsigned Dim, class T> class UniformCartesian;
32template<class T, unsigned Dim, class M, class C> class Field;
33template<unsigned Dim> class FieldLayout;
34
35
36// This helper class is used to represent a record for I/O of the .offset file.
37// It is only used for reads and writes. See the notes below for
38// the reason the vnodedata is a set of ints instead of an NDIndex.
39 template <unsigned Dim, class T>
40 struct DFOffsetData {
41 int vnodedata[6*Dim];
42 bool isCompressed;
43 long long offset;
44 T compressedVal;
45};
46
47
48template <unsigned Dim>
49class DiscField {
50
51public:
52 // Constructor: make a DiscField for writing only
53 // fname = name of file (without extensions)
54 // config = name of configuration file
55 // numFields = number of Fields which will be written to the file
56 // typestr = string describing the 'type' of the Field to be written (this
57 // is ignored if the Field is being read). The string should be
58 // the same as the statement used to declare the Field object
59 // e.g., for a field of the form Field<double,2> the string
60 // should be "Field<double,2>". The string should be such that
61 // if read later into a variable F, you can use the string in
62 // a source code line as 'F A' to create an instance of the
63 // same type of data as is stored in the file.
64 DiscField(const char* fname, const char* config, unsigned int numFields,
65 const char* typestr = 0);
66
67 // Constructor: same as above, but without a config file specified. The
68 // default config file entry that will be used is "* .", which means, for
69 // each SMP machine, assume the directory to put data files in is "."
70 DiscField(const char* fname, unsigned int numFields,
71 const char* typestr = 0);
72
73 // Constructor: make a DiscField for reading only.
74 // fname = name of file (without extensions)
75 // config = name of configuration file
76 DiscField(const char* fname, const char* config);
77
78 // Constructor: same as above, but without a config file specified. The
79 // default config file entry that will be used is "* .", which means, for
80 // each SMP machine, assume the directory to put data files in is "."
81 DiscField(const char* fname);
82
83 // Destructor.
84 ~DiscField();
85
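 // Usage sketch (illustrative only; the file base name, config file name,
 // and the 2D double Field are hypothetical): one DiscField opened for
 // writing a single Field, and one opened for reading the same file set.
 //
 //   DiscField<2> writer("mydata", "ioconfig", 1, "Field<double,2>");
 //   DiscField<2> reader("mydata", "ioconfig");
 //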
86 //
87 // accessor functions
88 //
89
90 // Obtain all the information about the file, including the number
91 // of records, fields, and number of vnodes stored in each record.
92 void query(int& numRecords, int& numFields, std::vector<int>& size) const;
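 // Usage sketch (illustrative; 'reader' is a hypothetical DiscField<Dim>
 // opened for reading): query the file contents before deciding what to read.
 //
 //   int nrec, nfld;
 //   std::vector<int> vnodesPerRecord;
 //   reader.query(nrec, nfld, vnodesPerRecord);
 //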
93
94 // Query for the number of records (e.g., timesteps) in the file.
95 unsigned int get_NumRecords() const { return NumRecords; }
96
97 // Query for the number of Fields stored in the file.
98 unsigned int get_NumFields() const { return NumFields; }
99
100 // Query for the total domain of the system.
101 NDIndex<Dim> get_Domain() const { return Size; }
102
103 // Query for the dimension of the data in the file. This is useful
104 // mainly if you are checking for dimension by constructing a DiscField
105 // and then trying to check its dimension later. If the dimension is
106 // not correctly matched with the Dim template parameter, you will get
107 // an error if you try to read or write.
108 unsigned int get_Dimension() const { return DataDimension; }
109
110 // Query for the type string
111 const char *get_TypeString() { return TypeString.c_str(); }
112
113 // Query for the disctype string
114 const char *get_DiscType() {
115 if (DiscType.length() > 0)
116 return DiscType.c_str();
117 return 0;
118 }
119
120 //
121 // read/write methods
122 //
123
124 // read the selected record in the file into the given Field object.
125 // readDomain = the portion of the field on disk that should actually be
126 // read in, and placed in the same location in the
127 // provided field. All of readDomain must be contained
128 // within the data on disk, and in the field in memory,
129 // although the domain on disk and in memory do not themselves
130 // have to be the same.
131 // varID = index for which field is being read ... this should be from
132 // 0 ... (numFields-1), for the case where the file contains
133 // more than one Field.
134 // record = which record to read. DiscField does not keep a 'current
135 // file position' pointer, instead you explicitly request which
136 // record you wish to read.
137 // Return success of operation.
138
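 // Usage sketch (illustrative; 'df', 'f', and the sub-domain bounds are
 // hypothetical): read record 2 of field 0 into the matching portion of f.
 // Both f's layout domain and the on-disk domain must contain the sub-domain.
 //
 //   NDIndex<2> sub(Index(0,15), Index(0,15));
 //   df.read(f, sub, 0, 2);
 //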
139 template <class T, class M, class C>
140 bool read(Field<T,Dim,M,C>& f, const NDIndex<Dim> &readDomain,
141 unsigned int varID, unsigned int record) {
142
143 // sanity checking for input arguments and state of this object
144 bool canread = false;
145 if (!ConfigOK) {
146 ERRORMSG("Cannot read in DiscField::read - config file error." << endl);
147 } else if (DataDimension != Dim) {
148 ERRORMSG("Bad dimension "<< DataDimension <<" in DiscField::read"<<endl);
149 ERRORMSG("(" << DataDimension << " != " << Dim << ")" << endl);
150 } else if (WritingFile) {
151 ERRORMSG("DiscField::read called for DiscField opened for write."<<endl);
152 } else if (varID >= NumFields) {
153 ERRORMSG(varID << " is a bad Field ID in DiscField::read." << endl);
154 ERRORMSG("(" << varID << " is >= " << NumFields << ")" << endl);
155 } else if (record >= NumRecords) {
156 ERRORMSG(record << " is a bad record number in DiscField::read."<<endl);
157 ERRORMSG("(" << record << " is >= " << NumRecords << ")" << endl);
158 } else if (!(f.getLayout().getDomain().contains(readDomain))) {
159 ERRORMSG("DiscField::read - the total field domain ");
160 ERRORMSG(f.getLayout().getDomain() << " must contain the requested ");
161 ERRORMSG("read domain " << readDomain << endl);
162 } else if (!(get_Domain().contains(readDomain))) {
163 ERRORMSG("DiscField::read - the DiscField domain ");
164 ERRORMSG(get_Domain() << " must contain the requested ");
165 ERRORMSG("read domain " << readDomain << endl);
166 } else {
167 canread = true;
168 }
169
170 // If there was an error, we will abort
171 if (!canread) {
172 Ippl::abort("Exiting due to DiscField error.");
173 return false;
174 }
175
176 // A typedef used later
177 typedef typename LField<T,Dim>::iterator LFI;
179
180 // Start timer for just the read portion
181 static IpplTimings::TimerRef readtimer =
182 IpplTimings::getTimer("DiscField read");
183 IpplTimings::startTimer(readtimer);
184
185 // Get a new tag value for this read operation, used for all sends
186 // to other nodes with data.
187 int tag = Ippl::Comm->next_tag(DF_READ_TAG, DF_TAG_CYCLE);
188
189 // At the start of a new record, determine how many elements of the
190 // Field should be stored into this node's vnodes.
191 int expected = compute_expected(f.getLayout(), readDomain);
192
193 // On all nodes, loop through all the file sets, and:
194 // 1. box0: Get the number of vnodes stored there, from the layout file
195 // 2. box0: Get offset information for all the vnodes, and
196 // assign selected vnodes to other nodes on the same SMP to read.
197 // 3. For each vnode assigned to a processor to read:
198 // - read data (if necessary)
199 // - distribute data to interested parties, or yourself
200 // On all nodes, when you get some data from another node:
201 // - copy it into the relevant vnode
202 // - decrement your expected value.
203 // When expected hits zero, we're done with reading on that node.
204
205 for (unsigned int sf=0; sf < numFiles(); ++sf) {
206
207 // Create the data file handle, but don't yet open it ... only open
208 // it if we need to later (if we run into any uncompressed blocks).
209 int outputDatafd = (-1);
210
211 // offset data read in from file or obtained from the box0 node
212 std::vector<DFOffsetData<Dim,T> > offdata;
213
214 // the number of vnodes we'll be working on on this node
215 int vnodes = 0;
216
217 // the maximum number of elements we'll need to have buffer space for
218 int maxsize = 0;
219
220 // on box0 nodes, read in the layout and offset info
221 if ((unsigned int) Ippl::myNode() == myBox0()) {
222
223 // Get the number of vnodes in this file.
224 vnodes = read_layout(record, sf);
225
226 // Get the offset data for this field and record.
227 read_offset(varID, record, sf, offdata, vnodes);
228 }
229
230 // On all nodes, either send out or receive in offset information.
231 // Some nodes will not get any, and will not have to do any reading.
232 // But those that do, will read in data for the vnodes they are
233 // assigned. 'vnodes' will be set to the number of vnodes assigned
234 // for reading from this node, and 'maxsize' will be set
235 // to the maximum size of the vnodes in this file, for use in
236 // preparing the buffer that will be used to read those vnodes.
237 distribute_offsets(offdata, vnodes, maxsize, readDomain);
238
239 // Loop through all the vnodes now; they will appear in any
240 // order, which is fine, we just read them and see where they
241 // go. The info in the offset struct includes the domain for that
242 // block and whether it was written compressed or not.
243
244 for (int vn=0; vn < vnodes; ++vn) {
245 // Create an NDIndex object storing the vnode domain for this vnode.
246 NDIndex<Dim> vnodeblock;
247 offset_data_to_domain(offdata[vn], vnodeblock);
248
249 // If there is no intersection of this vnode and the read-domain,
250 // we can just skip it entirely.
251 if (! vnodeblock.touches(readDomain)) {
252 continue;
253 }
254
255 // Compute the size of a block to add to the base of this block,
256 // based on the chunk size. If the data is compressed, this won't
257 // matter.
258 int msdim = (Dim-1); // this will be zero-based
259 int chunkelems = Ippl::chunkSize() / sizeof(T);
260 NDIndex<Dim> chunkblock = chunk_domain(vnodeblock, chunkelems, msdim,
261 offdata[vn].isCompressed);
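      // (Illustrative example, assuming chunk_domain behaves as documented
      //  below: for a 3D vnode [0:63][0:63][0:63] and chunkelems = 8192 it
      //  could return a zero-based block of two full x-y slices,
      //  [0:63][0:63][0:1], since 64*64*2 == 8192.)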
262
263 // Initialize the NDIndex we'll use to indicate what portion of the
264 // domain we're reading and processing.
265 NDIndex<Dim> currblock = vnodeblock;
266 currblock[msdim] = Index(vnodeblock[msdim].first() - 1,
267 vnodeblock[msdim].first() - 1);
268 for (unsigned int md = (msdim+1); md < Dim; ++md)
269 currblock[md] = Index(vnodeblock[md].first(),vnodeblock[md].first());
270
271 // Initialize the offset value for this vnode. The seek position
272 // is stored as a byte offset, although it is read from disk as
273 // a number of elements offset from the beginning.
274 Offset_t seekpos = (-1);
275
276 // Loop through the chunks, reading and processing each one.
277 int unread = vnodeblock.size();
278 while (unread > 0) {
279 // Compute the domain of the chunk we'll work on now, and store
280 // this in currblock.
281
282 // First determine if we're at the end of our current incr dimension,
283 // and determine new bounds
284 bool incrhigher=(currblock[msdim].last()==vnodeblock[msdim].last());
285 int a = (incrhigher ?
286 vnodeblock[msdim].first() :
287 currblock[msdim].last() + 1);
288 int b = a + chunkblock[msdim].length() - 1;
289 if (b > vnodeblock[msdim].last())
290 b = vnodeblock[msdim].last();
291
292 // Increment this dimension
293 currblock[msdim] = Index(a, b);
294
295 // Increment higher dimensions, if necessary
296 if (incrhigher) {
297 for (unsigned int cd = (msdim+1); cd < Dim; ++cd) {
298 if (currblock[cd].last() < vnodeblock[cd].last()) {
299 // This dim is not at end, so just inc by 1
300 currblock[cd] = Index(currblock[cd].first() + 1,
301 currblock[cd].last() + 1);
302 break;
303 } else {
304 // Move this dimension back to start, and go on to next one
305 currblock[cd] = Index(vnodeblock[cd].first(),
306 vnodeblock[cd].first());
307 }
308 }
309 }
310
311 // Decrement our unread count, since we'll process this block
312 // either by actually reading it or getting its compressed value
313 // from the offset file, if we have to read it at all.
314 int nelems = currblock.size();
315 unread -= nelems;
316
317 // Set the seek position now, if necessary
318 if (!offdata[vn].isCompressed && seekpos < 0) {
319 seekpos = offdata[vn].offset * sizeof(T);
320 }
321
322 // At this point, we might be able to skip a lot of work if this
323 // particular chunk does not intersect with our read domain any.
324 if (! currblock.touches(readDomain)) {
325 // Before we skip the rest, we must update the offset
326 Offset_t readbytes = nelems * sizeof(T);
327 seekpos += readbytes;
328
329 // Then, we're done with this chunk, move on to the next.
330 continue;
331 }
332
333 // Put the intersecting domain in readDomainSection.
334 NDIndex<Dim> readDomainSection = currblock.intersect(readDomain);
335
336 // if it is not compressed, read in the data. If it is,
337 // just keep the buffer pointer at zero.
338 T *buffer = 0;
339 if (!offdata[vn].isCompressed) {
340 // If we have not yet done so, open the data file.
341 if (outputDatafd < 0) {
342 outputDatafd = open_df_file_fd(Config->getFilename(sf), ".data",
343 O_RDONLY);
344 }
345
346 // Resize the read buffer in case it is not large enough.
347 // We use the max size for all the vnodes here, to avoid doing
348 // this more than once per file set. This also returns the
349 // pointer to the buffer to use, as a void *, which we cast
350 // to the proper type. For direct-io, we might need to make
351 // this a little bigger to match the device block size.
352
353 long nbytes = maxsize*sizeof(T);
354 buffer = static_cast<T *>(DiscBuffer::resize(nbytes));
355
356 // Create some initial values for what and where to read.
357 // We might adjust these if we're doing direct-io.
358 T * readbuffer = buffer;
359 Offset_t readbytes = nelems * sizeof(T);
360 Offset_t readoffset = seekpos;
361
362 // seekpos was only used to set readoffset, so we can update
363 // seekpos now. Add in the extra amount we'll be reading.
364 seekpos += readbytes;
365
366 // Read data in a way that might do direct-io
367 read_data(outputDatafd, readbuffer, readbytes, readoffset);
368 }
369
370 // we have the data block now; find out where the data should
371 // go, and either send the destination node a message, or copy
372 // the data into the destination lfield.
373
374 // Set up to loop over the touching remote vnodes, and send out
375 // messages
377 // int remaining = nelems;
378 int remaining = readDomainSection.size();
379
380 // compute what remote vnodes touch this block's domain, and
381 // iterate over them.
382 // typename FieldLayout<Dim>::touch_range_dv
383 // range(f.getLayout().touch_range_rdv(currblock));
384 typename FieldLayout<Dim>::touch_range_dv
385 range(f.getLayout().touch_range_rdv(readDomainSection));
386 for (typename FieldLayout<Dim>::touch_iterator_dv rv_i = range.first; rv_i != range.second; ++rv_i) {
387 // Compute the intersection of our domain and the remote vnode
388 // NDIndex<Dim> ri = currblock.intersect((*rv_i).first);
389 NDIndex<Dim> ri = readDomainSection.intersect((*rv_i).first);
390
391 // Find out who will be sending this data
392 int rnode = (*rv_i).second->getNode();
393
394 // Send this data to that remote node, by preparing a
395 // CompressedBrickIterator and putting in the proper data.
396 Message *msg = new Message;
397 ri.putMessage(*msg);
398 LFI cbi(buffer, ri, currblock, offdata[vn].compressedVal);
399 cbi.TryCompress();
400 cbi.putMessage(*msg, false); // 'false' = avoid copy if possible
401 Ippl::Comm->send(msg, rnode, tag);
402
403 // Decrement the remaining count
404 remaining -= ri.size();
405 }
406
407 // loop over touching local vnodes, and copy in data, if there
408 // is anything left
409 typename BareField<T,Dim>::iterator_if lf_i = f.begin_if();
410 for (; remaining > 0 && lf_i != f.end_if(); ++lf_i) {
411 // Get the current LField and LField domain, and make an alias
412 // for the domain of the block we've read from disk
413 LField<T,Dim> &lf = *(*lf_i).second;
414 const NDIndex<Dim>& lo = lf.getOwned();
415 // const NDIndex<Dim>& ro = currblock;
416 const NDIndex<Dim>& ro = readDomainSection;
417
418 // See if it touches the domain of the recently read block.
419 if (lo.touches(ro)) {
420 // Find the intersection.
421 NDIndex<Dim> ri = lo.intersect(ro);
422
423 // If these are compressed we might not have to do any work.
424 if (lf.IsCompressed() &&
425 offdata[vn].isCompressed &&
426 ro.contains(lo)) {
427 PETE_apply(OpAssign(),*lf.begin(),offdata[vn].compressedVal);
428 } else {
429 // Build an iterator for the read-data block
430 // LFI rhs_i(buffer, ri, ro, offdata[vn].compressedVal);
431 LFI rhs_i(buffer, ri, currblock, offdata[vn].compressedVal);
432
433 // Could we compress that rhs iterator?
434 if (rhs_i.CanCompress(*rhs_i) && f.compressible() &&
435 ri.contains(lf.getAllocated())) {
436 // Compress the whole LField to the value on the right
437 lf.Compress(*rhs_i);
438 } else { // Assigning only part of LField on the left
439 // Must uncompress lhs, if not already uncompressed
440 lf.Uncompress(true);
441
442 // Get the iterator for it.
443 LFI lhs_i = lf.begin(ri);
444
445 // And do the assignment.
446 Expr_t(lhs_i,rhs_i).apply();
447 }
448 }
449
450 // Decrement the expected count and the remaining count.
451 // Remaining is how many cells are left of the current block.
452 // Expected is how many cells this node expects to get copied
453 // into its blocks.
454 int bsize = ri.size();
455 remaining -= bsize;
456 expected -= bsize;
457 }
458 }
459
460 // If we're here and still have remaining elements, we're screwed.
461 if (remaining > 0)
462 Ippl::abort("remaining > 0 at end of box0 vnode read!!!");
463 }
464 }
465
466 // Close the data file now
467
468 if (outputDatafd >= 0)
469 close(outputDatafd);
470 }
471
472 // On all nodes, now, keep receiving messages until our expected count
473 // goes to zero.
474 while (expected > 0) {
475 // Receive the next message from any node with the current read tag
476 int node = COMM_ANY_TAG;
477 Message *msg = Ippl::Comm->receive_block(node, tag);
478
479 // Extract the domain from the message
480 NDIndex<Dim> ro;
481 ro.getMessage(*msg);
482
483 // Extract the data from the message
484 T rhs_compressed_data;
485 LFI rhs_i(rhs_compressed_data);
486 rhs_i.getMessage(*msg);
487
488 // Find what local LField contains this domain
489 typename BareField<T,Dim>::iterator_if lf_i = f.begin_if();
490 bool foundlf = false;
491 for (; lf_i != f.end_if(); ++lf_i) {
492 // Get the current LField and LField domain
493 LField<T,Dim> &lf = *(*lf_i).second;
494 const NDIndex<Dim>& lo = lf.getOwned();
495
496 // See if it contains the domain of the recently received block.
497 // If so, assign the block to this LField
498 if (lo.contains(ro)) {
499
500 // Check and see if we really have to do this.
501 if ( !(rhs_i.IsCompressed() && lf.IsCompressed() &&
502 (*rhs_i == *lf.begin())) ) {
503 // Yep. gotta do it, since something is uncompressed or
504 // the values are different.
505
506 // Uncompress the LField first, if necessary. It's necessary
507 // if the received block size is smaller than the LField's.
508 lf.Uncompress(!ro.contains(lo));
509
510 // Make an iterator over the received block's portion of the
511 // LField
512 LFI lhs_i = lf.begin(ro);
513
514 // Do the assignment.
515 Expr_t(lhs_i,rhs_i).apply();
516 }
517
518 // Update our expected value
519 expected -= ro.size();
520
521 // Indicate we're done, since the block we received is
522 // guaranteed to be within only one of our LFields.
523 foundlf = true;
524 break;
525 }
526 }
527
528 // Make sure we found what vnode this message is for; if we don't
529 // we're screwed
530 if (!foundlf) {
531 ERRORMSG("Did not find destination local vnode for received domain ");
532 ERRORMSG(ro << " from node " << node << endl);
533 Ippl::abort("DID NOT FIND DESTINATION LOCAL VNODE IN DISCFIELD::READ");
534 }
535
536 // Now we are done with the message
537 delete msg;
538 }
539
540 // We're all done reading, so clean up
541 IpplTimings::stopTimer(readtimer);
542
543 // This is just like an assign, so set dirty flags, fill guard cells,
544 // and try to compress the result.
545
546 f.setDirtyFlag();
547 f.fillGuardCellsIfNotDirty();
548 f.Compress();
549
550 // Let everything catch up
551 Ippl::Comm->barrier();
552
553 // print out malloc info at end of read
554 // Inform memmsg("DiscField::read::mallinfo");
555 // struct mallinfo mdata;
556 // mdata = mallinfo();
557 // memmsg << "After read, new malloc info:" << endl;
558 // memmsg << "----------------------------" << endl;
559 // memmsg << " total arena space = " << mdata.arena << endl;
560 // memmsg << " ordinary blocks = " << mdata.ordblks << endl;
561 // memmsg << " small blocks = " << mdata.smblks << endl;
562 // memmsg << " user-held space = " << mdata.usmblks+mdata.uordblks;
563 // memmsg << endl;
564 // memmsg << " free space = " << mdata.fsmblks+mdata.fordblks;
565 // memmsg << endl;
566
567 return true;
568 }
569
570 // versions of read that provide default values for the arguments
571 template <class T, class M, class C>
572 bool read(Field<T,Dim,M,C>& f, unsigned int varID, unsigned int record) {
573 return read(f, f.getLayout().getDomain(), varID, record);
574 }
575
576 template <class T, class M, class C>
577 bool read(Field<T,Dim,M,C>& f, const NDIndex<Dim> &readDomain,
578 unsigned int varID) {
579 return read(f, readDomain, varID, 0);
580 }
581
582 template <class T, class M, class C>
583 bool read(Field<T,Dim,M,C>& f, unsigned int varID) {
584 return read(f, f.getLayout().getDomain(), varID, 0);
585 }
586
587 template <class T, class M, class C>
588 bool read(Field<T,Dim,M,C>& f, const NDIndex<Dim> &readDomain) {
589 return read(f, readDomain, 0, 0);
590 }
591
592 template <class T, class M, class C>
593 bool read(Field<T,Dim,M,C>& f) {
594 return read(f, f.getLayout().getDomain(), 0, 0);
595 }
596
597
599 // write the data from the given Field into the file. This can be used
600 // to either append new records, or to overwrite an existing record.
601 // varID = index for which field is being written ... this should be from
602 // 0 ... (numFields-1), for the case where the file contains
603 // more than one Field. Writing continues for the current record
604 // until all numField fields have been written; then during the
605 // next write, the record number is incremented.
606 //--------------------------------------------------------------------
607 // notes for documentation:
608 // - for now, you cannot overwrite Field data on successive writes
609 // (this can easily be added when needed - some restrictions will apply)
610 // - when writing, all the Fields in a record must have the same layout
611 // - layouts (and vnodes) can vary between records, however
612 // - separate DiscField objects need to be opened for reading and writing
613 //--------------------------------------------------------------------
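 // Usage sketch (illustrative; the names and the two-field setup are
 // hypothetical): write two Fields into each record; once both have been
 // written, the next write starts a new record, as described above.
 //
 //   DiscField<2> df("mydata", "ioconfig", 2, "Field<double,2>");
 //   df.write(rho, 0);
 //   df.write(phi, 1);   // completes the current record
 //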
614 template<class T, class M, class C>
615 bool write(Field<T,Dim,M,C>& f, unsigned int varID) {
616
617 // sanity checking for input arguments and state of this object
618 if (!ConfigOK) {
619 ERRORMSG("Cannot write in DiscField::write - config file error."<<endl);
620 Ippl::abort("Exiting due to DiscField error.");
621 return false;
622 }
623 else if (!WritingFile) {
624 ERRORMSG("DiscField::write called for DiscField opened for read."<<endl);
625 Ippl::abort("Exiting due to DiscField error.");
626 return false;
627 }
628 else if (varID >= NumFields) {
629 ERRORMSG(varID << " is a bad variable ID in DiscField::write." << endl);
630 Ippl::abort("Exiting due to DiscField error.");
631 return false;
632 }
633 else if (NeedStartRecord == 0 && ValidField[varID]) {
634 ERRORMSG("DiscField:write - attempt to overwrite Field " << varID);
635 ERRORMSG(" at record " << NumRecords - 1 << endl);
636 Ippl::abort("Exiting due to DiscField error.");
637 return false;
638 }
639
640 // INCIPPLSTAT(incDiscWrites);
641
642 // useful typedefs for later
643 typedef typename LField<T,Dim>::iterator LFI;
644
645 // Get a new tag value for this write operation, used for all sends
646 // to other nodes with data.
647 int tag = Ippl::Comm->next_tag(FB_WRITE_TAG, FB_TAG_CYCLE);
648
649 // Get the layout reference, and set up an iterator over lfields
650 FieldLayout<Dim>& layout = f.getLayout();
651 typename Field<T,Dim,M,C>::iterator_if local;
652
653 // do we need to start a new record? If so, extend data structures and
654 // file storage
655 if (NeedStartRecord != 0) {
656 // convert the layout information for the field into internal storage,
657 // represented as a map from NDIndex --> owner node
658 if (!make_globalID(layout)) {
659 ERRORMSG("DiscField::write - all Fields must have the same ");
660 ERRORMSG("global domain in a single DiscField.\n");
661 ERRORMSG("The original domain is " << get_Domain() << ", ");
662 ERRORMSG("the attempted new domain is " << layout.getDomain() << endl);
663 Ippl::abort("Exiting due to DiscField error.");
664 }
665
666 // update vnode and valid field information for new record
667 if (numFiles() > 0 && myBox0() == (unsigned int) Ippl::myNode()) {
668 int nvtally = 0;
669 NumVnodes[0].push_back(globalID.size());
670 if (NumRecords > 0)
671 nvtally = VnodeTally[0][NumRecords-1] + NumVnodes[0][NumRecords-1];
672 VnodeTally[0].push_back(nvtally);
673 }
674
675 // indicate we have not written out data for any fields for this record
676 for (unsigned int i=0; i < NumFields; ++i)
677 ValidField[i] = false;
678
679 // increment total record number
680 NumRecords++;
681 NumWritten = 0;
682
683 // update necessary data files at the start of a new record
684 if ((unsigned int) Ippl::myNode() == myBox0()) {
685 // Update the meta information ... this can be changed to be only
686 // written out during destruction.
687 if (!write_meta()) {
688 ERRORMSG("Could not write .meta file on node " << Ippl::myNode());
689 ERRORMSG(endl);
690 Ippl::abort("Exiting due to DiscField error.");
691 return false;
692 }
693
694 // write out the NDIndex objects from the FieldLayout to the
695 // .layout file
696 if (!write_layout()) {
697 ERRORMSG("Could not update .layout file on node "<<Ippl::myNode());
698 ERRORMSG(endl);
699 Ippl::abort("Exiting due to DiscField error.");
700 return false;
701 }
702 }
703 }
704
705 // On box0 nodes, do most of the work ... other nodes forward data
706 // to box0 nodes.
707 if ((unsigned int) Ippl::myNode() == myBox0()) {
708
709 // Open the offset data file, and write the Field number.
710 // This precedes all the OffsetData structs for vnodes in the Field,
711 // which are in random order for the given Field. The OffsetData
712 // structs contain a field 'vnode' which tells which vnode the data is
713 // for (0 ... numVnodes - 1).
714 FILE *outputOffset = open_df_file(Config->getFilename(0),
715 ".offset", std::string("a"));
716 int wVarID = (int)varID;
717 if (fwrite(&wVarID, sizeof(int), 1, outputOffset) != 1) {
718 ERRORMSG("DiscField::write - cannot write field number to .offset ");
719 ERRORMSG("file" << endl);
720 Ippl::abort("Exiting due to DiscField error.");
721 fclose(outputOffset);
722 return false;
723 }
724
725 // Initialize output file handle ... we might never write anything to
726 // it if the field is completely compressed.
727 int outputDatafd = open_df_file_fd(Config->getFilename(0), ".data",
728 O_RDWR|O_CREAT);
729 // Later we will receive message from other nodes. This is the
730 // number of blocks we should receive. We'll decrease this by
731 // the number of vnodes we already have on this processor, however.
732 int unreceived = globalID.size();
733 int fromothers = unreceived - layout.size_iv();
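      // (For example, if globalID holds 8 vnodes for this SMP and this box0
      //  node owns 3 of them locally, unreceived starts at 8 and fromothers
      //  at 5: the loop below interleaves receiving 5 remote blocks with
      //  writing the 3 local ones.)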
734
735 // Now we start processing blocks. We have 'unreceived' total to
736 // write, either from ourselves or others. We first check for
737 // messages, if there are any to receive, get one and process it,
738 // otherwise write out one of our local blocks.
739
740 local = f.begin_if();
741 while (unreceived > 0) {
742 // Keep processing remote blocks until we don't see one available
743 // or we've received them all
744 bool checkremote = (fromothers > 0);
745 while (checkremote) {
746 // Get a message
747 int any_node = COMM_ANY_NODE;
748 Message *msg = Ippl::Comm->receive(any_node, tag);
749
750 // If we found one, process it
751 if (msg != 0) {
752 // Extract the domain from the message
753 NDIndex<Dim> ro;
754 ro.getMessage(*msg);
755
756 // Extract the data from the message
757 T rhs_compressed_data;
758 LFI cbi(rhs_compressed_data);
759 cbi.getMessage(*msg);
760
761 // Write this data out
762 write_offset_and_data(outputOffset, outputDatafd, cbi, ro);
763
764 // finish with this message
765 delete msg;
766
767 // Update counters
768 unreceived -= 1;
769 fromothers -= 1;
770 } else {
771 // We didn't see one, so stop checking for now
772 checkremote = false;
773 }
774 }
775
776 // Process a local block if any are left
777 if (local != f.end_if()) {
778 // Cache some information about this local field.
779 LField<T,Dim>& l = *(*local).second.get();
780 LFI cbi = l.begin();
781
782 // Write this data out
783 write_offset_and_data(outputOffset, outputDatafd, cbi, l.getOwned());
784
785 // Update counters
786 ++local;
787 unreceived -= 1;
788 }
789 }
790
791 // Close the output data file
792 if (outputDatafd >= 0)
793 close(outputDatafd);
794
795 // Close the output offset file
796 if (outputOffset != 0)
797 fclose(outputOffset);
798
799 } else {
800 // On other nodes, just send out our LField blocks.
801 for (local = f.begin_if(); local != f.end_if(); ++local) {
802 // Cache some information about this local field.
803 LField<T,Dim>& l = *(*local).second.get();
804 const NDIndex<Dim> &ro = l.getOwned();
805 LFI cbi = l.begin();
806
807 // Create a message to send to box0
808 Message *msg = new Message;
809
810 // Put in the domain and data
811 ro.putMessage(*msg);
812 cbi.putMessage(*msg, false); // 'false' = avoid copy if possible
813
814 // Send this message to the box0 node.
815 int node = myBox0();
816 Ippl::Comm->send(msg, node, tag);
817 }
818 }
819
820 // indicate we've written one more field
821 ValidField[varID] = true;
823
824 // Let everything catch up
825 Ippl::Comm->barrier();
826
827 // if we actually make it down to here, we were successful in writing
828 return true;
829 }
830
831 // version of write that provides default value for varID
832 template<class T, class M, class C>
833 bool write(Field<T,Dim,M,C>& f) {
834 return write(f, 0);
835 }
836
837
838 //
839 // console printing methods
840 //
841
842 // print out debugging info to the given stream
843 void printDebug(std::ostream&);
844 void printDebug();
845
846private:
847 // private typedefs
848 typedef vmap<NDIndex<Dim>, int> GlobalIDList_t;
849 typedef long long Offset_t;
850
851 //
852 // meta data (static info for the file which does not change)
853 //
854
855 // the configuration file mechanism
856 DiscConfig *Config;
857 bool ConfigOK;
858
859 // flag which is true if we're writing, false if reading
860 bool WritingFile;
861
862 // the base name for the output file, and the descriptive type string
863 std::string BaseFile;
864 std::string TypeString;
865 std::string DiscType;
866
867 // dimension of data in file ... this may not match the template dimension.
868 unsigned int DataDimension;
869
870 //
871 // dynamic data (varies as records are written or read)
872 //
873
874 // do we need to start a new record during the next write? Or, if reading,
875 // which record have we currently read into our Size and globalID variables?
876 // If this is < 0, we have not read in any yet.
877 int NeedStartRecord;
878
879 // the number of fields and records in the file, and the number of Fields
880 // written to the current record
881 unsigned int NumFields;
882 unsigned int NumRecords;
883 unsigned int NumWritten;
884
885 // this keeps track of where in the .data file writing is occurring
886 // it is correlated with a given Field and record through the .offset file
887 Offset_t CurrentOffset;
888
889 // the global domain of the Fields in this DiscField object
890 NDIndex<Dim> Size;
891
892 // keep track of which Fields have been written to the current record
893 std::vector<bool> ValidField;
894
895 // the running tally of vnodes ON THIS SMP for each record, for each file.
896 // VnodeTally[n] = number of vnodes written out total in prev records.
897 // NOTE: this is not the number of vnodes TOTAL, it is the number of
898 // vnodes on all the processors which are on this same SMP machine.
899 std::vector<int> *VnodeTally;
900
901 // the number of vnodes ON THIS SMP in each record, for each file.
902 // NumVnodes[n] = number of vnodes in record n.
903 // NOTE: this is not the number of vnodes TOTAL, it is the number of
904 // vnodes on all the processors which are on this same SMP machine.
905 std::vector<int> *NumVnodes;
906
907 // store a mapping from an NDIndex to the physical node it resides on.
908 // These values are stored only for those vnodes on processors which are
909 // on our same SMP. This must be remade when the layout changes
910 // (e.g., every time we write a record for a Field,
911 // since each Field can have a different layout and each Field's layout
912 // can change from record to record).
913 // key: local NDIndex, value: node
914 GlobalIDList_t globalID;
915
916 //
917 // functions used to build/query information about the processors, etc.
918 //
919
920 // perform initialization based on the constructor arguments
921 void initialize(const char* base, const char* config,
922 const char* typestr, unsigned int numFields);
923
924 // open a file in the given mode. If an error occurs, print a message (but
925 // only if the last argument is true).
926 // fnm = complete name of file (can include a path)
927 // mode = open method ("r" == read, "rw" == read/write, etc.)
928 FILE *open_df_file(const std::string& fnm, const std::string& mode);
929 FILE *open_df_file(const std::string& fnm, const std::string& suffix,
930 const std::string& mode);
931
932 // Open a file using direct IO, if possible, otherwise without. This
933 // returns a file descriptor, not a FILE handle. If the file was opened
934 // using direct-io, also initialize the dioinfo member data.
935 // The last argument indicates whether to init (create)
936 // the file or just open for general read-write.
937 int open_df_file_fd(const std::string& fnm, const std::string& suf, int flags);
938
939 // create the data files used to store Field data. Return success.
940 bool create_files();
941
942 // return the total number of SMP's in the system
943 unsigned int numSMPs() const {
944 return Config->numSMPs();
945 }
946
947 // return the total number of files which are being read or written
948 unsigned int fileSMPs() const {
949 return Config->fileSMPs();
950 }
951
952 // return the index of our SMP
953 unsigned int mySMP() const {
954 return Config->mySMP();
955 }
956
957 // return the Box0 node for this SMP
958 unsigned int myBox0() const {
959 return Config->getSMPBox0();
960 }
961
962 // return the number of files on our SMP (if no arg given) or the
963 // given SMP
964 unsigned int numFiles() const {
965 return Config->getNumFiles();
966 }
967 unsigned int numFiles(unsigned int s) const {
968 return Config->getNumFiles(s);
969 }
970
971 // compute how many physical nodes there are on the same SMP as the given
972 // pnode.
973 unsigned int pNodesPerSMP(unsigned int node) const {
974 return Config->pNodesPerSMP(node);
975 }
976
977 // parse the IO configuration file and store the information.
978 // arguments = name of config file, and if we're writing a file (true) or
979 // reading (false)
980 bool parse_config(const char *, bool);
981
982 // Compute how many elements we should expect to store into the local
983 // node for the given FieldLayout. Just loop through the local vnodes
984 // and accumulate the sizes of the owned domains. This is modified
985 // by the second "read domain" argument, which might be a subset of
986 // the total domain.
987 int compute_expected(const FieldLayout<Dim> &, const NDIndex<Dim> &);
988
989 // Since the layout can be different every time write
990 // is called, the globalID container needs to be recalculated. The total
991 // domain of the Field should not change, though, just the layout. Return
992 // success.
993 bool make_globalID(FieldLayout<Dim> &);
994
995 // Compute the size of a domain, zero-based, that has a total
996 // size <= chunkelems and has evenly chunked slices.
997 NDIndex<Dim> chunk_domain(const NDIndex<Dim> &currblock,
998 int chunkelems,
999 int &msdim,
1000 bool iscompressed);
1001
1002 //
1003 //
1004 // read/write functions for individual components
1005 //
1006
1007 // read or write .meta data file information. Return success.
1008 bool write_meta();
1009 bool read_meta();
1010
1011 // read or write NDIndex values for a file. Return success.
1012 bool read_NDIndex(FILE *, NDIndex<Dim> &);
1013 bool write_NDIndex(FILE *, const NDIndex<Dim> &);
1014
1015 // Write .layout data file information. Return success.
1016 bool write_layout();
1017
1018 // Read layout info for one file set in the given record.
1019 int read_layout(int record, int sf);
1020
1022 // Write out the data in a provided brick iterator with given owned
1023 // domain, updating both the offset file and the data file. The .offset file
1024 // contains info on where in the main data file each vnode's data can be
1025 // found. The .offset file's structure looks like this:
1026 // |--Record n----------------------------|
1027 // | Field ID a |
1028 // | VNa | VNb | VNc | VNd | .... | VNx |
1029 // | Field ID b |
1030 // | VNa | VNb | VNc | VNd | .... | VNx |
1031 // | Field ID c |
1032 // | VNa | VNb | VNc | VNd | .... | VNx |
1033 // |--------------------------------------|
1034 // where
1035 // VNn is the data for a single Offset struct. The sets of Offset structs
1036 // for the Fields can appear in any order in a record, and the Offsets
1037 // structs within a specific Field can also appear in any order.
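 // (Consequently, the byte position of record r in the .offset file is
 //  NumFields * (r * sizeof(int) + V * sizeof(DFOffsetData<Dim,T>)), where V
 //  is the per-field number of vnode entries summed over all earlier records;
 //  this is exactly the seek position computed in read_offset below.)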
1038 template<class T>
1039 void write_offset_and_data(FILE *outputOffset, int outputDatafd,
1040 CompressedBrickIterator<T,Dim> &cbi,
1041 const NDIndex<Dim> &owned) {
1042
1043 // Create an offset output file struct, and initialize what we can.
1044 // We must take care to first zero out the offset struct.
1045 DFOffsetData<Dim,T> offset;
1046 memset(static_cast<void *>(&offset), 0, sizeof(DFOffsetData<Dim,T>));
1047
1048 domain_to_offset_data(owned, offset);
1049 offset.isCompressed = cbi.IsCompressed();
1050 offset.offset = CurrentOffset;
1051
1052 // Set the compressed or uncompressed data in the offset struct
1053 if (offset.isCompressed) {
1054 // For compressed data, we just need to write out the entry to the
1055 // offset file ... that will contain the single compressed value.
1056 offset.compressedVal = *cbi;
1057 } else {
1058 // This is not compressed, so we must write to the data file. The
1059 // main question now is whether we can use existing buffers, or write
1060 // things out in chunks, or what.
1061
1062 // First, calculate how many elements to write out at a time. The
1063 // 'chunkbytes' might be adjusted to match the maximum amount of data
1064 // that can be written out in a single direct-io call. This is
1065 // true only if we are actually using direct-io, of course.
1066
1067 long elems = owned.size();
1068 long chunkbytes = Ippl::chunkSize();
1069 long chunksize = chunkbytes / sizeof(T);
1070 if (chunksize < 1 || chunksize > elems)
1071 chunksize = elems;
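        // (For example, with a 1 MB chunk size and T = double this gives
        //  chunksize = 131072 elements; any vnode smaller than that is
        //  written with a single write call.)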
1072
1073 // If cbi is iterating over its whole domain, we can just use the block
1074 // there as-is to write out data. So if cbiptr is set to a non-zero
1075 // address, we'll just use that for the data, otherwise we'll have
1076 // to copy to a buffer.
1077
1078 T *cbiptr = 0;
1079 if (cbi.whole())
1080 cbiptr = &(*cbi);
1081
1082 // Loop through the data, writing out chunks.
1083 int needwrite = elems;
1084 while (needwrite > 0) {
1085 // Find out how many elements we'll write this time.
1086 int amount = chunksize;
1087 if (amount > needwrite)
1088 amount = needwrite;
1089
1090 // Find the size of a buffer of at least large enough size. We
1091 // might need a slightly larger buffer if we are using direct-io,
1092 // where data must be written out in blocks with sizes that
1093 // match the device block size.
1094 size_t nbytes = amount*sizeof(T);
1095
1096 // Get a pointer to the next data, or copy more data into a buffer
1097 // Initially start with the vnode pointer
1098 T *buffer = cbiptr;
1099
1100 // If necessary, make a copy of the data
1101 if (buffer == 0) {
1102 buffer = static_cast<T *>(DiscBuffer::resize(nbytes));
1103
1104 // Copy data into this buffer from the iterator.
1105 T *bufptr = buffer;
1106 T *bufend = buffer + amount;
1107 for ( ; bufptr != bufend; ++bufptr, ++cbi)
1108 new (bufptr) T(*cbi);
1109 }
1110
1111 // Write the data now
1112 off_t seekoffset = CurrentOffset * sizeof(T);
1113 bool seekok = true;
1114 Timer wtimer;
1115 wtimer.clear();
1116 wtimer.start();
1117
1118 size_t nout = 0;
1119 if (::lseek(outputDatafd, seekoffset, SEEK_SET) == seekoffset) {
1120 char *wbuf = (char *)buffer;
1121 nout = ::write(outputDatafd, wbuf, nbytes);
1122 } else {
1123 seekok = false;
1124 }
1125
1126 wtimer.stop();
1127 DiscBuffer::writetime += wtimer.clock_time();
1128 DiscBuffer::writebytes += nbytes;
1129
1130 if (!seekok) {
1131 ERRORMSG("Seek error in DiscField::write_offset_and_data" << endl);
1132 ERRORMSG("Could not seek to position " << seekoffset << endl);
1133 Ippl::abort("Exiting due to DiscField error.");
1134 }
1135
1136 if (nout != nbytes) {
1137 ERRORMSG("Write error in DiscField::write_offset_and_data" << endl);
1138 ERRORMSG("Could not write " << nbytes << " bytes." << endl);
1139 Ippl::abort("Exiting due to DiscField error.");
1140 }
1141
1142 // Update pointers and counts
1143 needwrite -= amount;
1144 if (cbiptr != 0)
1145 cbiptr += amount;
1146
1147 // update the offset and stats
1148
1149 CurrentOffset += (nbytes / sizeof(T));
1150 }
1151 }
1152
1153 // write to offset file now
1154 if (fwrite(&offset, sizeof(DFOffsetData<Dim,T>), 1, outputOffset) != 1) {
1155 ERRORMSG("Write error in DiscField::write_offset_and_data" << endl);
1156 Ippl::abort("Exiting due to DiscField error.");
1157 }
1158 }
1159
1161 // seek to the beginning of the vnode data for field 'varID' in record
1162 // 'record', for file 'sf'. If not found, close the file and return false.
1163 // If it is found, read in all the offset records, and return them
1164 // in the provided vector.
1165 template <class T>
1166 bool read_offset(unsigned int varID,
1167 unsigned int record,
1168 unsigned int sf,
1169 std::vector<DFOffsetData<Dim,T> > &offdata,
1170 int vnodes) {
1171
1172 // Open the offset file
1173 FILE *outputOffset = open_df_file(Config->getFilename(sf),
1174 ".offset", std::string("r"));
1175
1176 // seek to the start of this record
1177 Offset_t seekpos = NumFields * (record * sizeof(int) +
1178 VnodeTally[sf][record] *
1179 sizeof(DFOffsetData<Dim,T>));
1180 if (fseek(outputOffset, seekpos, SEEK_SET) != 0) {
1181 ERRORMSG("Error seeking to position " << static_cast<long>(seekpos));
1182 ERRORMSG(" in .offset file " << endl);
1183 Ippl::abort("Exiting due to DiscField error.");
1184 fclose(outputOffset);
1185 return false;
1186 }
1187
1188 // now keep looking at the Field ID in this record until we find the one
1189 // we want
1190 unsigned int checked = 0;
1191 while (checked < NumFields) {
1192 // read the next field ID number
1193 int rVarID;
1194 if (fread(&rVarID, sizeof(int), 1, outputOffset) != 1) {
1195 ERRORMSG("Error reading field ID from .offset file" << endl);
1196 Ippl::abort("Exiting due to DiscField error.");
1197 fclose(outputOffset);
1198 return false;
1199 }
1200
1201 // is it what we want?
1202 if ((unsigned int) rVarID == varID) {
1203 // Yes it is, so read in the offset record data. First resize
1204 // the offset data vector.
1205 offdata.resize(vnodes);
1206 size_t result = fread(&(offdata[0]), sizeof(DFOffsetData<Dim,T>),
1207 offdata.size(), outputOffset);
1208 if (result != offdata.size()) {
1209 ERRORMSG("Read error in DiscField::find_file_in_offset" << endl);
1210 ERRORMSG("Result is " << result << ", should be ");
1211 ERRORMSG(offdata.size() << endl);
1212 ERRORMSG("outputOffset is " << (void *)outputOffset << endl);
1213 Ippl::abort("Exiting due to DiscField error.");
1214 fclose(outputOffset);
1215 return false;
1216 }
1217
1218 // And return success.
1219 fclose(outputOffset);
1220 return true;
1221 }
1222
1223 // it was not, so move on to the next one
1224 checked++;
1225 seekpos += (NumVnodes[sf][record] * sizeof(DFOffsetData<Dim,T>) +
1226 sizeof(int));
1227 if (fseek(outputOffset, seekpos, SEEK_SET) != 0) {
1228 ERRORMSG("Error seeking to position " << static_cast<long>(seekpos));
1229 ERRORMSG(" in .offset file " << endl);
1230 Ippl::abort("Exiting due to DiscField error.");
1231 fclose(outputOffset);
1232 return false;
1233 }
1234 }
1235
1236 // if we're here, we did not find the Field ID anywhere in the .offset file
1237 ERRORMSG("Could not find data for field " << varID << " of record ");
1238 ERRORMSG(record << " in .offset file." << endl);
1239 Ippl::abort("Exiting due to DiscField error.");
1240 fclose(outputOffset);
1241 return false;
1242 }
1243
1245 // On all nodes, either send out or receive in offset information.
1246 // Some nodes will not get any, and will not have to do any reading.
1247 // But those that do, will read in data for the vnodes they are
1248 // assigned. 'vnodes' will be set to the number of vnodes assigned
1249 // for reading from this node, and 'maxsize' will be set
1250 // to the maximum size of the vnodes in this file, for use in
1251 // preparing the buffer that will be used to read those vnodes.
1252 template <class T>
1253 void distribute_offsets(std::vector<DFOffsetData<Dim,T> > &offdata,
1254 int &vnodes, int &maxsize,
1255 const NDIndex<Dim> &readDomain) {
1256
1257 // Initialize the vnode and maxsize values.
1258 vnodes = 0;
1259 maxsize = 0;
1260
1261 // If parallel reads are turned off, just box0 nodes will read
1262 if (!Ippl::perSMPParallelIO()) {
1263 if ((unsigned int) Ippl::myNode() == myBox0())
1264 vnodes = offdata.size();
1265
1266 } else {
1267
1268 // Generate the tag to use
1269 int tag = Ippl::Comm->next_tag(DF_OFFSET_TAG, DF_TAG_CYCLE);
1270
1271 // Nodes that do not have their box0 process on the same SMP should
1272 // not receive anything
1273 if (Config->getNodeSMPIndex(myBox0()) != mySMP()) {
1274 return;
1275 }
1276
1277 // All box0 nodes will (possibly) send out offset data. Others will
1278 // receive it, even if it just says "you don't get any vnodes."
1279 if ((unsigned int) Ippl::myNode() == myBox0()) {
1280 // How many offset blocks per processor
1281 int pernode = offdata.size() / pNodesPerSMP(myBox0());
1282
1283 // Extra vnodes we might have to give to others
1284 int extra = offdata.size() % pNodesPerSMP(myBox0());
1285
1286 // The next vnode to assign; box0 will always get an extra one if
1287 // necessary.
1288 int nextvnode = pernode;
1289 if (extra > 0) {
1290 nextvnode += 1;
1291 extra -= 1;
1292 }
1293
1294 // box0 nodes get the first 'nextvnode' vnodes.
1295 vnodes = nextvnode;
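        // (Worked example: 10 vnodes on an SMP with 4 processes gives
        //  pernode = 2 and extra = 2, so box0 keeps 3 vnodes, the first
        //  other node is sent 3, and the remaining two nodes get 2 each.)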
1296
1297 // Loop through the nodes on this SMP; nodes other than box0 will
1298 // get sent a message.
1299 for (unsigned int n=0; n < Config->getNumSMPNodes(); ++n) {
1300 int node = Config->getSMPNode(mySMP(), n);
1301 if (node != Ippl::myNode()) {
1302 // How many vnodes to assign?
1303 int numvnodes = pernode;
1304 if (extra > 0) {
1305 numvnodes += 1;
1306 extra -= 1;
1307 }
1308
1309 // Create a message for this other node, storing:
1310 // - number of vnodes (int)
1311 // - list of vnode offset structs (list)
1312 Message *msg = new Message;
1313 msg->put(numvnodes);
1314 if (numvnodes > 0) {
1315 msg->setCopy(false);
1316 msg->setDelete(false);
1317 msg->putmsg(static_cast<void *>(&(offdata[nextvnode])),
1318 sizeof(DFOffsetData<Dim,T>),
1319 numvnodes);
1320 }
1321
1322 // Send this message to the other node
1323 Ippl::Comm->send(msg, node, tag);
1324
1325 // Update what the next vnode info to send is
1326 nextvnode += numvnodes;
1327 }
1328 }
1329
1330 // At the end, we should have no vnodes left to send
1331 if ((unsigned int) nextvnode != offdata.size())
1332 Ippl::abort("ERROR: Could not give away all my vnodes!");
1333
1334 } else {
1335 // On non-box0 nodes, receive offset info
1336 int node = myBox0();
1337 Message *msg = Ippl::Comm->receive_block(node, tag);
1338
1339 // Get the number of vnodes to store here
1340 msg->get(vnodes);
1341
1342 // If this is > 0, copy out vnode info
1343 if (vnodes > 0) {
1344 // resize the vector to make some storage
1345 offdata.resize(vnodes);
1346
1347 // get offset data from the message
1348 ::getMessage_iter(*msg, &(offdata[0]));
1349 }
1350
1351 // Done with the message now.
1352 delete msg;
1353 }
1354 }
1355
1356 // Now, finally, on all nodes we scan the vnodes to find out the maximum
1357 // size of the buffer needed to read this data in.
1358 for (int v=0; v < vnodes; ++v) {
1359 // Convert data to NDIndex
1360 NDIndex<Dim> dom;
1361 offset_data_to_domain(offdata[v], dom);
1362 if (dom.touches(readDomain)) {
1363 // Compute chunk block size
1364 int msdim = (Dim-1); // this will be zero-based
1365 int chunkelems = Ippl::chunkSize() / sizeof(T);
1366 NDIndex<Dim> chunkblock = chunk_domain(dom, chunkelems, msdim,
1367 offdata[v].isCompressed);
1368
1369 // Now compare the size
1370 int dsize = chunkblock.size();
1371 if (dsize > maxsize) {
1372 maxsize = dsize;
1373 }
1374 }
1375 }
1376 }
1377
1379 // read the data for a block of values of type T from the given data file.
1380 // Return success of read.
1381 // The size and seekpos values are in bytes.
1382 template <class T>
1383 bool read_data(int outputDatafd, T* buffer, Offset_t readsize,
1384 Offset_t seekpos) {
1385
1386 PAssert_GE(seekpos, 0);
1387 PAssert_GT(readsize, 0);
1388 PAssert(buffer);
1389 PAssert_GE(outputDatafd, 0);
1390 PAssert_EQ(readsize % sizeof(T), 0);
1391
1392 // Now read the block of data
1393 off_t seekoffset = seekpos;
1394 size_t nbytes = readsize;
1395 bool seekok = true;
1396
1397 Timer rtimer;
1398 rtimer.clear();
1399 rtimer.start();
1400
1401 size_t nout = 0;
1402 if (::lseek(outputDatafd, seekoffset, SEEK_SET) == seekoffset) {
1403 char *rbuf = (char *)buffer;
1404 nout = ::read(outputDatafd, rbuf, nbytes);
1405 } else {
1406 seekok = false;
1407 }
1408
1409 rtimer.stop();
1410 DiscBuffer::readtime += rtimer.clock_time();
1411 DiscBuffer::readbytes += readsize;
1412
1413 if (!seekok) {
1414 ERRORMSG("Seek error in DiscField::read_data" << endl);
1415 ERRORMSG("Could not seek to position " << seekoffset << endl);
1416 Ippl::abort("Exiting due to DiscField error.");
1417 }
1418
1419 if (nout != nbytes) {
1420 ERRORMSG("Read error in DiscField::read_data" << endl);
1421 ERRORMSG("Could not read " << nbytes << " bytes." << endl);
1422 Ippl::abort("Exiting due to DiscField error.");
1423 }
1424
1425 return true;
1426 }
1427
1429 // Convert data in an offset data struct to an NDIndex, and return it
1430 // in the second argument.
1431 template<class T>
1432 void offset_data_to_domain(DFOffsetData<Dim,T> &offdata,
1433 NDIndex<Dim> &domain) {
1434 int *dptr = offdata.vnodedata + 1;
1435 for (unsigned int i=0; i < Dim; ++i) {
1436 int first = *dptr;
1437 int stride = *(dptr + 1);
1438 int length = *(dptr + 2);
1439 domain[i] = Index(first, first + (length - 1)*stride, stride);
1440 dptr += 6;
1441 }
1442 }
1443
1445 // Convert domain data to offset I/O struct data
1446 template<class T>
1447 void domain_to_offset_data(const NDIndex<Dim> &domain,
1448 DFOffsetData<Dim,T> &offdata) {
1449 int *dptr = offdata.vnodedata;
1450 for (unsigned int i=0; i < Dim; ++i) {
1451 *dptr++ = 0;
1452 *dptr++ = domain[i].first();
1453 *dptr++ = domain[i].stride();
1454 *dptr++ = domain[i].length();
1455 *dptr++ = 0;
1456 *dptr++ = 0;
1457 }
1458 }
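 // (Illustration: for domain[i] = Index(0,63), the six ints stored for
 //  dimension i are {0, 0, 1, 64, 0, 0}; only the first/stride/length
 //  entries (positions 1-3) are read back by offset_data_to_domain above.)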
1459
1460 //
1461 // don't allow copy or assign ... these are declared but never defined,
1462 // if something tries to use them it will generate a missing symbol error
1463 //
1464
1465 DiscField(const DiscField<Dim>&);
1466 DiscField& operator=(const DiscField<Dim>&);
1467};
1468
1469#include "Utility/DiscField.hpp"
1470
1471#endif // DISC_FIELD_H
1472
1473/***************************************************************************
1474 * $RCSfile: DiscField.h,v $ $Author: adelmann $
1475 * $Revision: 1.1.1.1 $ $Date: 2003/01/23 07:40:33 $
1476 * IPPL_VERSION_ID: $Id: DiscField.h,v 1.1.1.1 2003/01/23 07:40:33 adelmann Exp $
1477 ***************************************************************************/