OPAL (Object Oriented Parallel Accelerator Library)  2021.1.99
OPAL
AmrYtWriter.cpp
Go to the documentation of this file.
1 //
2 // Class AmrYtWriter
3 // This concrete class writes output files that are readable by yt
4 // (cf. http://yt-project.org/). We have a fork of yt in
5 // the repository at https://gitlab.psi.ch/frey_m/yt.
6 // The functions of this class are copied from AMReX and modified to fit
7 // our needs.
8 //
9 // Copyright (c) 2016 - 2020, Matthias Frey, Paul Scherrer Institut, Villigen PSI, Switzerland
10 // All rights reserved
11 //
12 // Implemented as part of the PhD thesis
13 // "Precise Simulations of Multibunches in High Intensity Cyclotrons"
14 //
15 // This file is part of OPAL.
16 //
17 // OPAL is free software: you can redistribute it and/or modify
18 // it under the terms of the GNU General Public License as published by
19 // the Free Software Foundation, either version 3 of the License, or
20 // (at your option) any later version.
21 //
22 // You should have received a copy of the GNU General Public License
23 // along with OPAL. If not, see <https://www.gnu.org/licenses/>.
24 //
25 #include "AmrYtWriter.h"
26 
27 #include <AMReX_Utility.H>
28 #include <AMReX_IntVect.H>
29 #include <AMReX_ParmParse.H>
30 #include <AMReX_ParallelDescriptor.H>
31 #include <AMReX_VisMF.H>
32 #include <AMReX_VectorIO.H>
33 #include <AMReX_NFiles.H>
34 
37 
38 
39 AmrYtWriter::AmrYtWriter(int step, int bin)
40 {
41  intData_m.resize(1);
42 // intData_m[0] = "id";
43 // intData_m[1] = "cpu";
44  intData_m[0] = "energy_bin";
45 
46  realData_m.resize(//3 + // coordinates
47  3 + // momenta
48  3 + // charge + mass + timestep
49 // 1 + // potential at particle location
50  3 + // electric field at particle location
51  3); // magnetic field at particle location
52 
53  int idx = 0;
54 // realData_m[idx++] ="position_x";
55 // realData_m[idx++] ="position_y";
56 // realData_m[idx++] ="position_z";
57  realData_m[idx++] ="momentum_x";
58  realData_m[idx++] ="momentum_y";
59  realData_m[idx++] ="momentum_z";
60  realData_m[idx++] ="charge";
61  realData_m[idx++] ="mass";
62  realData_m[idx++] ="timestep";
63 // realData_m[idx++] ="potential";
64  realData_m[idx++] ="electric_field_x";
65  realData_m[idx++] ="electric_field_y";
66  realData_m[idx++] ="electric_field_z";
67  realData_m[idx++] ="magnetic_field_x";
68  realData_m[idx++] ="magnetic_field_y";
69  realData_m[idx++] ="magnetic_field_z";
70 
71  namespace fs = boost::filesystem;
72 
73  fs::path dir = OpalData::getInstance()->getInputBasename();
74  std::string dataDir = OpalData::getInstance()->getAuxiliaryOutputDirectory();
75  boost::filesystem::path path = dir.parent_path() / dataDir / "amr" / "yt";
76  dir_m = amrex::Concatenate((path / "plt").string(), step, 10);
77  dir_m += "-";
78  dir_m = amrex::Concatenate(dir_m, bin, 3);
79 
80  if ( Ippl::myNode() == 0 && !fs::exists(path) ) {
81  try {
82  fs::create_directories( path );
83  } catch(const fs::filesystem_error& ex) {
84  throw OpalException("AmrYtWriter::AmrYtWriter()",
85  ex.what());
86  }
87  }
88 
90 }
91 
92 
                         const amr::AmrVectorFieldContainer_t& efield,
                         const amr::AmrIntArray_t& refRatio,
                         const amr::AmrGeomContainer_t& geom,
                         const int& nLevel,
                         const double& time,
                         const double& scale)
{
    /* Write rho, phi and the three electric field components of all
     * AMR levels into a yt-readable plot directory ("HyperCLaw-V1.1"
     * Header plus one MultiFab per level).
     *
     * We need to scale the geometry and cell sizes according to the
     * particle mapping (hence the divisions by 'scale' below).
     */

    //
    // Only let 64 CPUs be writing at any one time.
    //
    amrex::VisMF::SetNOutFiles(64);
    //
    // Only the I/O processor makes the directory if it doesn't already exist.
    //
    if ( Ippl::myNode() == 0 )
        if (!amrex::UtilCreateDirectory(dir_m, 0755))
            amrex::CreateDirectoryFailed(dir_m);
    //
    // Force other processors to wait till directory is built.
    //
    Ippl::Comm->barrier();

    std::string HeaderFileName = dir_m + "/Header";

    amrex::VisMF::IO_Buffer io_buffer(amrex::VisMF::IO_Buffer_Size);

    std::ofstream HeaderFile;

    HeaderFile.rdbuf()->pubsetbuf(io_buffer.dataPtr(), io_buffer.size());

    // Total number of scalar components written per cell:
    // rho + phi + Ex + Ey + Ez.
    int nData = rho[0]->nComp()
              + phi[0]->nComp()
              + efield[0][0]->nComp()
              + efield[0][1]->nComp()
              + efield[0][2]->nComp();

    if ( Ippl::myNode() == 0 )
    {
        //
        // Only the IOProcessor() writes to the header file.
        //
        HeaderFile.open(HeaderFileName.c_str(), std::ios::out|std::ios::trunc|std::ios::binary);
        if (!HeaderFile.good())
            amrex::FileOpenFailed(HeaderFileName);
        HeaderFile << "HyperCLaw-V1.1\n";

        HeaderFile << nData << '\n';

        // variable names
        for (int ivar = 1; ivar <= rho[0]->nComp(); ivar++)
            HeaderFile << "rho\n";

        for (int ivar = 1; ivar <= phi[0]->nComp(); ivar++)
            HeaderFile << "phi\n";

        HeaderFile << "Ex\nEy\nEz\n";

        // dimensionality
        HeaderFile << AMREX_SPACEDIM << '\n';

        // time
        HeaderFile << time << '\n';
        HeaderFile << nLevel - 1 << '\n'; // maximum level number (0=single level)

        // physical domain, scaled back from the particle mapping
        for (int i = 0; i < AMREX_SPACEDIM; i++)
            HeaderFile << geom[0].ProbLo(i) / scale << ' ';
        HeaderFile << '\n';
        for (int i = 0; i < AMREX_SPACEDIM; i++)
            HeaderFile << geom[0].ProbHi(i) / scale << ' ';
        HeaderFile << '\n';

        // reference ratio
        for (int i = 0; i < refRatio.size(); ++i)
            HeaderFile << refRatio[i] << ' ';
        HeaderFile << '\n';

        // geometry domain for all levels
        for (int i = 0; i < nLevel; ++i)
            HeaderFile << geom[i].Domain() << ' ';
        HeaderFile << '\n';

        // number of time steps
        for (int i = 0; i < nLevel; ++i)
            HeaderFile << 0 << ' ';
        HeaderFile << '\n';

        // cell sizes for all level
        for (int i = 0; i < nLevel; ++i) {
            for (int k = 0; k < AMREX_SPACEDIM; k++)
                HeaderFile << geom[i].CellSize()[k] / scale << ' ';
            HeaderFile << '\n';
        }

        // coordinate system
        HeaderFile << geom[0].Coord() << '\n';
        HeaderFile << "0\n"; // write boundary data
    }

    for (int lev = 0; lev < nLevel; ++lev) {
        // Build the directory to hold the MultiFab at this level.
        // The name is relative to the directory containing the Header file.
        //
        static const std::string BaseName = "/Cell";
        char buf[64];
        sprintf(buf, "Level_%d", lev);
        std::string sLevel = buf;

        //
        // Now for the full pathname of that directory.
        //
        std::string FullPath = dir_m;
        if (!FullPath.empty() && FullPath[FullPath.length()-1] != '/')
            FullPath += '/';
        FullPath += sLevel;
        //
        // Only the I/O processor makes the directory if it doesn't already exist.
        //
        if ( Ippl::myNode() == 0 )
            if (!amrex::UtilCreateDirectory(FullPath, 0755))
                amrex::CreateDirectoryFailed(FullPath);
        //
        // Force other processors to wait till directory is built.
        //
        Ippl::Comm->barrier();

        if ( Ippl::myNode() == 0 )
        {
            HeaderFile << lev << ' ' << rho[lev]->boxArray().size() << ' ' << 0 /*time*/ << '\n';
            HeaderFile << 0 /* level steps */ << '\n';

            for (int i = 0; i < rho[lev]->boxArray().size(); ++i)
            {
                // Per-grid physical extent, scaled back to unmapped units.
                amrex::Real dx[3] = {
                    geom[lev].CellSize(0),
                    geom[lev].CellSize(1),
                    geom[lev].CellSize(2)
                };
                dx[0] /= scale;
                dx[1] /= scale;
                dx[2] /= scale;
                amrex::Real lo[3] = {
                    geom[lev].ProbLo(0),
                    geom[lev].ProbLo(1),
                    geom[lev].ProbLo(2)
                };
                lo[0] /= scale;
                lo[1] /= scale;
                lo[2] /= scale;
                amr::AmrDomain_t loc = amr::AmrDomain_t(rho[lev]->boxArray()[i],
                                                        &dx[0], &lo[0]);
//                                                      geom[lev].CellSize(),
//                                                      geom[lev].ProbLo());
                for (int n = 0; n < AMREX_SPACEDIM; n++)
                    HeaderFile << loc.lo(n) << ' ' << loc.hi(n) << '\n';
            }

            std::string PathNameInHeader = sLevel;
            PathNameInHeader += BaseName;
            HeaderFile << PathNameInHeader << '\n';
        }

        //
        // We combine all of the multifabs into one MultiFab with nData
        // components per cell (no ghost cells).
        //
        amr::AmrField_t data(rho[lev]->boxArray(),
                             rho[lev]->DistributionMap(),
                             nData, 0,
                             amrex::MFInfo());

        //
        // Cull data -- use no ghost cells.
        //
        // dst, src, srccomp, dstcomp, numcomp, nghost
        /*
         * srccomp: the component to copy
         * dstcmop: the component where to copy
         * numcomp: how many components to copy
         */
        amr::AmrField_t::Copy(data, *rho[lev],       0, 0, 1, 0);
        amr::AmrField_t::Copy(data, *phi[lev],       0, 1, 1, 0);
        amr::AmrField_t::Copy(data, *efield[lev][0], 0, 2, 1, 0); // Ex
        amr::AmrField_t::Copy(data, *efield[lev][1], 0, 3, 1, 0); // Ey
        amr::AmrField_t::Copy(data, *efield[lev][2], 0, 4, 1, 0); // Ez

        //
        // Use the Full pathname when naming the MultiFab.
        //
        std::string TheFullPath = FullPath;
        TheFullPath += BaseName;

        amrex::VisMF::Write(data, TheFullPath, amrex::VisMF::NFiles, true);
    }

    if ( Ippl::myNode() == 0 )
        HeaderFile.close();
}
296 
297 
                           const double& /*time*/,
                           const double& gamma)
{
    /* Write the particle bunch in AMReX checkpoint format into a
     * subdirectory "opal" of the plot directory: one Header plus, per
     * level, a Particle_H box-array header and binary DATA_* files.
     *
     * According to
     *
     * template <int NStructReal, int NStructInt, int NArrayReal, int NArrayInt>
     * void
     * ParticleContainer<NStructReal, NStructInt, NArrayReal, NArrayInt>
     * ::Checkpoint (const std::string& dir,
     *               const std::string& name,
     *               bool is_checkpoint,
     *               const Vector<std::string>& real_comp_names,
     *               const Vector<std::string>& int_comp_names) const
     *
     * in AMReX_ParticleContainerI.H with AMReX_Particles.H and AMReX_ParticleI.H.
     *
     * ATTENTION: We need to Lorentz transform the particles.
     */

    const AmrLayout_t* layout_p = static_cast<const AmrLayout_t*>(&bunch_p->getLayout());
    const AmrPartBunch::pbase_t* amrpbase_p = bunch_p->getAmrParticleBase();

    const int NProcs = amrex::ParallelDescriptor::NProcs();
    const int IOProcNumber = amrex::ParallelDescriptor::IOProcessorNumber();

    // unlink zero-length data files after writing
    bool doUnlink = true;

    //
    // We store the particles in a subdirectory of "dir".
    //
    std::string pdir = dir_m;

    if ( ! pdir.empty() && pdir[pdir.size()-1] != '/') {
        pdir += '/';
    }

    pdir += "opal";
    //
    // Make the particle directories if they don't already exist.
    //
    if ( Ippl::myNode() == 0 ) {
        if ( ! amrex::UtilCreateDirectory(pdir, 0755)) {
            amrex::CreateDirectoryFailed(pdir);
        }
    }

    // Force other processors to wait until directory is built.
    Ippl::Comm->barrier();

    //
    // The header contains the info we need to read back in the particles.
    //
    // Only the I/O processor writes to the header file.
    //
    std::ofstream HdrFile;

    long nParticles = bunch_p->getTotalNum();

    // do not modify LocalNumPerLevel in here!!!
    auto LocalNumPerLevel = amrpbase_p->getLocalNumPerLevel();

    int nLevel = (&amrpbase_p->getAmrLayout())->maxLevel() + 1;

    std::unique_ptr<size_t[]> partPerLevel( new size_t[nLevel] );
    std::unique_ptr<size_t[]> globalPartPerLevel( new size_t[nLevel] );

    // NOTE(review): only the first LocalNumPerLevel.size() entries are
    // initialized here; if LocalNumPerLevel.size() < nLevel the trailing
    // entries enter the allreduce below uninitialized -- confirm the two
    // sizes always agree.
    for (size_t i = 0; i < LocalNumPerLevel.size(); ++i)
        partPerLevel[i] = LocalNumPerLevel[i];

    // Sum the per-level local particle counts over all nodes.
    allreduce(*partPerLevel.get(),
              *globalPartPerLevel.get(),
              nLevel, std::plus<size_t>());

    int finest_level = layout_p->finestLevel();

    if ( Ippl::myNode() == 0 )
    {
        std::string HdrFileName = pdir;

        if ( ! HdrFileName.empty() && HdrFileName[HdrFileName.size()-1] != '/') {
            HdrFileName += '/';

        }

        HdrFileName += "Header";

        HdrFile.open(HdrFileName.c_str(), std::ios::out|std::ios::trunc);

        if ( ! HdrFile.good()) {
            amrex::FileOpenFailed(HdrFileName);
        }
        //
        // First thing written is our Checkpoint/Restart version string.
        //
        // We append "_single" or "_double" to the version string indicating
        // whether we're using "float" or "double" floating point data in the
        // particles so that we can Restart from the checkpoint files.
        //
        HdrFile << "Version_Two_Dot_Zero_double" << '\n';
        //
        // AMREX_SPACEDIM and N for sanity checking.
        //
        HdrFile << AMREX_SPACEDIM << '\n';

        // The number of extra real parameters
        HdrFile << realData_m.size() << '\n';
        for (std::size_t j = 0; j < realData_m.size(); ++j)
            HdrFile << realData_m[j] << '\n';

        // The number of extra int parameters
        HdrFile << intData_m.size() << '\n';
        for (std::size_t j = 0; j < intData_m.size(); ++j)
            HdrFile << intData_m[j] << '\n';

        // is_checkpoint
        HdrFile << true << '\n';

        //
        // The total number of particles.
        //
        HdrFile << nParticles << '\n';
        //
        // The value of nextid that we need to restore on restart.
        // --> we don's use this feature
        //
        HdrFile << 0 << '\n';
        //
        // Then the finest level of the AMR hierarchy.
        //
        HdrFile << finest_level << '\n';
        //
        // Then the number of grids at each level.
        //
        for (int lev = 0; lev <= finest_level; ++lev) {
            HdrFile << layout_p->ParticleBoxArray(lev).size() << '\n';
        }
    }
    //
    // We want to write the data out in parallel.
    //
    // We'll allow up to nOutFiles active writers at a time.
    //
    int nOutFiles(256);

    nOutFiles = std::max(1, std::min(nOutFiles, NProcs));

    for (int lev = 0; lev <= finest_level; ++lev) {
        // skip directory creation / data writing for empty levels
        bool gotsome = (globalPartPerLevel[lev] > 0);
        //
        // We store the particles at each level in their own subdirectory.
        //
        std::string LevelDir = pdir;

        if (gotsome) {
            if ( ! LevelDir.empty() && LevelDir[LevelDir.size()-1] != '/') {
                LevelDir += '/';
            }

            LevelDir = amrex::Concatenate(LevelDir + "Level_", lev, 1);

            if ( Ippl::myNode() == 0 ) {
                if ( ! amrex::UtilCreateDirectory(LevelDir, 0755)) {
                    amrex::CreateDirectoryFailed(LevelDir);
                }
            }
            //
            // Force other processors to wait until directory is built.
            //
            Ippl::Comm->barrier();
        }

        // Write out the header for each particle
        if ( gotsome && Ippl::myNode() == 0 ) {
            std::string HeaderFileName = LevelDir;
            HeaderFileName += "/Particle_H";
            std::ofstream ParticleHeader(HeaderFileName);

            layout_p->ParticleBoxArray(lev).writeOn(ParticleHeader);
            ParticleHeader << '\n';

            ParticleHeader.flush();
            ParticleHeader.close();
        }

        // 'state' allocates no data (SetAlloc(false)); it only provides
        // the box array / distribution map so we know the grid count.
        amrex::MFInfo info;
        info.SetAlloc(false);
        amr::AmrField_t state(layout_p->ParticleBoxArray(lev),
                              layout_p->ParticleDistributionMap(lev),
                              1, 0, info);
        //
        // We eventually want to write out the file name and the offset
        // into that file into which each grid of particles is written.
        //
        amrex::Vector<int> which(state.size(),0);
        amrex::Vector<int > count(state.size(),0);
        amrex::Vector<long> where(state.size(),0);

        std::string filePrefix(LevelDir);
        filePrefix += '/';
        filePrefix += "DATA_";

        if (gotsome) {
            bool groupSets(false), setBuf(true);
            for(amrex::NFilesIter nfi(nOutFiles, filePrefix, groupSets, setBuf); nfi.ReadyToWrite(); ++nfi) {
                std::ofstream& myStream = (std::ofstream&) nfi.Stream();
                //
                // Write out all the valid particles we own at the specified level.
                // Do it grid block by grid block remembering the seek offset
                // for the start of writing of each block of data.
                //
                writeParticles_m(lev, myStream, nfi.FileNumber(), which, count, where, bunch_p, gamma);
            }

            // collect per-grid file numbers, counts and offsets on the I/O proc
            amrex::ParallelDescriptor::ReduceIntSum (which.dataPtr(), which.size(), IOProcNumber);
            amrex::ParallelDescriptor::ReduceIntSum (count.dataPtr(), count.size(), IOProcNumber);
            amrex::ParallelDescriptor::ReduceLongSum(where.dataPtr(), where.size(), IOProcNumber);
        }

        if ( Ippl::myNode() == 0 ) {
            for (int j = 0; j < state.size(); j++) {
                //
                // We now write the which file, the particle count, and the
                // file offset into which the data for each grid was written,
                // to the header file.
                //
                HdrFile << which[j] << ' ' << count[j] << ' ' << where[j] << '\n';
            }

            if (gotsome && doUnlink) {
                //
                // Unlink any zero-length data files.
                //
                amrex::Vector<long> cnt(nOutFiles,0);

                for (int i = 0, N=count.size(); i < N; i++) {
                    cnt[which[i]] += count[i];
                }

                for (int i = 0, N=cnt.size(); i < N; i++) {
                    if (cnt[i] == 0) {
                        std::string FullFileName = amrex::NFilesIter::FileName(i, filePrefix);
                        amrex::UnlinkFile(FullFileName.c_str());
                    }
                }
            }
        }
    }            // ---- end for(lev...)

    if ( Ippl::myNode() == 0 ) {
        HdrFile.flush();
        HdrFile.close();
        if ( ! HdrFile.good()) {
            throw OpalException("AmrYtWriter:writeBunch()",
                                "Problem writing HdrFile");
        }
    }
}
556 
557 
                                   std::ofstream& ofs,
                                   int fnum,
                                   amrex::Vector<int>& which,
                                   amrex::Vector<int>& count,
                                   amrex::Vector<long>& where,
                                   const AmrPartBunch* bunch_p,
                                   const double gamma) const
{
    /* Write the binary particle records of all locally owned particles
     * at 'level' into 'ofs', grid block by grid block, recording for
     * each grid the file number ('which'), the number of particles
     * ('count') and the seek offset ('where') so writeBunch() can put
     * them into the checkpoint Header.
     *
     * Copied and modified of
     *
     * template <int NStructReal, int NStructInt, int NArrayReal, int NArrayInt>
     * void
     * ParticleContainer<NStructReal,
     *                   NStructInt,
     *                   NArrayReal,
     *                   NArrayInt>
     * ::WriteParticles(int lev,
     *                  std::ofstream& ofs,
     *                  int fnum,
     *                  Vector<int>& which,
     *                  Vector<int>& count,
     *                  Vector<long>& where,
     *                  bool is_checkpoint) const
     *
     * in AMReX_ParticleContainerI.H
     */

    const AmrLayout_t* layout_p = static_cast<const AmrLayout_t*>(&bunch_p->getLayout());
    const AmrPartBunch::pbase_t* amrpbase_p = bunch_p->getAmrParticleBase();

    // [lBegin, lEnd) is the local particle index range of this level
    const auto& LocalNumPerLevel = amrpbase_p->getLocalNumPerLevel();
    size_t lBegin = LocalNumPerLevel.begin(level);
    size_t lEnd   = LocalNumPerLevel.end(level);

    // first pass: count the particles per grid
    for (size_t ip = lBegin; ip < lEnd; ++ip) {
        const int grid = amrpbase_p->Grid[ip];

        count[grid] += 1;
    }

    // 'state' allocates no data (SetAlloc(false)); it is only used to
    // iterate over the grids owned by this node.
    amrex::MFInfo info;
    info.SetAlloc(false);
    amr::AmrField_t state(layout_p->ParticleBoxArray(level),
                          layout_p->ParticleDistributionMap(level),
                          1, 0, info);

    for (amrex::MFIter mfi(state); mfi.isValid(); ++mfi) {
        const int grid = mfi.index();

        which[grid] = fnum;
        where[grid] = amrex::VisMF::FileOffset(ofs);

        if (count[grid] == 0) {
            continue;
        }


        // First write out the integer data in binary.
        // Record layout: id, cpu, then the intData_m attributes.
        const int iChunkSize = 2 + intData_m.size();
        amrex::Vector<int> istuff(count[grid]*iChunkSize);
        int* iptr = istuff.dataPtr();

        // inefficient
        for (size_t ip = lBegin; ip < lEnd; ++ip) {
            const int pGrid = amrpbase_p->Grid[ip];

            if ( grid == pGrid ) {

                iptr[0] = bunch_p->ID[ip];
                iptr[1] = Ippl::myNode();
                iptr[2] = bunch_p->Bin[ip];
                iptr += iChunkSize;
            }
        }

        amrex::writeIntData(istuff.dataPtr(), istuff.size(), ofs);

        // Write the Real data in binary.
        // Record layout: coordinates, then the realData_m attributes.
        const int rChunkSize = AMREX_SPACEDIM + realData_m.size();
        amrex::Vector<double> rstuff(count[grid]*rChunkSize);
        double* rptr = rstuff.dataPtr();


        // inefficient
        for (size_t ip = lBegin; ip < lEnd; ++ip) {
            const int pGrid = amrpbase_p->Grid[ip];

            if ( grid == pGrid ) {

                int idx = 0;

                // coordinates
                for (int j = 0; j < AMREX_SPACEDIM; ++j)
                    rptr[idx++] = bunch_p->R[ip](j);

                // Lorentz transform (stretch the longitudinal coordinate)
                if ( OpalData::getInstance()->isInOPALCyclMode() )
                    rptr[1] *= gamma; // y is longitudinal direction
                else
                    rptr[2] *= gamma; // z is longitudinal direction

                // momenta
                for (int j = 0; j < AMREX_SPACEDIM; ++j)
                    rptr[idx++] = bunch_p->P[ip](j);

                // charge
                rptr[idx++] = bunch_p->Q[ip];

                // mass
                rptr[idx++] = bunch_p->M[ip];

                // timestep
                rptr[idx++] = bunch_p->dt[ip];

//                 // potential
//                 rptr[idx++] = bunch_p->Phi[ip];

                // electric field
                for (int j = 0; j < AMREX_SPACEDIM; ++j)
                    rptr[idx++] = bunch_p->Ef[ip](j);

                // magnetic field
                for (int j = 0; j < AMREX_SPACEDIM; ++j)
                    rptr[idx++] = bunch_p->Bf[ip](j);

                // advance to the next particle record (idx == rChunkSize here)
                rptr += idx; //realData_m.size();
            }
        }
        amrex::writeDoubleData(rstuff.dataPtr(), rstuff.size(), ofs);
    }
}
T::PETE_Expr_t::PETE_Return_t max(const PETE_Expr< T > &expr, NDIndex< D > &loc)
Definition: ReductionLoc.h:84
T::PETE_Expr_t::PETE_Return_t min(const PETE_Expr< T > &expr, NDIndex< D > &loc)
Definition: ReductionLoc.h:76
void allreduce(const T *input, T *output, int count, Op op)
Definition: GlobalComm.hpp:510
PETE_TTTree< OpWhere, typename Cond_t::PETE_Expr_t, typename True_t::PETE_Expr_t, PETE_Scalar< Vektor< T, Dim > > > where(const PETE_Expr< Cond_t > &c, const PETE_Expr< True_t > &t, const Vektor< T, Dim > &f)
amrex::Vector< AmrVectorField_t > AmrVectorFieldContainer_t
Definition: AmrDefs.h:42
amrex::RealBox AmrDomain_t
Definition: AmrDefs.h:46
amrex::Vector< AmrGeometry_t > AmrGeomContainer_t
Definition: AmrDefs.h:43
amrex::MultiFab AmrField_t
Definition: AmrDefs.h:34
amrex::Vector< std::unique_ptr< AmrField_t > > AmrScalarFieldContainer_t
Definition: AmrDefs.h:41
amrex::Vector< int > AmrIntArray_t
Definition: AmrDefs.h:47
bool info
Info flag.
Definition: Options.cpp:28
FRONT * fs
Definition: hypervolume.cpp:59
ParticleLayout< T, Dim > & getLayout()
ParticleAttrib< Vector_t > Ef
ParticlePos_t & R
ParticleAttrib< int > Bin
ParticleAttrib< double > M
size_t getTotalNum() const
ParticleAttrib< Vector_t > P
ParticleAttrib< double > Q
ParticleAttrib< double > dt
ParticleAttrib< Vector_t > Bf
ParticleIndex_t & ID
std::string getInputBasename()
get input file name without extension
Definition: OpalData.cpp:658
static OpalData * getInstance()
Definition: OpalData.cpp:195
std::string getAuxiliaryOutputDirectory() const
get the name of the the additional data directory
Definition: OpalData.cpp:650
std::vector< std::string > intData_m
integer bunch data
Definition: AmrYtWriter.h:90
std::vector< std::string > realData_m
real bunch data
Definition: AmrYtWriter.h:91
AmrYtWriter(int step, int bin=0)
Definition: AmrYtWriter.cpp:39
void writeFields(const amr::AmrScalarFieldContainer_t &rho, const amr::AmrScalarFieldContainer_t &phi, const amr::AmrVectorFieldContainer_t &efield, const amr::AmrIntArray_t &refRatio, const amr::AmrGeomContainer_t &geom, const int &nLevel, const double &time, const double &scale)
Definition: AmrYtWriter.cpp:93
std::string dir_m
directory where to write files
Definition: AmrYtWriter.h:88
void writeParticles_m(int level, std::ofstream &ofs, int fnum, amrex::Vector< int > &which, amrex::Vector< int > &count, amrex::Vector< long > &where, const AmrPartBunch *bunch_p, const double gamma) const
void writeBunch(const AmrPartBunch *bunch_p, const double &time, const double &gamma)
int finestLevel() const
pbase_t * getAmrParticleBase()
The base class for all OPAL exceptions.
Definition: OpalException.h:28
PLayout & getAmrLayout()
ParticleIndex_t Grid
const ParticleLevelCounter_t & getLocalNumPerLevel() const
void barrier(void)
static int myNode()
Definition: IpplInfo.cpp:691
static Communicate * Comm
Definition: IpplInfo.h:84