OPAL (Object Oriented Parallel Accelerator Library) 2.2.0
AmrYtWriter.cpp
#include "AmrYtWriter.h"

#include <AMReX_Utility.H>
#include <AMReX_IntVect.H>
#include <AMReX_ParmParse.H>
#include <AMReX_ParallelDescriptor.H>
#include <AMReX_VisMF.H>
#include <AMReX_VectorIO.H>
#include <AMReX_NFiles.H>

// The listing dropped two include lines here; OpalData and OpalException are
// used below, so these are presumably the missing headers.
#include "AbstractObjects/OpalData.h"
#include "Utilities/OpalException.h"


AmrYtWriter::AmrYtWriter(int step, int bin)
{
    intData_m.resize(1);
//  intData_m[0] = "id";
//  intData_m[1] = "cpu";
    intData_m[0] = "energy_bin";

    realData_m.resize(//3 + // coordinates
                      3 +   // momenta
                      3 +   // charge + mass + timestep
//                    1 +   // potential at particle location
                      3 +   // electric field at particle location
                      3);   // magnetic field at particle location

    int idx = 0;
//  realData_m[idx++] = "position_x";
//  realData_m[idx++] = "position_y";
//  realData_m[idx++] = "position_z";
    realData_m[idx++] = "momentum_x";
    realData_m[idx++] = "momentum_y";
    realData_m[idx++] = "momentum_z";
    realData_m[idx++] = "charge";
    realData_m[idx++] = "mass";
    realData_m[idx++] = "timestep";
//  realData_m[idx++] = "potential";
    realData_m[idx++] = "electric_field_x";
    realData_m[idx++] = "electric_field_y";
    realData_m[idx++] = "electric_field_z";
    realData_m[idx++] = "magnetic_field_x";
    realData_m[idx++] = "magnetic_field_y";
    realData_m[idx++] = "magnetic_field_z";

    namespace fs = boost::filesystem;

    fs::path dir = OpalData::getInstance()->getInputBasename();
    fs::path path = dir.parent_path() / "data" / "amr" / "yt";
    dir_m = amrex::Concatenate((path / "plt").string(), step, 10);
    dir_m += "-";
    dir_m = amrex::Concatenate(dir_m, bin, 3);
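    // amrex::Concatenate zero-pads the number onto the prefix, so for
    // step = 42 and bin = 7 this yields e.g.
    // <input dir>/data/amr/yt/plt0000000042-007 (10-digit step, 3-digit bin).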

    if ( Ippl::myNode() == 0 && !fs::exists(path) ) {
        try {
            fs::create_directories( path );
        } catch(const fs::filesystem_error& ex) {
            throw OpalException("AmrYtWriter::AmrYtWriter()",
                                ex.what());
        }
    }
}


void AmrYtWriter::writeFields(const amr::AmrScalarFieldContainer_t& rho,
                              const amr::AmrScalarFieldContainer_t& phi,
                              const amr::AmrVectorFieldContainer_t& efield,
                              const amr::AmrIntArray_t& refRatio,
                              const amr::AmrGeomContainer_t& geom,
                              const int& nLevel,
                              const double& time,
                              const double& scale)
{
    /* We need to scale the geometry and cell sizes according to the
     * particle mapping
     */

    //
    // Only let 64 CPUs be writing at any one time.
    //
    amrex::VisMF::SetNOutFiles(64);
    //
    // Only the I/O processor makes the directory if it doesn't already exist.
    //
    if ( Ippl::myNode() == 0 )
        if (!amrex::UtilCreateDirectory(dir_m, 0755))
            amrex::CreateDirectoryFailed(dir_m);
    //
    // Force other processors to wait till directory is built.
    //
    Ippl::Comm->barrier();

    std::string HeaderFileName = dir_m + "/Header";

    amrex::VisMF::IO_Buffer io_buffer(amrex::VisMF::IO_Buffer_Size);

    std::ofstream HeaderFile;

    HeaderFile.rdbuf()->pubsetbuf(io_buffer.dataPtr(), io_buffer.size());

    int nData = rho[0]->nComp()
              + phi[0]->nComp()
              + efield[0][0]->nComp()
              + efield[0][1]->nComp()
              + efield[0][2]->nComp();
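    // with single-component rho, phi and field MultiFabs this is
    // 1 + 1 + 3 = 5, matching the five variable names written below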

    if ( Ippl::myNode() == 0 )
    {
        //
        // Only the IOProcessor() writes to the header file.
        //
        HeaderFile.open(HeaderFileName.c_str(), std::ios::out|std::ios::trunc|std::ios::binary);
        if (!HeaderFile.good())
            amrex::FileOpenFailed(HeaderFileName);
        HeaderFile << "HyperCLaw-V1.1\n";

        HeaderFile << nData << '\n';

        // variable names
        for (int ivar = 1; ivar <= rho[0]->nComp(); ivar++)
            HeaderFile << "rho\n";

        for (int ivar = 1; ivar <= phi[0]->nComp(); ivar++)
            HeaderFile << "phi\n";

        HeaderFile << "Ex\nEy\nEz\n";

        // dimensionality
        HeaderFile << AMREX_SPACEDIM << '\n';

        // time
        HeaderFile << time << '\n';
        HeaderFile << nLevel - 1 << '\n'; // maximum level number (0 = single level)

        // physical domain
        for (int i = 0; i < AMREX_SPACEDIM; i++)
            HeaderFile << geom[0].ProbLo(i) / scale << ' ';
        HeaderFile << '\n';
        for (int i = 0; i < AMREX_SPACEDIM; i++)
            HeaderFile << geom[0].ProbHi(i) / scale << ' ';
        HeaderFile << '\n';

        // refinement ratio
        for (int i = 0; i < refRatio.size(); ++i)
            HeaderFile << refRatio[i] << ' ';
        HeaderFile << '\n';

        // geometry domain for all levels
        for (int i = 0; i < nLevel; ++i)
            HeaderFile << geom[i].Domain() << ' ';
        HeaderFile << '\n';

        // number of time steps
        for (int i = 0; i < nLevel; ++i)
            HeaderFile << 0 << ' ';
        HeaderFile << '\n';

        // cell sizes for all levels
        for (int i = 0; i < nLevel; ++i) {
            for (int k = 0; k < AMREX_SPACEDIM; k++)
                HeaderFile << geom[i].CellSize()[k] / scale << ' ';
            HeaderFile << '\n';
        }

        // coordinate system
        HeaderFile << geom[0].Coord() << '\n';
        HeaderFile << "0\n"; // write boundary data
    }
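    // Sketch of the header produced above for a single level with
    // single-component rho/phi (values abbreviated):
    //
    //   HyperCLaw-V1.1
    //   5
    //   rho
    //   phi
    //   Ex
    //   Ey
    //   Ez
    //   3                     <- AMREX_SPACEDIM
    //   <time>
    //   0                     <- maximum level
    //   <prob_lo x y z>
    //   <prob_hi x y z>
    //   <refinement ratios>
    //   <domain boxes>
    //   0                     <- time steps
    //   <cell sizes>
    //   0                     <- coordinate system (0 = Cartesian)
    //   0                     <- no boundary data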

    for (int lev = 0; lev < nLevel; ++lev) {
        //
        // Build the directory to hold the MultiFab at this level.
        // The name is relative to the directory containing the Header file.
        //
        static const std::string BaseName = "/Cell";
        char buf[64];
        snprintf(buf, sizeof(buf), "Level_%d", lev);
        std::string sLevel = buf;

        //
        // Now for the full pathname of that directory.
        //
        std::string FullPath = dir_m;
        if (!FullPath.empty() && FullPath[FullPath.length()-1] != '/')
            FullPath += '/';
        FullPath += sLevel;
        //
        // Only the I/O processor makes the directory if it doesn't already exist.
        //
        if ( Ippl::myNode() == 0 )
            if (!amrex::UtilCreateDirectory(FullPath, 0755))
                amrex::CreateDirectoryFailed(FullPath);
        //
        // Force other processors to wait till directory is built.
        //
        Ippl::Comm->barrier();

        if ( Ippl::myNode() == 0 )
        {
            HeaderFile << lev << ' ' << rho[lev]->boxArray().size() << ' ' << 0 /*time*/ << '\n';
            HeaderFile << 0 /* level steps */ << '\n';

            for (int i = 0; i < rho[lev]->boxArray().size(); ++i)
            {
                amrex::Real dx[3] = {
                    geom[lev].CellSize(0),
                    geom[lev].CellSize(1),
                    geom[lev].CellSize(2)
                };
                dx[0] /= scale;
                dx[1] /= scale;
                dx[2] /= scale;
                amrex::Real lo[3] = {
                    geom[lev].ProbLo(0),
                    geom[lev].ProbLo(1),
                    geom[lev].ProbLo(2)
                };
                lo[0] /= scale;
                lo[1] /= scale;
                lo[2] /= scale;
                amr::AmrDomain_t loc = amr::AmrDomain_t(rho[lev]->boxArray()[i],
                                                        &dx[0], &lo[0]);
//                                                      geom[lev].CellSize(),
//                                                      geom[lev].ProbLo());
                for (int n = 0; n < AMREX_SPACEDIM; n++)
                    HeaderFile << loc.lo(n) << ' ' << loc.hi(n) << '\n';
            }

            std::string PathNameInHeader = sLevel;
            PathNameInHeader += BaseName;
            HeaderFile << PathNameInHeader << '\n';
        }

        //
        // We combine all of the multifabs
        //
        amr::AmrField_t data(rho[lev]->boxArray(),
                             rho[lev]->DistributionMap(),
                             nData, 0,
                             amrex::MFInfo());

        //
        // Cull data -- use no ghost cells.
        //
        // dst, src, srccomp, dstcomp, numcomp, nghost
        /*
         * srccomp: the component to copy from
         * dstcomp: the component to copy to
         * numcomp: how many components to copy
         */
        amr::AmrField_t::Copy(data, *rho[lev],       0, 0, 1, 0);
        amr::AmrField_t::Copy(data, *phi[lev],       0, 1, 1, 0);
        amr::AmrField_t::Copy(data, *efield[lev][0], 0, 2, 1, 0); // Ex
        amr::AmrField_t::Copy(data, *efield[lev][1], 0, 3, 1, 0); // Ey
        amr::AmrField_t::Copy(data, *efield[lev][2], 0, 4, 1, 0); // Ez

        //
        // Use the full pathname when naming the MultiFab.
        //
        std::string TheFullPath = FullPath;
        TheFullPath += BaseName;

        amrex::VisMF::Write(data, TheFullPath, amrex::VisMF::NFiles, true);
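        // VisMF stores the combined MultiFab as Level_<lev>/Cell_H plus the
        // accompanying Cell_D_<nnnnn> data files, following the usual
        // AMReX plotfile layout.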
    }

    if ( Ippl::myNode() == 0 )
        HeaderFile.close();
}


void AmrYtWriter::writeBunch(const AmrPartBunch* bunch_p,
                             const double& time,
                             const double& gamma)
{
    /* According to
     *
     * template <int NStructReal, int NStructInt, int NArrayReal, int NArrayInt>
     * void
     * ParticleContainer<NStructReal, NStructInt, NArrayReal, NArrayInt>
     * ::Checkpoint (const std::string& dir,
     *               const std::string& name,
     *               bool is_checkpoint,
     *               const Vector<std::string>& real_comp_names,
     *               const Vector<std::string>& int_comp_names) const
     *
     * in AMReX_ParticleContainerI.H, together with AMReX_Particles.H and AMReX_ParticleI.H.
     *
     * ATTENTION: We need to Lorentz transform the particles.
     */

    const AmrLayout_t* layout_p = static_cast<const AmrLayout_t*>(&bunch_p->getLayout());
    const AmrPartBunch::pbase_t* amrpbase_p = bunch_p->getAmrParticleBase();

    const int NProcs       = amrex::ParallelDescriptor::NProcs();
    const int IOProcNumber = amrex::ParallelDescriptor::IOProcessorNumber();

    bool doUnlink = true;

    //
    // We store the particles in a subdirectory of "dir".
    //
    std::string pdir = dir_m;

    if ( ! pdir.empty() && pdir[pdir.size()-1] != '/') {
        pdir += '/';
    }

    pdir += "opal";
    //
    // Make the particle directories if they don't already exist.
    //
    if ( Ippl::myNode() == 0 ) {
        if ( ! amrex::UtilCreateDirectory(pdir, 0755)) {
            amrex::CreateDirectoryFailed(pdir);
        }
    }

    // Force other processors to wait until directory is built.
    Ippl::Comm->barrier();

    //
    // The header contains the info we need to read back in the particles.
    //
    // Only the I/O processor writes to the header file.
    //
    std::ofstream HdrFile;

    long nParticles = bunch_p->getTotalNum();

    // do not modify LocalNumPerLevel in here!!!
    auto LocalNumPerLevel = amrpbase_p->getLocalNumPerLevel();

    int nLevel = amrpbase_p->getAmrLayout().maxLevel() + 1;

    std::unique_ptr<size_t[]> partPerLevel( new size_t[nLevel] );
    std::unique_ptr<size_t[]> globalPartPerLevel( new size_t[nLevel] );

    for (size_t i = 0; i < LocalNumPerLevel.size(); ++i)
        partPerLevel[i] = LocalNumPerLevel[i];

    allreduce(*partPerLevel.get(),
              *globalPartPerLevel.get(),
              nLevel, std::plus<size_t>());
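    // every node now knows the global particle count per level and can
    // decide below whether a level holds any particles at all ("gotsome"
    // in the level loop)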

    int finest_level = layout_p->finestLevel();

    if ( Ippl::myNode() == 0 )
    {
        std::string HdrFileName = pdir;

        if ( ! HdrFileName.empty() && HdrFileName[HdrFileName.size()-1] != '/') {
            HdrFileName += '/';
        }

        HdrFileName += "Header";

        HdrFile.open(HdrFileName.c_str(), std::ios::out|std::ios::trunc);

        if ( ! HdrFile.good()) {
            amrex::FileOpenFailed(HdrFileName);
        }
        //
        // First thing written is our Checkpoint/Restart version string.
        //
        // We append "_single" or "_double" to the version string indicating
        // whether we're using "float" or "double" floating point data in the
        // particles so that we can Restart from the checkpoint files.
        //
        HdrFile << "Version_Two_Dot_Zero_double" << '\n';
        //
        // AMREX_SPACEDIM and N for sanity checking.
        //
        HdrFile << AMREX_SPACEDIM << '\n';

        // The number of extra real parameters
        HdrFile << realData_m.size() << '\n';
        for (std::size_t j = 0; j < realData_m.size(); ++j)
            HdrFile << realData_m[j] << '\n';

        // The number of extra int parameters
        HdrFile << intData_m.size() << '\n';
        for (std::size_t j = 0; j < intData_m.size(); ++j)
            HdrFile << intData_m[j] << '\n';

        // is_checkpoint
        HdrFile << true << '\n';

        //
        // The total number of particles.
        //
        HdrFile << nParticles << '\n';
        //
        // The value of nextid that we need to restore on restart.
        // --> we don't use this feature
        //
        HdrFile << 0 << '\n';
        //
        // Then the finest level of the AMR hierarchy.
        //
        HdrFile << finest_level << '\n';
        //
        // Then the number of grids at each level.
        //
        for (int lev = 0; lev <= finest_level; ++lev) {
            HdrFile << layout_p->ParticleBoxArray(lev).size() << '\n';
        }
    }
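    // Sketch of the particle Header written above:
    //
    //   Version_Two_Dot_Zero_double
    //   3                        <- AMREX_SPACEDIM
    //   12                       <- realData_m.size()
    //   momentum_x ... magnetic_field_z
    //   1                        <- intData_m.size()
    //   energy_bin
    //   1                        <- is_checkpoint
    //   <total particle count>
    //   0                        <- nextid (unused)
    //   <finest level>
    //   <grids per level>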
    //
    // We want to write the data out in parallel.
    //
    // We'll allow up to nOutFiles active writers at a time.
    //
    int nOutFiles(256);

    nOutFiles = std::max(1, std::min(nOutFiles, NProcs));

    for (int lev = 0; lev <= finest_level; ++lev) {
        bool gotsome = (globalPartPerLevel[lev] > 0);
        //
        // We store the particles at each level in their own subdirectory.
        //
        std::string LevelDir = pdir;

        if (gotsome) {
            if ( ! LevelDir.empty() && LevelDir[LevelDir.size()-1] != '/') {
                LevelDir += '/';
            }

            LevelDir = amrex::Concatenate(LevelDir + "Level_", lev, 1);

            if ( Ippl::myNode() == 0 ) {
                if ( ! amrex::UtilCreateDirectory(LevelDir, 0755)) {
                    amrex::CreateDirectoryFailed(LevelDir);
                }
            }
            //
            // Force other processors to wait until directory is built.
            //
            Ippl::Comm->barrier();
        }

        // Write out the BoxArray header for this level's particles
        if ( gotsome && Ippl::myNode() == 0 ) {
            std::string HeaderFileName = LevelDir;
            HeaderFileName += "/Particle_H";
            std::ofstream ParticleHeader(HeaderFileName);

            layout_p->ParticleBoxArray(lev).writeOn(ParticleHeader);
            ParticleHeader << '\n';

            ParticleHeader.flush();
            ParticleHeader.close();
        }

        amrex::MFInfo info;
        info.SetAlloc(false);
        amr::AmrField_t state(layout_p->ParticleBoxArray(lev),
                              layout_p->ParticleDistributionMap(lev),
                              1, 0, info);
        //
        // We eventually want to write out the file name and the offset
        // into that file into which each grid of particles is written.
        //
        amrex::Vector<int>  which(state.size(), 0);
        amrex::Vector<int>  count(state.size(), 0);
        amrex::Vector<long> where(state.size(), 0);
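        // per-grid bookkeeping: which[g] = data file number, count[g] =
        // number of particles in grid g, where[g] = byte offset of grid g's
        // chunk inside that file (all filled in writeParticles_m below)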

        std::string filePrefix(LevelDir);
        filePrefix += '/';
        filePrefix += "DATA_";
        bool groupSets(false), setBuf(true);

        if (gotsome) {
            for (amrex::NFilesIter nfi(nOutFiles, filePrefix, groupSets, setBuf); nfi.ReadyToWrite(); ++nfi) {
                std::ofstream& myStream = (std::ofstream&) nfi.Stream();
                //
                // Write out all the valid particles we own at the specified level.
                // Do it grid block by grid block remembering the seek offset
                // for the start of writing of each block of data.
                //
                writeParticles_m(lev, myStream, nfi.FileNumber(), which, count, where, bunch_p, gamma);
            }

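            // collect the per-grid totals on the I/O processor, the only
            // rank that writes them into the header below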
            amrex::ParallelDescriptor::ReduceIntSum (which.dataPtr(), which.size(), IOProcNumber);
            amrex::ParallelDescriptor::ReduceIntSum (count.dataPtr(), count.size(), IOProcNumber);
            amrex::ParallelDescriptor::ReduceLongSum(where.dataPtr(), where.size(), IOProcNumber);
        }

        if ( Ippl::myNode() == 0 ) {
            for (int j = 0; j < state.size(); j++) {
                //
                // We now write the file number, the particle count, and the
                // file offset into which the data for each grid was written,
                // to the header file.
                //
                HdrFile << which[j] << ' ' << count[j] << ' ' << where[j] << '\n';
            }

            if (gotsome && doUnlink) {
                //
                // Unlink any zero-length data files.
                //
                amrex::Vector<long> cnt(nOutFiles, 0);

                for (int i = 0, N = count.size(); i < N; i++) {
                    cnt[which[i]] += count[i];
                }

                for (int i = 0, N = cnt.size(); i < N; i++) {
                    if (cnt[i] == 0) {
                        std::string FullFileName = amrex::NFilesIter::FileName(i, filePrefix);
                        amrex::UnlinkFile(FullFileName.c_str());
                    }
                }
            }
        }
    } // ---- end for(lev...)

    if ( Ippl::myNode() == 0 ) {
        HdrFile.flush();
        HdrFile.close();
        if ( ! HdrFile.good()) {
            throw OpalException("AmrYtWriter::writeBunch()",
                                "Problem writing HdrFile");
        }
    }
}


void AmrYtWriter::writeParticles_m(int level,
                                   std::ofstream& ofs,
                                   int fnum,
                                   amrex::Vector<int>& which,
                                   amrex::Vector<int>& count,
                                   amrex::Vector<long>& where,
                                   const AmrPartBunch* bunch_p,
                                   const double gamma) const
{
    /* Copied and modified from
     *
     * template <int NStructReal, int NStructInt, int NArrayReal, int NArrayInt>
     * void
     * ParticleContainer<NStructReal,
     *                   NStructInt,
     *                   NArrayReal,
     *                   NArrayInt>
     * ::WriteParticles(int lev,
     *                  std::ofstream& ofs,
     *                  int fnum,
     *                  Vector<int>& which,
     *                  Vector<int>& count,
     *                  Vector<long>& where,
     *                  bool is_checkpoint) const
     *
     * in AMReX_ParticleContainerI.H
     */

    const AmrLayout_t* layout_p = static_cast<const AmrLayout_t*>(&bunch_p->getLayout());
    const AmrPartBunch::pbase_t* amrpbase_p = bunch_p->getAmrParticleBase();

    const auto& LocalNumPerLevel = amrpbase_p->getLocalNumPerLevel();
    size_t lBegin = LocalNumPerLevel.begin(level);
    size_t lEnd   = LocalNumPerLevel.end(level);

    for (size_t ip = lBegin; ip < lEnd; ++ip) {
        const int grid = amrpbase_p->Grid[ip];

        count[grid] += 1;
    }

    amrex::MFInfo info;
    info.SetAlloc(false);
    amr::AmrField_t state(layout_p->ParticleBoxArray(level),
                          layout_p->ParticleDistributionMap(level),
                          1, 0, info);

    for (amrex::MFIter mfi(state); mfi.isValid(); ++mfi) {
        const int grid = mfi.index();

        which[grid] = fnum;
        where[grid] = amrex::VisMF::FileOffset(ofs);

        if (count[grid] == 0) {
            continue;
        }

        // First write out the integer data in binary.
        const int iChunkSize = 2 + intData_m.size();
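        // per-particle int chunk: id, owning node, then the intData_m
        // entries (here only energy_bin), i.e. iChunkSize = 3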
        amrex::Vector<int> istuff(count[grid]*iChunkSize);
        int* iptr = istuff.dataPtr();

        // inefficient
        for (size_t ip = lBegin; ip < lEnd; ++ip) {
            const int pGrid = amrpbase_p->Grid[ip];

            if ( grid == pGrid ) {

                iptr[0] = bunch_p->ID[ip];
                iptr[1] = Ippl::myNode();
                iptr[2] = bunch_p->Bin[ip];
                iptr += iChunkSize;
            }
        }

        amrex::writeIntData(istuff.dataPtr(), istuff.size(), ofs);

        // Write the Real data in binary.
        const int rChunkSize = AMREX_SPACEDIM + realData_m.size();
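        // per-particle real chunk: x y z, then the realData_m entries
        // (momenta, charge, mass, timestep, E field, B field), i.e.
        // rChunkSize = 3 + 12 = 15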
        amrex::Vector<double> rstuff(count[grid]*rChunkSize);
        double* rptr = rstuff.dataPtr();

        // inefficient
        for (size_t ip = lBegin; ip < lEnd; ++ip) {
            const int pGrid = amrpbase_p->Grid[ip];

            if ( grid == pGrid ) {

                int idx = 0;

                // coordinates
                for (int j = 0; j < AMREX_SPACEDIM; ++j)
                    rptr[idx++] = bunch_p->R[ip](j);

                // Lorentz transform
                if ( OpalData::getInstance()->isInOPALCyclMode() )
                    rptr[1] *= gamma; // y is the longitudinal direction
                else
                    rptr[2] *= gamma; // z is the longitudinal direction
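                // (stretching the longitudinal coordinate by gamma is the
                // Lorentz transform announced in the ATTENTION note above)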

                // momenta
                for (int j = 0; j < AMREX_SPACEDIM; ++j)
                    rptr[idx++] = bunch_p->P[ip](j);

                // charge
                rptr[idx++] = bunch_p->Q[ip];

                // mass
                rptr[idx++] = bunch_p->M[ip];

                // timestep
                rptr[idx++] = bunch_p->dt[ip];

//              // potential
//              rptr[idx++] = bunch_p->Phi[ip];

                // electric field
                for (int j = 0; j < AMREX_SPACEDIM; ++j)
                    rptr[idx++] = bunch_p->Ef[ip](j);

                // magnetic field
                for (int j = 0; j < AMREX_SPACEDIM; ++j)
                    rptr[idx++] = bunch_p->Bf[ip](j);

                rptr += idx; //realData_m.size();
            }
        }
        amrex::writeDoubleData(rstuff.dataPtr(), rstuff.size(), ofs);
    }
}