OPAL (Object Oriented Parallel Accelerator Library)  2021.1.99
AmrMultiGrid.cpp
1 //
2 // Class AmrMultiGrid
3 // Main class of the AMR Poisson multigrid solver.
4 // It implements the multigrid solver described in https://doi.org/10.1016/j.cpc.2019.106912
5 //
6 // Copyright (c) 2017 - 2020, Matthias Frey, Paul Scherrer Institut, Villigen PSI, Switzerland
7 // All rights reserved
8 //
9 // Implemented as part of the PhD thesis
10 // "Precise Simulations of Multibunches in High Intensity Cyclotrons"
11 //
12 // This file is part of OPAL.
13 //
14 // OPAL is free software: you can redistribute it and/or modify
15 // it under the terms of the GNU General Public License as published by
16 // the Free Software Foundation, either version 3 of the License, or
17 // (at your option) any later version.
18 //
19 // You should have received a copy of the GNU General Public License
20 // along with OPAL. If not, see <https://www.gnu.org/licenses/>.
21 //
22 #include "AmrMultiGrid.h"
23 
24 #include <algorithm>
25 #include <functional>
26 #include <map>
27 #include <numeric>
28 
29 #include "OPALconfig.h"
30 #include "AbstractObjects/OpalData.h"
31 #include "Utilities/OpalException.h"
32 #include "Utilities/Timer.h"
33 #include "Utilities/Util.h"
34 #include <AMReX_ParallelDescriptor.H>
35 
36 #include <boost/filesystem.hpp>
37 
38 #if AMR_MG_WRITE
39  #include <iomanip>
40 #endif
41 
42 AmrMultiGrid::AmrMultiGrid(AmrBoxLib* itsAmrObject_p,
43  const std::string& bsolver,
44  const std::string& prec,
45  const bool& rebalance,
46  const std::string& reuse,
47  const std::string& bcx,
48  const std::string& bcy,
49  const std::string& bcz,
50  const std::string& smoother,
51  const std::size_t& nSweeps,
52  const std::string& interp,
53  const std::string& norm)
54  : AmrPoissonSolver<AmrBoxLib>(itsAmrObject_p)
55  , comm_mp( new comm_t( amrex::ParallelDescriptor::Communicator() ) )
56  , nIter_m(0)
57  , bIter_m(0)
58  , maxiter_m(100)
59  , nSweeps_m(nSweeps)
60  , mglevel_m(0)
61  , lbase_m(0)
62  , lfine_m(0)
63  , nlevel_m(1)
64  , nBcPoints_m(0)
65  , snorm_m(norm)
66  , eps_m(1.0e-10)
67  , verbose_m(false)
68  , fname_m(OpalData::getInstance()->getInputBasename() + std::string(".solver"))
69  , flag_m(std::ios::out)
70 {
71 #if AMR_MG_TIMER
72  this->initTimer_m();
73 #endif
74 
75  const Boundary bcs[AMREX_SPACEDIM] = {
76  this->convertToEnumBoundary_m(bcx),
77  this->convertToEnumBoundary_m(bcy),
78  this->convertToEnumBoundary_m(bcz)
79  };
80 
81  this->initPhysicalBoundary_m(&bcs[0]);
82 
83  smootherType_m = this->convertToEnumSmoother_m(smoother);
84 
85  norm_m = this->convertToEnumNorm_m(norm);
86 
87  const Interpolater interpolater = this->convertToEnumInterpolater_m(interp);
88  this->initInterpolater_m(interpolater);
89 
90  // interpolater for crse-fine-interface
91  this->initCrseFineInterp_m(Interpolater::LAGRANGE);
92 
93  // preconditioner
94  const Preconditioner precond = this->convertToEnumPreconditioner_m(prec);
95  this->initPrec_m(precond, reuse);
96 
97  // base level solver
98  const BaseSolver solver = this->convertToEnumBaseSolver_m(bsolver);
99  this->initBaseSolver_m(solver, rebalance, reuse);
100 
101  if (boost::filesystem::exists(fname_m)) {
102  flag_m = std::ios::app;
103  INFOMSG("Appending solver information to existing file: " << fname_m << endl);
104  } else {
105  INFOMSG("Creating new file for solver information: " << fname_m << endl);
106  }
107 }
108 
109 
110 void AmrMultiGrid::solve(AmrScalarFieldContainer_t& rho,
111  AmrScalarFieldContainer_t& phi,
112  AmrVectorFieldContainer_t& efield,
113  unsigned short baseLevel,
114  unsigned short finestLevel,
115  bool prevAsGuess)
116 {
117  lbase_m = baseLevel;
118  lfine_m = finestLevel;
119  nlevel_m = lfine_m - lbase_m + 1;
120 
121  /* we cannot use the previous solution
122  * if we have to regrid (AmrPoissonSolver::hasToRegrid())
123  *
124  * regrid_m is set in AmrBoxlib::regrid()
125  */
126  bool reset = !prevAsGuess;
127 
128  if ( this->regrid_m )
129  reset = true;
130 
131  this->initLevels_m(rho, itsAmrObject_mp->Geom(), this->regrid_m);
132 
133  // build all necessary matrices and vectors
134  this->setup_m(rho, phi, this->regrid_m);
135 
136  this->initGuess_m(reset);
137 
138  // actual solve
139  scalar_t error = this->iterate_m();
140 
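 // make the solution consistent on coarse cells covered by finer cells (averageDown_m restricts the fine potential)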
141  for (int lev = nlevel_m - 1; lev > -1; --lev) {
142  averageDown_m(lev);
143  }
144 
145  // write efield to AMReX
146  this->computeEfield_m(efield);
147 
148  // copy solution back
149  for (int lev = 0; lev < nlevel_m; ++lev) {
150  int ilev = lbase_m + lev;
151 
152  phi[ilev]->setVal(0.0, phi[ilev]->nGrow());
153 
154  this->trilinos2amrex_m(lev, 0, *phi[ilev], mglevel_m[lev]->phi_p);
155  }
156 
157  if ( verbose_m )
158  this->writeSDDSData_m(error);
159 
160  // we can now reset
161  this->regrid_m = false;
162 }
163 
164 
165 void AmrMultiGrid::setNumberOfSweeps(const std::size_t& nSweeps) {
166  nSweeps_m = nSweeps;
167 }
168 
169 
170 void AmrMultiGrid::setMaxNumberOfIterations(const std::size_t& maxiter) {
171  if ( maxiter < 1 )
172  throw OpalException("AmrMultiGrid::setMaxNumberOfIterations()",
173  "The max. number of iterations needs to be positive!");
174 
175  maxiter_m = maxiter;
176 }
177 
178 
179 std::size_t AmrMultiGrid::getNumIters() {
180  return nIter_m;
181 }
182 
183 
184 AmrMultiGrid::scalar_t AmrMultiGrid::getLevelResidualNorm(lo_t level) {
185  return evalNorm_m(mglevel_m[level]->residual_p);
186 }
187 
188 
189 void AmrMultiGrid::setVerbose(bool verbose) {
190  verbose_m = verbose;
191 }
192 
193 
194 void AmrMultiGrid::setTolerance(const scalar_t& eps) {
195  eps_m = eps;
196 }
197 
198 
199 void AmrMultiGrid::initPhysicalBoundary_m(const Boundary* bc)
200 {
201  // make sure it's reset
202  nBcPoints_m = 0;
203 
204  for (unsigned int i = 0; i < AMREX_SPACEDIM; ++i) {
205  switch ( bc[i] ) {
206  case Boundary::DIRICHLET:
207  bc_m[i].reset( new AmrDirichletBoundary<AmrMultiGridLevel_t>() );
208  break;
209  case Boundary::OPEN:
210  bc_m[i].reset( new AmrOpenBoundary<AmrMultiGridLevel_t>() );
211  break;
212  case Boundary::PERIODIC:
213  bc_m[i].reset( new AmrPeriodicBoundary<AmrMultiGridLevel_t>() );
214  break;
215  default:
216  throw OpalException("AmrMultiGrid::initPhysicalBoundary_m()",
217  "This type of boundary is not supported");
218  }
219  // we use the maximum in order to build matrices
220  go_t tmp = bc_m[i]->getNumberOfPoints();
221  if ( nBcPoints_m < tmp )
222  nBcPoints_m = tmp;
223  }
224 }
225 
226 
227 void AmrMultiGrid::initLevels_m(const amrex::Vector<AmrField_u>& rho,
228  const amrex::Vector<AmrGeometry_t>& geom,
229  bool regrid)
230 {
231  if ( !regrid )
232  return;
233 
234  // although we do a resize afterwards, we do this to be safe
235  for (int lev = nlevel_m; lev < (int)mglevel_m.size(); ++lev) {
236  mglevel_m[lev].reset(nullptr);
237  }
238 
239  mglevel_m.resize(nlevel_m);
240 
241  amrex::Periodicity period(AmrIntVect_t(D_DECL(0, 0, 0)));
242 
243  AmrIntVect_t rr = AmrIntVect_t(D_DECL(2, 2, 2));
244 
245  for (int lev = 0; lev < nlevel_m; ++lev) {
246  int ilev = lbase_m + lev;
247 
248  // do not initialize base level every time
249  if (mglevel_m[lev] == nullptr || lev > lbase_m) {
251  rho[ilev]->boxArray(),
252  rho[ilev]->DistributionMap(),
253  geom[ilev],
254  rr,
255  bc_m,
256  comm_mp));
257  } else {
258  mglevel_m[lev]->buildLevelMask();
259  }
260 
261  mglevel_m[lev]->refmask.reset(
262  new AmrMultiGridLevel_t::mask_t(mglevel_m[lev]->grids,
263  mglevel_m[lev]->dmap, 1, 2)
264  );
265  mglevel_m[lev]->refmask->setVal(AmrMultiGridLevel_t::Refined::NO, 2);
266  mglevel_m[lev]->refmask->FillBoundary(period);
267 
268  amrex::BoxArray ba = mglevel_m[lev]->grids;
269  ba.coarsen(rr);
270  mglevel_m[lev]->crsemask.reset(
271  new AmrMultiGridLevel_t::mask_t(ba,
272  mglevel_m[lev]->dmap, 1, 2)
273  );
274  mglevel_m[lev]->crsemask->setVal(AmrMultiGridLevel_t::Refined::NO, 2);
275  }
276 
277  for (int lev = 1; lev < nlevel_m; ++lev) {
278  mglevel_m[lev]->crsemask->setVal(AmrMultiGridLevel_t::Refined::YES, 0);
279 
280  // used for boundary interpolation --> replaces expensive calls to isBoundary
281  mglevel_m[lev]->crsemask->setDomainBndry(AmrMultiGridLevel_t::Mask::PHYSBNDRY,
282  mglevel_m[lev-1]->geom); //FIXME: geometry of lev - 1
283  // really needed ?
284  mglevel_m[lev]->crsemask->FillBoundary(period);
285  }
286 
287  /* to complete initialization we need to fill
288  * the mask of refinement
289  */
290  for (int lev = 0; lev < nlevel_m-1; ++lev) {
291  // get boxarray with refined cells
292  amrex::BoxArray ba = mglevel_m[lev]->grids;
293  ba.refine(rr);
294  ba = amrex::intersect(mglevel_m[lev+1]->grids, ba);
295  ba.coarsen(rr);
296 
297  // refined cells
298  amrex::DistributionMapping dmap(ba, comm_mp->getSize());
299  AmrMultiGridLevel_t::mask_t refined(ba, dmap, 1, 0);
300  refined.setVal(AmrMultiGridLevel_t::Refined::YES);
301 // refined.setDomainBndry(AmrMultiGridLevel_t::Mask::PHYSBNDRY, mglevel_m[lev]->geom);
302 
303  // fill intersection with YES
304  mglevel_m[lev]->refmask->copy(refined, 0, 0, 1, 0, 2);
305 
306  /* physical boundary cells will never be refined cells
307  * since they are ghost cells
308  */
309  mglevel_m[lev]->refmask->setDomainBndry(AmrMultiGridLevel_t::Mask::PHYSBNDRY,
310  mglevel_m[lev]->geom);
311 
312  mglevel_m[lev]->refmask->FillBoundary(period);
313  }
314 }
315 
316 
317 void AmrMultiGrid::clearMasks_m() {
318  for (int lev = 0; lev < nlevel_m; ++lev) {
319  mglevel_m[lev]->refmask.reset(nullptr);
320  mglevel_m[lev]->crsemask.reset(nullptr);
321  mglevel_m[lev]->mask.reset(nullptr);
322  }
323 }
324 
325 
326 void AmrMultiGrid::initGuess_m(bool reset) {
327  if ( !reset )
328  return;
329 
330  // reset
331  for (int lev = 0; lev < nlevel_m; ++lev)
332  mglevel_m[lev]->phi_p->putScalar(0.0);
333 }
334 
335 
336 AmrMultiGrid::scalar_t AmrMultiGrid::iterate_m() {
337 
338  // initial error
339  std::vector<scalar_t> rhsNorms;
340  std::vector<scalar_t> resNorms;
341 
342  this->initResidual_m(rhsNorms, resNorms);
343 
344  std::for_each(rhsNorms.begin(), rhsNorms.end(),
345  [this](double& val){ val *= eps_m; });
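 // convergence: the residual norm of every level must drop below eps_m times the rhs norm of that level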
346 
347  nIter_m = 0;
348  bIter_m = 0;
349 
350  while ( !isConverged_m(rhsNorms, resNorms) && nIter_m < maxiter_m ) {
351 
352  relax_m(lfine_m);
353 
354 // /* in contrast to algorithm, we average down now
355 // * --> potential is valid also on coarse covered
356 // * cells
357 // * --> however, it may take 1-2 iterations longer
358 // */
359 // for (int lev = nlevel_m - 1; lev > -1; --lev) {
360 // averageDown_m(lev);
361 // }
362 
363  // update residual
364  for (int lev = 0; lev < nlevel_m; ++lev) {
365 
366  this->residual_m(lev,
367  mglevel_m[lev]->residual_p,
368  mglevel_m[lev]->rho_p,
369  mglevel_m[lev]->phi_p);
370  }
371 
372 
373  for (lo_t lev = 0; lev < nlevel_m; ++lev)
374  resNorms[lev] = getLevelResidualNorm(lev);
375 
376  ++nIter_m;
377 
378 #if AMR_MG_WRITE
379  this->writeResidualNorm_m();
380 #endif
381 
382  bIter_m += solver_mp->getNumIters();
383  }
384 
385  return std::accumulate(resNorms.begin(),
386  resNorms.end(), 0.0,
387  std::plus<scalar_t>());
388 }
389 
390 
391 bool AmrMultiGrid::isConverged_m(std::vector<scalar_t>& rhsNorms,
392  std::vector<scalar_t>& resNorms)
393 {
394  return std::equal(resNorms.begin(), resNorms.end(),
395  rhsNorms.begin(),
396  std::less<scalar_t>());
397 }
398 
399 
400 void AmrMultiGrid::residual_m(const lo_t& level,
401  Teuchos::RCP<vector_t>& r,
402  const Teuchos::RCP<vector_t>& b,
403  const Teuchos::RCP<vector_t>& x)
404 {
405  /*
406  * r = b - A*x
407  */
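 // below the finest level the composite operator is used: Awf plus the flux contribution of the finer level (Bfine)
 // and the boundary contribution of the coarser level (Bcrse); covered cells are masked out via UnCovered_p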
408  if ( level < lfine_m ) {
409 
410  vector_t fine2crse(mglevel_m[level]->Awf_p->getDomainMap(), true);
411 
412  // get boundary contribution from the finer level (if any)
413  if ( mglevel_m[level]->Bfine_p != Teuchos::null ) {
414  mglevel_m[level]->Bfine_p->apply(*mglevel_m[level+1]->phi_p, fine2crse);
415  }
416 
417  // operation: fine2crse += A * x
418  mglevel_m[level]->Awf_p->apply(*x, fine2crse,
419  Teuchos::NO_TRANS,
420  scalar_t(1.0),
421  scalar_t(1.0));
422 
423  if ( mglevel_m[level]->Bcrse_p != Teuchos::null ) {
424  // operation: fine2crse += B * phi^(l-1)
425  mglevel_m[level]->Bcrse_p->apply(*mglevel_m[level-1]->phi_p,
426  fine2crse,
427  Teuchos::NO_TRANS,
428  scalar_t(1.0),
429  scalar_t(1.0));
430  }
431 
432  Teuchos::RCP<vector_t> uncov_Ax = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, true) );
433  mglevel_m[level]->UnCovered_p->apply(fine2crse, *uncov_Ax);
434 
435  Teuchos::RCP<vector_t> uncov_b = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, true) );
436 
437  mglevel_m[level]->UnCovered_p->apply(*b, *uncov_b);
438 
439  // ONLY subtract coarse rho
440 // mglevel_m[level]->residual_p->putScalar(0.0);
441 
442  r->update(1.0, *uncov_b, -1.0, *uncov_Ax, 0.0);
443 
444  } else {
445  /* finest level: Awf_p == Anf_p
446  */
447  Teuchos::RCP<vector_t> Ax = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, true) );
448  mglevel_m[level]->Anf_p->apply(*x, *Ax);
449 
450  if ( mglevel_m[level]->Bcrse_p != Teuchos::null ) {
451  // operation: Ax += B * phi^(l-1)
452  mglevel_m[level]->Bcrse_p->apply(*mglevel_m[level-1]->phi_p,
453  *Ax,
454  Teuchos::NO_TRANS,
455  scalar_t(1.0),
456  scalar_t(1.0));
457  }
458  r->update(1.0, *b, -1.0, *Ax, 0.0);
459  }
460 }
461 
462 
463 void AmrMultiGrid::relax_m(const lo_t& level) {
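 // recursive V-cycle: smooth the error on this level, restrict the residual to level-1, recurse,
 // then prolongate the coarse correction and post-smooth; the base level is solved directly by the bottom solver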
464 
465  if ( level == lfine_m ) {
466 
467  if ( level == lbase_m ) {
468  /* Anf_p == Awf_p
469  */
470  Teuchos::RCP<vector_t> Ax = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, true) );
471  mglevel_m[level]->Anf_p->apply(*mglevel_m[level]->phi_p, *Ax);
472  mglevel_m[level]->residual_p->update(1.0, *mglevel_m[level]->rho_p, -1.0, *Ax, 0.0);
473 
474  } else {
475  this->residual_no_fine_m(level,
476  mglevel_m[level]->residual_p,
477  mglevel_m[level]->phi_p,
478  mglevel_m[level-1]->phi_p,
479  mglevel_m[level]->rho_p);
480  }
481  }
482 
483  if ( level > 0 ) {
484  // phi^(l, save) = phi^(l)
485  Teuchos::RCP<vector_t> phi_save = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p) );
486  Tpetra::deep_copy(*phi_save, *mglevel_m[level]->phi_p);
487 
488  mglevel_m[level-1]->error_p->putScalar(0.0);
489 
490  // smoothing
491  this->smooth_m(level,
492  mglevel_m[level]->error_p,
493  mglevel_m[level]->residual_p);
494 
495 
496  // phi = phi + e
497  mglevel_m[level]->phi_p->update(1.0, *mglevel_m[level]->error_p, 1.0);
498 
499  /*
500  * restrict
501  */
502  this->restrict_m(level);
503 
504  this->relax_m(level - 1);
505 
506  /*
507  * prolongate / interpolate
508  */
509  this->prolongate_m(level);
510 
511  // residual update
512  Teuchos::RCP<vector_t> tmp = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p) );
513  this->residual_no_fine_m(level, tmp,
514  mglevel_m[level]->error_p,
515  mglevel_m[level-1]->error_p,
516  mglevel_m[level]->residual_p);
517 
518  Tpetra::deep_copy(*mglevel_m[level]->residual_p, *tmp);
519 
520  // delta error
521  Teuchos::RCP<vector_t> derror = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, true) );
522 
523  // smoothing
524  this->smooth_m(level, derror, mglevel_m[level]->residual_p);
525 
526  // e^(l) += de^(l)
527  mglevel_m[level]->error_p->update(1.0, *derror, 1.0);
528 
529  // phi^(l) = phi^(l, save) + e^(l)
530  mglevel_m[level]->phi_p->update(1.0, *phi_save, 1.0, *mglevel_m[level]->error_p, 0.0);
531 
532  } else {
533  // e = A^(-1)r
534 #if AMR_MG_TIMER
535  IpplTimings::startTimer(bottomTimer_m);
536 #endif
537 
538  solver_mp->solve(mglevel_m[level]->error_p,
539  mglevel_m[level]->residual_p);
540 
541 #if AMR_MG_TIMER
542  IpplTimings::stopTimer(bottomTimer_m);
543 #endif
544  // phi = phi + e
545  mglevel_m[level]->phi_p->update(1.0, *mglevel_m[level]->error_p, 1.0);
546  }
547 }
548 
549 
550 void AmrMultiGrid::residual_no_fine_m(const lo_t& level,
551  Teuchos::RCP<vector_t>& result,
552  const Teuchos::RCP<vector_t>& rhs,
553  const Teuchos::RCP<vector_t>& crs_rhs,
554  const Teuchos::RCP<vector_t>& b)
555 {
556  vector_t crse2fine(mglevel_m[level]->Anf_p->getDomainMap(), true);
557 
558  // get boundary contribution from the coarser level (if any)
559  if ( mglevel_m[level]->Bcrse_p != Teuchos::null ) {
560  mglevel_m[level]->Bcrse_p->apply(*crs_rhs, crse2fine);
561  }
562 
563  // operation: crse2fine = 1.0 * crse2fine + 1.0 * A^(l) * rhs
564  mglevel_m[level]->Anf_p->apply(*rhs, crse2fine,
565  Teuchos::NO_TRANS,
566  scalar_t(1.0),
567  scalar_t(1.0));
568 
569  result->update(1.0, *b, -1.0, crse2fine, 0.0);
570 }
571 
572 
573 #if AMR_MG_WRITE
574 void AmrMultiGrid::writeResidualNorm_m() {
575  scalar_t err = 0.0;
576 
577  std::ofstream out;
578  if ( comm_mp->getRank() == 0 )
579  out.open("residual.dat", std::ios::app);
580 
581  for (int lev = 0; lev < nlevel_m; ++lev) {
582  scalar_t tmp = evalNorm_m(mglevel_m[lev]->residual_p);
583 
584  if ( comm_mp->getRank() == 0 )
585  out << std::setw(15) << std::right << tmp;
586  }
587 
588  if ( comm_mp->getRank() == 0 )
589  out.close();
590 }
591 #endif
592 
593 
594 typename AmrMultiGrid::scalar_t
595 AmrMultiGrid::evalNorm_m(const Teuchos::RCP<const vector_t>& x)
596 {
597  scalar_t norm = 0.0;
598 
599  switch ( norm_m ) {
600  case Norm::L1:
601  {
602  norm = x->norm1();
603  break;
604  }
605  case Norm::L2:
606  {
607  norm = x->norm2();
608  break;
609  }
610  case Norm::LINF:
611  {
612  norm = x->normInf();
613  break;
614  }
615  default:
616  throw OpalException("AmrMultiGrid::evalNorm_m()",
617  "This type of norm is not supported.");
618  }
619  return norm;
620 }
621 
622 
623 void AmrMultiGrid::initResidual_m(std::vector<scalar_t>& rhsNorms,
624  std::vector<scalar_t>& resNorms)
625 {
626  rhsNorms.clear();
627  resNorms.clear();
628 
629 #if AMR_MG_WRITE
630  std::ofstream out;
631 
632  if ( comm_mp->getRank() == 0) {
633  out.open("residual.dat", std::ios::out);
634 
635  for (int lev = 0; lev < nlevel_m; ++lev)
636  out << std::setw(14) << std::right << "level" << lev;
637  out << std::endl;
638  }
639 #endif
640 
641  for (int lev = 0; lev < nlevel_m; ++lev) {
642  this->residual_m(lev,
643  mglevel_m[lev]->residual_p,
644  mglevel_m[lev]->rho_p,
645  mglevel_m[lev]->phi_p);
646 
647  resNorms.push_back(evalNorm_m(mglevel_m[lev]->residual_p));
648 
649 #if AMR_MG_WRITE
650  if ( comm_mp->getRank() == 0 )
651  out << std::setw(15) << std::right << resNorms.back();
652 #endif
653 
654  rhsNorms.push_back(evalNorm_m(mglevel_m[lev]->rho_p));
655  }
656 
657 #if AMR_MG_WRITE
658  if ( comm_mp->getRank() == 0 )
659  out.close();
660 #endif
661 }
662 
663 
664 void AmrMultiGrid::computeEfield_m(AmrVectorFieldContainer_t& efield) {
665 #if AMR_MG_TIMER
666  IpplTimings::startTimer(efieldTimer_m);
667 #endif
668  Teuchos::RCP<vector_t> efield_p = Teuchos::null;
669  for (int lev = nlevel_m - 1; lev > -1; --lev) {
670  int ilev = lbase_m + lev;
671 
672  efield_p = Teuchos::rcp( new vector_t(mglevel_m[lev]->map_p, false) );
673 
674 
675  for (int d = 0; d < AMREX_SPACEDIM; ++d) {
676  efield[ilev][d]->setVal(0.0, efield[ilev][d]->nGrow());
677  efield_p->putScalar(0.0);
678  mglevel_m[lev]->G_p[d]->apply(*mglevel_m[lev]->phi_p, *efield_p);
679  this->trilinos2amrex_m(lev, 0, *efield[ilev][d], efield_p);
680  }
681  }
682 #if AMR_MG_TIMER
683  IpplTimings::stopTimer(efieldTimer_m);
684 #endif
685 }
686 
687 
688 void AmrMultiGrid::setup_m(const amrex::Vector<AmrField_u>& rho,
689  const amrex::Vector<AmrField_u>& phi,
690  const bool& matrices)
691 {
692 #if AMR_MG_TIMER
693  IpplTimings::startTimer(buildTimer_m);
694 #endif
695 
696  if ( lbase_m == lfine_m )
697  this->buildSingleLevel_m(rho, phi, matrices);
698  else
699  this->buildMultiLevel_m(rho, phi, matrices);
700 
701  mglevel_m[lfine_m]->error_p->putScalar(0.0);
702 
703  if ( matrices ) {
704  this->clearMasks_m();
705  // set the bottom solve operator
706  if (!solver_mp->hasOperator()) {
707  solver_mp->setOperator(mglevel_m[lbase_m]->Anf_p, mglevel_m[0].get());
708  }
709  }
710 
711 #if AMR_MG_TIMER
712  IpplTimings::stopTimer(buildTimer_m);
713 #endif
714 }
715 
716 
717 void AmrMultiGrid::buildSingleLevel_m(const amrex::Vector<AmrField_u>& rho,
718  const amrex::Vector<AmrField_u>& phi,
719  const bool& matrices)
720 {
721  this->open_m(lbase_m, matrices);
722 
723  const scalar_t* invdx = mglevel_m[lbase_m]->invCellSize();
724 
725  const scalar_t invdx2[] = {
726  D_DECL( invdx[0] * invdx[0],
727  invdx[1] * invdx[1],
728  invdx[2] * invdx[2] )
729  };
730 
731  if (matrices) {
732  for (amrex::MFIter mfi(*mglevel_m[lbase_m]->mask, true);
733  mfi.isValid(); ++mfi)
734  {
735  const box_t& tbx = mfi.tilebox();
736  const basefab_t& mfab = (*mglevel_m[lbase_m]->mask)[mfi];
737  const farraybox_t& rhofab = (*rho[lbase_m])[mfi];
738  const farraybox_t& pfab = (*phi[lbase_m])[mfi];
739 
740  const int* lo = tbx.loVect();
741  const int* hi = tbx.hiVect();
742 
743  for (int i = lo[0]; i <= hi[0]; ++i) {
744  for (int j = lo[1]; j <= hi[1]; ++j) {
745 #if AMREX_SPACEDIM == 3
746  for (int k = lo[2]; k <= hi[2]; ++k) {
747 #endif
748  AmrIntVect_t iv(D_DECL(i, j, k));
749  go_t gidx = mglevel_m[lbase_m]->serialize(iv);
750 
751  if (!solver_mp->hasOperator()) {
752  this->buildNoFinePoissonMatrix_m(lbase_m, gidx, iv, mfab, invdx2);
753  this->buildGradientMatrix_m(lbase_m, gidx, iv, mfab, invdx);
754  }
755 
756  mglevel_m[lbase_m]->rho_p->replaceGlobalValue(gidx, rhofab(iv, 0));
757  mglevel_m[lbase_m]->phi_p->replaceGlobalValue(gidx, pfab(iv, 0));
758 
759 #if AMREX_SPACEDIM == 3
760  }
761 #endif
762  }
763  }
764  }
765  } else {
766  this->buildDensityVector_m(lbase_m, *rho[lbase_m]);
767  }
768 
769  this->close_m(lbase_m, matrices);
770 
771  if (matrices) {
772  mglevel_m[lbase_m]->Awf_p = Teuchos::null;
773  mglevel_m[lbase_m]->UnCovered_p = Teuchos::null;
774  }
775 }
776 
777 
778 void AmrMultiGrid::buildMultiLevel_m(const amrex::Vector<AmrField_u>& rho,
779  const amrex::Vector<AmrField_u>& phi,
780  const bool& matrices)
781 {
782  // the base level has no smoother --> nlevel_m - 1
783  if ( matrices ) {
784  // although we do a resize afterwards, we do this to be safe
785  for (int lev = nlevel_m-1; lev < (int)smoother_m.size(); ++lev) {
786  smoother_m[lev].reset();
787  }
788  smoother_m.resize(nlevel_m-1);
789  }
790 
791  for (int lev = 0; lev < nlevel_m; ++lev) {
792  this->open_m(lev, matrices);
793 
794  int ilev = lbase_m + lev;
795 
796  // find all coarse cells that are covered by fine cells
797 // AmrIntVect_t rr = mglevel_m[lev]->refinement();
798 
799  const scalar_t* invdx = mglevel_m[lev]->invCellSize();
800 
801  const scalar_t invdx2[] = {
802  D_DECL( invdx[0] * invdx[0],
803  invdx[1] * invdx[1],
804  invdx[2] * invdx[2] )
805  };
806 
807  if ( matrices ) {
808  for (amrex::MFIter mfi(*mglevel_m[lev]->mask, true);
809  mfi.isValid(); ++mfi)
810  {
811  const box_t& tbx = mfi.tilebox();
812  const basefab_t& mfab = (*mglevel_m[lev]->mask)[mfi];
813  const basefab_t& rfab = (*mglevel_m[lev]->refmask)[mfi];
814  const basefab_t& cfab = (*mglevel_m[lev]->crsemask)[mfi];
815  const farraybox_t& rhofab = (*rho[ilev])[mfi];
816  const farraybox_t& pfab = (*phi[ilev])[mfi];
817 
818  const int* lo = tbx.loVect();
819  const int* hi = tbx.hiVect();
820 
821  for (int i = lo[0]; i <= hi[0]; ++i) {
822  int ii = i << 1;
823  for (int j = lo[1]; j <= hi[1]; ++j) {
824  int jj = j << 1;
825 #if AMREX_SPACEDIM == 3
826  for (int k = lo[2]; k <= hi[2]; ++k) {
827  int kk = k << 1;
828 #endif
829  AmrIntVect_t iv(D_DECL(i, j, k));
830  go_t gidx = mglevel_m[lev]->serialize(iv);
831 
832  this->buildRestrictionMatrix_m(lev, gidx, iv,
833  D_DECL(ii, jj, kk), rfab);
834 
835  this->buildInterpolationMatrix_m(lev, gidx, iv, cfab);
836 
837  this->buildCrseBoundaryMatrix_m(lev, gidx, iv, mfab,
838  cfab, invdx2);
839 
840  this->buildFineBoundaryMatrix_m(lev, gidx, iv,
841  mfab, rfab);
842 
843  this->buildCompositePoissonMatrix_m(lev, gidx, iv, mfab,
844  rfab, invdx2);
845 
846  if (lev > lbase_m || (lev == lbase_m && !solver_mp->hasOperator())) {
847  this->buildNoFinePoissonMatrix_m(lev, gidx, iv, mfab, invdx2);
848  this->buildGradientMatrix_m(lev, gidx, iv, mfab, invdx);
849  }
850 
851  mglevel_m[lev]->rho_p->replaceGlobalValue(gidx, rhofab(iv, 0));
852  mglevel_m[lev]->phi_p->replaceGlobalValue(gidx, pfab(iv, 0));
853 #if AMREX_SPACEDIM == 3
854  }
855 #endif
856  }
857  }
858  }
859  } else {
860  for (lo_t lev = 0; lev < nlevel_m; ++lev) {
861  int ilev = lbase_m + lev;
862  this->buildDensityVector_m(lev, *rho[ilev]);
863  }
864  }
865 
866  this->close_m(lev, matrices);
867 
868  if ( matrices && lev > lbase_m ) {
869  smoother_m[lev-1].reset( new AmrSmoother(mglevel_m[lev]->Anf_p,
870  smootherType_m, nSweeps_m) );
871  }
872  }
873 }
874 
875 
876 void AmrMultiGrid::open_m(const lo_t& level,
877  const bool& matrices)
878 {
879  if ( matrices ) {
880 
881  if ( level > lbase_m ) {
882 
883  /*
884  * interpolation matrix
885  */
886 
887  int nNeighbours = (nBcPoints_m + 1) * interp_mp->getNumberOfPoints();
888 
889  mglevel_m[level]->I_p = Teuchos::rcp( new matrix_t(mglevel_m[level]->map_p,
890  nNeighbours,
891  Tpetra::StaticProfile) );
892 
893  /*
894  * coarse boundary matrix
895  */
896 
897  nNeighbours = 2 * AMREX_SPACEDIM * nBcPoints_m *
898  2 * AMREX_SPACEDIM * interface_mp->getNumberOfPoints();
899 
900  mglevel_m[level]->Bcrse_p = Teuchos::rcp(
901  new matrix_t(mglevel_m[level]->map_p, nNeighbours,
902  Tpetra::StaticProfile) );
903 
904  }
905 
906 
907  if ( level < lfine_m ) {
908 
909  /*
910  * restriction matrix
911  */
912 
913  // refinement 2
914  int nNeighbours = AMREX_D_TERM(2, * 2, * 2);
915 
916  mglevel_m[level]->R_p = Teuchos::rcp(
917  new matrix_t(mglevel_m[level]->map_p, nNeighbours,
918  Tpetra::StaticProfile) );
919 
920  /*
921  * fine boundary matrix
922  */
923 
924  // refinement 2
925  nNeighbours = 2 * AMREX_SPACEDIM * AMREX_D_TERM(2, * 2, * 2);
926 
927  mglevel_m[level]->Bfine_p = Teuchos::rcp(
928  new matrix_t(mglevel_m[level]->map_p, nNeighbours,
929  Tpetra::StaticProfile) );
930 
931  }
932 
933  /*
934  * no-fine Poisson matrix
935  */
936 
937  int nPhysBoundary = 2 * AMREX_SPACEDIM * nBcPoints_m;
938 
939  // number of internal stencil points
940  int nIntBoundary = AMREX_SPACEDIM * interface_mp->getNumberOfPoints();
941 
942  int nEntries = (AMREX_SPACEDIM << 1) + 2 /* plus boundaries */ + nPhysBoundary + nIntBoundary;
943 
944  if (level > lbase_m || (level == lbase_m && !solver_mp->hasOperator())) {
945  mglevel_m[level]->Anf_p = Teuchos::rcp(
946  new matrix_t(mglevel_m[level]->map_p, nEntries,
947  Tpetra::StaticProfile) );
948  }
949 
950  /*
951  * with-fine / composite Poisson matrix
952  */
953  if ( lbase_m != lfine_m ) {
954  nEntries = (AMREX_SPACEDIM << 1) + 5 /* plus boundaries */ + nPhysBoundary + nIntBoundary;
955 
956  mglevel_m[level]->Awf_p = Teuchos::rcp(
957  new matrix_t(mglevel_m[level]->map_p, nEntries,
958  Tpetra::StaticProfile) );
959 
960  /*
961  * uncovered cells matrix
962  */
963  mglevel_m[level]->UnCovered_p = Teuchos::rcp(
964  new matrix_t(mglevel_m[level]->map_p, 1,
965  Tpetra::StaticProfile) );
966  }
967 
968  /*
969  * gradient matrices
970  */
971  nEntries = 11;
972 
973  if (level > lbase_m || (level == lbase_m && !solver_mp->hasOperator())) {
974  for (int d = 0; d < AMREX_SPACEDIM; ++d) {
975  mglevel_m[level]->G_p[d] = Teuchos::rcp(
976  new matrix_t(mglevel_m[level]->map_p, nEntries,
977  Tpetra::StaticProfile) );
978  }
979  }
980  }
981 
982  mglevel_m[level]->rho_p = Teuchos::rcp(
983  new vector_t(mglevel_m[level]->map_p, false) );
984 
985  if ( matrices ) {
986  mglevel_m[level]->phi_p = Teuchos::rcp(
987  new vector_t(mglevel_m[level]->map_p, false) );
988  }
989 }
990 
991 
992 void AmrMultiGrid::close_m(const lo_t& level,
993  const bool& matrices)
994 {
995  if ( matrices ) {
996  if ( level > lbase_m ) {
997 
998  mglevel_m[level]->I_p->fillComplete(mglevel_m[level-1]->map_p, // col map (domain map)
999  mglevel_m[level]->map_p); // row map (range map)
1000 
1001  mglevel_m[level]->Bcrse_p->fillComplete(mglevel_m[level-1]->map_p, // col map
1002  mglevel_m[level]->map_p); // row map
1003  }
1004 
1005  if ( level < lfine_m ) {
1006 
1007  mglevel_m[level]->R_p->fillComplete(mglevel_m[level+1]->map_p,
1008  mglevel_m[level]->map_p);
1009 
1010  mglevel_m[level]->Bfine_p->fillComplete(mglevel_m[level+1]->map_p,
1011  mglevel_m[level]->map_p);
1012  }
1013 
1014  if (level > lbase_m || (level == lbase_m && !solver_mp->hasOperator())) {
1015  mglevel_m[level]->Anf_p->fillComplete();
1016 
1017  for (int d = 0; d < AMREX_SPACEDIM; ++d)
1018  mglevel_m[level]->G_p[d]->fillComplete();
1019  }
1020 
1021  if ( lbase_m != lfine_m ) {
1022  mglevel_m[level]->Awf_p->fillComplete();
1023 
1024  mglevel_m[level]->UnCovered_p->fillComplete();
1025  }
1026  }
1027 }
1028 
1029 
1030 void AmrMultiGrid::buildNoFinePoissonMatrix_m(const lo_t& level,
1031  const go_t& gidx,
1032  const AmrIntVect_t& iv,
1033  const basefab_t& mfab,
1034  const scalar_t* invdx2)
1035 {
1036  /*
1037  * Laplacian of "no fine"
1038  */
1039 
1040  /*
1041  * 1D not supported
1042  * 2D --> 5 elements per row
1043  * 3D --> 7 elements per row
1044  */
1045 
1046  umap_t map;
1047  indices_t indices;
1048  coefficients_t values;
1049 
1050  /*
1051  * check neighbours in all directions (Laplacian stencil --> cross)
1052  */
1053  for (int d = 0; d < AMREX_SPACEDIM; ++d) {
1054  for (int shift = -1; shift <= 1; shift += 2) {
1055  AmrIntVect_t biv = iv;
1056  biv[d] += shift;
1057 
1058  switch ( mfab(biv) )
1059  {
1060  case AmrMultiGridLevel_t::Mask::INTERIOR:
1061  case AmrMultiGridLevel_t::Mask::COVERED: // covered --> interior cell
1062  {
1063  map[mglevel_m[level]->serialize(biv)] += invdx2[d];
1064  break;
1065  }
1066  case AmrMultiGridLevel_t::Mask::BNDRY:
1067  {
1068  // boundary cell
1069  // only level > 0 have this kind of boundary
1070 #ifndef NDEBUG
1071  if ( level == lbase_m )
1072  throw OpalException("AmrMultiGrid::buildNoFinePoissonMatrix_m()",
1073  "Error in mask for level "
1074  + std::to_string(level) + "!");
1075 #endif
1076  /* Dirichlet boundary conditions from coarser level.
1077  */
1078  interface_mp->fine(biv, map, invdx2[d], d, -shift,
1079  mglevel_m[level].get());
1080  break;
1081  }
1082  case AmrMultiGridLevel_t::Mask::PHYSBNDRY:
1083  {
1084  // physical boundary cell
1085  mglevel_m[level]->applyBoundary(biv, d, map,
1086  invdx2[d] /*matrix coefficient*/);
1087  break;
1088  }
1089  default:
1090  throw OpalException("AmrMultiGrid::buildNoFinePoissonMatrix_m()",
1091  "Error in mask for level "
1092  + std::to_string(level) + "!");
1093  }
1094  }
1095  }
1096 
1097  // check center
1098  map[gidx] += AMREX_D_TERM(- 2.0 * invdx2[0],
1099  - 2.0 * invdx2[1],
1100  - 2.0 * invdx2[2]);
1101 
1102  this->map2vector_m(map, indices, values);
1103 
1104  mglevel_m[level]->Anf_p->insertGlobalValues(gidx,
1105  indices.size(),
1106  &values[0],
1107  &indices[0]);
1108 }
1109 
1110 
1111 void AmrMultiGrid::buildCompositePoissonMatrix_m(const lo_t& level,
1112  const go_t& gidx,
1113  const AmrIntVect_t& iv,
1114  const basefab_t& mfab,
1115  const basefab_t& rfab,
1116  const scalar_t* invdx2)
1117 {
1118  /*
1119  * Laplacian of "with fine"
1120  *
1121  * For the finest level: Awf == Anf
1122  */
1123  if ( rfab(iv) == AmrMultiGridLevel_t::Refined::YES ) //|| lbase_m != lfine_m )
1124  return;
1125  /*
1126  * Only cells that are not refined
1127  */
1128 
1129  /*
1130  * 1D not supported by AmrMultiGrid
1131  * 2D --> 5 elements per row
1132  * 3D --> 7 elements per row
1133  */
1134 
1135  umap_t map;
1136  indices_t indices;
1137  coefficients_t values;
1138 
1139  /*
1140  * Only cells that are not refined
1141  */
1142 
1143  /*
1144  * check neighbours in all directions (Laplacian stencil --> cross)
1145  */
1146  for (int d = 0; d < AMREX_SPACEDIM; ++d) {
1147  for (int shift = -1; shift <= 1; shift += 2) {
1148  AmrIntVect_t biv = iv;
1149  biv[d] += shift;
1150 
1151  if ( rfab(biv) != AmrMultiGridLevel_t::Refined::YES )
1152  {
1153  /*
1154  * It can't be a refined cell!
1155  */
1156  switch ( mfab(biv) )
1157  {
1158  case AmrMultiGridLevel_t::Mask::COVERED:
1159  // covered --> interior cell
1160  case AmrMultiGridLevel_t::Mask::INTERIOR:
1161  {
1162  map[mglevel_m[level]->serialize(biv)] += invdx2[d];
1163  map[gidx] -= invdx2[d]; // add center once
1164  break;
1165  }
1166  case AmrMultiGridLevel_t::Mask::BNDRY:
1167  {
1168  // boundary cell
1169  // only level > 0 have this kind of boundary
1170 #ifndef NDEBUG
1171  if ( level == lbase_m )
1172  throw OpalException("AmrMultiGrid::buildCompositePoissonMatrix_m()",
1173  "Error in mask for level "
1174  + std::to_string(level) + "!");
1175 #endif
1176 
1177  /* We are on the fine side of the crse-fine interface
1178  * --> normal stencil --> no flux matching required
1179  * --> interpolation of fine ghost cell required
1180  * (used together with Bcrse)
1181  */
1182 
1183  /* Dirichlet boundary conditions from coarser level.
1184  */
1185  interface_mp->fine(biv, map, invdx2[d], d, -shift,
1186  mglevel_m[level].get());
1187 
1188  // add center once
1189  map[gidx] -= invdx2[d];
1190  break;
1191  }
1192  case AmrMultiGridLevel_t::Mask::PHYSBNDRY:
1193  {
1194  // physical boundary cell
1195  mglevel_m[level]->applyBoundary(biv, d, map,
1196  invdx2[d] /*matrix coefficient*/);
1197 
1198  // add center once
1199  map[gidx] -= invdx2[d];
1200  break;
1201  }
1202  default:
1203  throw OpalException("AmrMultiGrid::buildCompositePoissonMatrix_m()",
1204  "Error in mask for level "
1205  + std::to_string(level) + "!");
1206  }
1207  } else {
1208  /*
1209  * If neighbour cell is refined, we are on the coarse
1210  * side of the crse-fine interface --> flux matching
1211  * required --> interpolation of fine ghost cell
1212  * (used together with Bfine)
1213  */
1214 
1215 
1216  // flux matching, coarse part
1217 
1218  /* 2D --> 2 fine cells to compute flux per coarse-fine interface --> avg = 2
1219  * 3D --> 4 fine cells to compute flux per coarse-fine interface --> avg = 4
1220  *
1221  * @precondition: refinement of 2
1222  */
1223  // top and bottom for all directions
1224  const scalar_t* invcdx = mglevel_m[level]->invCellSize();
1225  const scalar_t* invfdx = mglevel_m[level+1]->invCellSize();
1226  scalar_t invavg = AMREX_D_PICK(1.0, 0.5, 0.25);
1227  scalar_t value = - invavg * invcdx[d] * invfdx[d];
1228 
1229  for (int d1 = 0; d1 < 2; ++d1) {
1230 #if AMREX_SPACEDIM == 3
1231  for (int d2 = 0; d2 < 2; ++d2) {
1232 #endif
1233 
1234  /* in order to get a top iv --> needs to be odd value in "d"
1235  * in order to get a bottom iv --> needs to be even value in "d"
1236  */
1237  AmrIntVect_t fake(D_DECL(0, 0, 0));
1238 
1239  fake[(d+1)%AMREX_SPACEDIM] = d1;
1240 #if AMREX_SPACEDIM == 3
1241  fake[(d+2)%AMREX_SPACEDIM] = d2;
1242 #endif
1243  interface_mp->coarse(iv, map, value, d, shift, rfab,
1244  fake, mglevel_m[level].get());
1245 
1246 #if AMREX_SPACEDIM == 3
1247  }
1248 #endif
1249  }
1250  }
1251  }
1252  }
1253 
1254  this->map2vector_m(map, indices, values);
1255 
1256  mglevel_m[level]->Awf_p->insertGlobalValues(gidx,
1257  indices.size(),
1258  &values[0],
1259  &indices[0]);
1260 
1261  scalar_t vv = 1.0;
1262  mglevel_m[level]->UnCovered_p->insertGlobalValues(gidx,
1263  1,
1264  &vv,
1265  &gidx);
1266 }
1267 
1268 
1269 void AmrMultiGrid::buildRestrictionMatrix_m(const lo_t& level,
1270  const go_t& gidx,
1271  const AmrIntVect_t& iv,
1272  D_DECL(const go_t& ii,
1273  const go_t& jj,
1274  const go_t& kk),
1275  const basefab_t& rfab)
1276 {
1277  /*
1278  * x^(l) = R * x^(l+1)
1279  */
1280 
1281  // finest level does not need to have a restriction matrix
1282  if ( rfab(iv) == AmrMultiGridLevel_t::Refined::NO || level == lfine_m )
1283  return;
1284 
1285  /* Difficulty: If a fine cell belongs to another processor than the underlying
1286  * coarse cell, we get an error when filling the matrix since the
1287  * cell (--> global index) does not belong to the same processor.
1288  * Solution: Find all coarse cells that are covered by fine cells, thus,
1289  * the distributionmap is correct.
1290  *
1291  *
1292  */
1293  indices_t indices;
1294  indices.reserve(2 << (AMREX_SPACEDIM - 1));
1295  coefficients_t values;
1296  values.reserve(2 << (AMREX_SPACEDIM -1));
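 // each coarse cell is the average of its 2^AMREX_SPACEDIM fine children, all with equal weight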
1297 
1298  // neighbours
1299  for (int iref = 0; iref < 2; ++iref) {
1300  for (int jref = 0; jref < 2; ++jref) {
1301 #if AMREX_SPACEDIM == 3
1302  for (int kref = 0; kref < 2; ++kref) {
1303 #endif
1304  AmrIntVect_t riv(D_DECL(ii + iref, jj + jref, kk + kref));
1305 
1306  indices.push_back( mglevel_m[level+1]->serialize(riv) );
1307  values.push_back( AMREX_D_PICK(0.5, 0.25, 0.125) );
1308 #if AMREX_SPACEDIM == 3
1309  }
1310 #endif
1311  }
1312  }
1313 
1314  mglevel_m[level]->R_p->insertGlobalValues(gidx,
1315  indices.size(),
1316  &values[0],
1317  &indices[0]);
1318 }
1319 
1320 
1321 void AmrMultiGrid::buildInterpolationMatrix_m(const lo_t& level,
1322  const go_t& gidx,
1323  const AmrIntVect_t& iv,
1324  const basefab_t& cfab)
1325 {
1326  /* crse: level - 1
1327  * fine (this): level
1328  */
1329 
1330  /*
1331  * This does not include ghost cells
1332  * --> no boundaries
1333  *
1334  * x^(l) = I * x^(l-1)
1335  */
1336 
1337  if ( level == lbase_m )
1338  return;
1339 
1340  umap_t map;
1341  indices_t indices;
1342  coefficients_t values;
1343 
1344  /*
1345  * we need boundary + indices from coarser level
1346  */
1347  interp_mp->stencil(iv, cfab, map, 1.0, mglevel_m[level-1].get());
1348 
1349  this->map2vector_m(map, indices, values);
1350 
1351  mglevel_m[level]->I_p->insertGlobalValues(gidx,
1352  indices.size(),
1353  &values[0],
1354  &indices[0]);
1355 }
1356 
1357 
1358 void AmrMultiGrid::buildCrseBoundaryMatrix_m(const lo_t& level,
1359  const go_t& gidx,
1360  const AmrIntVect_t& iv,
1361  const basefab_t& mfab,
1362  const basefab_t& cfab,
1363  const scalar_t* invdx2)
1364 {
1365  /*
1366  * fine (this): level
1367  * coarse: level - 1
1368  */
1369 
1370  // the base level has only physical boundaries
1371  if ( level == lbase_m )
1372  return;
1373 
1374  // iv is a fine cell
1375 
1376  umap_t map;
1377  indices_t indices;
1378  coefficients_t values;
1379 
1380  // check its neighbours to see if at crse-fine interface
1381  for (int d = 0; d < AMREX_SPACEDIM; ++d) {
1382  for (int shift = -1; shift <= 1; shift += 2) {
1383  // neighbour
1384  AmrIntVect_t niv = iv;
1385  niv[d] += shift;
1386 
1387  if ( mfab(niv) == AmrMultiGridLevel_t::Mask::BNDRY )
1388  {
1389  // neighbour does not belong to fine grids
1390  // includes no cells at physical boundary
1391 
1392  // coarse cell that is not refined
1393  AmrIntVect_t civ = iv;
1394  civ[d] += shift;
1395  civ.coarsen(mglevel_m[level]->refinement());
1396 
1397  // we need boundary + indices from coarser level
1398  // we need normalization by mesh size squared (of fine cell size)
1399  // --> Laplacian for fine cell
1400  // (Dirichlet boundary for coarse --> fine)
1401  interface_mp->coarse(civ, map, invdx2[d], d, shift, cfab,
1402  iv, mglevel_m[level-1].get());
1403  }
1404 #ifndef NDEBUG
1405  else if ( mfab(niv) == AmrMultiGridLevel_t::Mask::PHYSBNDRY ) {
1406  throw OpalException("AmrMultiGrid::buildCrseBoundaryMatrix_m()",
1407  "Fine meshes shouldn't be connected "
1408  "to physical (i.e. mesh) boundary!");
1409  }
1410 #endif
1411  }
1412  }
1413 
1414  this->map2vector_m(map, indices, values);
1415 
1416  mglevel_m[level]->Bcrse_p->insertGlobalValues(gidx,
1417  indices.size(),
1418  &values[0],
1419  &indices[0]);
1420 }
1421 
1422 
1423 void AmrMultiGrid::buildFineBoundaryMatrix_m(const lo_t& level,
1424  const go_t& gidx,
1425  const AmrIntVect_t& iv,
1426  const basefab_t& mfab,
1427  const basefab_t& rfab)
1428 {
1429  /* fine: level + 1
1430  * coarse (this): level
1431  */
1432 
1433  // the finest level does not need data from a finer level
1434  if ( rfab(iv) == AmrMultiGridLevel_t::Refined::YES || level == lfine_m )
1435  return;
1436 
1437  const scalar_t* invcdx = mglevel_m[level]->invCellSize();
1438  const scalar_t* invfdx = mglevel_m[level+1]->invCellSize();
1439 
1440  // inverse of number of fine cell gradients
1441  scalar_t invavg = AMREX_D_PICK(1, 0.5, 0.25);
1442 
1443  umap_t map;
1444  indices_t indices;
1445  coefficients_t values;
1446 
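 // helper: add the flux-matching contribution of the fine cells next to the coarse-fine interface;
 // fine cells lying over the uncovered coarse cell act as ghost cells and are interpolated via interface_mp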
1447  auto fill = [&](umap_t& map,
1448  D_DECL(int ii, int jj, int kk),
1449  int* begin, int* end, int d,
1450  const AmrIntVect_t& iv, int shift)
1451  {
1452  for (int iref = ii - begin[0]; iref <= ii + end[0]; ++iref) {
1453  for (int jref = jj - begin[1]; jref <= jj + end[1]; ++jref) {
1454 #if AMREX_SPACEDIM == 3
1455  for (int kref = kk - begin[2]; kref <= kk + end[2]; ++kref) {
1456 #endif
1457  /* Since all fine cells on the not-refined cell are
1458  * outside of the "domain" --> we need to interpolate
1459  */
1460  AmrIntVect_t riv(D_DECL(iref, jref, kref));
1461 
1462  if ( (riv[d] >> 1) /*refinement*/ == iv[d] ) {
1463  /* the fine cell is on the coarse side --> fine
1464  * ghost cell --> we need to interpolate
1465  */
1466  scalar_t value = - invavg * invcdx[d] * invfdx[d];
1467 
1468  interface_mp->fine(riv, map, value, d, shift,
1469  mglevel_m[level+1].get());
1470  } else {
1471  scalar_t value = invavg * invcdx[d] * invfdx[d];
1472  map[mglevel_m[level+1]->serialize(riv)] += value;
1473  }
1474 #if AMREX_SPACEDIM == 3
1475  }
1476 #endif
1477  }
1478  }
1479  };
1480 
1481 
1482  /*
1483  * iv is a coarse cell that got not refined
1484  *
1485  * --> check all neighbours to see if at crse-fine
1486  * interface
1487  */
1488  for (int d = 0; d < AMREX_SPACEDIM; ++d) {
1489  for (int shift = -1; shift <= 1; shift += 2) {
1490  // neighbour
1491  AmrIntVect_t covered = iv;
1492  covered[d] += shift;
1493 
1494  if ( rfab(covered) == AmrMultiGridLevel_t::Refined::YES &&
1495  mfab(covered) != AmrMultiGridLevel_t::PHYSBNDRY )
1496  {
1497  // neighbour is covered by fine cells
1498 
1499  /*
1500  * "shift" is the amount to a coarse cell that got refined
1501  * "d" is the direction to shift
1502  *
1503  * --> check all covered neighbour cells
1504  */
1505 
1506  /* we need to iterate over correct fine cells. It depends
1507  * on the orientation of the interface
1508  */
1509  int begin[AMREX_SPACEDIM] = { D_DECL( int(d == 0), int(d == 1), int(d == 2) ) };
1510  int end[AMREX_SPACEDIM] = { D_DECL( int(d != 0), int(d != 1), int(d != 2) ) };
1511 
1512  /*
1513  * neighbour cell got refined but is not on physical boundary
1514  * --> we are at a crse-fine interface
1515  *
1516  * we need now to find out which fine cells
1517  * are required to satisfy the flux matching
1518  * condition
1519  */
1520 
1521  switch ( shift ) {
1522  case -1:
1523  {
1524  // --> interface is on the lower face
1525  int ii = iv[0] << 1; // refinement in x
1526  int jj = iv[1] << 1; // refinement in y
1527 #if AMREX_SPACEDIM == 3
1528  int kk = iv[2] << 1; // refinement in z
1529 #endif
1530  // iterate over all fine cells at the interface
1531  // start with lower cells --> cover coarse neighbour
1532  // cell
1533  fill(map, D_DECL(ii, jj, kk), &begin[0], &end[0], d, iv, shift);
1534  break;
1535  }
1536  case 1:
1537  default:
1538  {
1539  // --> interface is on the upper face
1540  int ii = covered[0] << 1; // refinement in x
1541  int jj = covered[1] << 1; // refinement in y
1542 #if AMREX_SPACEDIM == 3
1543  int kk = covered[2] << 1; // refinement in z
1544 #endif
1545  fill(map, D_DECL(ii, jj, kk), &begin[0], &end[0], d, iv, shift);
1546  break;
1547  }
1548  }
1549  }
1550  }
1551  }
1552 
1553  this->map2vector_m(map, indices, values);
1554 
1555  // iv: not covered coarse cell at crse-fine interface
1556 
1557  mglevel_m[level]->Bfine_p->insertGlobalValues(gidx,
1558  indices.size(),
1559  &values[0],
1560  &indices[0]);
1561 }
1562 
1563 
1564 inline void AmrMultiGrid::buildDensityVector_m(const lo_t& level,
1565  const AmrField_t& rho)
1566 {
1567  this->amrex2trilinos_m(level, 0, rho, mglevel_m[level]->rho_p);
1568 }
1569 
1570 
1571 inline void AmrMultiGrid::buildPotentialVector_m(const lo_t& level,
1572  const AmrField_t& phi)
1573 {
1574  this->amrex2trilinos_m(level, 0, phi, mglevel_m[level]->phi_p);
1575 }
1576 
1577 
1578 void AmrMultiGrid::buildGradientMatrix_m(const lo_t& level,
1579  const go_t& gidx,
1580  const AmrIntVect_t& iv,
1581  const basefab_t& mfab,
1582  const scalar_t* invdx)
1583 {
1584  umap_t map;
1585  indices_t indices;
1586  coefficients_t values;
1587 
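 // helper: add the central-difference contribution of the neighbour cell in direction 'dir';
 // cells at the coarse-fine interface are extrapolated from cells of this level, physical boundary cells are handled by applyBoundary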
1588  auto check = [&](const AmrIntVect_t& iv,
1589  const basefab_t& mfab,
1590  int dir,
1591  scalar_t shift)
1592  {
1593  switch ( mfab(iv) )
1594  {
1595  case AmrMultiGridLevel_t::Mask::INTERIOR:
1596  // interior cells
1597  case AmrMultiGridLevel_t::Mask::COVERED:
1598  // covered --> interior cell
1599  map[mglevel_m[level]->serialize(iv)] -= shift * 0.5 * invdx[dir];
1600  break;
1601  case AmrMultiGridLevel_t::Mask::BNDRY:
1602  {
1603  // interior boundary cells --> only level > 0
1604 #ifndef NDEBUG
1605  if ( level == lbase_m )
1606  throw OpalException("AmrMultiGrid::buildGradientMatrix_m()",
1607  "Error in mask for level "
1608  + std::to_string(level) + "!");
1609 #endif
1610 
1611  scalar_t value = - shift * 0.5 * invdx[dir];
1612 
1613  // use 1st order Lagrange --> only cells of this level required
1614  AmrIntVect_t tmp = iv;
1615  // first fine cell on refined coarse cell (closer to interface)
1616  tmp[dir] -= shift;
1617  map[mglevel_m[level]->serialize(tmp)] += 2.0 * value;
1618 
1619  // second fine cell on refined coarse cell (further away from interface)
1620  tmp[dir] -= shift;
1621  map[mglevel_m[level]->serialize(tmp)] -= value;
1622  break;
1623  }
1624  case AmrMultiGridLevel_t::Mask::PHYSBNDRY:
1625  {
1626  // physical boundary cells
1627 
1628  scalar_t value = - shift * 0.5 * invdx[dir];
1629 
1630  mglevel_m[level]->applyBoundary(iv, dir,
1631  map, value);
1632  break;
1633  }
1634  default:
1635  break;
1636  }
1637  };
1638 
1639  for (int d = 0; d < AMREX_SPACEDIM; ++d) {
1640  for (int shift = -1; shift <= 1; shift += 2) {
1641  AmrIntVect_t niv = iv;
1642  niv[d] += shift;
1643  check(niv, mfab, d, shift);
1644  }
1645 
1646  this->map2vector_m(map, indices, values);
1647 
1648  mglevel_m[level]->G_p[d]->insertGlobalValues(gidx,
1649  indices.size(),
1650  &values[0],
1651  &indices[0]);
1652  }
1653 }
1654 
1655 
1656 void AmrMultiGrid::amrex2trilinos_m(const lo_t& level,
1657  const lo_t& comp,
1658  const AmrField_t& mf,
1659  Teuchos::RCP<vector_t>& mv)
1660 {
1661  if ( mv.is_null() )
1662  mv = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, false) );
1663 
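 // copy each cell of the AMReX MultiFab component into the Tpetra vector at its serialized global index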
1664  for (amrex::MFIter mfi(mf, true); mfi.isValid(); ++mfi) {
1665  const amrex::Box& tbx = mfi.tilebox();
1666  const amrex::FArrayBox& fab = mf[mfi];
1667 
1668  const int* lo = tbx.loVect();
1669  const int* hi = tbx.hiVect();
1670 
1671  for (int i = lo[0]; i <= hi[0]; ++i) {
1672  for (int j = lo[1]; j <= hi[1]; ++j) {
1673 #if AMREX_SPACEDIM == 3
1674  for (int k = lo[2]; k <= hi[2]; ++k) {
1675 #endif
1676  AmrIntVect_t iv(D_DECL(i, j, k));
1677 
1678  go_t gidx = mglevel_m[level]->serialize(iv);
1679 
1680  mv->replaceGlobalValue(gidx, fab(iv, comp));
1681 #if AMREX_SPACEDIM == 3
1682  }
1683 #endif
1684  }
1685  }
1686  }
1687 }
1688 
1689 
1690 void AmrMultiGrid::trilinos2amrex_m(const lo_t& level,
1691  const lo_t& comp,
1692  AmrField_t& mf,
1693  const Teuchos::RCP<vector_t>& mv)
1694 {
1695  Teuchos::ArrayRCP<const amr::scalar_t> data = mv->get1dView();
1696 
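 // copy each value of the Tpetra vector back into the AMReX FArrayBox using the local index of its serialized global index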
1697  for (amrex::MFIter mfi(mf, true); mfi.isValid(); ++mfi) {
1698  const amrex::Box& tbx = mfi.tilebox();
1699  amrex::FArrayBox& fab = mf[mfi];
1700 
1701  const int* lo = tbx.loVect();
1702  const int* hi = tbx.hiVect();
1703 
1704  for (int i = lo[0]; i <= hi[0]; ++i) {
1705  for (int j = lo[1]; j <= hi[1]; ++j) {
1706 #if AMREX_SPACEDIM == 3
1707  for (int k = lo[2]; k <= hi[2]; ++k) {
1708 #endif
1709  AmrIntVect_t iv(D_DECL(i, j, k));
1710 
1711  go_t gidx = mglevel_m[level]->serialize(iv);
1712  lo_t lidx = mglevel_m[level]->map_p->getLocalElement(gidx);
1713 
1714  fab(iv, comp) = data[lidx];
1715  }
1716 #if AMREX_SPACEDIM == 3
1717  }
1718 #endif
1719  }
1720  }
1721 }
1722 
1723 
1724 inline
1725 void AmrMultiGrid::map2vector_m(umap_t& map, indices_t& indices,
1726  coefficients_t& values)
1727 {
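 // flatten the (global index, coefficient) map into the index/value arrays expected by insertGlobalValues; the map is cleared for reuse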
1728  indices.clear();
1729  values.clear();
1730 
1731  indices.reserve(map.size());
1732  values.reserve(map.size());
1733 
1734  std::for_each(map.begin(), map.end(),
1735  [&](const std::pair<const go_t, scalar_t>& entry)
1736  {
1737 #ifndef NDEBUG
1738  if ( entry.first < 0 ) {
1739  throw OpalException("AmrMultiGrid::map2vector_m()",
1740  "Negative matrix index!");
1741  }
1742 #endif
1743 
1744  indices.push_back(entry.first);
1745  values.push_back(entry.second);
1746  }
1747  );
1748 
1749  map.clear();
1750 }
1751 
1752 
1753 void AmrMultiGrid::smooth_m(const lo_t& level,
1754  Teuchos::RCP<vector_t>& e,
1755  Teuchos::RCP<vector_t>& r)
1756 {
1757 #if AMR_MG_TIMER
1758  IpplTimings::startTimer(smoothTimer_m);
1759 #endif
1760 
1761  // base level has no smoother --> l - 1
1762  smoother_m[level-1]->smooth(e, r);
1763 
1764 #if AMR_MG_TIMER
1765  IpplTimings::stopTimer(smoothTimer_m);
1766 #endif
1767 }
1768 
1769 
1770 void AmrMultiGrid::restrict_m(const lo_t& level) {
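 // restrict the no-fine residual of this level to level-1 and add the composite residual of level-1 on its uncovered cells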
1771 
1772 #if AMR_MG_TIMER
1773  IpplTimings::startTimer(restrictTimer_m);
1774 #endif
1775 
1776  Teuchos::RCP<vector_t> tmp = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p) );
1777 
1778  this->residual_no_fine_m(level, tmp,
1779  mglevel_m[level]->error_p,
1780  mglevel_m[level-1]->error_p,
1781  mglevel_m[level]->residual_p);
1782 
1783  mglevel_m[level-1]->residual_p->putScalar(0.0);
1784 
1785  // average down: residual^(l-1) = R^(l) * tmp
1786  mglevel_m[level-1]->R_p->apply(*tmp, *mglevel_m[level-1]->residual_p);
1787 
1788 
1789  // composite matrix, i.e. matrix without covered cells
1790  // r^(l-1) = rho^(l-1) - A * phi^(l-1)
1791 
1792  vector_t fine2crse(mglevel_m[level-1]->Awf_p->getDomainMap(), true);
1793 
1794  // get boundary contribution from the finer level
1795  mglevel_m[level-1]->Bfine_p->apply(*mglevel_m[level]->phi_p, fine2crse);
1796 
1797  // operation: fine2coarse += A * phi
1798  mglevel_m[level-1]->Awf_p->apply(*mglevel_m[level-1]->phi_p,
1799  fine2crse, Teuchos::NO_TRANS,
1800  scalar_t(1.0), scalar_t(1.0));
1801 
1802  if ( mglevel_m[level-1]->Bcrse_p != Teuchos::null ) {
1803  // operation: fine2coarse += B * phi
1804  mglevel_m[level-1]->Bcrse_p->apply(*mglevel_m[level-2]->phi_p,
1805  fine2crse, Teuchos::NO_TRANS,
1806  scalar_t(1.0), scalar_t(1.0));
1807  }
1808 
1809  Teuchos::RCP<vector_t> uncoveredRho = Teuchos::rcp( new vector_t(mglevel_m[level-1]->map_p, true) );
1810 
1811  mglevel_m[level-1]->UnCovered_p->apply(*mglevel_m[level-1]->rho_p, *uncoveredRho);
1812 
1813 
1814  //FIXME tmp2 not needed
1815  Teuchos::RCP<vector_t> tmp2 = Teuchos::rcp( new vector_t(mglevel_m[level-1]->map_p, true) );
1816  mglevel_m[level-1]->UnCovered_p->apply(fine2crse, *tmp2);
1817 
1818  // ONLY subtract coarse rho
1819  mglevel_m[level-1]->residual_p->update(1.0, *uncoveredRho, -1.0, *tmp2, 1.0);
1820 
1821 #if AMR_MG_TIMER
1822  IpplTimings::stopTimer(restrictTimer_m);
1823 #endif
1824 }
1825 
1826 
1827 void AmrMultiGrid::prolongate_m(const lo_t& level) {
1828 #if AMR_MG_TIMER
1829  IpplTimings::startTimer(interpTimer_m);
1830 #endif
1831  // interpolate error from l-1 to l
1832  // operation: e^(l) = 1.0 * e^(l) + 1.0 * I^(l) * e^(l-1)
1833  mglevel_m[level]->I_p->apply(*mglevel_m[level-1]->error_p,
1834  *mglevel_m[level]->error_p,
1835  Teuchos::NO_TRANS,
1836  scalar_t(1.0),
1837  scalar_t(1.0));
1838 #if AMR_MG_TIMER
1839  IpplTimings::stopTimer(interpTimer_m);
1840 #endif
1841 }
1842 
1843 
1844 void AmrMultiGrid::averageDown_m(const lo_t& level) {
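 // overwrite the potential on coarse cells covered by level+1 with the restriction of the fine potential; uncovered cells keep their value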
1845 
1846  if (level == lfine_m )
1847  return;
1848 #if AMR_MG_TIMER
1849  IpplTimings::startTimer(averageTimer_m);
1850 #endif
1851  Teuchos::RCP<vector_t> phicrse = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, false) );
1852 
1853  // operation: phicrse = 0.0 * phicrse + 1.0 * R^(l) * phi^(l+1)
1854  mglevel_m[level]->R_p->apply(*mglevel_m[level+1]->phi_p, *phicrse);
1855 
1856  Teuchos::RCP<vector_t> uncov_phi = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, true) );
1857 
1858  mglevel_m[level]->UnCovered_p->apply(*mglevel_m[level]->phi_p, *uncov_phi);
1859 
1860  mglevel_m[level]->phi_p->update(1.0, *phicrse, 1.0, *uncov_phi, 0.0);
1861 #if AMR_MG_TIMER
1862  IpplTimings::stopTimer(averageTimer_m);
1863 #endif
1864 }
1865 
1866 
1867 void AmrMultiGrid::initInterpolater_m(const Interpolater& interp) {
1868  switch ( interp ) {
1869  case Interpolater::TRILINEAR:
1871  break;
1872  case Interpolater::LAGRANGE:
1873  throw OpalException("AmrMultiGrid::initInterpolater_m()",
1874  "Not yet implemented.");
1875  case Interpolater::PIECEWISE_CONST:
1877  break;
1878  default:
1879  throw OpalException("AmrMultiGrid::initInterpolater_m()",
1880  "No such interpolater available.");
1881  }
1882 }
1883 
1884 
1885 void AmrMultiGrid::initCrseFineInterp_m(const Interpolater& interface) {
1886  switch ( interface ) {
1887  case Interpolater::TRILINEAR:
1889  break;
1890  case Interpolater::LAGRANGE:
1893  break;
1894  case Interpolater::PIECEWISE_CONST:
1896  break;
1897  default:
1898  throw OpalException("AmrMultiGrid::initCrseFineInterp_m()",
1899  "No such interpolater for the coarse-fine interface available.");
1900  }
1901 }
1902 
1903 
1904 void AmrMultiGrid::initBaseSolver_m(const BaseSolver& solver,
1905  const bool& rebalance,
1906  const std::string& reuse)
1907 {
1908  switch ( solver ) {
1909  // Belos solvers
1910  case BaseSolver::BICGSTAB:
1911  solver_mp.reset( new BelosSolver_t("BICGSTAB", prec_mp) );
1912  break;
1913  case BaseSolver::MINRES:
1914  solver_mp.reset( new BelosSolver_t("MINRES", prec_mp) );
1915  break;
1916  case BaseSolver::PCPG:
1917  solver_mp.reset( new BelosSolver_t("PCPG", prec_mp) );
1918  break;
1919  case BaseSolver::CG:
1920  solver_mp.reset( new BelosSolver_t("Pseudoblock CG", prec_mp) );
1921  break;
1922  case BaseSolver::GMRES:
1923  solver_mp.reset( new BelosSolver_t("Pseudoblock GMRES", prec_mp) );
1924  break;
1925  case BaseSolver::STOCHASTIC_CG:
1926  solver_mp.reset( new BelosSolver_t("Stochastic CG", prec_mp) );
1927  break;
1928  case BaseSolver::RECYCLING_CG:
1929  solver_mp.reset( new BelosSolver_t("RCG", prec_mp) );
1930  break;
1931  case BaseSolver::RECYCLING_GMRES:
1932  solver_mp.reset( new BelosSolver_t("GCRODR", prec_mp) );
1933  break;
1934  // Amesos2 solvers
1935 #ifdef HAVE_AMESOS2_KLU2
1936  case BaseSolver::KLU2:
1937  solver_mp.reset( new Amesos2Solver_t("klu2") );
1938  break;
1939 #endif
1940 #ifdef HAVE_AMESOS2_SUPERLU
1941  case BaseSolver::SUPERLU:
1942  solver_mp.reset( new Amesos2Solver_t("superlu") );
1943  break;
1944 #endif
1945 #ifdef HAVE_AMESOS2_UMFPACK
1946  case BaseSolver::UMFPACK:
1947  solver_mp.reset( new Amesos2Solver_t("umfpack") );
1948  break;
1949 #endif
1950 #ifdef HAVE_AMESOS2_PARDISO_MKL
1951  case BaseSolver::PARDISO_MKL:
1952  solver_mp.reset( new Amesos2Solver_t("pardiso_mkl") );
1953  break;
1954 #endif
1955 #ifdef HAVE_AMESOS2_MUMPS
1956  case BaseSolver::MUMPS:
1957  solver_mp.reset( new Amesos2Solver_t("mumps") );
1958  break;
1959 #endif
1960 #ifdef HAVE_AMESOS2_LAPACK
1961  case BaseSolver::LAPACK:
1962  solver_mp.reset( new Amesos2Solver_t("lapack") );
1963  break;
1964 #endif
1965  case BaseSolver::SA:
1966  {
1967  std::string muelu = MueLuSolver_t::convertToMueLuReuseOption(reuse);
1968  solver_mp.reset( new MueLuSolver_t(rebalance, muelu) );
1969  break;
1970  }
1971  default:
1972  throw OpalException("AmrMultiGrid::initBaseSolver_m()",
1973  "No such bottom solver available.");
1974  }
1975 }
1976 
1977 
1978 void AmrMultiGrid::initPrec_m(const Preconditioner& prec,
1979  const std::string& reuse)
1980 {
1981  switch ( prec ) {
1982  case Preconditioner::ILUT:
1984  case Preconditioner::RILUK:
1987  case Preconditioner::GS:
1989  prec_mp.reset( new Ifpack2Preconditioner_t(prec) );
1990  break;
1991  case Preconditioner::SA:
1992  {
1993  std::string muelu = MueLuPreconditioner_t::convertToMueLuReuseOption(reuse);
1994  prec_mp.reset( new MueLuPreconditioner_t(muelu) );
1995  break;
1996  }
1997  case Preconditioner::NONE:
1998  prec_mp.reset();
1999  break;
2000  default:
2001  throw OpalException("AmrMultiGrid::initPrec_m()",
2002  "No such preconditioner available.");
2003  }
2004 }
2005 
2006 
2007 AmrMultiGrid::Boundary
2008 AmrMultiGrid::convertToEnumBoundary_m(const std::string& bc) {
2009  std::map<std::string, Boundary> map;
2010 
2011  map["DIRICHLET"] = Boundary::DIRICHLET;
2012  map["OPEN"] = Boundary::OPEN;
2013  map["PERIODIC"] = Boundary::PERIODIC;
2014 
2015  auto boundary = map.find(bc);
2016 
2017  if ( boundary == map.end() )
2018  throw OpalException("AmrMultiGrid::convertToEnumBoundary_m()",
2019  "No boundary type '" + bc + "'.");
2020  return boundary->second;
2021 }
2022 
2023 AmrMultiGrid::Interpolater
2024 AmrMultiGrid::convertToEnumInterpolater_m(const std::string& interp) {
2025  std::map<std::string, Interpolater> map;
2026 
2027  map["TRILINEAR"] = Interpolater::TRILINEAR;
2028  map["LAGRANGE"] = Interpolater::LAGRANGE;
2029  map["PC"] = Interpolater::PIECEWISE_CONST;
2030 
2031  auto interpolater = map.find(interp);
2032 
2033  if ( interpolater == map.end() )
2034  throw OpalException("AmrMultiGrid::convertToEnumInterpolater_m()",
2035  "No interpolater '" + interp + "'.");
2036  return interpolater->second;
2037 }
2038 
2039 
2040 AmrMultiGrid::BaseSolver
2041 AmrMultiGrid::convertToEnumBaseSolver_m(const std::string& bsolver) {
2042  std::map<std::string, BaseSolver> map;
2043 
2044  map["BICGSTAB"] = BaseSolver::BICGSTAB;
2045  map["MINRES"] = BaseSolver::MINRES;
2046  map["PCPG"] = BaseSolver::PCPG;
2047  map["CG"] = BaseSolver::CG;
2048  map["GMRES"] = BaseSolver::GMRES;
2049  map["STOCHASTIC_CG"] = BaseSolver::STOCHASTIC_CG;
2050  map["RECYCLING_CG"] = BaseSolver::RECYCLING_CG;
2051  map["RECYCLING_GMRES"] = BaseSolver::RECYCLING_GMRES;
2052 #ifdef HAVE_AMESOS2_KLU2
2053  map["KLU2"] = BaseSolver::KLU2;
2054 #endif
2055 #ifdef HAVE_AMESOS2_SUPERLU
2056  map["SUPERLU"] = BaseSolver::SUPERLU;
2057 #endif
2058 #ifdef HAVE_AMESOS2_UMFPACK
2059  map["UMFPACK"] = BaseSolver::UMFPACK;
2060 #endif
2061 #ifdef HAVE_AMESOS2_PARDISO_MKL
2062  map["PARDISO_MKL"] = BaseSolver::PARDISO_MKL;
2063 #endif
2064 #ifdef HAVE_AMESOS2_MUMPS
2065  map["MUMPS"] = BaseSolver::MUMPS;
2066 #endif
2067 #ifdef HAVE_AMESOS2_LAPACK
2068  map["LAPACK"] = BaseSolver::LAPACK;
2069 #endif
2070  map["SA"] = BaseSolver::SA;
2071 
2072  auto solver = map.find(bsolver);
2073 
2074  if ( solver == map.end() )
2075  throw OpalException("AmrMultiGrid::convertToEnumBaseSolver_m()",
2076  "No bottom solver '" + bsolver + "'.");
2077  return solver->second;
2078 }
2079 
2080 
2081 AmrMultiGrid::Preconditioner
2082 AmrMultiGrid::convertToEnumPreconditioner_m(const std::string& prec) {
2083  std::map<std::string, Preconditioner> map;
2084 
2085  map["NONE"] = Preconditioner::NONE;
2086 
2087  Ifpack2Preconditioner_t::fillMap(map);
2088 
2089  MueLuPreconditioner_t::fillMap(map);
2090 
2091  auto precond = map.find(prec);
2092 
2093  if ( precond == map.end() )
2094  throw OpalException("AmrMultiGrid::convertToEnumPreconditioner_m()",
2095  "No preconditioner '" + prec + "'.");
2096  return precond->second;
2097 }
2098 
2099 
2100 AmrMultiGrid::Smoother
2101 AmrMultiGrid::convertToEnumSmoother_m(const std::string& smoother) {
2102  return AmrSmoother::convertToEnumSmoother(smoother);
2103 }
2104 
2105 
2106 AmrMultiGrid::Norm
2107 AmrMultiGrid::convertToEnumNorm_m(const std::string& norm) {
2108  std::map<std::string, Norm> map;
2109 
2110  map["L1_NORM"] = Norm::L1;
2111  map["L2_NORM"] = Norm::L2;
2112  map["LINF_NORM"] = Norm::LINF;
2113 
2114  auto n = map.find(norm);
2115 
2116  if ( n == map.end() )
2117  throw OpalException("AmrMultiGrid::convertToEnumNorm_m()",
2118  "No norm '" + norm + "'.");
2119  return n->second;
2120 }
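
A minimal usage sketch of the conversion helpers above (illustrative only; the option
strings are exactly the keys registered in the maps, and any unknown string raises an
OpalException naming the offending value):

    // inside AmrMultiGrid, e.g. while processing input options
    Boundary   b = this->convertToEnumBoundary_m("PERIODIC");    // -> Boundary::PERIODIC
    BaseSolver s = this->convertToEnumBaseSolver_m("GMRES");     // -> BaseSolver::GMRES
    Norm       n = this->convertToEnumNorm_m("LINF_NORM");       // -> Norm::LINF
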
2121 
2122 
2123 void AmrMultiGrid::writeSDDSHeader_m(std::ofstream& outfile) {
2124  OPALTimer::Timer simtimer;
2125 
2126  std::string dateStr(simtimer.date());
2127  std::string timeStr(simtimer.time());
2128  std::string indent(" ");
2129 
2130  outfile << "SDDS1" << std::endl;
2131  outfile << "&description\n"
2132  << indent << "text=\"Solver statistics '" << OpalData::getInstance()->getInputFn()
2133  << "' " << dateStr << " " << timeStr << "\",\n"
2134  << indent << "contents=\"solver info\"\n"
2135  << "&end\n";
2136  outfile << "&parameter\n"
2137  << indent << "name=processors,\n"
2138  << indent << "type=long,\n"
2139  << indent << "description=\"Number of Cores used\"\n"
2140  << "&end\n";
2141  outfile << "&parameter\n"
2142  << indent << "name=revision,\n"
2143  << indent << "type=string,\n"
2144  << indent << "description=\"git revision of opal\"\n"
2145  << "&end\n";
2146  outfile << "&parameter\n"
2147  << indent << "name=flavor,\n"
2148  << indent << "type=string,\n"
2149  << indent << "description=\"OPAL flavor that wrote file\"\n"
2150  << "&end\n";
2151  outfile << "&column\n"
2152  << indent << "name=t,\n"
2153  << indent << "type=double,\n"
2154  << indent << "units=ns,\n"
2155  << indent << "description=\"1 Time\"\n"
2156  << "&end\n";
2157  outfile << "&column\n"
2158  << indent << "name=mg_iter,\n"
2159  << indent << "type=long,\n"
2160  << indent << "units=1,\n"
2161  << indent << "description=\"2 Number of Multigrid Iterations\"\n"
2162  << "&end\n";
2163  outfile << "&column\n"
2164  << indent << "name=bottom_iter,\n"
2165  << indent << "type=long,\n"
2166  << indent << "units=1,\n"
2167  << indent << "description=\"3 Total Number of Bottom Solver Iterations\"\n"
2168  << "&end\n";
2169  outfile << "&column\n"
2170  << indent << "name=regrid,\n"
2171  << indent << "type=bool,\n"
2172  << indent << "units=1,\n"
2173  << indent << "description=\"4 Regrid Step\"\n"
2174  << "&end\n";
2175  outfile << "&column\n"
2176  << indent << "name=" + snorm_m + ",\n"
2177  << indent << "type=double,\n"
2178  << indent << "units=1,\n"
2179  << indent << "description=\"5 Error\"\n"
2180  << "&end\n"
2181  << "&data\n"
2182  << indent << "mode=ascii,\n"
2183  << indent << "no_row_counts=1\n"
2184  << "&end\n"
2185  << comm_mp->getSize() << '\n'
2186  << OPAL_PROJECT_NAME << " " << OPAL_PROJECT_VERSION << " git rev. #" << Util::getGitRevision() << '\n'
2187  << (OpalData::getInstance()->isInOPALTMode()? "opal-t":
2188  (OpalData::getInstance()->isInOPALCyclMode()? "opal-cycl": "opal-map")) << std::endl;
2189 }
2190 
2191 
2192 void AmrMultiGrid::writeSDDSData_m(const scalar_t& error) {
2193 #if AMR_MG_TIMER
2194  IpplTimings::startTimer(dumpTimer_m);
2195 #endif
2196  unsigned int pwi = 10;
2197 
2198  std::ofstream outfile;
2199 
2200  if ( comm_mp->getRank() == 0 ) {
2201  outfile.open(fname_m.c_str(), flag_m);
2202  outfile.precision(15);
2203  outfile.setf(std::ios::scientific, std::ios::floatfield);
2204 
2205  if ( flag_m == std::ios::out ) {
2206  flag_m = std::ios::app;
2207  writeSDDSHeader_m(outfile);
2208  }
2209 
2210  outfile << itsAmrObject_mp->getT() * 1e9 << std::setw(pwi) << '\t' // 1
2211  << this->nIter_m << std::setw(pwi) << '\t' // 2
2212  << this->bIter_m << std::setw(pwi) << '\t' // 3
2213  << this->regrid_m << std::setw(pwi) << '\t' // 4
2214  << error << '\n'; // 5
2215  }
2216 #if AMR_MG_TIMER
2217  IpplTimings::stopTimer(dumpTimer_m);
2218 #endif
2219 }
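
For orientation, writeSDDSHeader_m() and writeSDDSData_m() together emit an ASCII SDDS
file of roughly the following shape; all values below are illustrative, the error column
is named after the chosen norm (snorm_m), and one data row is appended per solve:

    SDDS1
    &description text="Solver statistics '<input file>' <date> <time>", contents="solver info" &end
    &parameter name=processors, type=long, ... &end
    &parameter name=revision, type=string, ... &end
    &parameter name=flavor, type=string, ... &end
    &column name=t, type=double, units=ns, ... &end
    &column name=mg_iter, type=long, ... &end
    &column name=bottom_iter, type=long, ... &end
    &column name=regrid, type=bool, ... &end
    &column name=LINF_NORM, type=double, ... &end
    &data mode=ascii, no_row_counts=1 &end
    16
    OPAL 2021.1.99 git rev. #<hash>
    opal-t
    1.250000000000000e+00    1    42    0    7.4e-11
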
2220 
2221 
2222 #if AMR_MG_TIMER
2223 void AmrMultiGrid::initTimer_m() {
2224  buildTimer_m = IpplTimings::getTimer("AMR MG matrix setup");
2225  restrictTimer_m = IpplTimings::getTimer("AMR MG restrict");
2226  smoothTimer_m = IpplTimings::getTimer("AMR MG smooth");
2227  interpTimer_m = IpplTimings::getTimer("AMR MG prolongate");
2228  efieldTimer_m = IpplTimings::getTimer("AMR MG e-field");
2229  averageTimer_m = IpplTimings::getTimer("AMR MG average down");
2230  bottomTimer_m = IpplTimings::getTimer("AMR MG bottom-solver");
2231  dumpTimer_m = IpplTimings::getTimer("AMR MG dump");
2232 }
2233 #endif
2234 
2235 
2236 double AmrMultiGrid::getXRangeMin(unsigned short level) {
2237  return itsAmrObject_mp->Geom(level).ProbLo(0);
2238 }
2239 
2240 
2241 double AmrMultiGrid::getXRangeMax(unsigned short level) {
2242  return itsAmrObject_mp->Geom(level).ProbHi(0);
2243 }
2244 
2245 
2246 double AmrMultiGrid::getYRangeMin(unsigned short level) {
2247  return itsAmrObject_mp->Geom(level).ProbLo(1);
2248 }
2249 
2250 
2251 double AmrMultiGrid::getYRangeMax(unsigned short level) {
2252  return itsAmrObject_mp->Geom(level).ProbHi(1);
2253 }
2254 
2255 
2256 double AmrMultiGrid::getZRangeMin(unsigned short level) {
2257  return itsAmrObject_mp->Geom(level).ProbLo(2);
2258 }
2259 
2260 
2261 double AmrMultiGrid::getZRangeMax(unsigned short level) {
2262  return itsAmrObject_mp->Geom(level).ProbHi(2);
2263 }
2264 
2265 
2266 Inform& AmrMultiGrid::print(Inform& os) const {
2267  os << "* ********************* A M R M u l t i G r i d ********************** " << endl
2268  //FIXME
2269  << "* ******************************************************************** " << endl;
2270  return os;
2271 }
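
A short driver sketch built only from the public interface defined in this file
(illustrative; the field containers, the solver instance and finestLevel are assumed to
be provided by the surrounding AmrBoxLib machinery, and the numerical values are
assumptions, not defaults taken from the source):

    AmrScalarFieldContainer_t rho, phi;   // charge density and potential, one field per level
    AmrVectorFieldContainer_t efield;     // electric field, one field per level

    solver.setTolerance(1.0e-10);         // assumed convergence tolerance
    solver.setNumberOfSweeps(12);         // assumed number of smoothing sweeps
    solver.setMaxNumberOfIterations(100); // assumed iteration cap
    solver.setVerbose(true);              // write the *.solver SDDS statistics file
    solver.solve(rho, phi, efield, 0, finestLevel, false);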