OPAL (Object Oriented Parallel Accelerator Library)  2.2.0
AmrMultiGrid.cpp
1 #include "AmrMultiGrid.h"
2 
3 #include <algorithm>
4 #include <functional>
5 #include <map>
6 #include <numeric>
7 
8 #include "OPALconfig.h"
9 #include "AbstractObjects/OpalData.h"
10 #include "Utilities/OpalException.h"
11 #include "Utilities/Timer.h"
12 #include "Utilities/Util.h"
13 #include <AMReX_ParallelDescriptor.H>
14 
15 #include <boost/filesystem.hpp>
16 
17 #if AMR_MG_WRITE
18  #include <iomanip>
19 #endif
20 
21 AmrMultiGrid::AmrMultiGrid(AmrBoxLib* itsAmrObject_p,
22  const std::string& bsolver,
23  const std::string& prec,
24  const bool& rebalance,
25  const std::string& reuse,
26  const std::string& bcx,
27  const std::string& bcy,
28  const std::string& bcz,
29  const std::string& smoother,
30  const std::size_t& nSweeps,
31  const std::string& interp,
32  const std::string& norm)
33  : AmrPoissonSolver<AmrBoxLib>(itsAmrObject_p)
34  , comm_mp( new comm_t( amrex::ParallelDescriptor::Communicator() ) )
35  , nIter_m(0)
36  , bIter_m(0)
37  , maxiter_m(100)
38  , nSweeps_m(nSweeps)
39  , mglevel_m(0)
40  , lbase_m(0)
41  , lfine_m(0)
42  , nlevel_m(1)
43  , nBcPoints_m(0)
44  , eps_m(1.0e-10)
45  , verbose_m(false)
46  , fname_m(OpalData::getInstance()->getInputBasename() + std::string(".solver"))
47  , flag_m(std::ios::out)
48 {
49  node_mp = KokkosClassic::Details::getNode<amr::node_t>(); //KokkosClassic::DefaultNode::getDefaultNode();
50 
51 #if AMR_MG_TIMER
52  this->initTimer_m();
53 #endif
54 
55  const Boundary bcs[AMREX_SPACEDIM] = {
56  this->convertToEnumBoundary_m(bcx),
57  this->convertToEnumBoundary_m(bcy),
58  this->convertToEnumBoundary_m(bcz)
59  };
60 
61  this->initPhysicalBoundary_m(&bcs[0]);
62 
63  smootherType_m = this->convertToEnumSmoother_m(smoother);
64 
65  norm_m = this->convertToEnumNorm_m(norm);
66 
67  const Interpolater interpolater = this->convertToEnumInterpolater_m(interp);
68  this->initInterpolater_m(interpolater);
69 
70  // interpolater for crse-fine-interface
71  this->initCrseFineInterp_m(Interpolater::LAGRANGE);
72 
73  // preconditioner
74  const Preconditioner precond = this->convertToEnumPreconditioner_m(prec);
75  this->initPrec_m(precond, rebalance, reuse);
76 
77  // base level solver
78  const BaseSolver solver = this->convertToEnumBaseSolver_m(bsolver);
79  this->initBaseSolver_m(solver, rebalance, reuse);
80 
81  if (boost::filesystem::exists(fname_m)) {
82  flag_m = std::ios::app;
83  INFOMSG("Appending solver information to existing file: " << fname_m << endl);
84  } else {
85  INFOMSG("Creating new file for solver information: " << fname_m << endl);
86  }
87 }
88 
89 
90 void AmrMultiGrid::solve(AmrScalarFieldContainer_t& rho,
91  AmrScalarFieldContainer_t& phi,
92  AmrVectorFieldContainer_t& efield,
93  unsigned short baseLevel,
94  unsigned short finestLevel,
95  bool prevAsGuess)
96 {
97  lbase_m = baseLevel;
98  lfine_m = finestLevel;
99  nlevel_m = lfine_m - lbase_m + 1;
100 
101  /* we cannot use the previous solution
102  * if we have to regrid (AmrPoissonSolver::hasToRegrid())
103  *
104  * regrid_m is set in AmrBoxlib::regrid()
105  */
106  bool reset = !prevAsGuess;
107 
108  if ( this->regrid_m )
109  reset = true;
110 
111  this->initLevels_m(rho, itsAmrObject_mp->Geom(), this->regrid_m);
112 
113  // build all necessary matrices and vectors
114  this->setup_m(rho, phi, this->regrid_m);
115 
116  this->initGuess_m(reset);
117 
118  // actual solve
119  scalar_t error = this->iterate_m();
120 
121  for (int lev = nlevel_m - 1; lev > -1; --lev) {
122  averageDown_m(lev);
123  }
124 
125  // write efield to AMReX
126  this->computeEfield_m(efield);
127 
128  // copy solution back
129  for (int lev = 0; lev < nlevel_m; ++lev) {
130  int ilev = lbase_m + lev;
131 
132  phi[ilev]->setVal(0.0, phi[ilev]->nGrow());
133 
134  this->trilinos2amrex_m(lev, 0, *phi[ilev], mglevel_m[lev]->phi_p);
135  }
136 
137  if ( verbose_m )
138  this->writeSDDSData_m(error);
139 
140  // we can now reset
141  this->regrid_m = false;
142 }
143 
144 
145 void AmrMultiGrid::setNumberOfSweeps(const std::size_t& nSweeps) {
146  if ( nSweeps < 0 )
147  throw OpalException("AmrMultiGrid::setNumberOfSweeps()",
148  "The number of smoothing sweeps needs to be non-negative!");
149 
150  nSweeps_m = nSweeps;
151 }
152 
153 
154 void AmrMultiGrid::setMaxNumberOfIterations(const std::size_t& maxiter) {
155  if ( maxiter < 1 )
156  throw OpalException("AmrMultiGrid::setMaxNumberOfIterations()",
157  "The max. number of iterations needs to be positive!");
158 
159  maxiter_m = maxiter;
160 }
161 
162 
163 std::size_t AmrMultiGrid::getNumIters() {
164  return nIter_m;
165 }
166 
167 
168 AmrMultiGrid::scalar_t AmrMultiGrid::getLevelResidualNorm(lo_t level) {
169  return evalNorm_m(mglevel_m[level]->residual_p);
170 }
171 
172 
173 void AmrMultiGrid::setVerbose(bool verbose) {
174  verbose_m = verbose;
175 }
176 
177 
178 void AmrMultiGrid::initPhysicalBoundary_m(const Boundary* bc)
179 {
180  // make sure it's reset
181  nBcPoints_m = 0;
182 
183  for (unsigned int i = 0; i < AMREX_SPACEDIM; ++i) {
184  switch ( bc[i] ) {
185  case Boundary::DIRICHLET:
186  bc_m[i].reset( new AmrDirichletBoundary<AmrMultiGridLevel_t>() );
187  break;
188  case Boundary::OPEN:
189  bc_m[i].reset( new AmrOpenBoundary<AmrMultiGridLevel_t>() );
190  break;
191  case Boundary::PERIODIC:
192  bc_m[i].reset( new AmrPeriodicBoundary<AmrMultiGridLevel_t>() );
193  break;
194  default:
195  throw OpalException("AmrMultiGrid::initPhysicalBoundary_m()",
196  "This type of boundary is not supported");
197  }
198  // we use the maximum in order to build matrices
199  go_t tmp = bc_m[i]->getNumberOfPoints();
200  if ( nBcPoints_m < tmp )
201  nBcPoints_m = tmp;
202  }
203 }
204 
205 
206 void AmrMultiGrid::initLevels_m(const amrex::Vector<AmrField_u>& rho,
207  const amrex::Vector<AmrGeometry_t>& geom,
208  bool regrid)
209 {
210  if ( !regrid )
211  return;
212 
213  // although we do a resize afterwards, we do this to be safe
214  for (int lev = nlevel_m; lev < (int)mglevel_m.size(); ++lev) {
215  mglevel_m[lev].reset(nullptr);
216  }
217 
218  mglevel_m.resize(nlevel_m);
219 
220  amrex::Periodicity period(AmrIntVect_t(D_DECL(0, 0, 0)));
221 
222  AmrIntVect_t rr = AmrIntVect_t(D_DECL(2, 2, 2));
223 
224  for (int lev = 0; lev < nlevel_m; ++lev) {
225  int ilev = lbase_m + lev;
226 
227  // do not initialize base level every time
228  if (mglevel_m[lev] == nullptr || lev > lbase_m) {
230  rho[ilev]->boxArray(),
231  rho[ilev]->DistributionMap(),
232  geom[ilev],
233  rr,
234  bc_m,
235  comm_mp,
236  node_mp));
237  } else {
238  mglevel_m[lev]->buildLevelMask();
239  }
240 
241  mglevel_m[lev]->refmask.reset(
242  new AmrMultiGridLevel_t::mask_t(mglevel_m[lev]->grids,
243  mglevel_m[lev]->dmap, 1, 2)
244  );
245  mglevel_m[lev]->refmask->setVal(AmrMultiGridLevel_t::Refined::NO, 2);
246  mglevel_m[lev]->refmask->FillBoundary(period);
247 
248  amrex::BoxArray ba = mglevel_m[lev]->grids;
249  ba.coarsen(rr);
250  mglevel_m[lev]->crsemask.reset(
251  new AmrMultiGridLevel_t::mask_t(ba,
252  mglevel_m[lev]->dmap, 1, 2)
253  );
254  mglevel_m[lev]->crsemask->setVal(AmrMultiGridLevel_t::Refined::NO, 2);
255  }
256 
257  for (int lev = 1; lev < nlevel_m; ++lev) {
258  mglevel_m[lev]->crsemask->setVal(AmrMultiGridLevel_t::Refined::YES, 0);
259 
260  // used for boundary interpolation --> replaces expensive calls to isBoundary
261  mglevel_m[lev]->crsemask->setDomainBndry(AmrMultiGridLevel_t::Mask::PHYSBNDRY,
262  mglevel_m[lev-1]->geom); //FIXME: geometry of lev - 1
263  // really needed ?
264  mglevel_m[lev]->crsemask->FillBoundary(period);
265  }
266 
267  /* to complete initialization we need to fill
268  * the mask of refinement
269  */
270  for (int lev = 0; lev < nlevel_m-1; ++lev) {
271  // get boxarray with refined cells
272  amrex::BoxArray ba = mglevel_m[lev]->grids;
273  ba.refine(rr);
274  ba = amrex::intersect(mglevel_m[lev+1]->grids, ba);
275  ba.coarsen(rr);
276 
277  // refined cells
278  amrex::DistributionMapping dmap(ba, comm_mp->getSize());
279  AmrMultiGridLevel_t::mask_t refined(ba, dmap, 1, 0);
280  refined.setVal(AmrMultiGridLevel_t::Refined::YES);
281 // refined.setDomainBndry(AmrMultiGridLevel_t::Mask::PHYSBNDRY, mglevel_m[lev]->geom);
282 
283  // fill intersection with YES
284  mglevel_m[lev]->refmask->copy(refined, 0, 0, 1, 0, 2);
285 
286  /* physical boundary cells will never be refined cells
287  * since they are ghost cells
288  */
289  mglevel_m[lev]->refmask->setDomainBndry(AmrMultiGridLevel_t::Mask::PHYSBNDRY,
290  mglevel_m[lev]->geom);
291 
292  mglevel_m[lev]->refmask->FillBoundary(period);
293  }
294 }
295 
296 
297 void AmrMultiGrid::clearMasks_m() {
298  for (int lev = 0; lev < nlevel_m; ++lev) {
299  mglevel_m[lev]->refmask.reset(nullptr);
300  mglevel_m[lev]->crsemask.reset(nullptr);
301  mglevel_m[lev]->mask.reset(nullptr);
302  }
303 }
304 
305 
306 void AmrMultiGrid::initGuess_m(bool reset) {
307  if ( !reset )
308  return;
309 
310  // reset
311  for (int lev = 0; lev < nlevel_m; ++lev)
312  mglevel_m[lev]->phi_p->putScalar(0.0);
313 }
314 
315 
316 AmrMultiGrid::scalar_t AmrMultiGrid::iterate_m() {
317 
318  // initial error
319  std::vector<scalar_t> rhsNorms;
320  std::vector<scalar_t> resNorms;
321 
322  this->initResidual_m(rhsNorms, resNorms);
323 
324  std::for_each(rhsNorms.begin(), rhsNorms.end(),
325  [this](double& val){ val *= eps_m; });
326 
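 // Convergence test used below: after the scaling above, rhsNorms[lev] holds
 // eps_m * ||rho^(lev)||, so isConverged_m() requires
 //     ||r^(lev)|| < eps_m * ||rho^(lev)||
 // on every level before the iteration stops (or nIter_m reaches maxiter_m).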
327  nIter_m = 0;
328  bIter_m = 0;
329 
330  while ( !isConverged_m(rhsNorms, resNorms) && nIter_m < maxiter_m ) {
331 
332  relax_m(lfine_m);
333 
334 // /* in contrast to algorithm, we average down now
335 // * --> potential is valid also on coarse covered
336 // * cells
337 // * --> however, it may take 1-2 iterations longer
338 // */
339 // for (int lev = nlevel_m - 1; lev > -1; --lev) {
340 // averageDown_m(lev);
341 // }
342 
343  // update residual
344  for (int lev = 0; lev < nlevel_m; ++lev) {
345 
346  this->residual_m(lev,
347  mglevel_m[lev]->residual_p,
348  mglevel_m[lev]->rho_p,
349  mglevel_m[lev]->phi_p);
350  }
351 
352 
353  for (lo_t lev = 0; lev < nlevel_m; ++lev)
354  resNorms[lev] = getLevelResidualNorm(lev);
355 
356  ++nIter_m;
357 
358 #if AMR_MG_WRITE
359  this->writeResidualNorm_m();
360 #endif
361 
362  bIter_m += solver_mp->getNumIters();
363  }
364 
365  return std::accumulate(resNorms.begin(),
366  resNorms.end(), 0.0,
367  std::plus<scalar_t>());
368 }
369 
370 
371 bool AmrMultiGrid::isConverged_m(std::vector<scalar_t>& rhsNorms,
372  std::vector<scalar_t>& resNorms)
373 {
374  return std::equal(resNorms.begin(), resNorms.end(),
375  rhsNorms.begin(),
376  std::less<scalar_t>());
377 }
378 
379 
380 void AmrMultiGrid::residual_m(const lo_t& level,
381  Teuchos::RCP<vector_t>& r,
382  const Teuchos::RCP<vector_t>& b,
383  const Teuchos::RCP<vector_t>& x)
384 {
385  /*
386  * r = b - A*x
387  */
388  if ( level < lfine_m ) {
389 
390  vector_t fine2crse(mglevel_m[level]->Awf_p->getDomainMap(), true);
391 
392  // get boundary contribution from the finer level (Bfine applied to phi^(l+1))
393  if ( mglevel_m[level]->Bfine_p != Teuchos::null ) {
394  mglevel_m[level]->Bfine_p->apply(*mglevel_m[level+1]->phi_p, fine2crse);
395  }
396 
397  // operation: fine2crse += A * x
398  mglevel_m[level]->Awf_p->apply(*x, fine2crse,
399  Teuchos::NO_TRANS,
400  scalar_t(1.0),
401  scalar_t(1.0));
402 
403  if ( mglevel_m[level]->Bcrse_p != Teuchos::null ) {
404  // operation: fine2crse += B * phi^(l-1)
405  mglevel_m[level]->Bcrse_p->apply(*mglevel_m[level-1]->phi_p,
406  fine2crse,
407  Teuchos::NO_TRANS,
408  scalar_t(1.0),
409  scalar_t(1.0));
410  }
411 
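 /* UnCovered_p acts as a diagonal 0/1 mask: the composite residual is only
  * formed on coarse cells that are not covered by a finer level, so covered
  * cells do not contribute to r (and hence not to the convergence norms).
  */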
412  Teuchos::RCP<vector_t> uncov_Ax = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, true) );
413  mglevel_m[level]->UnCovered_p->apply(fine2crse, *uncov_Ax);
414 
415  Teuchos::RCP<vector_t> uncov_b = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, true) );
416 
417  mglevel_m[level]->UnCovered_p->apply(*b, *uncov_b);
418 
419  // ONLY subtract coarse rho
420 // mglevel_m[level]->residual_p->putScalar(0.0);
421 
422  r->update(1.0, *uncov_b, -1.0, *uncov_Ax, 0.0);
423 
424  } else {
425  /* finest level: Awf_p == Anf_p
426  */
427  Teuchos::RCP<vector_t> Ax = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, true) );
428  mglevel_m[level]->Anf_p->apply(*x, *Ax);
429 
430  if ( mglevel_m[level]->Bcrse_p != Teuchos::null ) {
431  // operation: Ax += B * phi^(l-1)
432  mglevel_m[level]->Bcrse_p->apply(*mglevel_m[level-1]->phi_p,
433  *Ax,
434  Teuchos::NO_TRANS,
435  scalar_t(1.0),
436  scalar_t(1.0));
437  }
438  r->update(1.0, *b, -1.0, *Ax, 0.0);
439  }
440 }
441 
442 
443 void AmrMultiGrid::relax_m(const lo_t& level) {
444 
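 /* Recursive V-cycle over the AMR hierarchy (one call per iteration of
  * iterate_m()): pre-smooth the error on this level, restrict the residual to
  * level-1, recurse, prolongate the coarse-grid error back, post-smooth, and
  * solve directly with the bottom solver on the base level (relative level 0).
  */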
445  if ( level == lfine_m ) {
446 
447  if ( level == lbase_m ) {
448  /* Anf_p == Awf_p
449  */
450  Teuchos::RCP<vector_t> Ax = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, true) );
451  mglevel_m[level]->Anf_p->apply(*mglevel_m[level]->phi_p, *Ax);
452  mglevel_m[level]->residual_p->update(1.0, *mglevel_m[level]->rho_p, -1.0, *Ax, 0.0);
453 
454  } else {
455  this->residual_no_fine_m(level,
456  mglevel_m[level]->residual_p,
457  mglevel_m[level]->phi_p,
458  mglevel_m[level-1]->phi_p,
459  mglevel_m[level]->rho_p);
460  }
461  }
462 
463  if ( level > 0 ) {
464  // phi^(l, save) = phi^(l)
465  Teuchos::RCP<vector_t> phi_save = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p) );
466  Tpetra::deep_copy(*phi_save, *mglevel_m[level]->phi_p);
467 
468  mglevel_m[level-1]->error_p->putScalar(0.0);
469 
470  // smoothing
471  this->smooth_m(level,
472  mglevel_m[level]->error_p,
473  mglevel_m[level]->residual_p);
474 
475 
476  // phi = phi + e
477  mglevel_m[level]->phi_p->update(1.0, *mglevel_m[level]->error_p, 1.0);
478 
479  /*
480  * restrict
481  */
482  this->restrict_m(level);
483 
484  this->relax_m(level - 1);
485 
486  /*
487  * prolongate / interpolate
488  */
489  this->prolongate_m(level);
490 
491  // residual update
492  Teuchos::RCP<vector_t> tmp = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p) );
493  this->residual_no_fine_m(level, tmp,
494  mglevel_m[level]->error_p,
495  mglevel_m[level-1]->error_p,
496  mglevel_m[level]->residual_p);
497 
498  Tpetra::deep_copy(*mglevel_m[level]->residual_p, *tmp);
499 
500  // delta error
501  Teuchos::RCP<vector_t> derror = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, true) );
502 
503  // smoothing
504  this->smooth_m(level, derror, mglevel_m[level]->residual_p);
505 
506  // e^(l) += de^(l)
507  mglevel_m[level]->error_p->update(1.0, *derror, 1.0);
508 
509  // phi^(l) = phi^(l, save) + e^(l)
510  mglevel_m[level]->phi_p->update(1.0, *phi_save, 1.0, *mglevel_m[level]->error_p, 0.0);
511 
512  } else {
513  // e = A^(-1)r
514 #if AMR_MG_TIMER
515  IpplTimings::startTimer(bottomTimer_m);
516 #endif
517 
518  solver_mp->solve(mglevel_m[level]->error_p,
519  mglevel_m[level]->residual_p);
520 
521 #if AMR_MG_TIMER
522  IpplTimings::stopTimer(bottomTimer_m);
523 #endif
524  // phi = phi + e
525  mglevel_m[level]->phi_p->update(1.0, *mglevel_m[level]->error_p, 1.0);
526  }
527 }
528 
529 
530 void AmrMultiGrid::residual_no_fine_m(const lo_t& level,
531  Teuchos::RCP<vector_t>& result,
532  const Teuchos::RCP<vector_t>& rhs,
533  const Teuchos::RCP<vector_t>& crs_rhs,
534  const Teuchos::RCP<vector_t>& b)
535 {
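 // Residual without any contribution from a finer level:
 //     result = b - ( Anf^(l) * rhs + Bcrse^(l) * crs_rhs )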
536  vector_t crse2fine(mglevel_m[level]->Anf_p->getDomainMap(), true);
537 
538  // get boundary contribution from the coarser level (Bcrse applied to crs_rhs)
539  if ( mglevel_m[level]->Bcrse_p != Teuchos::null ) {
540  mglevel_m[level]->Bcrse_p->apply(*crs_rhs, crse2fine);
541  }
542 
543  // operation: crse2fine = 1.0 * crse2fine + 1.0 * A^(l) * rhs
544  mglevel_m[level]->Anf_p->apply(*rhs, crse2fine,
545  Teuchos::NO_TRANS,
546  scalar_t(1.0),
547  scalar_t(1.0));
548 
549  result->update(1.0, *b, -1.0, crse2fine, 0.0);
550 }
551 
552 
553 #if AMR_MG_WRITE
554 void AmrMultiGrid::writeResidualNorm_m() {
555  scalar_t err = 0.0;
556 
557  std::ofstream out;
558  if ( comm_mp->getRank() == 0 )
559  out.open("residual.dat", std::ios::app);
560 
561  for (int lev = 0; lev < nlevel_m; ++lev) {
562  scalar_t tmp = evalNorm_m(mglevel_m[lev]->residual_p);
563 
564  if ( comm_mp->getRank() == 0 )
565  out << std::setw(15) << std::right << tmp;
566  }
567 
568  if ( comm_mp->getRank() == 0 )
569  out.close();
570 }
571 #endif
572 
573 
574 typename AmrMultiGrid::scalar_t
575 AmrMultiGrid::evalNorm_m(const Teuchos::RCP<const vector_t>& x)
576 {
577  scalar_t norm = 0.0;
578 
579  switch ( norm_m ) {
580  case Norm::L1:
581  {
582  norm = x->norm1();
583  break;
584  }
585  case Norm::L2:
586  {
587  norm = x->norm2();
588  break;
589  }
590  case Norm::LINF:
591  {
592  norm = x->normInf();
593  break;
594  }
595  default:
596  throw OpalException("AmrMultiGrid::evalNorm_m()",
597  "This type of norm is not supported.");
598  }
599  return norm;
600 }
601 
602 
603 void AmrMultiGrid::initResidual_m(std::vector<scalar_t>& rhsNorms,
604  std::vector<scalar_t>& resNorms)
605 {
606  rhsNorms.clear();
607  resNorms.clear();
608 
609 #if AMR_MG_WRITE
610  std::ofstream out;
611 
612  if ( comm_mp->getRank() == 0) {
613  out.open("residual.dat", std::ios::out);
614 
615  for (int lev = 0; lev < nlevel_m; ++lev)
616  out << std::setw(14) << std::right << "level" << lev;
617  out << std::endl;
618  }
619 #endif
620 
621  for (int lev = 0; lev < nlevel_m; ++lev) {
622  this->residual_m(lev,
623  mglevel_m[lev]->residual_p,
624  mglevel_m[lev]->rho_p,
625  mglevel_m[lev]->phi_p);
626 
627  resNorms.push_back(evalNorm_m(mglevel_m[lev]->residual_p));
628 
629 #if AMR_MG_WRITE
630  if ( comm_mp->getRank() == 0 )
631  out << std::setw(15) << std::right << resNorms.back();
632 #endif
633 
634  rhsNorms.push_back(evalNorm_m(mglevel_m[lev]->rho_p));
635  }
636 
637 #if AMR_MG_WRITE
638  if ( comm_mp->getRank() == 0 )
639  out.close();
640 #endif
641 }
642 
643 
644 void AmrMultiGrid::computeEfield_m(AmrVectorFieldContainer_t& efield) {
645 #if AMR_MG_TIMER
646  IpplTimings::startTimer(efieldTimer_m);
647 #endif
648  Teuchos::RCP<vector_t> efield_p = Teuchos::null;
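 // For each level (finest to coarsest) apply the discrete gradient matrices
 // G_p[d] to the potential and copy the resulting component back into the
 // AMReX MultiFab efield[ilev][d].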
649  for (int lev = nlevel_m - 1; lev > -1; --lev) {
650  int ilev = lbase_m + lev;
651 
652  efield_p = Teuchos::rcp( new vector_t(mglevel_m[lev]->map_p, false) );
653 
654 
655  for (int d = 0; d < AMREX_SPACEDIM; ++d) {
656  efield[ilev][d]->setVal(0.0, efield[ilev][d]->nGrow());
657  efield_p->putScalar(0.0);
658  mglevel_m[lev]->G_p[d]->apply(*mglevel_m[lev]->phi_p, *efield_p);
659  this->trilinos2amrex_m(lev, 0, *efield[ilev][d], efield_p);
660  }
661  }
662 #if AMR_MG_TIMER
663  IpplTimings::stopTimer(efieldTimer_m);
664 #endif
665 }
666 
667 
668 void AmrMultiGrid::setup_m(const amrex::Vector<AmrField_u>& rho,
669  const amrex::Vector<AmrField_u>& phi,
670  const bool& matrices)
671 {
672 #if AMR_MG_TIMER
673  IpplTimings::startTimer(buildTimer_m);
674 #endif
675 
676  if ( lbase_m == lfine_m )
677  this->buildSingleLevel_m(rho, phi, matrices);
678  else
679  this->buildMultiLevel_m(rho, phi, matrices);
680 
681  mglevel_m[lfine_m]->error_p->putScalar(0.0);
682 
683  if ( matrices ) {
684  this->clearMasks_m();
685  // set the bottom solve operator
686  if (!solver_mp->hasOperator()) {
687  solver_mp->setOperator(mglevel_m[lbase_m]->Anf_p, mglevel_m[0].get());
688  }
689  }
690 
691 #if AMR_MG_TIMER
692  IpplTimings::stopTimer(buildTimer_m);
693 #endif
694 }
695 
696 
697 void AmrMultiGrid::buildSingleLevel_m(const amrex::Vector<AmrField_u>& rho,
698  const amrex::Vector<AmrField_u>& phi,
699  const bool& matrices)
700 {
701  this->open_m(lbase_m, matrices);
702 
703  const scalar_t* invdx = mglevel_m[lbase_m]->invCellSize();
704 
705  const scalar_t invdx2[] = {
706  D_DECL( invdx[0] * invdx[0],
707  invdx[1] * invdx[1],
708  invdx[2] * invdx[2] )
709  };
710 
711  if (matrices) {
712  for (amrex::MFIter mfi(*mglevel_m[lbase_m]->mask, true);
713  mfi.isValid(); ++mfi)
714  {
715  const box_t& tbx = mfi.tilebox();
716  const basefab_t& mfab = (*mglevel_m[lbase_m]->mask)[mfi];
717  const farraybox_t& rhofab = (*rho[lbase_m])[mfi];
718  const farraybox_t& pfab = (*phi[lbase_m])[mfi];
719 
720  const int* lo = tbx.loVect();
721  const int* hi = tbx.hiVect();
722 
723  for (int i = lo[0]; i <= hi[0]; ++i) {
724  for (int j = lo[1]; j <= hi[1]; ++j) {
725 #if AMREX_SPACEDIM == 3
726  for (int k = lo[2]; k <= hi[2]; ++k) {
727 #endif
728  AmrIntVect_t iv(D_DECL(i, j, k));
729  go_t gidx = mglevel_m[lbase_m]->serialize(iv);
730 
731  if (!solver_mp->hasOperator()) {
732  this->buildNoFinePoissonMatrix_m(lbase_m, gidx, iv, mfab, invdx2);
733  this->buildGradientMatrix_m(lbase_m, gidx, iv, mfab, invdx);
734  }
735 
736  mglevel_m[lbase_m]->rho_p->replaceGlobalValue(gidx, rhofab(iv, 0));
737  mglevel_m[lbase_m]->phi_p->replaceGlobalValue(gidx, pfab(iv, 0));
738 
739 #if AMREX_SPACEDIM == 3
740  }
741 #endif
742  }
743  }
744  }
745  } else {
746  this->buildDensityVector_m(lbase_m, *rho[lbase_m]);
747  }
748 
749  this->close_m(lbase_m, matrices);
750 
751  if (matrices) {
752  mglevel_m[lbase_m]->Awf_p = Teuchos::null;
753  mglevel_m[lbase_m]->UnCovered_p = Teuchos::null;
754  }
755 }
756 
757 
758 void AmrMultiGrid::buildMultiLevel_m(const amrex::Vector<AmrField_u>& rho,
759  const amrex::Vector<AmrField_u>& phi,
760  const bool& matrices)
761 {
762  // the base level has no smoother --> nlevel_m - 1
763  if ( matrices ) {
764  // although we do a resize afterwards, we do this to be safe
765  for (int lev = nlevel_m-1; lev < (int)smoother_m.size(); ++lev) {
766  smoother_m[lev].reset();
767  }
768  smoother_m.resize(nlevel_m-1);
769  }
770 
771  for (int lev = 0; lev < nlevel_m; ++lev) {
772  this->open_m(lev, matrices);
773 
774  int ilev = lbase_m + lev;
775 
776  // find all coarse cells that are covered by fine cells
777 // AmrIntVect_t rr = mglevel_m[lev]->refinement();
778 
779  const scalar_t* invdx = mglevel_m[lev]->invCellSize();
780 
781  const scalar_t invdx2[] = {
782  D_DECL( invdx[0] * invdx[0],
783  invdx[1] * invdx[1],
784  invdx[2] * invdx[2] )
785  };
786 
787  if ( matrices ) {
788  for (amrex::MFIter mfi(*mglevel_m[lev]->mask, true);
789  mfi.isValid(); ++mfi)
790  {
791  const box_t& tbx = mfi.tilebox();
792  const basefab_t& mfab = (*mglevel_m[lev]->mask)[mfi];
793  const basefab_t& rfab = (*mglevel_m[lev]->refmask)[mfi];
794  const basefab_t& cfab = (*mglevel_m[lev]->crsemask)[mfi];
795  const farraybox_t& rhofab = (*rho[ilev])[mfi];
796  const farraybox_t& pfab = (*phi[ilev])[mfi];
797 
798  const int* lo = tbx.loVect();
799  const int* hi = tbx.hiVect();
800 
801  for (int i = lo[0]; i <= hi[0]; ++i) {
802  int ii = i << 1;
803  for (int j = lo[1]; j <= hi[1]; ++j) {
804  int jj = j << 1;
805 #if AMREX_SPACEDIM == 3
806  for (int k = lo[2]; k <= hi[2]; ++k) {
807  int kk = k << 1;
808 #endif
809  AmrIntVect_t iv(D_DECL(i, j, k));
810  go_t gidx = mglevel_m[lev]->serialize(iv);
811 
812  this->buildRestrictionMatrix_m(lev, gidx, iv,
813  D_DECL(ii, jj, kk), rfab);
814 
815  this->buildInterpolationMatrix_m(lev, gidx, iv, cfab);
816 
817  this->buildCrseBoundaryMatrix_m(lev, gidx, iv, mfab,
818  cfab, invdx2);
819 
820  this->buildFineBoundaryMatrix_m(lev, gidx, iv,
821  mfab, rfab, cfab);
822 
823  this->buildCompositePoissonMatrix_m(lev, gidx, iv, mfab,
824  rfab, cfab, invdx2);
825 
826  if (lev > lbase_m || (lev == lbase_m && !solver_mp->hasOperator())) {
827  this->buildNoFinePoissonMatrix_m(lev, gidx, iv, mfab, invdx2);
828  this->buildGradientMatrix_m(lev, gidx, iv, mfab, invdx);
829  }
830 
831  mglevel_m[lev]->rho_p->replaceGlobalValue(gidx, rhofab(iv, 0));
832  mglevel_m[lev]->phi_p->replaceGlobalValue(gidx, pfab(iv, 0));
833 #if AMREX_SPACEDIM == 3
834  }
835 #endif
836  }
837  }
838  }
839  } else {
840  for (lo_t lev = 0; lev < nlevel_m; ++lev) {
841  int ilev = lbase_m + lev;
842  this->buildDensityVector_m(lev, *rho[ilev]);
843  }
844  }
845 
846  this->close_m(lev, matrices);
847 
848  if ( matrices && lev > lbase_m ) {
849  smoother_m[lev-1].reset( new AmrSmoother(mglevel_m[lev]->Anf_p,
850  smootherType_m, nSweeps_m) );
851  }
852  }
853 }
854 
855 
856 void AmrMultiGrid::open_m(const lo_t& level,
857  const bool& matrices)
858 {
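 /* Allocate the Tpetra matrices and vectors of this level. The nNeighbours /
  * nEntries values below are upper bounds on the number of non-zeros per row
  * (static profile), assuming a refinement ratio of 2 between levels.
  */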
859  if ( matrices ) {
860 
861  if ( level > lbase_m ) {
862 
863  /*
864  * interpolation matrix
865  */
866 
867  int nNeighbours = (nBcPoints_m + 1) * interp_mp->getNumberOfPoints();
868 
869  mglevel_m[level]->I_p = Teuchos::rcp( new matrix_t(mglevel_m[level]->map_p,
870  nNeighbours,
871  Tpetra::StaticProfile) );
872 
873  /*
874  * coarse boundary matrix
875  */
876 
877  nNeighbours = 2 * AMREX_SPACEDIM * nBcPoints_m *
878  2 * AMREX_SPACEDIM * interface_mp->getNumberOfPoints();
879 
880  mglevel_m[level]->Bcrse_p = Teuchos::rcp(
881  new matrix_t(mglevel_m[level]->map_p, nNeighbours,
882  Tpetra::StaticProfile) );
883 
884  }
885 
886 
887  if ( level < lfine_m ) {
888 
889  /*
890  * restriction matrix
891  */
892 
893  // refinement 2
894  int nNeighbours = AMREX_D_TERM(2, * 2, * 2);
895 
896  mglevel_m[level]->R_p = Teuchos::rcp(
897  new matrix_t(mglevel_m[level]->map_p, nNeighbours,
898  Tpetra::StaticProfile) );
899 
900  /*
901  * fine boundary matrix
902  */
903 
904  // refinement 2
905  nNeighbours = 2 * AMREX_SPACEDIM * AMREX_D_TERM(2, * 2, * 2);
906 
907  mglevel_m[level]->Bfine_p = Teuchos::rcp(
908  new matrix_t(mglevel_m[level]->map_p, nNeighbours,
909  Tpetra::StaticProfile) );
910 
911  }
912 
913  /*
914  * no-fine Poisson matrix
915  */
916 
917  int nPhysBoundary = 2 * AMREX_SPACEDIM * nBcPoints_m;
918 
919  // number of internal stencil points
920  int nIntBoundary = AMREX_SPACEDIM * interface_mp->getNumberOfPoints();
921 
922  int nEntries = (AMREX_SPACEDIM << 1) + 2 /* plus boundaries */ + nPhysBoundary + nIntBoundary;
923 
924  if (level > lbase_m || (level == lbase_m && !solver_mp->hasOperator())) {
925  mglevel_m[level]->Anf_p = Teuchos::rcp(
926  new matrix_t(mglevel_m[level]->map_p, nEntries,
927  Tpetra::StaticProfile) );
928  }
929 
930  /*
931  * with-fine / composite Poisson matrix
932  */
933  if ( lbase_m != lfine_m ) {
934  nEntries = (AMREX_SPACEDIM << 1) + 5 /* plus boundaries */ + nPhysBoundary + nIntBoundary;
935 
936  mglevel_m[level]->Awf_p = Teuchos::rcp(
937  new matrix_t(mglevel_m[level]->map_p, nEntries,
938  Tpetra::StaticProfile) );
939 
940  /*
941  * uncovered cells matrix
942  */
943  mglevel_m[level]->UnCovered_p = Teuchos::rcp(
944  new matrix_t(mglevel_m[level]->map_p, 1,
945  Tpetra::StaticProfile) );
946  }
947 
948  /*
949  * gradient matrices
950  */
951  nEntries = 11;
952 
953  if (level > lbase_m || (level == lbase_m && !solver_mp->hasOperator())) {
954  for (int d = 0; d < AMREX_SPACEDIM; ++d) {
955  mglevel_m[level]->G_p[d] = Teuchos::rcp(
956  new matrix_t(mglevel_m[level]->map_p, nEntries,
957  Tpetra::StaticProfile) );
958  }
959  }
960  }
961 
962  mglevel_m[level]->rho_p = Teuchos::rcp(
963  new vector_t(mglevel_m[level]->map_p, false) );
964 
965  if ( matrices ) {
966  mglevel_m[level]->phi_p = Teuchos::rcp(
967  new vector_t(mglevel_m[level]->map_p, false) );
968  }
969 }
970 
971 
972 void AmrMultiGrid::close_m(const lo_t& level,
973  const bool& matrices)
974 {
975  if ( matrices ) {
976  if ( level > lbase_m ) {
977 
978  mglevel_m[level]->I_p->fillComplete(mglevel_m[level-1]->map_p, // col map (domain map)
979  mglevel_m[level]->map_p); // row map (range map)
980 
981  mglevel_m[level]->Bcrse_p->fillComplete(mglevel_m[level-1]->map_p, // col map
982  mglevel_m[level]->map_p); // row map
983  }
984 
985  if ( level < lfine_m ) {
986 
987  mglevel_m[level]->R_p->fillComplete(mglevel_m[level+1]->map_p,
988  mglevel_m[level]->map_p);
989 
990  mglevel_m[level]->Bfine_p->fillComplete(mglevel_m[level+1]->map_p,
991  mglevel_m[level]->map_p);
992  }
993 
994  if (level > lbase_m || (level == lbase_m && !solver_mp->hasOperator())) {
995  mglevel_m[level]->Anf_p->fillComplete();
996 
997  for (int d = 0; d < AMREX_SPACEDIM; ++d)
998  mglevel_m[level]->G_p[d]->fillComplete();
999  }
1000 
1001  if ( lbase_m != lfine_m ) {
1002  mglevel_m[level]->Awf_p->fillComplete();
1003 
1004  mglevel_m[level]->UnCovered_p->fillComplete();
1005  }
1006  }
1007 }
1008 
1009 
1010 void AmrMultiGrid::buildNoFinePoissonMatrix_m(const lo_t& level,
1011  const go_t& gidx,
1012  const AmrIntVect_t& iv,
1013  const basefab_t& mfab,
1014  const scalar_t* invdx2)
1015 {
1016  /*
1017  * Laplacian of "no fine"
1018  */
1019 
1020  /*
1021  * 1D not supported
1022  * 2D --> 5 elements per row
1023  * 3D --> 7 elements per row
1024  */
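 // Sketch for the interior case (3D, uniform cell size h): the row of Anf_p
 // gets the standard 7-point stencil, +1/h^2 for each of the six face
 // neighbours and -6/h^2 for the centre; boundary neighbours are folded into
 // the row by the interface/boundary handling below.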
1025 
1026  umap_t map;
1027  indices_t indices;
1028  coefficients_t values;
1029 
1030  /*
1031  * check neighbours in all directions (Laplacian stencil --> cross)
1032  */
1033  for (int d = 0; d < AMREX_SPACEDIM; ++d) {
1034  for (int shift = -1; shift <= 1; shift += 2) {
1035  AmrIntVect_t biv = iv;
1036  biv[d] += shift;
1037 
1038  switch ( mfab(biv) )
1039  {
1040  case AmrMultiGridLevel_t::Mask::INTERIOR:
1041  case AmrMultiGridLevel_t::Mask::COVERED: // covered --> interior cell
1042  {
1043  map[mglevel_m[level]->serialize(biv)] += invdx2[d];
1044  break;
1045  }
1046  case AmrMultiGridLevel_t::Mask::BNDRY:
1047  {
1048  // boundary cell
1049  // only levels > 0 have this kind of boundary
1050 #ifndef NDEBUG
1051  if ( level == lbase_m )
1052  throw OpalException("AmrMultiGrid::buildNoFinePoissonMatrix_m()",
1053  "Error in mask for level "
1054  + std::to_string(level) + "!");
1055 #endif
1056  /* Dirichlet boundary conditions from coarser level.
1057  */
1058  interface_mp->fine(biv, map, invdx2[d], d, -shift,
1059  mglevel_m[level].get());
1060  break;
1061  }
1062  case AmrMultiGridLevel_t::Mask::PHYSBNDRY:
1063  {
1064  // physical boundary cell
1065  mglevel_m[level]->applyBoundary(biv, d, map,
1066  invdx2[d] /*matrix coefficient*/);
1067  break;
1068  }
1069  default:
1070  throw OpalException("AmrMultiGrid::buildNoFinePoissonMatrix_m()",
1071  "Error in mask for level "
1072  + std::to_string(level) + "!");
1073  }
1074  }
1075  }
1076 
1077  // check center
1078  map[gidx] += AMREX_D_TERM(- 2.0 * invdx2[0],
1079  - 2.0 * invdx2[1],
1080  - 2.0 * invdx2[2]);
1081 
1082  this->map2vector_m(map, indices, values);
1083 
1084  mglevel_m[level]->Anf_p->insertGlobalValues(gidx,
1085  indices.size(),
1086  &values[0],
1087  &indices[0]);
1088 }
1089 
1090 
1091 void AmrMultiGrid::buildCompositePoissonMatrix_m(const lo_t& level,
1092  const go_t& gidx,
1093  const AmrIntVect_t& iv,
1094  const basefab_t& mfab,
1095  const basefab_t& rfab,
1096  const basefab_t& cfab,
1097  const scalar_t* invdx2)
1098 {
1099  /*
1100  * Laplacian of "with fine"
1101  *
1102  * For the finest level: Awf == Anf
1103  */
1104  if ( rfab(iv) == AmrMultiGridLevel_t::Refined::YES ) //|| lbase_m != lfine_m )
1105  return;
1106  /*
1107  * Only cells that are not refined
1108  */
1109 
1110  /*
1111  * 1D not supported by AmrMultiGrid
1112  * 2D --> 5 elements per row
1113  * 3D --> 7 elements per row
1114  */
1115 
1116  umap_t map;
1117  indices_t indices;
1118  coefficients_t values;
1119 
1120  /*
1121  * Only cells that are not refined
1122  */
1123 
1124  /*
1125  * check neighbours in all directions (Laplacian stencil --> cross)
1126  */
1127  for (int d = 0; d < AMREX_SPACEDIM; ++d) {
1128  for (int shift = -1; shift <= 1; shift += 2) {
1129  AmrIntVect_t biv = iv;
1130  biv[d] += shift;
1131 
1132  if ( rfab(biv) != AmrMultiGridLevel_t::Refined::YES )
1133  {
1134  /*
1135  * It can't be a refined cell!
1136  */
1137  switch ( mfab(biv) )
1138  {
1139  case AmrMultiGridLevel_t::Mask::COVERED:
1140  // covered --> interior cell
1141  case AmrMultiGridLevel_t::Mask::INTERIOR:
1142  {
1143  map[mglevel_m[level]->serialize(biv)] += invdx2[d];
1144  map[gidx] -= invdx2[d]; // add center once
1145  break;
1146  }
1147  case AmrMultiGridLevel_t::Mask::BNDRY:
1148  {
1149  // boundary cell
1150  // only levels > 0 have this kind of boundary
1151 #ifndef NDEBUG
1152  if ( level == lbase_m )
1153  throw OpalException("AmrMultiGrid::buildCompositePoissonMatrix_m()",
1154  "Error in mask for level "
1155  + std::to_string(level) + "!");
1156 #endif
1157 
1158  /* We are on the fine side of the crse-fine interface
1159  * --> normal stencil --> no flux matching required
1160  * --> interpolation of fine ghost cell required
1161  * (used together with Bcrse)
1162  */
1163 
1164  /* Dirichlet boundary conditions from coarser level.
1165  */
1166  interface_mp->fine(biv, map, invdx2[d], d, -shift,
1167  mglevel_m[level].get());
1168 
1169  // add center once
1170  map[gidx] -= invdx2[d];
1171  break;
1172  }
1173  case AmrMultiGridLevel_t::Mask::PHYSBNDRY:
1174  {
1175  // physical boundary cell
1176  mglevel_m[level]->applyBoundary(biv, d, map,
1177  invdx2[d] /*matrix coefficient*/);
1178 
1179  // add center once
1180  map[gidx] -= invdx2[d];
1181  break;
1182  }
1183  default:
1184  throw OpalException("AmrMultiGrid::buildCompositePoissonMatrix_m()",
1185  "Error in mask for level "
1186  + std::to_string(level) + "!");
1187  }
1188  } else {
1189  /*
1190  * If neighbour cell is refined, we are on the coarse
1191  * side of the crse-fine interface --> flux matching
1192  * required --> interpolation of fine ghost cell
1193  * (used together with Bfine)
1194  */
1195 
1196 
1197  // flux matching, coarse part
1198 
1199  /* 2D --> 2 fine cells to compute flux per coarse-fine interface --> avg = 2
1200  * 3D --> 4 fine cells to compute flux per coarse-fine interface --> avg = 4
1201  *
1202  * @precondition: refinement of 2
1203  */
1204  // top and bottom for all directions
1205  const scalar_t* invcdx = mglevel_m[level]->invCellSize();
1206  const scalar_t* invfdx = mglevel_m[level+1]->invCellSize();
1207  scalar_t invavg = AMREX_D_PICK(1.0, 0.5, 0.25);
1208  scalar_t value = - invavg * invcdx[d] * invfdx[d];
1209 
1210  for (int d1 = 0; d1 < 2; ++d1) {
1211 #if AMREX_SPACEDIM == 3
1212  for (int d2 = 0; d2 < 2; ++d2) {
1213 #endif
1214 
1215  /* in order to get a top iv --> needs to be odd value in "d"
1216  * in order to get a bottom iv --> needs to be even value in "d"
1217  */
1218  AmrIntVect_t fake(D_DECL(0, 0, 0));
1219 
1220  fake[(d+1)%AMREX_SPACEDIM] = d1;
1221 #if AMREX_SPACEDIM == 3
1222  fake[(d+2)%AMREX_SPACEDIM] = d2;
1223 #endif
1224  interface_mp->coarse(iv, map, value, d, shift, rfab,
1225  fake, mglevel_m[level].get());
1226 
1227 #if AMREX_SPACEDIM == 3
1228  }
1229 #endif
1230  }
1231  }
1232  }
1233  }
1234 
1235  this->map2vector_m(map, indices, values);
1236 
1237  mglevel_m[level]->Awf_p->insertGlobalValues(gidx,
1238  indices.size(),
1239  &values[0],
1240  &indices[0]);
1241 
1242  scalar_t vv = 1.0;
1243  mglevel_m[level]->UnCovered_p->insertGlobalValues(gidx,
1244  1,
1245  &vv,
1246  &gidx);
1247 }
1248 
1249 
1250 void AmrMultiGrid::buildRestrictionMatrix_m(const lo_t& level,
1251  const go_t& gidx,
1252  const AmrIntVect_t& iv,
1253  D_DECL(const go_t& ii,
1254  const go_t& jj,
1255  const go_t& kk),
1256  const basefab_t& rfab)
1257 {
1258  /*
1259  * x^(l) = R * x^(l+1)
1260  */
1261 
1262  // finest level does not need to have a restriction matrix
1263  if ( rfab(iv) == AmrMultiGridLevel_t::Refined::NO || level == lfine_m )
1264  return;
1265 
1266  /* Difficulty: If a fine cell belongs to another processor than the underlying
1267  * coarse cell, we get an error when filling the matrix since the
1268  * cell (--> global index) does not belong to the same processor.
1269  * Solution: Find all coarse cells that are covered by fine cells, thus,
1270  * the distributionmap is correct.
1271  *
1272  *
1273  */
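 // Restriction here is plain averaging: each refined coarse cell receives the
 // mean of its 2^AMREX_SPACEDIM covering fine cells (weights 0.25 in 2D,
 // 0.125 in 3D), assuming a refinement ratio of 2.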
1274  indices_t indices;
1275  indices.reserve(2 << (AMREX_SPACEDIM - 1));
1276  coefficients_t values;
1277  values.reserve(2 << (AMREX_SPACEDIM -1));
1278 
1279  // neighbours
1280  for (int iref = 0; iref < 2; ++iref) {
1281  for (int jref = 0; jref < 2; ++jref) {
1282 #if AMREX_SPACEDIM == 3
1283  for (int kref = 0; kref < 2; ++kref) {
1284 #endif
1285  AmrIntVect_t riv(D_DECL(ii + iref, jj + jref, kk + kref));
1286 
1287  indices.push_back( mglevel_m[level+1]->serialize(riv) );
1288  values.push_back( AMREX_D_PICK(0.5, 0.25, 0.125) );
1289 #if AMREX_SPACEDIM == 3
1290  }
1291 #endif
1292  }
1293  }
1294 
1295  mglevel_m[level]->R_p->insertGlobalValues(gidx,
1296  indices.size(),
1297  &values[0],
1298  &indices[0]);
1299 }
1300 
1301 
1302 void AmrMultiGrid::buildInterpolationMatrix_m(const lo_t& level,
1303  const go_t& gidx,
1304  const AmrIntVect_t& iv,
1305  const basefab_t& cfab)
1306 {
1307  /* crse: level - 1
1308  * fine (this): level
1309  */
1310 
1311  /*
1312  * This does not include ghost cells
1313  * --> no boundaries
1314  *
1315  * x^(l) = I * x^(l-1)
1316  */
1317 
1318  if ( level == lbase_m )
1319  return;
1320 
1321  umap_t map;
1322  indices_t indices;
1323  coefficients_t values;
1324 
1325  /*
1326  * we need boundary + indices from coarser level
1327  */
1328  interp_mp->stencil(iv, cfab, map, 1.0, mglevel_m[level-1].get());
1329 
1330  this->map2vector_m(map, indices, values);
1331 
1332  mglevel_m[level]->I_p->insertGlobalValues(gidx,
1333  indices.size(),
1334  &values[0],
1335  &indices[0]);
1336 }
1337 
1338 
1339 void AmrMultiGrid::buildCrseBoundaryMatrix_m(const lo_t& level,
1340  const go_t& gidx,
1341  const AmrIntVect_t& iv,
1342  const basefab_t& mfab,
1343  const basefab_t& cfab,
1344  const scalar_t* invdx2)
1345 {
1346  /*
1347  * fine (this): level
1348  * coarse: level - 1
1349  */
1350 
1351  // the base level has only physical boundaries
1352  if ( level == lbase_m )
1353  return;
1354 
1355  // iv is a fine cell
1356 
1357  umap_t map;
1358  indices_t indices;
1359  coefficients_t values;
1360 
1361  // check its neighbours to see if at crse-fine interface
1362  for (int d = 0; d < AMREX_SPACEDIM; ++d) {
1363  for (int shift = -1; shift <= 1; shift += 2) {
1364  // neighbour
1365  AmrIntVect_t niv = iv;
1366  niv[d] += shift;
1367 
1368  if ( mfab(niv) == AmrMultiGridLevel_t::Mask::BNDRY )
1369  {
1370  // neighbour does not belong to fine grids
1371  // includes no cells at physical boundary
1372 
1373  // coarse cell that is not refined
1374  AmrIntVect_t civ = iv;
1375  civ[d] += shift;
1376  civ.coarsen(mglevel_m[level]->refinement());
1377 
1378  // we need boundary + indices from coarser level
1379  // we need normalization by mesh size squared (of fine cell size)
1380  // --> Laplacian for fine cell
1381  // (Dirichlet boundary for coarse --> fine)
1382  interface_mp->coarse(civ, map, invdx2[d], d, shift, cfab,
1383  iv, mglevel_m[level-1].get());
1384  }
1385 #ifndef NDEBUG
1386  else if ( mfab(niv) == AmrMultiGridLevel_t::Mask::PHYSBNDRY ) {
1387  throw OpalException("AmrMultiGrid::buildCrseBoundaryMatrix_m()",
1388  "Fine meshes shouldn't be connected "
1389  "to physical (i.e. mesh) boundary!");
1390  }
1391 #endif
1392  }
1393  }
1394 
1395  this->map2vector_m(map, indices, values);
1396 
1397  mglevel_m[level]->Bcrse_p->insertGlobalValues(gidx,
1398  indices.size(),
1399  &values[0],
1400  &indices[0]);
1401 }
1402 
1403 
1404 void AmrMultiGrid::buildFineBoundaryMatrix_m(const lo_t& level,
1405  const go_t& gidx,
1406  const AmrIntVect_t& iv,
1407  const basefab_t& mfab,
1408  const basefab_t& rfab,
1409  const basefab_t& cfab)
1410 {
1411  /* fine: level + 1
1412  * coarse (this): level
1413  */
1414 
1415  // the finest level does not need data from a finer level
1416  if ( rfab(iv) == AmrMultiGridLevel_t::Refined::YES || level == lfine_m )
1417  return;
1418 
1419  const scalar_t* invcdx = mglevel_m[level]->invCellSize();
1420  const scalar_t* invfdx = mglevel_m[level+1]->invCellSize();
1421 
1422  // inverse of number of fine cell gradients
1423  scalar_t invavg = AMREX_D_PICK(1, 0.5, 0.25);
1424 
1425  umap_t map;
1426  indices_t indices;
1427  coefficients_t values;
1428 
1429  auto fill = [&](umap_t& map,
1430  D_DECL(int ii, int jj, int kk),
1431  int* begin, int* end, int d,
1432  const AmrIntVect_t& iv, int shift,
1433  int sign)
1434  {
1435  for (int iref = ii - begin[0]; iref <= ii + end[0]; ++iref) {
1436  for (int jref = jj - begin[1]; jref <= jj + end[1]; ++jref) {
1437 #if AMREX_SPACEDIM == 3
1438  for (int kref = kk - begin[2]; kref <= kk + end[2]; ++kref) {
1439 #endif
1440  /* Since all fine cells on the not-refined cell are
1441  * outside of the "domain" --> we need to interpolate
1442  */
1443  AmrIntVect_t riv(D_DECL(iref, jref, kref));
1444 
1445  if ( (riv[d] >> 1) /*refinement*/ == iv[d] ) {
1446  /* the fine cell is on the coarse side --> fine
1447  * ghost cell --> we need to interpolate
1448  */
1449  scalar_t value = - invavg * invcdx[d] * invfdx[d];
1450 
1451  interface_mp->fine(riv, map, value, d, shift,
1452  mglevel_m[level+1].get());
1453  } else {
1454  scalar_t value = invavg * invcdx[d] * invfdx[d];
1455  map[mglevel_m[level+1]->serialize(riv)] += value;
1456  }
1457 #if AMREX_SPACEDIM == 3
1458  }
1459 #endif
1460  }
1461  }
1462  };
1463 
1464 
1465  /*
1466  * iv is a coarse cell that was not refined
1467  *
1468  * --> check all neighbours to see if at crse-fine
1469  * interface
1470  */
1471  for (int d = 0; d < AMREX_SPACEDIM; ++d) {
1472  for (int shift = -1; shift <= 1; shift += 2) {
1473  // neighbour
1474  AmrIntVect_t covered = iv;
1475  covered[d] += shift;
1476 
1477  if ( rfab(covered) == AmrMultiGridLevel_t::Refined::YES &&
1478  mfab(covered) != AmrMultiGridLevel_t::PHYSBNDRY )
1479  {
1480  // neighbour is covered by fine cells
1481 
1482  /*
1483  * "shift" is the offset (+/-1) towards a coarse cell that was refined
1484  * "d" is the direction to shift
1485  *
1486  * --> check all covered neighbour cells
1487  */
1488 
1489  /* we need to iterate over correct fine cells. It depends
1490  * on the orientation of the interface
1491  */
1492  int begin[AMREX_SPACEDIM] = { D_DECL( int(d == 0), int(d == 1), int(d == 2) ) };
1493  int end[AMREX_SPACEDIM] = { D_DECL( int(d != 0), int(d != 1), int(d != 2) ) };
1494 
1495  /*
1496  * neighbour cell got refined but is not on physical boundary
1497  * --> we are at a crse-fine interface
1498  *
1499  * we need now to find out which fine cells
1500  * are required to satisfy the flux matching
1501  * condition
1502  */
1503 
1504  switch ( shift ) {
1505  case -1:
1506  {
1507  // --> interface is on the lower face
1508  int ii = iv[0] << 1; // refinement in x
1509  int jj = iv[1] << 1; // refinement in y
1510 #if AMREX_SPACEDIM == 3
1511  int kk = iv[2] << 1; // refinement in z
1512 #endif
1513  // iterate over all fine cells at the interface
1514  // start with lower cells --> cover coarse neighbour
1515  // cell
1516  fill(map, D_DECL(ii, jj, kk), &begin[0], &end[0], d, iv, shift, 1.0);
1517  break;
1518  }
1519  case 1:
1520  default:
1521  {
1522  // --> interface is on the upper face
1523  int ii = covered[0] << 1; // refinement in x
1524  int jj = covered[1] << 1; // refinement in y
1525 #if AMREX_SPACEDIM == 3
1526  int kk = covered[2] << 1; // refinement in z
1527 #endif
1528  fill(map, D_DECL(ii, jj, kk), &begin[0], &end[0], d, iv, shift, 1.0);
1529  break;
1530  }
1531  }
1532  }
1533  }
1534  }
1535 
1536  this->map2vector_m(map, indices, values);
1537 
1538  // iv: not covered coarse cell at crse-fine interface
1539 
1540  mglevel_m[level]->Bfine_p->insertGlobalValues(gidx,
1541  indices.size(),
1542  &values[0],
1543  &indices[0]);
1544 }
1545 
1546 
1547 inline void AmrMultiGrid::buildDensityVector_m(const lo_t& level,
1548  const AmrField_t& rho)
1549 {
1550  this->amrex2trilinos_m(level, 0, rho, mglevel_m[level]->rho_p);
1551 }
1552 
1553 
1554 inline void AmrMultiGrid::buildPotentialVector_m(const lo_t& level,
1555  const AmrField_t& phi)
1556 {
1557  this->amrex2trilinos_m(level, 0, phi, mglevel_m[level]->phi_p);
1558 }
1559 
1560 
1561 void AmrMultiGrid::buildGradientMatrix_m(const lo_t& level,
1562  const go_t& gidx,
1563  const AmrIntVect_t& iv,
1564  const basefab_t& mfab,
1565  const scalar_t* invdx)
1566 {
1567  umap_t map;
1568  indices_t indices;
1569  coefficients_t values;
1570 
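 /* Per direction d, the lambda below assembles a central difference with
  * coefficients -/+ 0.5*invdx[d] for the iv+e_d / iv-e_d neighbours, falling
  * back to extrapolated (Lagrange) or boundary-condition values at coarse-fine
  * and physical boundaries. Note the sign convention: applying G_p[d] to phi
  * yields -dphi/dx_d, which computeEfield_m() stores directly as the field
  * component.
  */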
1571  auto check = [&](const AmrIntVect_t& iv,
1572  const basefab_t& mfab,
1573  int dir,
1574  scalar_t shift)
1575  {
1576  switch ( mfab(iv) )
1577  {
1578  case AmrMultiGridLevel_t::Mask::INTERIOR:
1579  // interior cells
1580  case AmrMultiGridLevel_t::Mask::COVERED:
1581  // covered --> interior cell
1582  map[mglevel_m[level]->serialize(iv)] -= shift * 0.5 * invdx[dir];
1583  break;
1584  case AmrMultiGridLevel_t::Mask::BNDRY:
1585  {
1586  // interior boundary cells --> only level > 0
1587 #ifndef NDEBUG
1588  if ( level == lbase_m )
1589  throw OpalException("AmrMultiGrid::buildGradientMatrix_m()",
1590  "Error in mask for level "
1591  + std::to_string(level) + "!");
1592 #endif
1593 
1594  scalar_t value = - shift * 0.5 * invdx[dir];
1595 
1596  // use 1st order Lagrange --> only cells of this level required
1597  AmrIntVect_t tmp = iv;
1598  // first fine cell on refined coarse cell (closer to interface)
1599  tmp[dir] -= shift;
1600  map[mglevel_m[level]->serialize(tmp)] += 2.0 * value;
1601 
1602  // second fine cell on refined coarse cell (further away from interface)
1603  tmp[dir] -= shift;
1604  map[mglevel_m[level]->serialize(tmp)] -= value;
1605  break;
1606  }
1607  case AmrMultiGridLevel_t::Mask::PHYSBNDRY:
1608  {
1609  // physical boundary cells
1610 
1611  scalar_t value = - shift * 0.5 * invdx[dir];
1612 
1613  mglevel_m[level]->applyBoundary(iv, dir,
1614  map, value);
1615  break;
1616  }
1617  default:
1618  break;
1619  }
1620  };
1621 
1622  for (int d = 0; d < AMREX_SPACEDIM; ++d) {
1623  for (int shift = -1; shift <= 1; shift += 2) {
1624  AmrIntVect_t niv = iv;
1625  niv[d] += shift;
1626  check(niv, mfab, d, shift);
1627  }
1628 
1629  this->map2vector_m(map, indices, values);
1630 
1631  mglevel_m[level]->G_p[d]->insertGlobalValues(gidx,
1632  indices.size(),
1633  &values[0],
1634  &indices[0]);
1635  }
1636 }
1637 
1638 
1639 void AmrMultiGrid::amrex2trilinos_m(const lo_t& level,
1640  const lo_t& comp,
1641  const AmrField_t& mf,
1642  Teuchos::RCP<vector_t>& mv)
1643 {
1644  if ( mv.is_null() )
1645  mv = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, false) );
1646 
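 // Copy one component of the AMReX MultiFab into the Tpetra vector: every
 // cell (i,j,k) owned by this rank is written to the global row obtained
 // from AmrMultiGridLevel::serialize().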
1647  for (amrex::MFIter mfi(mf, true); mfi.isValid(); ++mfi) {
1648  const amrex::Box& tbx = mfi.tilebox();
1649  const amrex::FArrayBox& fab = mf[mfi];
1650 
1651  const int* lo = tbx.loVect();
1652  const int* hi = tbx.hiVect();
1653 
1654  for (int i = lo[0]; i <= hi[0]; ++i) {
1655  for (int j = lo[1]; j <= hi[1]; ++j) {
1656 #if AMREX_SPACEDIM == 3
1657  for (int k = lo[2]; k <= hi[2]; ++k) {
1658 #endif
1659  AmrIntVect_t iv(D_DECL(i, j, k));
1660 
1661  go_t gidx = mglevel_m[level]->serialize(iv);
1662 
1663  mv->replaceGlobalValue(gidx, fab(iv, comp));
1664 #if AMREX_SPACEDIM == 3
1665  }
1666 #endif
1667  }
1668  }
1669  }
1670 }
1671 
1672 
1673 void AmrMultiGrid::trilinos2amrex_m(const lo_t& level,
1674  const lo_t& comp,
1675  AmrField_t& mf,
1676  const Teuchos::RCP<vector_t>& mv)
1677 {
1678  Teuchos::ArrayRCP<const amr::scalar_t> data = mv->get1dView();
1679 
1680  for (amrex::MFIter mfi(mf, true); mfi.isValid(); ++mfi) {
1681  const amrex::Box& tbx = mfi.tilebox();
1682  amrex::FArrayBox& fab = mf[mfi];
1683 
1684  const int* lo = tbx.loVect();
1685  const int* hi = tbx.hiVect();
1686 
1687  for (int i = lo[0]; i <= hi[0]; ++i) {
1688  for (int j = lo[1]; j <= hi[1]; ++j) {
1689 #if AMREX_SPACEDIM == 3
1690  for (int k = lo[2]; k <= hi[2]; ++k) {
1691 #endif
1692  AmrIntVect_t iv(D_DECL(i, j, k));
1693 
1694  go_t gidx = mglevel_m[level]->serialize(iv);
1695  lo_t lidx = mglevel_m[level]->map_p->getLocalElement(gidx);
1696 
1697  fab(iv, comp) = data[lidx];
1698  }
1699 #if AMREX_SPACEDIM == 3
1700  }
1701 #endif
1702  }
1703  }
1704 }
1705 
1706 
1707 inline
1708 void AmrMultiGrid::map2vector_m(umap_t& map, indices_t& indices,
1709  coefficients_t& values)
1710 {
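 // Flatten the (global column index -> coefficient) accumulation map into the
 // two parallel arrays expected by insertGlobalValues(); the map is cleared at
 // the end so the caller can reuse it for the next matrix row.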
1711  indices.clear();
1712  values.clear();
1713 
1714  indices.reserve(map.size());
1715  values.reserve(map.size());
1716 
1717  std::for_each(map.begin(), map.end(),
1718  [&](const std::pair<const go_t, scalar_t>& entry)
1719  {
1720 #ifndef NDEBUG
1721  if ( entry.first < 0 ) {
1722  throw OpalException("AmrMultiGrid::map2vector_m()",
1723  "Negative matrix index!");
1724  }
1725 #endif
1726 
1727  indices.push_back(entry.first);
1728  values.push_back(entry.second);
1729  }
1730  );
1731 
1732  map.clear();
1733 }
1734 
1735 
1736 void AmrMultiGrid::smooth_m(const lo_t& level,
1737  Teuchos::RCP<vector_t>& e,
1738  Teuchos::RCP<vector_t>& r)
1739 {
1740 #if AMR_MG_TIMER
1741  IpplTimings::startTimer(smoothTimer_m);
1742 #endif
1743 
1744  // base level has no smoother --> l - 1
1745  smoother_m[level-1]->smooth(e, mglevel_m[level]->Anf_p, r);
1746 
1747 #if AMR_MG_TIMER
1748  IpplTimings::stopTimer(smoothTimer_m);
1749 #endif
1750 }
1751 
1752 
1753 void AmrMultiGrid::restrict_m(const lo_t& level) {
1754 
1755 #if AMR_MG_TIMER
1756  IpplTimings::startTimer(restrictTimer_m);
1757 #endif
1758 
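 /* Restrict the level residual to level-1 in two parts:
  *   1) form the no-fine residual of the current error on this level and
  *      average it down with R^(l-1) onto the covered coarse cells,
  *   2) on uncovered coarse cells add the composite residual
  *      rho^(l-1) - A*phi (masked with UnCovered_p).
  */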
1759  Teuchos::RCP<vector_t> tmp = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p) );
1760 
1761  this->residual_no_fine_m(level, tmp,
1762  mglevel_m[level]->error_p,
1763  mglevel_m[level-1]->error_p,
1764  mglevel_m[level]->residual_p);
1765 
1766  mglevel_m[level-1]->residual_p->putScalar(0.0);
1767 
1768  // average down: residual^(l-1) = R^(l) * tmp
1769  mglevel_m[level-1]->R_p->apply(*tmp, *mglevel_m[level-1]->residual_p);
1770 
1771 
1772  // composite matrix, i.e. matrix without covered cells
1773  // r^(l-1) = rho^(l-1) - A * phi^(l-1)
1774 
1775  vector_t fine2crse(mglevel_m[level-1]->Awf_p->getDomainMap(), true);
1776 
1777  // get boundary contribution from the finer level (Bfine applied to phi^(l))
1778  mglevel_m[level-1]->Bfine_p->apply(*mglevel_m[level]->phi_p, fine2crse);
1779 
1780  // operation: fine2coarse += A * phi
1781  mglevel_m[level-1]->Awf_p->apply(*mglevel_m[level-1]->phi_p,
1782  fine2crse, Teuchos::NO_TRANS,
1783  scalar_t(1.0), scalar_t(1.0));
1784 
1785  if ( mglevel_m[level-1]->Bcrse_p != Teuchos::null ) {
1786  // operation: fine2coarse += B * phi
1787  mglevel_m[level-1]->Bcrse_p->apply(*mglevel_m[level-2]->phi_p,
1788  fine2crse, Teuchos::NO_TRANS,
1789  scalar_t(1.0), scalar_t(1.0));
1790  }
1791 
1792  Teuchos::RCP<vector_t> uncoveredRho = Teuchos::rcp( new vector_t(mglevel_m[level-1]->map_p, true) );
1793 
1794  mglevel_m[level-1]->UnCovered_p->apply(*mglevel_m[level-1]->rho_p, *uncoveredRho);
1795 
1796 
1797  //FIXME tmp2 not needed
1798  Teuchos::RCP<vector_t> tmp2 = Teuchos::rcp( new vector_t(mglevel_m[level-1]->map_p, true) );
1799  mglevel_m[level-1]->UnCovered_p->apply(fine2crse, *tmp2);
1800 
1801  // ONLY subtract coarse rho
1802  mglevel_m[level-1]->residual_p->update(1.0, *uncoveredRho, -1.0, *tmp2, 1.0);
1803 
1804 #if AMR_MG_TIMER
1805  IpplTimings::stopTimer(restrictTimer_m);
1806 #endif
1807 }
1808 
1809 
1810 void AmrMultiGrid::prolongate_m(const lo_t& level) {
1811 #if AMR_MG_TIMER
1812  IpplTimings::startTimer(interpTimer_m);
1813 #endif
1814  // interpolate error from l-1 to l
1815  // operation: e^(l) = 1.0 * e^(l) + 1.0 * I^(l) * e^(l-1)
1816  mglevel_m[level]->I_p->apply(*mglevel_m[level-1]->error_p,
1817  *mglevel_m[level]->error_p,
1818  Teuchos::NO_TRANS,
1819  scalar_t(1.0),
1820  scalar_t(1.0));
1821 #if AMR_MG_TIMER
1822  IpplTimings::stopTimer(interpTimer_m);
1823 #endif
1824 }
1825 
1826 
1827 void AmrMultiGrid::averageDown_m(const lo_t& level) {
1828 
1829  if (level == lfine_m )
1830  return;
1831 #if AMR_MG_TIMER
1832  IpplTimings::startTimer(averageTimer_m);
1833 #endif
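 // Covered coarse cells take the average of the overlying fine potential
 // (R_p), while uncovered cells keep their own value (UnCovered_p); the sum
 // of the two masked vectors gives the updated phi on this level.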
1834  Teuchos::RCP<vector_t> phicrse = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, false) );
1835 
1836  // operation: phicrse = 0.0 * phicrse + 1.0 * R^(l) * phi^(l+1)
1837  mglevel_m[level]->R_p->apply(*mglevel_m[level+1]->phi_p, *phicrse);
1838 
1839  Teuchos::RCP<vector_t> uncov_phi = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, true) );
1840 
1841  mglevel_m[level]->UnCovered_p->apply(*mglevel_m[level]->phi_p, *uncov_phi);
1842 
1843  mglevel_m[level]->phi_p->update(1.0, *phicrse, 1.0, *uncov_phi, 0.0);
1844 #if AMR_MG_TIMER
1845  IpplTimings::stopTimer(averageTimer_m);
1846 #endif
1847 }
1848 
1849 
1850 void AmrMultiGrid::initInterpolater_m(const Interpolater& interp) {
1851  switch ( interp ) {
1852  case Interpolater::TRILINEAR:
1854  break;
1855  case Interpolater::LAGRANGE:
1856  throw OpalException("AmrMultiGrid::initInterpolater_m()",
1857  "Not yet implemented.");
1858  case Interpolater::PIECEWISE_CONST:
1860  break;
1861  default:
1862  throw OpalException("AmrMultiGrid::initInterpolater_m()",
1863  "No such interpolater available.");
1864  }
1865 }
1866 
1867 
1868 void AmrMultiGrid::initCrseFineInterp_m(const Interpolater& interface) {
1869  switch ( interface ) {
1870  case Interpolater::TRILINEAR:
1872  break;
1873  case Interpolater::LAGRANGE:
1876  break;
1877  case Interpolater::PIECEWISE_CONST:
1879  break;
1880  default:
1881  throw OpalException("AmrMultiGrid::initCrseFineInterp_m()",
1882  "No such interpolater for the coarse-fine interface available.");
1883  }
1884 }
1885 
1886 
1887 void AmrMultiGrid::initBaseSolver_m(const BaseSolver& solver,
1888  const bool& rebalance,
1889  const std::string& reuse)
1890 {
1891  switch ( solver ) {
1892  // Belos solvers
1893  case BaseSolver::BICGSTAB:
1894  solver_mp.reset( new BelosSolver_t("BICGSTAB", prec_mp) );
1895  break;
1896  case BaseSolver::MINRES:
1897  solver_mp.reset( new BelosSolver_t("MINRES", prec_mp) );
1898  break;
1899  case BaseSolver::PCPG:
1900  solver_mp.reset( new BelosSolver_t("PCPG", prec_mp) );
1901  break;
1902  case BaseSolver::CG:
1903  solver_mp.reset( new BelosSolver_t("Pseudoblock CG", prec_mp) );
1904  break;
1905  case BaseSolver::GMRES:
1906  solver_mp.reset( new BelosSolver_t("Pseudoblock GMRES", prec_mp) );
1907  break;
1908  case BaseSolver::STOCHASTIC_CG:
1909  solver_mp.reset( new BelosSolver_t("Stochastic CG", prec_mp) );
1910  break;
1911  case BaseSolver::RECYCLING_CG:
1912  solver_mp.reset( new BelosSolver_t("RCG", prec_mp) );
1913  break;
1914  case BaseSolver::RECYCLING_GMRES:
1915  solver_mp.reset( new BelosSolver_t("GCRODR", prec_mp) );
1916  break;
1917  // Amesos2 solvers
1918 #ifdef HAVE_AMESOS2_KLU2
1919  case BaseSolver::KLU2:
1920  solver_mp.reset( new Amesos2Solver_t("klu2") );
1921  break;
1922 #endif
1923 #if HAVE_AMESOS2_SUPERLU
1924  case BaseSolver::SUPERLU:
1925  solver_mp.reset( new Amesos2Solver_t("superlu") );
1926  break;
1927 #endif
1928 #ifdef HAVE_AMESOS2_UMFPACK
1929  case BaseSolver::UMFPACK:
1930  solver_mp.reset( new Amesos2Solver_t("umfpack") );
1931  break;
1932 #endif
1933 #ifdef HAVE_AMESOS2_PARDISO_MKL
1934  case BaseSolver::PARDISO_MKL:
1935  solver_mp.reset( new Amesos2Solver_t("pardiso_mkl") );
1936  break;
1937 #endif
1938 #ifdef HAVE_AMESOS2_MUMPS
1939  case BaseSolver::MUMPS:
1940  solver_mp.reset( new Amesos2Solver_t("mumps") );
1941  break;
1942 #endif
1943 #ifdef HAVE_AMESOS2_LAPACK
1944  case BaseSolver::LAPACK:
1945  solver_mp.reset( new Amesos2Solver_t("lapack") );
1946  break;
1947 #endif
1948  case BaseSolver::SA:
1949  {
1950  std::string muelu = MueLuSolver_t::convertToMueLuReuseOption(reuse);
1951  solver_mp.reset( new MueLuSolver_t(rebalance, muelu) );
1952  break;
1953  }
1954  default:
1955  throw OpalException("AmrMultiGrid::initBaseSolver_m()",
1956  "No such bottom solver available.");
1957  }
1958 }
1959 
1960 
1961 void AmrMultiGrid::initPrec_m(const Preconditioner& prec,
1962  const bool& rebalance,
1963  const std::string& reuse)
1964 {
1965  switch ( prec ) {
1966  case Preconditioner::ILUT:
1968  case Preconditioner::RILUK:
1971  case Preconditioner::GS:
1973  prec_mp.reset( new Ifpack2Preconditioner_t(prec) );
1974  break;
1975  case Preconditioner::SA:
1976  {
1977  std::string muelu = MueLuPreconditioner_t::convertToMueLuReuseOption(reuse);
1978  prec_mp.reset( new MueLuPreconditioner_t(rebalance, muelu) );
1979  break;
1980  }
1981  case Preconditioner::NONE:
1982  prec_mp.reset();
1983  break;
1984  default:
1985  throw OpalException("AmrMultiGrid::initPrec_m()",
1986  "No such preconditioner available.");
1987  }
1988 }
1989 
1990 
1991 AmrMultiGrid::Boundary
1992 AmrMultiGrid::convertToEnumBoundary_m(const std::string& bc) {
1993  std::map<std::string, Boundary> map;
1994 
1995  map["DIRICHLET"] = Boundary::DIRICHLET;
1996  map["OPEN"] = Boundary::OPEN;
1997  map["PERIODIC"] = Boundary::PERIODIC;
1998 
1999  auto boundary = map.find(Util::toUpper(bc));
2000 
2001  if ( boundary == map.end() )
2002  throw OpalException("AmrMultiGrid::convertToEnumBoundary_m()",
2003  "No boundary type '" + bc + "'.");
2004  return boundary->second;
2005 }
2006 
2007 AmrMultiGrid::Interpolater
2008 AmrMultiGrid::convertToEnumInterpolater_m(const std::string& interp) {
2009  std::map<std::string, Interpolater> map;
2010 
2011  map["TRILINEAR"] = Interpolater::TRILINEAR;
2012  map["LAGRANGE"] = Interpolater::LAGRANGE;
2013  map["PC"] = Interpolater::PIECEWISE_CONST;
2014 
2015  auto interpolater = map.find(Util::toUpper(interp));
2016 
2017  if ( interpolater == map.end() )
2018  throw OpalException("AmrMultiGrid::convertToEnumInterpolater_m()",
2019  "No interpolater '" + interp + "'.");
2020  return interpolater->second;
2021 }
2022 
2023 
2024 AmrMultiGrid::BaseSolver
2025 AmrMultiGrid::convertToEnumBaseSolver_m(const std::string& bsolver) {
2026  std::map<std::string, BaseSolver> map;
2027 
2028  map["BICGSTAB"] = BaseSolver::BICGSTAB;
2029  map["MINRES"] = BaseSolver::MINRES;
2030  map["PCPG"] = BaseSolver::PCPG;
2031  map["CG"] = BaseSolver::CG;
2032  map["GMRES"] = BaseSolver::GMRES;
2033  map["STOCHASTIC_CG"] = BaseSolver::STOCHASTIC_CG;
2034  map["RECYCLING_CG"] = BaseSolver::RECYCLING_CG;
2035  map["RECYCLING_GMRES"] = BaseSolver::RECYCLING_GMRES;
2036 #ifdef HAVE_AMESOS2_KLU2
2037  map["KLU2"] = BaseSolver::KLU2;
2038 #endif
2039 #ifdef HAVE_AMESOS2_SUPERLU
2040  map["SUPERLU"] = BaseSolver::SUPERLU;
2041 #endif
2042 #ifdef HAVE_AMESOS2_UMFPACK
2043  map["UMFPACK"] = BaseSolver::UMFPACK;
2044 #endif
2045 #ifdef HAVE_AMESOS2_PARDISO_MKL
2046  map["PARDISO_MKL"] = BaseSolver::PARDISO_MKL;
2047 #endif
2048 #ifdef HAVE_AMESOS2_MUMPS
2049  map["MUMPS"] = BaseSolver::MUMPS;
2050 #endif
2051 #ifdef HAVE_AMESOS2_LAPACK
2052  map["LAPACK"] = BaseSolver::LAPACK;
2053 #endif
2054  map["SA"] = BaseSolver::SA;
2055 
2056  auto solver = map.find(Util::toUpper(bsolver));
2057 
2058  if ( solver == map.end() )
2059  throw OpalException("AmrMultiGrid::convertToEnumBaseSolver_m()",
2060  "No bottom solver '" + bsolver + "'.");
2061  return solver->second;
2062 }
2063 
2064 
2065 AmrMultiGrid::Preconditioner
2066 AmrMultiGrid::convertToEnumPreconditioner_m(const std::string& prec) {
2067  std::map<std::string, Preconditioner> map;
2068 
2069  map["NONE"] = Preconditioner::NONE;
2070 
2072 
2074 
2075  auto precond = map.find(Util::toUpper(prec));
2076 
2077  if ( precond == map.end() )
2078  throw OpalException("AmrMultiGrid::convertToEnumPreconditioner_m()",
2079  "No preconditioner '" + prec + "'.");
2080  return precond->second;
2081 }
2082 
2083 
2084 AmrMultiGrid::Smoother
2085 AmrMultiGrid::convertToEnumSmoother_m(const std::string& smoother) {
2086  return AmrSmoother::convertToEnumSmoother(smoother);
2087 }
2088 
2089 
2090 AmrMultiGrid::Norm
2091 AmrMultiGrid::convertToEnumNorm_m(const std::string& norm) {
2092  std::map<std::string, Norm> map;
2093 
2094  map["L1"] = Norm::L1;
2095  map["L2"] = Norm::L2;
2096  map["LINF"] = Norm::LINF;
2097 
2098  snorm_m = Util::toUpper(norm);
2099 
2100  auto n = map.find(snorm_m);
2101 
2102  if ( n == map.end() )
2103  throw OpalException("AmrMultiGrid::convertToEnumNorm_m()",
2104  "No norm '" + norm + "'.");
2105  return n->second;
2106 }
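
The selected norm determines how the residual is collapsed into the single number used by the convergence check (evaluated on the Trilinos vectors inside the solver). A standalone sketch of the three supported options on a plain residual vector:

    // Standalone sketch of the supported convergence norms (L1, L2, LINF)
    // applied to a plain std::vector residual.
    #include <algorithm>
    #include <cmath>
    #include <numeric>
    #include <vector>

    double normL1(const std::vector<double>& r) {
        return std::accumulate(r.begin(), r.end(), 0.0,
                               [](double s, double v) { return s + std::abs(v); });
    }

    double normL2(const std::vector<double>& r) {
        return std::sqrt(std::inner_product(r.begin(), r.end(), r.begin(), 0.0));
    }

    double normLinf(const std::vector<double>& r) {
        double m = 0.0;
        for (double v : r)
            m = std::max(m, std::abs(v));
        return m;
    }
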
2107 
2108 
2109 void AmrMultiGrid::writeSDDSHeader_m(std::ofstream& outfile) {
2110  OPALTimer::Timer simtimer;
2111 
2112  std::string dateStr(simtimer.date());
2113  std::string timeStr(simtimer.time());
2114  std::string indent(" ");
2115 
2116  outfile << "SDDS1" << std::endl;
2117  outfile << "&description\n"
2118  << indent << "text=\"Solver statistics '" << OpalData::getInstance()->getInputFn()
2119  << "' " << dateStr << "" << timeStr << "\",\n"
2120  << indent << "contents=\"solver info\"\n"
2121  << "&end\n";
2122  outfile << "&parameter\n"
2123  << indent << "name=processors,\n"
2124  << indent << "type=long,\n"
2125  << indent << "description=\"Number of Cores used\"\n"
2126  << "&end\n";
2127  outfile << "&parameter\n"
2128  << indent << "name=revision,\n"
2129  << indent << "type=string,\n"
2130  << indent << "description=\"git revision of opal\"\n"
2131  << "&end\n";
2132  outfile << "&parameter\n"
2133  << indent << "name=flavor,\n"
2134  << indent << "type=string,\n"
2135  << indent << "description=\"OPAL flavor that wrote file\"\n"
2136  << "&end\n";
2137  outfile << "&column\n"
2138  << indent << "name=t,\n"
2139  << indent << "type=double,\n"
2140  << indent << "units=ns,\n"
2141  << indent << "description=\"1 Time\"\n"
2142  << "&end\n";
2143  outfile << "&column\n"
2144  << indent << "name=mg_iter,\n"
2145  << indent << "type=long,\n"
2146  << indent << "units=1,\n"
2147  << indent << "description=\"2 Number of Multigrid Iterations\"\n"
2148  << "&end\n";
2149  outfile << "&column\n"
2150  << indent << "name=bottom_iter,\n"
2151  << indent << "type=long,\n"
2152  << indent << "units=1,\n"
2153  << indent << "description=\"3 Total Number of Bottom Solver Iterations\"\n"
2154  << "&end\n";
2155  outfile << "&column\n"
2156  << indent << "name=regrid,\n"
2157  << indent << "type=bool,\n"
2158  << indent << "units=1,\n"
2159  << indent << "description=\"4 Regrid Step\"\n"
2160  << "&end\n";
2161  outfile << "&column\n"
2162  << indent << "name=" + snorm_m + ",\n"
2163  << indent << "type=double,\n"
2164  << indent << "units=1,\n"
2165  << indent << "description=\"5 Error\"\n"
2166  << "&end\n"
2167  << "&data\n"
2168  << indent << "mode=ascii,\n"
2169  << indent << "no_row_counts=1\n"
2170  << "&end\n"
2171  << comm_mp->getSize() << '\n'
2172  << OPAL_PROJECT_NAME << " " << OPAL_PROJECT_VERSION << " git rev. #" << Util::getGitRevision() << '\n'
2173  << (OpalData::getInstance()->isInOPALTMode()? "opal-t":
2174  (OpalData::getInstance()->isInOPALCyclMode()? "opal-cycl": "opal-env")) << std::endl;
2175 }
2176 
2177 
2178 void AmrMultiGrid::writeSDDSData_m(const scalar_t& error) {
2179 #if AMR_MG_TIMER
2180  IpplTimings::startTimer(dumpTimer_m);
2181 #endif
2182  unsigned int pwi = 10;
2183 
2184  std::ofstream outfile;
2185 
2186  if ( comm_mp->getRank() == 0 ) {
2187  outfile.open(fname_m.c_str(), flag_m);
2188  outfile.precision(15);
2189  outfile.setf(std::ios::scientific, std::ios::floatfield);
2190 
2191  if ( flag_m == std::ios::out ) {
2192  flag_m = std::ios::app;
2193  writeSDDSHeader_m(outfile);
2194  }
2195 
2196  outfile << itsAmrObject_mp->getT() * 1e9 << std::setw(pwi) << '\t' // 1
2197  << this->nIter_m << std::setw(pwi) << '\t' // 2
2198  << this->bIter_m << std::setw(pwi) << '\t' // 3
2199  << this->regrid_m << std::setw(pwi) << '\t' // 4
2200  << error << '\n'; // 5
2201  }
2202 #if AMR_MG_TIMER
2203  IpplTimings::stopTimer(dumpTimer_m);
2204 #endif
2205 }
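
For orientation: the resulting <input>.solver file is plain ASCII SDDS, with the header written once by writeSDDSHeader_m (parameters processors, revision, flavor; columns t, mg_iter, bottom_iter, regrid and the selected norm) and one data row appended per writeSDDSData_m call. With purely illustrative values, the end of the header and the first rows could look roughly like:

    &data
        mode=ascii,
        no_row_counts=1
    &end
    16
    OPAL 2.2.0 git rev. #<hash>
    opal-t
    2.500000000000000e+00    5    42    1    9.3e-11
    5.000000000000000e+00    4    37    0    7.8e-11
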
2206 
2207 
2208 #if AMR_MG_TIMER
2209 void AmrMultiGrid::initTimer_m() {
2210  buildTimer_m = IpplTimings::getTimer("AMR MG matrix setup");
2211  restrictTimer_m = IpplTimings::getTimer("AMR MG restrict");
2212  smoothTimer_m = IpplTimings::getTimer("AMR MG smooth");
2213  interpTimer_m = IpplTimings::getTimer("AMR MG prolongate");
2214  efieldTimer_m = IpplTimings::getTimer("AMR MG e-field");
2215  averageTimer_m = IpplTimings::getTimer("AMR MG average down");
2216  bottomTimer_m = IpplTimings::getTimer("AMR MG bottom-solver");
2217  dumpTimer_m = IpplTimings::getTimer("AMR MG dump");
2218 }
2219 #endif
2220 
2221 
2222 double AmrMultiGrid::getXRangeMin(unsigned short level) {
2223  return itsAmrObject_mp->Geom(level).ProbLo(0);
2224 }
2225 
2226 
2227 double AmrMultiGrid::getXRangeMax(unsigned short level) {
2228  return itsAmrObject_mp->Geom(level).ProbHi(0);
2229 }
2230 
2231 
2232 double AmrMultiGrid::getYRangeMin(unsigned short level) {
2233  return itsAmrObject_mp->Geom(level).ProbLo(1);
2234 }
2235 
2236 
2237 double AmrMultiGrid::getYRangeMax(unsigned short level) {
2238  return itsAmrObject_mp->Geom(level).ProbHi(1);
2239 }
2240 
2241 
2242 double AmrMultiGrid::getZRangeMin(unsigned short level) {
2243  return itsAmrObject_mp->Geom(level).ProbLo(2);
2244 }
2245 
2246 
2247 double AmrMultiGrid::getZRangeMax(unsigned short level) {
2248  return itsAmrObject_mp->Geom(level).ProbHi(2);
2249 }
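
These accessors expose the physical bounds of the problem domain (AMReX ProbLo/ProbHi) at the requested level. A hypothetical usage sketch, assuming some object solver that provides these getters:

    // Hypothetical usage sketch: physical extent of the level-0 problem
    // domain computed from the range accessors above ('solver' is any
    // object providing these getters, e.g. an AmrMultiGrid instance).
    #include <iostream>

    template <class SolverT>
    void printDomainExtent(SolverT& solver) {
        const double lx = solver.getXRangeMax(0) - solver.getXRangeMin(0);
        const double ly = solver.getYRangeMax(0) - solver.getYRangeMin(0);
        const double lz = solver.getZRangeMax(0) - solver.getZRangeMin(0);
        std::cout << "domain extent: " << lx << " x " << ly
                  << " x " << lz << '\n';
    }
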
2250 
2251 
2252 Inform& AmrMultiGrid::print(Inform& os) const {
2253  os << "* ********************* A M R M u l t i G r i d ********************** " << endl
2254  //FIXME
2255  << "* ******************************************************************** " << endl;
2256  return os;
2257 }