OPAL (Object Oriented Parallel Accelerator Library) 2022.1
AmrMultiGrid.cpp
1//
2// Class AmrMultiGrid
3// Main class of the AMR Poisson multigrid solver.
4// It implements the multigrid solver described in https://doi.org/10.1016/j.cpc.2019.106912
5//
6// Copyright (c) 2017 - 2020, Matthias Frey, Paul Scherrer Institut, Villigen PSI, Switzerland
7// All rights reserved
8//
9// Implemented as part of the PhD thesis
10// "Precise Simulations of Multibunches in High Intensity Cyclotrons"
11//
12// This file is part of OPAL.
13//
14// OPAL is free software: you can redistribute it and/or modify
15// it under the terms of the GNU General Public License as published by
16// the Free Software Foundation, either version 3 of the License, or
17// (at your option) any later version.
18//
19// You should have received a copy of the GNU General Public License
20// along with OPAL. If not, see <https://www.gnu.org/licenses/>.
21//
22#include "AmrMultiGrid.h"
23
24#include <algorithm>
25#include <functional>
26#include <map>
27#include <numeric>
28
30#include "OPALconfig.h"
31#include "Physics/Units.h"
32#include "Utilities/OpalException.h"
33#include "Utilities/Timer.h"
34#include "Utilities/Util.h"
35
36#include <AMReX_ParallelDescriptor.H>
37
38#include <boost/filesystem.hpp>
39
40#if AMR_MG_WRITE
41 #include <iomanip>
42#endif
43
44AmrMultiGrid::AmrMultiGrid(AmrBoxLib* itsAmrObject_p,
45                           const std::string& bsolver,
46 const std::string& prec,
47 const bool& rebalance,
48 const std::string& reuse,
49 const std::string& bcx,
50 const std::string& bcy,
51 const std::string& bcz,
52 const std::string& smoother,
53 const std::size_t& nSweeps,
54 const std::string& interp,
55 const std::string& norm)
56 : AmrPoissonSolver<AmrBoxLib>(itsAmrObject_p)
57 , comm_mp( new comm_t( amrex::ParallelDescriptor::Communicator() ) )
58 , nIter_m(0)
59 , bIter_m(0)
60 , maxiter_m(100)
61 , nSweeps_m(nSweeps)
62 , mglevel_m(0)
63 , lbase_m(0)
64 , lfine_m(0)
65 , nlevel_m(1)
66 , nBcPoints_m(0)
67 , snorm_m(norm)
68 , eps_m(1.0e-10)
69 , verbose_m(false)
70 , fname_m(OpalData::getInstance()->getInputBasename() + std::string(".solver"))
71 , flag_m(std::ios::out)
72{
73#if AMR_MG_TIMER
74 this->initTimer_m();
75#endif
76
77 const Boundary bcs[AMREX_SPACEDIM] = {
78 this->convertToEnumBoundary_m(bcx),
79 this->convertToEnumBoundary_m(bcy),
80        this->convertToEnumBoundary_m(bcz)
81    };
82
83 this->initPhysicalBoundary_m(&bcs[0]);
84
84
85    smootherType_m = this->convertToEnumSmoother_m(smoother);
86
87 norm_m = this->convertToEnumNorm_m(norm);
88
89 const Interpolater interpolater = this->convertToEnumInterpolater_m(interp);
90 this->initInterpolater_m(interpolater);
91
92 // interpolater for crse-fine-interface
93 this->initCrseFineInterp_m(Interpolater::LAGRANGE);
94
95 // preconditioner
96 const Preconditioner precond = this->convertToEnumPreconditioner_m(prec);
97 this->initPrec_m(precond, reuse);
98
99 // base level solver
100 const BaseSolver solver = this->convertToEnumBaseSolver_m(bsolver);
101 this->initBaseSolver_m(solver, rebalance, reuse);
102
103 if (boost::filesystem::exists(fname_m)) {
104 flag_m = std::ios::app;
105 INFOMSG("Appending solver information to existing file: " << fname_m << endl);
106 } else {
107 INFOMSG("Creating new file for solver information: " << fname_m << endl);
108 }
109}
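// Example (sketch only, not part of this file): constructing the solver with
// option strings accepted by the convertToEnum*_m() helpers further down.
// The AmrBoxLib pointer, the nSweeps value, the smoother string and the MueLu
// reuse string are illustrative assumptions.
//
//   AmrMultiGrid solver(amrobj_p,     // AmrBoxLib* (hypothetical)
//                       "SA",         // bottom solver: smoothed aggregation
//                       "NONE",       // no preconditioner
//                       false,        // no rebalancing of the bottom solver
//                       "NONE",       // reuse strategy handed to MueLu (assumed value)
//                       "DIRICHLET", "DIRICHLET", "DIRICHLET",   // bcx, bcy, bcz
//                       "GS",         // smoother (string parsed by AmrSmoother)
//                       12,           // number of smoothing sweeps
//                       "TRILINEAR",  // interpolation between levels
//                       "L1_NORM");   // norm used for the convergence check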
110
111
112void AmrMultiGrid::solve(AmrScalarFieldContainer_t& rho,
113                         AmrScalarFieldContainer_t& phi,
114                         AmrVectorFieldContainer_t& efield,
115                         unsigned short baseLevel,
116 unsigned short finestLevel,
117 bool prevAsGuess)
118{
119 lbase_m = baseLevel;
120 lfine_m = finestLevel;
121 nlevel_m = lfine_m - lbase_m + 1;
122
123 /* we cannot use the previous solution
124 * if we have to regrid (AmrPoissonSolver::hasToRegrid())
125 *
126 * regrid_m is set in AmrBoxlib::regrid()
127 */
128 bool reset = !prevAsGuess;
129
130 if ( this->regrid_m )
131 reset = true;
132
133 this->initLevels_m(rho, itsAmrObject_mp->Geom(), this->regrid_m);
134
135 // build all necessary matrices and vectors
136 this->setup_m(rho, phi, this->regrid_m);
137
138 this->initGuess_m(reset);
139
140 // actual solve
141 scalar_t error = this->iterate_m();
142
143 for (int lev = nlevel_m - 1; lev > -1; --lev) {
144 averageDown_m(lev);
145 }
146
147 // write efield to AMReX
148 this->computeEfield_m(efield);
149
150 // copy solution back
151 for (int lev = 0; lev < nlevel_m; ++lev) {
152 int ilev = lbase_m + lev;
153
154 phi[ilev]->setVal(0.0, phi[ilev]->nGrow());
155
156 this->trilinos2amrex_m(lev, 0, *phi[ilev], mglevel_m[lev]->phi_p);
157 }
158
159 if ( verbose_m )
160 this->writeSDDSData_m(error);
161
162 // we can now reset
163 this->regrid_m = false;
164}
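// Typical call (sketch; the AMReX container names are assumed):
//
//   solver.solve(rho, phi, efield, 0, finestLevel, false);
//
// With prevAsGuess = false, or whenever a regrid happened, the previous
// potential is discarded and the iteration starts from a zero initial guess.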
165
166
167void AmrMultiGrid::setNumberOfSweeps(const std::size_t& nSweeps) {
168 nSweeps_m = nSweeps;
169}
170
171
172void AmrMultiGrid::setMaxNumberOfIterations(const std::size_t& maxiter) {
173 if ( maxiter < 1 )
174 throw OpalException("AmrMultiGrid::setMaxNumberOfIterations()",
175 "The max. number of iterations needs to be positive!");
176
177 maxiter_m = maxiter;
178}
179
180
181std::size_t AmrMultiGrid::getNumIters() {
182    return nIter_m;
183}
184
185
186AmrMultiGrid::scalar_t AmrMultiGrid::getLevelResidualNorm(lo_t level) {
187    return evalNorm_m(mglevel_m[level]->residual_p);
188}
189
190
191void AmrMultiGrid::setVerbose(bool verbose) {
192 verbose_m = verbose;
193}
194
195
196void AmrMultiGrid::setTolerance(const scalar_t& eps) {
197    eps_m = eps;
198}
199
200
201void AmrMultiGrid::initPhysicalBoundary_m(const Boundary* bc)
202{
203 // make sure it's reset
204 nBcPoints_m = 0;
205
206 for (unsigned int i = 0; i < AMREX_SPACEDIM; ++i) {
207 switch ( bc[i] ) {
208            case Boundary::DIRICHLET:
209                bc_m[i].reset( new AmrDirichletBoundary<AmrMultiGridLevel_t>() );
210                break;
211            case Boundary::OPEN:
212                bc_m[i].reset( new AmrOpenBoundary<AmrMultiGridLevel_t>() );
213                break;
214            case Boundary::PERIODIC:
215                bc_m[i].reset( new AmrPeriodicBoundary<AmrMultiGridLevel_t>() );
216                break;
217 default:
218 throw OpalException("AmrMultiGrid::initPhysicalBoundary_m()",
219 "This type of boundary is not supported");
220 }
221 // we use the maximum in order to build matrices
222 go_t tmp = bc_m[i]->getNumberOfPoints();
223 if ( nBcPoints_m < tmp )
224 nBcPoints_m = tmp;
225 }
226}
227
228
229void AmrMultiGrid::initLevels_m(const amrex::Vector<AmrField_u>& rho,
230 const amrex::Vector<AmrGeometry_t>& geom,
231 bool regrid)
232{
233 if ( !regrid )
234 return;
235
236 // although we do a resize afterwards, we do this to be safe
237 for (int lev = nlevel_m; lev < (int)mglevel_m.size(); ++lev) {
238 mglevel_m[lev].reset(nullptr);
239 }
240
241 mglevel_m.resize(nlevel_m);
242
243 amrex::Periodicity period(AmrIntVect_t(D_DECL(0, 0, 0)));
244
245 AmrIntVect_t rr = AmrIntVect_t(D_DECL(2, 2, 2));
246
247 for (int lev = 0; lev < nlevel_m; ++lev) {
248 int ilev = lbase_m + lev;
249
250 // do not initialize base level every time
251 if (mglevel_m[lev] == nullptr || lev > lbase_m) {
252            mglevel_m[lev].reset(new AmrMultiGridLevel_t(itsAmrObject_mp->getMeshScaling(),
253                                                         rho[ilev]->boxArray(),
254 rho[ilev]->DistributionMap(),
255 geom[ilev],
256 rr,
257 bc_m,
258 comm_mp));
259 } else {
260 mglevel_m[lev]->buildLevelMask();
261 }
262
263 mglevel_m[lev]->refmask.reset(
264            new AmrMultiGridLevel_t::mask_t(mglevel_m[lev]->grids,
265                                            mglevel_m[lev]->dmap, 1, 2)
266 );
267 mglevel_m[lev]->refmask->setVal(AmrMultiGridLevel_t::Refined::NO, 2);
268 mglevel_m[lev]->refmask->FillBoundary(period);
269
270 amrex::BoxArray ba = mglevel_m[lev]->grids;
271 ba.coarsen(rr);
272 mglevel_m[lev]->crsemask.reset(
273            new AmrMultiGridLevel_t::mask_t(ba,
274                                            mglevel_m[lev]->dmap, 1, 2)
275 );
276 mglevel_m[lev]->crsemask->setVal(AmrMultiGridLevel_t::Refined::NO, 2);
277 }
278
279 for (int lev = 1; lev < nlevel_m; ++lev) {
280 mglevel_m[lev]->crsemask->setVal(AmrMultiGridLevel_t::Refined::YES, 0);
281
282 // used for boundary interpolation --> replaces expensive calls to isBoundary
283 mglevel_m[lev]->crsemask->setDomainBndry(AmrMultiGridLevel_t::Mask::PHYSBNDRY,
284 mglevel_m[lev-1]->geom); //FIXME: geometry of lev - 1
285 // really needed ?
286 mglevel_m[lev]->crsemask->FillBoundary(period);
287 }
288
289 /* to complete initialization we need to fill
290 * the mask of refinement
291 */
292 for (int lev = 0; lev < nlevel_m-1; ++lev) {
293 // get boxarray with refined cells
294 amrex::BoxArray ba = mglevel_m[lev]->grids;
295 ba.refine(rr);
296 ba = amrex::intersect(mglevel_m[lev+1]->grids, ba);
297 ba.coarsen(rr);
298
299 // refined cells
300 amrex::DistributionMapping dmap(ba, comm_mp->getSize());
301 AmrMultiGridLevel_t::mask_t refined(ba, dmap, 1, 0);
302 refined.setVal(AmrMultiGridLevel_t::Refined::YES);
303// refined.setDomainBndry(AmrMultiGridLevel_t::Mask::PHYSBNDRY, mglevel_m[lev]->geom);
304
305 // fill intersection with YES
306 mglevel_m[lev]->refmask->copy(refined, 0, 0, 1, 0, 2);
307
308 /* physical boundary cells will never be refined cells
309 * since they are ghost cells
310 */
311 mglevel_m[lev]->refmask->setDomainBndry(AmrMultiGridLevel_t::Mask::PHYSBNDRY,
312 mglevel_m[lev]->geom);
313
314 mglevel_m[lev]->refmask->FillBoundary(period);
315 }
316}
317
318
319void AmrMultiGrid::clearMasks_m() {
320    for (int lev = 0; lev < nlevel_m; ++lev) {
321 mglevel_m[lev]->refmask.reset(nullptr);
322 mglevel_m[lev]->crsemask.reset(nullptr);
323 mglevel_m[lev]->mask.reset(nullptr);
324 }
325}
326
327
328void AmrMultiGrid::initGuess_m(bool reset) {
329    if ( !reset )
330 return;
331
332 // reset
333 for (int lev = 0; lev < nlevel_m; ++lev)
334 mglevel_m[lev]->phi_p->putScalar(0.0);
335}
336
337
338AmrMultiGrid::scalar_t AmrMultiGrid::iterate_m() {
339
340 // initial error
341 std::vector<scalar_t> rhsNorms;
342 std::vector<scalar_t> resNorms;
343
344 this->initResidual_m(rhsNorms, resNorms);
345
346 std::for_each(rhsNorms.begin(), rhsNorms.end(),
347 [this](double& val){ val *= eps_m; });
348
349 nIter_m = 0;
350 bIter_m = 0;
351
352 while ( !isConverged_m(rhsNorms, resNorms) && nIter_m < maxiter_m ) {
353
354        relax_m(lfine_m);
355
356// /* in contrast to algorithm, we average down now
357// * --> potential is valid also on coarse covered
358// * cells
359// * --> however, it may take 1-2 iterations longer
360// */
361// for (int lev = nlevel_m - 1; lev > -1; --lev) {
362// averageDown_m(lev);
363// }
364
365 // update residual
366 for (int lev = 0; lev < nlevel_m; ++lev) {
367
368 this->residual_m(lev,
369 mglevel_m[lev]->residual_p,
370 mglevel_m[lev]->rho_p,
371 mglevel_m[lev]->phi_p);
372 }
373
374
375 for (lo_t lev = 0; lev < nlevel_m; ++lev)
376 resNorms[lev] = getLevelResidualNorm(lev);
377
378 ++nIter_m;
379
380#if AMR_MG_WRITE
381 this->writeResidualNorm_m();
382#endif
383
384 bIter_m += solver_mp->getNumIters();
385 }
386
387 return std::accumulate(resNorms.begin(),
388 resNorms.end(), 0.0,
389 std::plus<scalar_t>());
390}
391
392
393bool AmrMultiGrid::isConverged_m(std::vector<scalar_t>& rhsNorms,
394 std::vector<scalar_t>& resNorms)
395{
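    /* element-wise check: true only if resNorms[l] < rhsNorms[l]
     * (the latter already scaled by eps_m in iterate_m) for every level l
     */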
396 return std::equal(resNorms.begin(), resNorms.end(),
397 rhsNorms.begin(),
398 std::less<scalar_t>());
399}
400
401
402void AmrMultiGrid::residual_m(const lo_t& level,
403                              Teuchos::RCP<vector_t>& r,
404 const Teuchos::RCP<vector_t>& b,
405 const Teuchos::RCP<vector_t>& x)
406{
407 /*
408 * r = b - A*x
409 */
410 if ( level < lfine_m ) {
411
412 vector_t fine2crse(mglevel_m[level]->Awf_p->getDomainMap(), true);
413
414 // get boundary for
415 if ( mglevel_m[level]->Bfine_p != Teuchos::null ) {
416 mglevel_m[level]->Bfine_p->apply(*mglevel_m[level+1]->phi_p, fine2crse);
417 }
418
419 // operation: fine2crse += A * x
420 mglevel_m[level]->Awf_p->apply(*x, fine2crse,
421 Teuchos::NO_TRANS,
422 scalar_t(1.0),
423 scalar_t(1.0));
424
425 if ( mglevel_m[level]->Bcrse_p != Teuchos::null ) {
426 // operation: fine2crse += B * phi^(l-1)
427 mglevel_m[level]->Bcrse_p->apply(*mglevel_m[level-1]->phi_p,
428 fine2crse,
429 Teuchos::NO_TRANS,
430 scalar_t(1.0),
431 scalar_t(1.0));
432 }
433
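        /* UnCovered_p masks out coarse cells covered by a finer level, so the
         * residual is evaluated only where this level owns the solution
         */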
434 Teuchos::RCP<vector_t> uncov_Ax = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, true) );
435 mglevel_m[level]->UnCovered_p->apply(fine2crse, *uncov_Ax);
436
437 Teuchos::RCP<vector_t> uncov_b = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, true) );
438
439 mglevel_m[level]->UnCovered_p->apply(*b, *uncov_b);
440
441 // ONLY subtract coarse rho
442// mglevel_m[level]->residual_p->putScalar(0.0);
443
444 r->update(1.0, *uncov_b, -1.0, *uncov_Ax, 0.0);
445
446 } else {
447 /* finest level: Awf_p == Anf_p
448 */
449 Teuchos::RCP<vector_t> Ax = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, true) );
450 mglevel_m[level]->Anf_p->apply(*x, *Ax);
451
452 if ( mglevel_m[level]->Bcrse_p != Teuchos::null ) {
453            // operation: Ax += B * phi^(l-1)
454 mglevel_m[level]->Bcrse_p->apply(*mglevel_m[level-1]->phi_p,
455 *Ax,
456 Teuchos::NO_TRANS,
457 scalar_t(1.0),
458 scalar_t(1.0));
459 }
460 r->update(1.0, *b, -1.0, *Ax, 0.0);
461 }
462}
463
464
465void AmrMultiGrid::relax_m(const lo_t& level) {
466
467 if ( level == lfine_m ) {
468
469 if ( level == lbase_m ) {
470 /* Anf_p == Awf_p
471 */
472 Teuchos::RCP<vector_t> Ax = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, true) );
473 mglevel_m[level]->Anf_p->apply(*mglevel_m[level]->phi_p, *Ax);
474 mglevel_m[level]->residual_p->update(1.0, *mglevel_m[level]->rho_p, -1.0, *Ax, 0.0);
475
476 } else {
477 this->residual_no_fine_m(level,
478 mglevel_m[level]->residual_p,
479 mglevel_m[level]->phi_p,
480 mglevel_m[level-1]->phi_p,
481 mglevel_m[level]->rho_p);
482 }
483 }
484
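    /* V-cycle on the error equation A e = r for levels above the base:
     * save phi, smooth the error, restrict the residual, recurse to the next
     * coarser level, prolongate and re-smooth, then add the correction back.
     * The base level is handled by the bottom solver in the else branch.
     */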
485 if ( level > 0 ) {
486 // phi^(l, save) = phi^(l)
487 Teuchos::RCP<vector_t> phi_save = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p) );
488 Tpetra::deep_copy(*phi_save, *mglevel_m[level]->phi_p);
489
490 mglevel_m[level-1]->error_p->putScalar(0.0);
491
492 // smoothing
493 this->smooth_m(level,
494 mglevel_m[level]->error_p,
495 mglevel_m[level]->residual_p);
496
497
498 // phi = phi + e
499 mglevel_m[level]->phi_p->update(1.0, *mglevel_m[level]->error_p, 1.0);
500
501 /*
502 * restrict
503 */
504 this->restrict_m(level);
505
506 this->relax_m(level - 1);
507
508 /*
509 * prolongate / interpolate
510 */
511 this->prolongate_m(level);
512
513 // residual update
514 Teuchos::RCP<vector_t> tmp = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p) );
515 this->residual_no_fine_m(level, tmp,
516 mglevel_m[level]->error_p,
517 mglevel_m[level-1]->error_p,
518 mglevel_m[level]->residual_p);
519
520 Tpetra::deep_copy(*mglevel_m[level]->residual_p, *tmp);
521
522 // delta error
523 Teuchos::RCP<vector_t> derror = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, true) );
524
525 // smoothing
526 this->smooth_m(level, derror, mglevel_m[level]->residual_p);
527
528 // e^(l) += de^(l)
529 mglevel_m[level]->error_p->update(1.0, *derror, 1.0);
530
531 // phi^(l) = phi^(l, save) + e^(l)
532 mglevel_m[level]->phi_p->update(1.0, *phi_save, 1.0, *mglevel_m[level]->error_p, 0.0);
533
534 } else {
535 // e = A^(-1)r
536#if AMR_MG_TIMER
537 IpplTimings::startTimer(bottomTimer_m);
538#endif
539
540 solver_mp->solve(mglevel_m[level]->error_p,
541 mglevel_m[level]->residual_p);
542
543#if AMR_MG_TIMER
544 IpplTimings::stopTimer(bottomTimer_m);
545#endif
546 // phi = phi + e
547 mglevel_m[level]->phi_p->update(1.0, *mglevel_m[level]->error_p, 1.0);
548 }
549}
550
551
552void AmrMultiGrid::residual_no_fine_m(const lo_t& level,
553                                      Teuchos::RCP<vector_t>& result,
554 const Teuchos::RCP<vector_t>& rhs,
555 const Teuchos::RCP<vector_t>& crs_rhs,
556 const Teuchos::RCP<vector_t>& b)
557{
558 vector_t crse2fine(mglevel_m[level]->Anf_p->getDomainMap(), true);
559
560 // get boundary for
561 if ( mglevel_m[level]->Bcrse_p != Teuchos::null ) {
562 mglevel_m[level]->Bcrse_p->apply(*crs_rhs, crse2fine);
563 }
564
565 // operation: crse2fine = 1.0 * crse2fine + 1.0 * A^(l) * rhs
566 mglevel_m[level]->Anf_p->apply(*rhs, crse2fine,
567 Teuchos::NO_TRANS,
568 scalar_t(1.0),
569 scalar_t(1.0));
570
571 result->update(1.0, *b, -1.0, crse2fine, 0.0);
572}
573
574
575#if AMR_MG_WRITE
576void AmrMultiGrid::writeResidualNorm_m() {
577 scalar_t err = 0.0;
578
579 std::ofstream out;
580 if ( comm_mp->getRank() == 0 )
581 out.open("residual.dat", std::ios::app);
582
583 for (int lev = 0; lev < nlevel_m; ++lev) {
584 scalar_t tmp = evalNorm_m(mglevel_m[lev]->residual_p);
585
586 if ( comm_mp->getRank() == 0 )
587 out << std::setw(15) << std::right << tmp;
588 }
589
590 if ( comm_mp->getRank() == 0 )
591 out.close();
592}
593#endif
594
595
596AmrMultiGrid::scalar_t
597AmrMultiGrid::evalNorm_m(const Teuchos::RCP<const vector_t>& x)
598{
599 scalar_t norm = 0.0;
600
601 switch ( norm_m ) {
602 case Norm::L1:
603 {
604 norm = x->norm1();
605 break;
606 }
607 case Norm::L2:
608 {
609 norm = x->norm2();
610 break;
611 }
612 case Norm::LINF:
613 {
614 norm = x->normInf();
615 break;
616 }
617 default:
618 throw OpalException("AmrMultiGrid::evalNorm_m()",
619 "This type of norm not suppported.");
620 }
621 return norm;
622}
623
624
625void AmrMultiGrid::initResidual_m(std::vector<scalar_t>& rhsNorms,
626 std::vector<scalar_t>& resNorms)
627{
628 rhsNorms.clear();
629 resNorms.clear();
630
631#if AMR_MG_WRITE
632 std::ofstream out;
633
634 if ( comm_mp->getRank() == 0) {
635 out.open("residual.dat", std::ios::out);
636
637 for (int lev = 0; lev < nlevel_m; ++lev)
638 out << std::setw(14) << std::right << "level" << lev;
639 out << std::endl;
640 }
641#endif
642
643 for (int lev = 0; lev < nlevel_m; ++lev) {
644 this->residual_m(lev,
645 mglevel_m[lev]->residual_p,
646 mglevel_m[lev]->rho_p,
647 mglevel_m[lev]->phi_p);
648
649 resNorms.push_back(evalNorm_m(mglevel_m[lev]->residual_p));
650
651#if AMR_MG_WRITE
652 if ( comm_mp->getRank() == 0 )
653 out << std::setw(15) << std::right << resNorms.back();
654#endif
655
656 rhsNorms.push_back(evalNorm_m(mglevel_m[lev]->rho_p));
657 }
658
659#if AMR_MG_WRITE
660 if ( comm_mp->getRank() == 0 )
661 out.close();
662#endif
663}
664
665
666void AmrMultiGrid::computeEfield_m(AmrVectorFieldContainer_t& efield) {
667#if AMR_MG_TIMER
668 IpplTimings::startTimer(efieldTimer_m);
669#endif
670 Teuchos::RCP<vector_t> efield_p = Teuchos::null;
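    // each field component is obtained by applying the gradient matrix G_p[d]
    // to the potential of the level and copying the result back into the
    // corresponding AMReX MultiFab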
671 for (int lev = nlevel_m - 1; lev > -1; --lev) {
672 int ilev = lbase_m + lev;
673
674 efield_p = Teuchos::rcp( new vector_t(mglevel_m[lev]->map_p, false) );
675
676
677 for (int d = 0; d < AMREX_SPACEDIM; ++d) {
678 efield[ilev][d]->setVal(0.0, efield[ilev][d]->nGrow());
679 efield_p->putScalar(0.0);
680 mglevel_m[lev]->G_p[d]->apply(*mglevel_m[lev]->phi_p, *efield_p);
681 this->trilinos2amrex_m(lev, 0, *efield[ilev][d], efield_p);
682 }
683 }
684#if AMR_MG_TIMER
685 IpplTimings::stopTimer(efieldTimer_m);
686#endif
687}
688
689
690void AmrMultiGrid::setup_m(const amrex::Vector<AmrField_u>& rho,
691 const amrex::Vector<AmrField_u>& phi,
692 const bool& matrices)
693{
694#if AMR_MG_TIMER
695 IpplTimings::startTimer(buildTimer_m);
696#endif
697
698 if ( lbase_m == lfine_m )
699 this->buildSingleLevel_m(rho, phi, matrices);
700 else
701 this->buildMultiLevel_m(rho, phi, matrices);
702
703 mglevel_m[lfine_m]->error_p->putScalar(0.0);
704
705 if ( matrices ) {
706 this->clearMasks_m();
707 // set the bottom solve operator
708 if (!solver_mp->hasOperator()) {
709 solver_mp->setOperator(mglevel_m[lbase_m]->Anf_p, mglevel_m[0].get());
710 }
711 }
712
713#if AMR_MG_TIMER
714 IpplTimings::stopTimer(buildTimer_m);
715#endif
716}
717
718
719void AmrMultiGrid::buildSingleLevel_m(const amrex::Vector<AmrField_u>& rho,
720 const amrex::Vector<AmrField_u>& phi,
721 const bool& matrices)
722{
723 this->open_m(lbase_m, matrices);
724
725 const scalar_t* invdx = mglevel_m[lbase_m]->invCellSize();
726
727 const scalar_t invdx2[] = {
728 D_DECL( invdx[0] * invdx[0],
729 invdx[1] * invdx[1],
730 invdx[2] * invdx[2] )
731 };
732
733 if (matrices) {
734 for (amrex::MFIter mfi(*mglevel_m[lbase_m]->mask, true);
735 mfi.isValid(); ++mfi)
736 {
737 const box_t& tbx = mfi.tilebox();
738 const basefab_t& mfab = (*mglevel_m[lbase_m]->mask)[mfi];
739 const farraybox_t& rhofab = (*rho[lbase_m])[mfi];
740 const farraybox_t& pfab = (*phi[lbase_m])[mfi];
741
742 const int* lo = tbx.loVect();
743 const int* hi = tbx.hiVect();
744
745 for (int i = lo[0]; i <= hi[0]; ++i) {
746 for (int j = lo[1]; j <= hi[1]; ++j) {
747#if AMREX_SPACEDIM == 3
748 for (int k = lo[2]; k <= hi[2]; ++k) {
749#endif
750 AmrIntVect_t iv(D_DECL(i, j, k));
751 go_t gidx = mglevel_m[lbase_m]->serialize(iv);
752
753 if (!solver_mp->hasOperator()) {
754 this->buildNoFinePoissonMatrix_m(lbase_m, gidx, iv, mfab, invdx2);
755 this->buildGradientMatrix_m(lbase_m, gidx, iv, mfab, invdx);
756 }
757
758 mglevel_m[lbase_m]->rho_p->replaceGlobalValue(gidx, rhofab(iv, 0));
759 mglevel_m[lbase_m]->phi_p->replaceGlobalValue(gidx, pfab(iv, 0));
760
761#if AMREX_SPACEDIM == 3
762 }
763#endif
764 }
765 }
766 }
767 } else {
768        this->buildDensityVector_m(lbase_m, *rho[lbase_m]);
769    }
770
771 this->close_m(lbase_m, matrices);
772
773 if (matrices) {
774 mglevel_m[lbase_m]->Awf_p = Teuchos::null;
775 mglevel_m[lbase_m]->UnCovered_p = Teuchos::null;
776 }
777}
778
779
780void AmrMultiGrid::buildMultiLevel_m(const amrex::Vector<AmrField_u>& rho,
781 const amrex::Vector<AmrField_u>& phi,
782 const bool& matrices)
783{
784 // the base level has no smoother --> nlevel_m - 1
785 if ( matrices ) {
786 // although we do a resize afterwards, we do this to be safe
787 for (int lev = nlevel_m-1; lev < (int)smoother_m.size(); ++lev) {
788 smoother_m[lev].reset();
789 }
790 smoother_m.resize(nlevel_m-1);
791 }
792
793 for (int lev = 0; lev < nlevel_m; ++lev) {
794 this->open_m(lev, matrices);
795
796 int ilev = lbase_m + lev;
797
798 // find all coarse cells that are covered by fine cells
799// AmrIntVect_t rr = mglevel_m[lev]->refinement();
800
801 const scalar_t* invdx = mglevel_m[lev]->invCellSize();
802
803 const scalar_t invdx2[] = {
804 D_DECL( invdx[0] * invdx[0],
805 invdx[1] * invdx[1],
806 invdx[2] * invdx[2] )
807 };
808
809 if ( matrices ) {
810 for (amrex::MFIter mfi(*mglevel_m[lev]->mask, true);
811 mfi.isValid(); ++mfi)
812 {
813 const box_t& tbx = mfi.tilebox();
814 const basefab_t& mfab = (*mglevel_m[lev]->mask)[mfi];
815 const basefab_t& rfab = (*mglevel_m[lev]->refmask)[mfi];
816 const basefab_t& cfab = (*mglevel_m[lev]->crsemask)[mfi];
817 const farraybox_t& rhofab = (*rho[ilev])[mfi];
818 const farraybox_t& pfab = (*phi[ilev])[mfi];
819
820 const int* lo = tbx.loVect();
821 const int* hi = tbx.hiVect();
822
823 for (int i = lo[0]; i <= hi[0]; ++i) {
824 int ii = i << 1;
825 for (int j = lo[1]; j <= hi[1]; ++j) {
826 int jj = j << 1;
827#if AMREX_SPACEDIM == 3
828 for (int k = lo[2]; k <= hi[2]; ++k) {
829 int kk = k << 1;
830#endif
831 AmrIntVect_t iv(D_DECL(i, j, k));
832 go_t gidx = mglevel_m[lev]->serialize(iv);
833
834 this->buildRestrictionMatrix_m(lev, gidx, iv,
835 D_DECL(ii, jj, kk), rfab);
836
837 this->buildInterpolationMatrix_m(lev, gidx, iv, cfab);
838
839 this->buildCrseBoundaryMatrix_m(lev, gidx, iv, mfab,
840 cfab, invdx2);
841
842 this->buildFineBoundaryMatrix_m(lev, gidx, iv,
843 mfab, rfab);
844
845 this->buildCompositePoissonMatrix_m(lev, gidx, iv, mfab,
846 rfab, invdx2);
847
848 if (lev > lbase_m || (lev == lbase_m && !solver_mp->hasOperator())) {
849 this->buildNoFinePoissonMatrix_m(lev, gidx, iv, mfab, invdx2);
850 this->buildGradientMatrix_m(lev, gidx, iv, mfab, invdx);
851 }
852
853 mglevel_m[lev]->rho_p->replaceGlobalValue(gidx, rhofab(iv, 0));
854 mglevel_m[lev]->phi_p->replaceGlobalValue(gidx, pfab(iv, 0));
855#if AMREX_SPACEDIM == 3
856 }
857#endif
858 }
859 }
860 }
861 } else {
862 for (lo_t lev = 0; lev < nlevel_m; ++lev) {
863 int ilev = lbase_m + lev;
864 this->buildDensityVector_m(lev, *rho[ilev]);
865 }
866 }
867
868 this->close_m(lev, matrices);
869
870 if ( matrices && lev > lbase_m ) {
871 smoother_m[lev-1].reset( new AmrSmoother(mglevel_m[lev]->Anf_p,
872                                                      smootherType_m, nSweeps_m) );
873        }
874 }
875}
876
877
878void AmrMultiGrid::open_m(const lo_t& level,
879 const bool& matrices)
880{
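    /* the nNeighbours / nEntries values below are upper bounds on the number
     * of non-zeros per row, used only to preallocate the Tpetra matrices
     * (StaticProfile)
     */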
881 if ( matrices ) {
882
883 if ( level > lbase_m ) {
884
885 /*
886 * interpolation matrix
887 */
888
889 int nNeighbours = (nBcPoints_m + 1) * interp_mp->getNumberOfPoints();
890
891 mglevel_m[level]->I_p = Teuchos::rcp( new matrix_t(mglevel_m[level]->map_p,
892 nNeighbours,
893 Tpetra::StaticProfile) );
894
895 /*
896 * coarse boundary matrix
897 */
898
899 nNeighbours = 2 * AMREX_SPACEDIM * nBcPoints_m *
900 2 * AMREX_SPACEDIM * interface_mp->getNumberOfPoints();
901
902 mglevel_m[level]->Bcrse_p = Teuchos::rcp(
903 new matrix_t(mglevel_m[level]->map_p, nNeighbours,
904 Tpetra::StaticProfile) );
905
906 }
907
908
909 if ( level < lfine_m ) {
910
911 /*
912 * restriction matrix
913 */
914
915 // refinement 2
916 int nNeighbours = AMREX_D_TERM(2, * 2, * 2);
917
918 mglevel_m[level]->R_p = Teuchos::rcp(
919 new matrix_t(mglevel_m[level]->map_p, nNeighbours,
920 Tpetra::StaticProfile) );
921
922 /*
923 * fine boundary matrix
924 */
925
926 // refinement 2
927 nNeighbours = 2 * AMREX_SPACEDIM * AMREX_D_TERM(2, * 2, * 2);
928
929 mglevel_m[level]->Bfine_p = Teuchos::rcp(
930 new matrix_t(mglevel_m[level]->map_p, nNeighbours,
931 Tpetra::StaticProfile) );
932
933 }
934
935 /*
936 * no-fine Poisson matrix
937 */
938
939 int nPhysBoundary = 2 * AMREX_SPACEDIM * nBcPoints_m;
940
941 // number of internal stencil points
942 int nIntBoundary = AMREX_SPACEDIM * interface_mp->getNumberOfPoints();
943
944 int nEntries = (AMREX_SPACEDIM << 1) + 2 /* plus boundaries */ + nPhysBoundary + nIntBoundary;
945
946 if (level > lbase_m || (level == lbase_m && !solver_mp->hasOperator())) {
947 mglevel_m[level]->Anf_p = Teuchos::rcp(
948 new matrix_t(mglevel_m[level]->map_p, nEntries,
949 Tpetra::StaticProfile) );
950 }
951
952 /*
953 * with-fine / composite Poisson matrix
954 */
955 if ( lbase_m != lfine_m ) {
956 nEntries = (AMREX_SPACEDIM << 1) + 5 /* plus boundaries */ + nPhysBoundary + nIntBoundary;
957
958 mglevel_m[level]->Awf_p = Teuchos::rcp(
959 new matrix_t(mglevel_m[level]->map_p, nEntries,
960 Tpetra::StaticProfile) );
961
962 /*
963 * uncovered cells matrix
964 */
965 mglevel_m[level]->UnCovered_p = Teuchos::rcp(
966 new matrix_t(mglevel_m[level]->map_p, 1,
967 Tpetra::StaticProfile) );
968 }
969
970 /*
971 * gradient matrices
972 */
973 nEntries = 11;
974
975 if (level > lbase_m || (level == lbase_m && !solver_mp->hasOperator())) {
976 for (int d = 0; d < AMREX_SPACEDIM; ++d) {
977 mglevel_m[level]->G_p[d] = Teuchos::rcp(
978 new matrix_t(mglevel_m[level]->map_p, nEntries,
979 Tpetra::StaticProfile) );
980 }
981 }
982 }
983
984 mglevel_m[level]->rho_p = Teuchos::rcp(
985 new vector_t(mglevel_m[level]->map_p, false) );
986
987 if ( matrices ) {
988 mglevel_m[level]->phi_p = Teuchos::rcp(
989 new vector_t(mglevel_m[level]->map_p, false) );
990 }
991}
992
993
994void AmrMultiGrid::close_m(const lo_t& level,
995 const bool& matrices)
996{
997 if ( matrices ) {
998 if ( level > lbase_m ) {
999
1000 mglevel_m[level]->I_p->fillComplete(mglevel_m[level-1]->map_p, // col map (domain map)
1001 mglevel_m[level]->map_p); // row map (range map)
1002
1003 mglevel_m[level]->Bcrse_p->fillComplete(mglevel_m[level-1]->map_p, // col map
1004 mglevel_m[level]->map_p); // row map
1005 }
1006
1007 if ( level < lfine_m ) {
1008
1009 mglevel_m[level]->R_p->fillComplete(mglevel_m[level+1]->map_p,
1010 mglevel_m[level]->map_p);
1011
1012 mglevel_m[level]->Bfine_p->fillComplete(mglevel_m[level+1]->map_p,
1013 mglevel_m[level]->map_p);
1014 }
1015
1016 if (level > lbase_m || (level == lbase_m && !solver_mp->hasOperator())) {
1017 mglevel_m[level]->Anf_p->fillComplete();
1018
1019 for (int d = 0; d < AMREX_SPACEDIM; ++d)
1020 mglevel_m[level]->G_p[d]->fillComplete();
1021 }
1022
1023 if ( lbase_m != lfine_m ) {
1024 mglevel_m[level]->Awf_p->fillComplete();
1025
1026 mglevel_m[level]->UnCovered_p->fillComplete();
1027 }
1028 }
1029}
1030
1031
1032void AmrMultiGrid::buildNoFinePoissonMatrix_m(const lo_t& level,
1033                                              const go_t& gidx,
1034 const AmrIntVect_t& iv,
1035 const basefab_t& mfab,
1036 const scalar_t* invdx2)
1037{
1038 /*
1039 * Laplacian of "no fine"
1040 */
1041
1042 /*
1043 * 1D not supported
1044 * 2D --> 5 elements per row
1045 * 3D --> 7 elements per row
1046 */
1047
1048 umap_t map;
1049 indices_t indices;
1050 coefficients_t values;
1051
1052 /*
1053 * check neighbours in all directions (Laplacian stencil --> cross)
1054 */
1055 for (int d = 0; d < AMREX_SPACEDIM; ++d) {
1056 for (int shift = -1; shift <= 1; shift += 2) {
1057 AmrIntVect_t biv = iv;
1058 biv[d] += shift;
1059
1060 switch ( mfab(biv) )
1061 {
1062 case AmrMultiGridLevel_t::Mask::INTERIOR:
1063 case AmrMultiGridLevel_t::Mask::COVERED: // covered --> interior cell
1064 {
1065 map[mglevel_m[level]->serialize(biv)] += invdx2[d];
1066 break;
1067 }
1068 case AmrMultiGridLevel_t::Mask::BNDRY:
1069 {
1070 // boundary cell
1071 // only level > 0 have this kind of boundary
1072#ifndef NDEBUG
1073 if ( level == lbase_m )
1074 throw OpalException("AmrMultiGrid::buildNoFinePoissonMatrix_m()",
1075 "Error in mask for level "
1076 + std::to_string(level) + "!");
1077#endif
1078 /* Dirichlet boundary conditions from coarser level.
1079 */
1080 interface_mp->fine(biv, map, invdx2[d], d, -shift,
1081 mglevel_m[level].get());
1082 break;
1083 }
1084 case AmrMultiGridLevel_t::Mask::PHYSBNDRY:
1085 {
1086 // physical boundary cell
1087 mglevel_m[level]->applyBoundary(biv, d, map,
1088 invdx2[d] /*matrix coefficient*/);
1089 break;
1090 }
1091 default:
1092 throw OpalException("AmrMultiGrid::buildNoFinePoissonMatrix_m()",
1093 "Error in mask for level "
1094 + std::to_string(level) + "!");
1095 }
1096 }
1097 }
1098
1099 // check center
1100 map[gidx] += AMREX_D_TERM(- 2.0 * invdx2[0],
1101 - 2.0 * invdx2[1],
1102 - 2.0 * invdx2[2]);
1103
1104 this->map2vector_m(map, indices, values);
1105
1106 mglevel_m[level]->Anf_p->insertGlobalValues(gidx,
1107 indices.size(),
1108 &values[0],
1109 &indices[0]);
1110}
1111
1112
1113void AmrMultiGrid::buildCompositePoissonMatrix_m(const lo_t& level,
1114                                                 const go_t& gidx,
1115 const AmrIntVect_t& iv,
1116 const basefab_t& mfab,
1117 const basefab_t& rfab,
1118 const scalar_t* invdx2)
1119{
1120 /*
1121 * Laplacian of "with fine"
1122 *
1123 * For the finest level: Awf == Anf
1124 */
1125 if ( rfab(iv) == AmrMultiGridLevel_t::Refined::YES ) //|| lbase_m != lfine_m )
1126 return;
1127 /*
1128 * Only cells that are not refined
1129 */
1130
1131 /*
1132 * 1D not supported by AmrMultiGrid
1133 * 2D --> 5 elements per row
1134 * 3D --> 7 elements per row
1135 */
1136
1137 umap_t map;
1138 indices_t indices;
1139 coefficients_t values;
1140
1141 /*
1142 * Only cells that are not refined
1143 */
1144
1145 /*
1146 * check neighbours in all directions (Laplacian stencil --> cross)
1147 */
1148 for (int d = 0; d < AMREX_SPACEDIM; ++d) {
1149 for (int shift = -1; shift <= 1; shift += 2) {
1150 AmrIntVect_t biv = iv;
1151 biv[d] += shift;
1152
1153 if ( rfab(biv) != AmrMultiGridLevel_t::Refined::YES )
1154 {
1155 /*
1156 * It can't be a refined cell!
1157 */
1158 switch ( mfab(biv) )
1159 {
1160 case AmrMultiGridLevel_t::Mask::COVERED:
1161 // covered --> interior cell
1162 case AmrMultiGridLevel_t::Mask::INTERIOR:
1163 {
1164 map[mglevel_m[level]->serialize(biv)] += invdx2[d];
1165 map[gidx] -= invdx2[d]; // add center once
1166 break;
1167 }
1168 case AmrMultiGridLevel_t::Mask::BNDRY:
1169 {
1170 // boundary cell
1171 // only level > 0 have this kind of boundary
1172#ifndef NDEBUG
1173 if ( level == lbase_m )
1174 throw OpalException("AmrMultiGrid::buildCompositePoissonMatrix_m()",
1175 "Error in mask for level "
1176 + std::to_string(level) + "!");
1177#endif
1178
1179 /* We are on the fine side of the crse-fine interface
1180 * --> normal stencil --> no flux matching required
1181 * --> interpolation of fine ghost cell required
1182 * (used together with Bcrse)
1183 */
1184
1185 /* Dirichlet boundary conditions from coarser level.
1186 */
1187 interface_mp->fine(biv, map, invdx2[d], d, -shift,
1188 mglevel_m[level].get());
1189
1190 // add center once
1191 map[gidx] -= invdx2[d];
1192 break;
1193 }
1194 case AmrMultiGridLevel_t::Mask::PHYSBNDRY:
1195 {
1196 // physical boundary cell
1197 mglevel_m[level]->applyBoundary(biv, d, map,
1198 invdx2[d] /*matrix coefficient*/);
1199
1200 // add center once
1201 map[gidx] -= invdx2[d];
1202 break;
1203 }
1204 default:
1205 throw OpalException("AmrMultiGrid::buildCompositePoissonMatrix_m()",
1206 "Error in mask for level "
1207 + std::to_string(level) + "!");
1208 }
1209 } else {
1210 /*
1211 * If neighbour cell is refined, we are on the coarse
1212 * side of the crse-fine interface --> flux matching
1213 * required --> interpolation of fine ghost cell
1214 * (used together with Bfine)
1215 */
1216
1217
1218 // flux matching, coarse part
1219
1220                /* 2D --> 2 fine cells to compute flux per coarse-fine interface --> avg = 2
1221                 * 3D --> 4 fine cells to compute flux per coarse-fine interface --> avg = 4
1222 *
1223 * @precondition: refinement of 2
1224 */
1225 // top and bottom for all directions
1226 const scalar_t* invcdx = mglevel_m[level]->invCellSize();
1227 const scalar_t* invfdx = mglevel_m[level+1]->invCellSize();
1228 scalar_t invavg = AMREX_D_PICK(1.0, 0.5, 0.25);
1229 scalar_t value = - invavg * invcdx[d] * invfdx[d];
1230
1231 for (int d1 = 0; d1 < 2; ++d1) {
1232#if AMREX_SPACEDIM == 3
1233 for (int d2 = 0; d2 < 2; ++d2) {
1234#endif
1235
1236 /* in order to get a top iv --> needs to be odd value in "d"
1237 * in order to get a bottom iv --> needs to be even value in "d"
1238 */
1239 AmrIntVect_t fake(D_DECL(0, 0, 0));
1240
1241 fake[(d+1)%AMREX_SPACEDIM] = d1;
1242#if AMREX_SPACEDIM == 3
1243 fake[(d+2)%AMREX_SPACEDIM] = d2;
1244#endif
1245 interface_mp->coarse(iv, map, value, d, shift, rfab,
1246 fake, mglevel_m[level].get());
1247
1248#if AMREX_SPACEDIM == 3
1249 }
1250#endif
1251 }
1252 }
1253 }
1254 }
1255
1256 this->map2vector_m(map, indices, values);
1257
1258 mglevel_m[level]->Awf_p->insertGlobalValues(gidx,
1259 indices.size(),
1260 &values[0],
1261 &indices[0]);
1262
1263 scalar_t vv = 1.0;
1264 mglevel_m[level]->UnCovered_p->insertGlobalValues(gidx,
1265 1,
1266 &vv,
1267 &gidx);
1268}
1269
1270
1271void AmrMultiGrid::buildRestrictionMatrix_m(const lo_t& level,
1272                                            const go_t& gidx,
1273 const AmrIntVect_t& iv,
1274 D_DECL(const go_t& ii,
1275 const go_t& jj,
1276 const go_t& kk),
1277 const basefab_t& rfab)
1278{
1279 /*
1280 * x^(l) = R * x^(l+1)
1281 */
1282
1283 // finest level does not need to have a restriction matrix
1284 if ( rfab(iv) == AmrMultiGridLevel_t::Refined::NO || level == lfine_m )
1285 return;
1286
1287 /* Difficulty: If a fine cell belongs to another processor than the underlying
1288 * coarse cell, we get an error when filling the matrix since the
1289 * cell (--> global index) does not belong to the same processor.
1290 * Solution: Find all coarse cells that are covered by fine cells, thus,
1291     *                 the distribution map is correct.
1292 *
1293 *
1294 */
1295 indices_t indices;
1296 indices.reserve(2 << (AMREX_SPACEDIM - 1));
1297 coefficients_t values;
1298 values.reserve(2 << (AMREX_SPACEDIM -1));
1299
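    /* restriction = plain average of the 2 (1D), 4 (2D) or 8 (3D) fine cells
     * covering the coarse cell, hence the weights 0.5 / 0.25 / 0.125 below
     * (a refinement ratio of 2 is assumed throughout)
     */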
1300 // neighbours
1301 for (int iref = 0; iref < 2; ++iref) {
1302 for (int jref = 0; jref < 2; ++jref) {
1303#if AMREX_SPACEDIM == 3
1304 for (int kref = 0; kref < 2; ++kref) {
1305#endif
1306 AmrIntVect_t riv(D_DECL(ii + iref, jj + jref, kk + kref));
1307
1308 indices.push_back( mglevel_m[level+1]->serialize(riv) );
1309 values.push_back( AMREX_D_PICK(0.5, 0.25, 0.125) );
1310#if AMREX_SPACEDIM == 3
1311 }
1312#endif
1313 }
1314 }
1315
1316 mglevel_m[level]->R_p->insertGlobalValues(gidx,
1317 indices.size(),
1318 &values[0],
1319 &indices[0]);
1320}
1321
1322
1323void AmrMultiGrid::buildInterpolationMatrix_m(const lo_t& level,
1324                                              const go_t& gidx,
1325 const AmrIntVect_t& iv,
1326 const basefab_t& cfab)
1327{
1328 /* crse: level - 1
1329 * fine (this): level
1330 */
1331
1332 /*
1333 * This does not include ghost cells
1334 * --> no boundaries
1335 *
1336 * x^(l) = I * x^(l-1)
1337 */
1338
1339 if ( level == lbase_m )
1340 return;
1341
1342 umap_t map;
1343 indices_t indices;
1344 coefficients_t values;
1345
1346 /*
1347 * we need boundary + indices from coarser level
1348 */
1349 interp_mp->stencil(iv, cfab, map, 1.0, mglevel_m[level-1].get());
1350
1351 this->map2vector_m(map, indices, values);
1352
1353 mglevel_m[level]->I_p->insertGlobalValues(gidx,
1354 indices.size(),
1355 &values[0],
1356 &indices[0]);
1357}
1358
1359
1360void AmrMultiGrid::buildCrseBoundaryMatrix_m(const lo_t& level,
1361                                             const go_t& gidx,
1362 const AmrIntVect_t& iv,
1363 const basefab_t& mfab,
1364 const basefab_t& cfab,
1365 const scalar_t* invdx2)
1366{
1367 /*
1368 * fine (this): level
1369 * coarse: level - 1
1370 */
1371
1372 // the base level has only physical boundaries
1373 if ( level == lbase_m )
1374 return;
1375
1376 // iv is a fine cell
1377
1378 umap_t map;
1379 indices_t indices;
1380 coefficients_t values;
1381
1382 // check its neighbours to see if at crse-fine interface
1383 for (int d = 0; d < AMREX_SPACEDIM; ++d) {
1384 for (int shift = -1; shift <= 1; shift += 2) {
1385 // neighbour
1386 AmrIntVect_t niv = iv;
1387 niv[d] += shift;
1388
1389 if ( mfab(niv) == AmrMultiGridLevel_t::Mask::BNDRY )
1390 {
1391 // neighbour does not belong to fine grids
1392 // includes no cells at physical boundary
1393
1394 // coarse cell that is not refined
1395 AmrIntVect_t civ = iv;
1396 civ[d] += shift;
1397 civ.coarsen(mglevel_m[level]->refinement());
1398
1399 // we need boundary + indices from coarser level
1400 // we need normalization by mesh size squared (of fine cell size)
1401 // --> Laplacian for fine cell
1402 // (Dirichlet boundary for coarse --> fine)
1403 interface_mp->coarse(civ, map, invdx2[d], d, shift, cfab,
1404 iv, mglevel_m[level-1].get());
1405 }
1406#ifndef NDEBUG
1407 else if ( mfab(niv) == AmrMultiGridLevel_t::Mask::PHYSBNDRY ) {
1408 throw OpalException("AmrMultiGrid::buildCrseBoundaryMatrix_m()",
1409 "Fine meshes shouldn't be connected "
1410 "to physical (i.e. mesh) boundary!");
1411 }
1412#endif
1413 }
1414 }
1415
1416 this->map2vector_m(map, indices, values);
1417
1418 mglevel_m[level]->Bcrse_p->insertGlobalValues(gidx,
1419 indices.size(),
1420 &values[0],
1421 &indices[0]);
1422}
1423
1424
1425void AmrMultiGrid::buildFineBoundaryMatrix_m(const lo_t& level,
1426                                             const go_t& gidx,
1427 const AmrIntVect_t& iv,
1428 const basefab_t& mfab,
1429 const basefab_t& rfab)
1430{
1431 /* fine: level + 1
1432 * coarse (this): level
1433 */
1434
1435 // the finest level does not need data from a finer level
1436 if ( rfab(iv) == AmrMultiGridLevel_t::Refined::YES || level == lfine_m )
1437 return;
1438
1439 const scalar_t* invcdx = mglevel_m[level]->invCellSize();
1440 const scalar_t* invfdx = mglevel_m[level+1]->invCellSize();
1441
1442 // inverse of number of fine cell gradients
1443 scalar_t invavg = AMREX_D_PICK(1, 0.5, 0.25);
1444
1445 umap_t map;
1446 indices_t indices;
1447 coefficients_t values;
1448
1449 auto fill = [&](umap_t& map,
1450 D_DECL(int ii, int jj, int kk),
1451 int* begin, int* end, int d,
1452 const AmrIntVect_t& iv, int shift)
1453 {
1454 for (int iref = ii - begin[0]; iref <= ii + end[0]; ++iref) {
1455 for (int jref = jj - begin[1]; jref <= jj + end[1]; ++jref) {
1456#if AMREX_SPACEDIM == 3
1457 for (int kref = kk - begin[2]; kref <= kk + end[2]; ++kref) {
1458#endif
1459 /* Since all fine cells on the not-refined cell are
1460 * outside of the "domain" --> we need to interpolate
1461 */
1462 AmrIntVect_t riv(D_DECL(iref, jref, kref));
1463
1464 if ( (riv[d] >> 1) /*refinement*/ == iv[d] ) {
1465 /* the fine cell is on the coarse side --> fine
1466 * ghost cell --> we need to interpolate
1467 */
1468 scalar_t value = - invavg * invcdx[d] * invfdx[d];
1469
1470 interface_mp->fine(riv, map, value, d, shift,
1471 mglevel_m[level+1].get());
1472 } else {
1473 scalar_t value = invavg * invcdx[d] * invfdx[d];
1474 map[mglevel_m[level+1]->serialize(riv)] += value;
1475 }
1476#if AMREX_SPACEDIM == 3
1477 }
1478#endif
1479 }
1480 }
1481 };
1482
1483
1484 /*
1485 * iv is a coarse cell that got not refined
1486 *
1487 * --> check all neighbours to see if at crse-fine
1488 * interface
1489 */
1490 for (int d = 0; d < AMREX_SPACEDIM; ++d) {
1491 for (int shift = -1; shift <= 1; shift += 2) {
1492 // neighbour
1493 AmrIntVect_t covered = iv;
1494 covered[d] += shift;
1495
1496 if ( rfab(covered) == AmrMultiGridLevel_t::Refined::YES &&
1497                 mfab(covered) != AmrMultiGridLevel_t::Mask::PHYSBNDRY )
1498 {
1499 // neighbour is covered by fine cells
1500
1501 /*
1502 * "shift" is the amount to a coarse cell that got refined
1503 * "d" is the direction to shift
1504 *
1505 * --> check all covered neighbour cells
1506 */
1507
1508 /* we need to iterate over correct fine cells. It depends
1509 * on the orientation of the interface
1510 */
1511 int begin[AMREX_SPACEDIM] = { D_DECL( int(d == 0), int(d == 1), int(d == 2) ) };
1512 int end[AMREX_SPACEDIM] = { D_DECL( int(d != 0), int(d != 1), int(d != 2) ) };
1513
1514 /*
1515 * neighbour cell got refined but is not on physical boundary
1516 * --> we are at a crse-fine interface
1517 *
1518 * we need now to find out which fine cells
1519 * are required to satisfy the flux matching
1520 * condition
1521 */
1522
1523 switch ( shift ) {
1524 case -1:
1525 {
1526 // --> interface is on the lower face
1527                    int ii = iv[0] << 1; // refinement in x
1528                    int jj = iv[1] << 1; // refinement in y
1529#if AMREX_SPACEDIM == 3
1530                    int kk = iv[2] << 1; // refinement in z
1531#endif
1532 // iterate over all fine cells at the interface
1533 // start with lower cells --> cover coarse neighbour
1534 // cell
1535 fill(map, D_DECL(ii, jj, kk), &begin[0], &end[0], d, iv, shift);
1536 break;
1537 }
1538 case 1:
1539 default:
1540 {
1541 // --> interface is on the upper face
1542                    int ii = covered[0] << 1; // refinement in x
1543                    int jj = covered[1] << 1; // refinement in y
1544#if AMREX_SPACEDIM == 3
1545                    int kk = covered[2] << 1; // refinement in z
1546#endif
1547 fill(map, D_DECL(ii, jj, kk), &begin[0], &end[0], d, iv, shift);
1548 break;
1549 }
1550 }
1551 }
1552 }
1553 }
1554
1555 this->map2vector_m(map, indices, values);
1556
1557 // iv: not covered coarse cell at crse-fine interface
1558
1559 mglevel_m[level]->Bfine_p->insertGlobalValues(gidx,
1560 indices.size(),
1561 &values[0],
1562 &indices[0]);
1563}
1564
1565
1566void AmrMultiGrid::buildDensityVector_m(const lo_t& level,
1567                                        const AmrField_t& rho)
1568{
1569 this->amrex2trilinos_m(level, 0, rho, mglevel_m[level]->rho_p);
1570}
1571
1572
1573void AmrMultiGrid::buildPotentialVector_m(const lo_t& level,
1574                                          const AmrField_t& phi)
1575{
1576 this->amrex2trilinos_m(level, 0, phi, mglevel_m[level]->phi_p);
1577}
1578
1579
1580void AmrMultiGrid::buildGradientMatrix_m(const lo_t& level,
1581                                         const go_t& gidx,
1582 const AmrIntVect_t& iv,
1583 const basefab_t& mfab,
1584 const scalar_t* invdx)
1585{
1586 umap_t map;
1587 indices_t indices;
1588 coefficients_t values;
1589
1590 auto check = [&](const AmrIntVect_t& iv,
1591 const basefab_t& mfab,
1592 int dir,
1593 scalar_t shift)
1594 {
1595 switch ( mfab(iv) )
1596 {
1597 case AmrMultiGridLevel_t::Mask::INTERIOR:
1598 // interior cells
1599 case AmrMultiGridLevel_t::Mask::COVERED:
1600 // covered --> interior cell
1601 map[mglevel_m[level]->serialize(iv)] -= shift * 0.5 * invdx[dir];
1602 break;
1603 case AmrMultiGridLevel_t::Mask::BNDRY:
1604 {
1605 // interior boundary cells --> only level > 0
1606#ifndef NDEBUG
1607 if ( level == lbase_m )
1608 throw OpalException("AmrMultiGrid::buildGradientMatrix_m()",
1609 "Error in mask for level "
1610 + std::to_string(level) + "!");
1611#endif
1612
1613 scalar_t value = - shift * 0.5 * invdx[dir];
1614
1615 // use 1st order Lagrange --> only cells of this level required
1616 AmrIntVect_t tmp = iv;
1617 // first fine cell on refined coarse cell (closer to interface)
1618 tmp[dir] -= shift;
1619 map[mglevel_m[level]->serialize(tmp)] += 2.0 * value;
1620
1621 // second fine cell on refined coarse cell (further away from interface)
1622 tmp[dir] -= shift;
1623 map[mglevel_m[level]->serialize(tmp)] -= value;
1624 break;
1625 }
1626 case AmrMultiGridLevel_t::Mask::PHYSBNDRY:
1627 {
1628 // physical boundary cells
1629
1630 scalar_t value = - shift * 0.5 * invdx[dir];
1631
1632 mglevel_m[level]->applyBoundary(iv, dir,
1633 map, value);
1634 break;
1635 }
1636 default:
1637 break;
1638 }
1639 };
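    /* centered difference (+/- 0.5 * invdx) in each direction; at a coarse-fine
     * interface the ghost value is extrapolated from the two nearest cells of
     * this level (1st order Lagrange), at physical boundaries the boundary
     * operator provides the coefficients
     */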
1640
1641 for (int d = 0; d < AMREX_SPACEDIM; ++d) {
1642 for (int shift = -1; shift <= 1; shift += 2) {
1643 AmrIntVect_t niv = iv;
1644 niv[d] += shift;
1645 check(niv, mfab, d, shift);
1646 }
1647
1648 this->map2vector_m(map, indices, values);
1649
1650 mglevel_m[level]->G_p[d]->insertGlobalValues(gidx,
1651 indices.size(),
1652 &values[0],
1653 &indices[0]);
1654 }
1655}
1656
1657
1658void AmrMultiGrid::amrex2trilinos_m(const lo_t& level,
1659                                    const lo_t& comp,
1660 const AmrField_t& mf,
1661 Teuchos::RCP<vector_t>& mv)
1662{
1663 if ( mv.is_null() )
1664 mv = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, false) );
1665
1666 for (amrex::MFIter mfi(mf, true); mfi.isValid(); ++mfi) {
1667 const amrex::Box& tbx = mfi.tilebox();
1668 const amrex::FArrayBox& fab = mf[mfi];
1669
1670 const int* lo = tbx.loVect();
1671 const int* hi = tbx.hiVect();
1672
1673 for (int i = lo[0]; i <= hi[0]; ++i) {
1674 for (int j = lo[1]; j <= hi[1]; ++j) {
1675#if AMREX_SPACEDIM == 3
1676 for (int k = lo[2]; k <= hi[2]; ++k) {
1677#endif
1678 AmrIntVect_t iv(D_DECL(i, j, k));
1679
1680 go_t gidx = mglevel_m[level]->serialize(iv);
1681
1682 mv->replaceGlobalValue(gidx, fab(iv, comp));
1683#if AMREX_SPACEDIM == 3
1684 }
1685#endif
1686 }
1687 }
1688 }
1689}
1690
1691
1692void AmrMultiGrid::trilinos2amrex_m(const lo_t& level,
1693                                    const lo_t& comp,
1694 AmrField_t& mf,
1695 const Teuchos::RCP<vector_t>& mv)
1696{
1697 Teuchos::ArrayRCP<const amr::scalar_t> data = mv->get1dView();
1698
1699 for (amrex::MFIter mfi(mf, true); mfi.isValid(); ++mfi) {
1700 const amrex::Box& tbx = mfi.tilebox();
1701 amrex::FArrayBox& fab = mf[mfi];
1702
1703 const int* lo = tbx.loVect();
1704 const int* hi = tbx.hiVect();
1705
1706 for (int i = lo[0]; i <= hi[0]; ++i) {
1707 for (int j = lo[1]; j <= hi[1]; ++j) {
1708#if AMREX_SPACEDIM == 3
1709 for (int k = lo[2]; k <= hi[2]; ++k) {
1710#endif
1711 AmrIntVect_t iv(D_DECL(i, j, k));
1712
1713 go_t gidx = mglevel_m[level]->serialize(iv);
1714 lo_t lidx = mglevel_m[level]->map_p->getLocalElement(gidx);
1715
1716 fab(iv, comp) = data[lidx];
1717 }
1718#if AMREX_SPACEDIM == 3
1719 }
1720#endif
1721 }
1722 }
1723}
1724
1725
1726inline
1727void AmrMultiGrid::map2vector_m(umap_t& map, indices_t& indices,
1728                                coefficients_t& values)
1729{
1730 indices.clear();
1731 values.clear();
1732
1733 indices.reserve(map.size());
1734 values.reserve(map.size());
1735
1736 std::for_each(map.begin(), map.end(),
1737 [&](const std::pair<const go_t, scalar_t>& entry)
1738 {
1739#ifndef NDEBUG
1740 if ( entry.first < 0 ) {
1741 throw OpalException("AmrMultiGrid::map2vector_m()",
1742 "Negative matrix index!");
1743 }
1744#endif
1745
1746 indices.push_back(entry.first);
1747 values.push_back(entry.second);
1748 }
1749 );
1750
1751 map.clear();
1752}
1753
1754
1755void AmrMultiGrid::smooth_m(const lo_t& level,
1756                            Teuchos::RCP<vector_t>& e,
1757 Teuchos::RCP<vector_t>& r)
1758{
1759#if AMR_MG_TIMER
1760 IpplTimings::startTimer(smoothTimer_m);
1761#endif
1762
1763 // base level has no smoother --> l - 1
1764 smoother_m[level-1]->smooth(e, r);
1765
1766#if AMR_MG_TIMER
1767 IpplTimings::stopTimer(smoothTimer_m);
1768#endif
1769}
1770
1771
1772void AmrMultiGrid::restrict_m(const lo_t& level) {
1773
1774#if AMR_MG_TIMER
1775 IpplTimings::startTimer(restrictTimer_m);
1776#endif
1777
1778 Teuchos::RCP<vector_t> tmp = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p) );
1779
1780 this->residual_no_fine_m(level, tmp,
1781 mglevel_m[level]->error_p,
1782 mglevel_m[level-1]->error_p,
1783 mglevel_m[level]->residual_p);
1784
1785 mglevel_m[level-1]->residual_p->putScalar(0.0);
1786
1787 // average down: residual^(l-1) = R^(l) * tmp
1788 mglevel_m[level-1]->R_p->apply(*tmp, *mglevel_m[level-1]->residual_p);
1789
1790
1791 // composite matrix, i.e. matrix without covered cells
1792 // r^(l-1) = rho^(l-1) - A * phi^(l-1)
1793
1794 vector_t fine2crse(mglevel_m[level-1]->Awf_p->getDomainMap(), true);
1795
1796 // get boundary for
1797 mglevel_m[level-1]->Bfine_p->apply(*mglevel_m[level]->phi_p, fine2crse);
1798
1799 // operation: fine2coarse += A * phi
1800 mglevel_m[level-1]->Awf_p->apply(*mglevel_m[level-1]->phi_p,
1801 fine2crse, Teuchos::NO_TRANS,
1802 scalar_t(1.0), scalar_t(1.0));
1803
1804 if ( mglevel_m[level-1]->Bcrse_p != Teuchos::null ) {
1805 // operation: fine2coarse += B * phi
1806 mglevel_m[level-1]->Bcrse_p->apply(*mglevel_m[level-2]->phi_p,
1807 fine2crse, Teuchos::NO_TRANS,
1808 scalar_t(1.0), scalar_t(1.0));
1809 }
1810
1811 Teuchos::RCP<vector_t> uncoveredRho = Teuchos::rcp( new vector_t(mglevel_m[level-1]->map_p, true) );
1812
1813 mglevel_m[level-1]->UnCovered_p->apply(*mglevel_m[level-1]->rho_p, *uncoveredRho);
1814
1815
1816 //FIXME tmp2 not needed
1817 Teuchos::RCP<vector_t> tmp2 = Teuchos::rcp( new vector_t(mglevel_m[level-1]->map_p, true) );
1818 mglevel_m[level-1]->UnCovered_p->apply(fine2crse, *tmp2);
1819
1820 // ONLY subtract coarse rho
1821 mglevel_m[level-1]->residual_p->update(1.0, *uncoveredRho, -1.0, *tmp2, 1.0);
1822
1823#if AMR_MG_TIMER
1824 IpplTimings::stopTimer(restrictTimer_m);
1825#endif
1826}
1827
1828
1829void AmrMultiGrid::prolongate_m(const lo_t& level) {
1830#if AMR_MG_TIMER
1831 IpplTimings::startTimer(interpTimer_m);
1832#endif
1833 // interpolate error from l-1 to l
1834 // operation: e^(l) = 1.0 * e^(l) + 1.0 * I^(l) * e^(l-1)
1835 mglevel_m[level]->I_p->apply(*mglevel_m[level-1]->error_p,
1836 *mglevel_m[level]->error_p,
1837 Teuchos::NO_TRANS,
1838 scalar_t(1.0),
1839 scalar_t(1.0));
1840#if AMR_MG_TIMER
1841 IpplTimings::stopTimer(interpTimer_m);
1842#endif
1843}
1844
1845
1846void AmrMultiGrid::averageDown_m(const lo_t& level) {
1847
1848 if (level == lfine_m )
1849 return;
1850#if AMR_MG_TIMER
1851 IpplTimings::startTimer(averageTimer_m);
1852#endif
1853 Teuchos::RCP<vector_t> phicrse = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, false) );
1854
1855 // operation: phicrse = 0.0 * phicrse + 1.0 * R^(l) * phi^(l+1)
1856 mglevel_m[level]->R_p->apply(*mglevel_m[level+1]->phi_p, *phicrse);
1857
1858 Teuchos::RCP<vector_t> uncov_phi = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, true) );
1859
1860 mglevel_m[level]->UnCovered_p->apply(*mglevel_m[level]->phi_p, *uncov_phi);
1861
1862 mglevel_m[level]->phi_p->update(1.0, *phicrse, 1.0, *uncov_phi, 0.0);
1863#if AMR_MG_TIMER
1864 IpplTimings::stopTimer(averageTimer_m);
1865#endif
1866}
1867
1868
1869void AmrMultiGrid::initInterpolater_m(const Interpolater& interp) {
1870    switch ( interp ) {
1871 case Interpolater::TRILINEAR:
1872            interp_mp.reset( new AmrTrilinearInterpolater<AmrMultiGridLevel_t>() );
1873            break;
1874 case Interpolater::LAGRANGE:
1875 throw OpalException("AmrMultiGrid::initInterpolater_m()",
1876 "Not yet implemented.");
1877 case Interpolater::PIECEWISE_CONST:
1878            interp_mp.reset( new AmrPCInterpolater<AmrMultiGridLevel_t>() );
1879            break;
1880 default:
1881 throw OpalException("AmrMultiGrid::initInterpolater_m()",
1882 "No such interpolater available.");
1883 }
1884}
1885
1886
1887void AmrMultiGrid::initCrseFineInterp_m(const Interpolater& interface) {
1888    switch ( interface ) {
1889 case Interpolater::TRILINEAR:
1890            interface_mp.reset( new AmrTrilinearInterpolater<AmrMultiGridLevel_t>() );
1891            break;
1892 case Interpolater::LAGRANGE:
1893            interface_mp.reset( new AmrLagrangeInterpolater<AmrMultiGridLevel_t>(
1894                AmrLagrangeInterpolater<AmrMultiGridLevel_t>::Order::QUADRATIC) );
1895            break;
1896 case Interpolater::PIECEWISE_CONST:
1898 break;
1899 default:
1900 throw OpalException("AmrMultiGrid::initCrseFineInterp_m()",
1901 "No such interpolater for the coarse-fine interface available.");
1902 }
1903}
1904
1905
1906void AmrMultiGrid::initBaseSolver_m(const BaseSolver& solver,
1907                                    const bool& rebalance,
1908 const std::string& reuse)
1909{
1910 switch ( solver ) {
1911 // Belos solvers
1912 case BaseSolver::BICGSTAB:
1913 solver_mp.reset( new BelosSolver_t("BICGSTAB", prec_mp) );
1914 break;
1915 case BaseSolver::MINRES:
1916 solver_mp.reset( new BelosSolver_t("MINRES", prec_mp) );
1917 break;
1918 case BaseSolver::PCPG:
1919 solver_mp.reset( new BelosSolver_t("PCPG", prec_mp) );
1920 break;
1921 case BaseSolver::CG:
1922 solver_mp.reset( new BelosSolver_t("Pseudoblock CG", prec_mp) );
1923 break;
1924 case BaseSolver::GMRES:
1925 solver_mp.reset( new BelosSolver_t("Pseudoblock GMRES", prec_mp) );
1926 break;
1927 case BaseSolver::STOCHASTIC_CG:
1928 solver_mp.reset( new BelosSolver_t("Stochastic CG", prec_mp) );
1929 break;
1930 case BaseSolver::RECYCLING_CG:
1931 solver_mp.reset( new BelosSolver_t("RCG", prec_mp) );
1932 break;
1933 case BaseSolver::RECYCLING_GMRES:
1934 solver_mp.reset( new BelosSolver_t("GCRODR", prec_mp) );
1935 break;
1936 // Amesos2 solvers
1937#ifdef HAVE_AMESOS2_KLU2
1938 case BaseSolver::KLU2:
1939 solver_mp.reset( new Amesos2Solver_t("klu2") );
1940 break;
1941#endif
1942#if HAVE_AMESOS2_SUPERLU
1943 case BaseSolver::SUPERLU:
1944 solver_mp.reset( new Amesos2Solver_t("superlu") );
1945 break;
1946#endif
1947#ifdef HAVE_AMESOS2_UMFPACK
1948 case BaseSolver::UMFPACK:
1949 solver_mp.reset( new Amesos2Solver_t("umfpack") );
1950 break;
1951#endif
1952#ifdef HAVE_AMESOS2_PARDISO_MKL
1953 case BaseSolver::PARDISO_MKL:
1954 solver_mp.reset( new Amesos2Solver_t("pardiso_mkl") );
1955 break;
1956#endif
1957#ifdef HAVE_AMESOS2_MUMPS
1958 case BaseSolver::MUMPS:
1959 solver_mp.reset( new Amesos2Solver_t("mumps") );
1960 break;
1961#endif
1962#ifdef HAVE_AMESOS2_LAPACK
1963 case BaseSolver::LAPACK:
1964 solver_mp.reset( new Amesos2Solver_t("lapack") );
1965 break;
1966#endif
1967 case BaseSolver::SA:
1968 {
1969 std::string muelu = MueLuSolver_t::convertToMueLuReuseOption(reuse);
1970 solver_mp.reset( new MueLuSolver_t(rebalance, muelu) );
1971 break;
1972 }
1973 default:
1974 throw OpalException("AmrMultiGrid::initBaseSolver_m()",
1975 "No such bottom solver available.");
1976 }
1977}
1978
1979
1980void AmrMultiGrid::initPrec_m(const Preconditioner& prec,
1981                              const std::string& reuse)
1982{
1983 switch ( prec ) {
1989 case Preconditioner::GS:
1991 prec_mp.reset( new Ifpack2Preconditioner_t(prec) );
1992 break;
1993 case Preconditioner::SA:
1994 {
1995 std::string muelu = MueLuPreconditioner_t::convertToMueLuReuseOption(reuse);
1996 prec_mp.reset( new MueLuPreconditioner_t(muelu) );
1997 break;
1998 }
1999 case Preconditioner::NONE:
2000 prec_mp.reset();
2001 break;
2002 default:
2003 throw OpalException("AmrMultiGrid::initPrec_m()",
2004 "No such preconditioner available.");
2005 }
2006}
2007
2008
2009AmrMultiGrid::Boundary
2010AmrMultiGrid::convertToEnumBoundary_m(const std::string& bc) {
2011    std::map<std::string, Boundary> map;
2012
2013 map["DIRICHLET"] = Boundary::DIRICHLET;
2014 map["OPEN"] = Boundary::OPEN;
2015 map["PERIODIC"] = Boundary::PERIODIC;
2016
2017 auto boundary = map.find(bc);
2018
2019 if ( boundary == map.end() )
2020 throw OpalException("AmrMultiGrid::convertToEnumBoundary_m()",
2021 "No boundary type '" + bc + "'.");
2022 return boundary->second;
2023}
2024
2025AmrMultiGrid::Interpolater
2026AmrMultiGrid::convertToEnumInterpolater_m(const std::string& interp) {
2027    std::map<std::string, Interpolater> map;
2028
2029 map["TRILINEAR"] = Interpolater::TRILINEAR;
2030 map["LAGRANGE"] = Interpolater::LAGRANGE;
2031 map["PC"] = Interpolater::PIECEWISE_CONST;
2032
2033 auto interpolater = map.find(interp);
2034
2035 if ( interpolater == map.end() )
2036 throw OpalException("AmrMultiGrid::convertToEnumInterpolater_m()",
2037 "No interpolater '" + interp + "'.");
2038 return interpolater->second;
2039}
2040
2041
2042AmrMultiGrid::BaseSolver
2043AmrMultiGrid::convertToEnumBaseSolver_m(const std::string& bsolver) {
2044 std::map<std::string, BaseSolver> map;
2045
2046 map["BICGSTAB"] = BaseSolver::BICGSTAB;
2047 map["MINRES"] = BaseSolver::MINRES;
2048 map["PCPG"] = BaseSolver::PCPG;
2049 map["CG"] = BaseSolver::CG;
2050 map["GMRES"] = BaseSolver::GMRES;
2051 map["STOCHASTIC_CG"] = BaseSolver::STOCHASTIC_CG;
2052    map["RECYCLING_CG"] = BaseSolver::RECYCLING_CG;
2053 map["RECYCLING_GMRES"] = BaseSolver::RECYCLING_GMRES;
2054#ifdef HAVE_AMESOS2_KLU2
2055 map["KLU2"] = BaseSolver::KLU2;
2056#endif
2057#ifdef HAVE_AMESOS2_SUPERLU
2058 map["SUPERLU"] = BaseSolver::SUPERLU;
2059#endif
2060#ifdef HAVE_AMESOS2_UMFPACK
2061 map["UMFPACK"] = BaseSolver::UMFPACK;
2062#endif
2063#ifdef HAVE_AMESOS2_PARDISO_MKL
2064 map["PARDISO_MKL"] = BaseSolver::PARDISO_MKL;
2065#endif
2066#ifdef HAVE_AMESOS2_MUMPS
2067 map["MUMPS"] = BaseSolver::MUMPS;
2068#endif
2069#ifdef HAVE_AMESOS2_LAPACK
2070 map["LAPACK"] = BaseSolver::LAPACK;
2071#endif
2072 map["SA"] = BaseSolver::SA;
2073
2074 auto solver = map.find(bsolver);
2075
2076 if ( solver == map.end() )
2077 throw OpalException("AmrMultiGrid::convertToEnumBaseSolver_m()",
2078 "No bottom solver '" + bsolver + "'.");
2079 return solver->second;
2080}
2081
2082
2083AmrMultiGrid::Preconditioner
2084AmrMultiGrid::convertToEnumPreconditioner_m(const std::string& prec) {
2085    std::map<std::string, Preconditioner> map;
2086
2087 map["NONE"] = Preconditioner::NONE;
2088
2088
2089    Ifpack2Preconditioner_t::fillMap(map);
2090
2091    MueLuPreconditioner_t::fillMap(map);
2092
2093 auto precond = map.find(prec);
2094
2095 if ( precond == map.end() )
2096 throw OpalException("AmrMultiGrid::convertToEnumPreconditioner_m()",
2097 "No preconditioner '" + prec + "'.");
2098 return precond->second;
2099}
2100
2101
2102AmrMultiGrid::Smoother
2103AmrMultiGrid::convertToEnumSmoother_m(const std::string& smoother) {
2104 return AmrSmoother::convertToEnumSmoother(smoother);
2105}
2106
2107
2108AmrMultiGrid::Norm
2109AmrMultiGrid::convertToEnumNorm_m(const std::string& norm) {
2110 std::map<std::string, Norm> map;
2111
2112 map["L1_NORM"] = Norm::L1;
2113 map["L2_NORM"] = Norm::L2;
2114 map["LINF_NORM"] = Norm::LINF;
2115
2116 auto n = map.find(norm);
2117
2118 if ( n == map.end() )
2119 throw OpalException("AmrMultiGrid::convertToEnumNorm_m()",
2120 "No norm '" + norm + "'.");
2121 return n->second;
2122}
2123
2124
2125void AmrMultiGrid::writeSDDSHeader_m(std::ofstream& outfile) {
2126 OPALTimer::Timer simtimer;
2127
2128 std::string dateStr(simtimer.date());
2129 std::string timeStr(simtimer.time());
2130 std::string indent(" ");
2131
2132 outfile << "SDDS1" << std::endl;
2133 outfile << "&description\n"
2134 << indent << "text=\"Solver statistics '" << OpalData::getInstance()->getInputFn()
2135            << "' " << dateStr << " " << timeStr << "\",\n"
2136 << indent << "contents=\"solver info\"\n"
2137 << "&end\n";
2138 outfile << "&parameter\n"
2139 << indent << "name=processors,\n"
2140 << indent << "type=long,\n"
2141 << indent << "description=\"Number of Cores used\"\n"
2142 << "&end\n";
2143 outfile << "&parameter\n"
2144 << indent << "name=revision,\n"
2145 << indent << "type=string,\n"
2146 << indent << "description=\"git revision of opal\"\n"
2147 << "&end\n";
2148 outfile << "&parameter\n"
2149 << indent << "name=flavor,\n"
2150 << indent << "type=string,\n"
2151 << indent << "description=\"OPAL flavor that wrote file\"\n"
2152 << "&end\n";
2153 outfile << "&column\n"
2154 << indent << "name=t,\n"
2155 << indent << "type=double,\n"
2156 << indent << "units=ns,\n"
2157 << indent << "description=\"1 Time\"\n"
2158 << "&end\n";
2159 outfile << "&column\n"
2160 << indent << "name=mg_iter,\n"
2161 << indent << "type=long,\n"
2162 << indent << "units=1,\n"
2163 << indent << "description=\"2 Number of Multigrid Iterations\"\n"
2164 << "&end\n";
2165 outfile << "&column\n"
2166 << indent << "name=bottom_iter,\n"
2167 << indent << "type=long,\n"
2168 << indent << "units=1,\n"
2169 << indent << "description=\"3 Total Number of Bottom Solver Iterations\"\n"
2170 << "&end\n";
2171 outfile << "&column\n"
2172 << indent << "name=regrid,\n"
2173 << indent << "type=bool,\n"
2174 << indent << "units=1,\n"
2175 << indent << "description=\"4 Regrid Step\"\n"
2176 << "&end\n";
2177 outfile << "&column\n"
2178 << indent << "name=" + snorm_m + ",\n"
2179 << indent << "type=double,\n"
2180 << indent << "units=1,\n"
2181 << indent << "description=\"5 Error\"\n"
2182 << "&end\n"
2183 << "&data\n"
2184 << indent << "mode=ascii,\n"
2185 << indent << "no_row_counts=1\n"
2186 << "&end\n"
2187 << comm_mp->getSize() << '\n'
2188 << OPAL_PROJECT_NAME << " " << OPAL_PROJECT_VERSION << " git rev. #" << Util::getGitRevision() << '\n'
2189 << (OpalData::getInstance()->isInOPALTMode()? "opal-t":
2190 (OpalData::getInstance()->isInOPALCyclMode()? "opal-cycl": "opal-map")) << std::endl;
2191}
2192
2193
2194void AmrMultiGrid::writeSDDSData_m(const scalar_t& error) {
2195#if AMR_MG_TIMER
2196 IpplTimings::startTimer(dumpTimer_m);
2197#endif
2198 unsigned int pwi = 10;
2199
2200 std::ofstream outfile;
2201
2202 if ( comm_mp->getRank() == 0 ) {
2203 outfile.open(fname_m.c_str(), flag_m);
2204 outfile.precision(15);
2205 outfile.setf(std::ios::scientific, std::ios::floatfield);
2206
2207 if ( flag_m == std::ios::out ) {
2208 flag_m = std::ios::app;
2209 writeSDDSHeader_m(outfile);
2210 }
2211
2212 outfile << itsAmrObject_mp->getT() * Units::s2ns << std::setw(pwi) << '\t' // 1
2213 << this->nIter_m << std::setw(pwi) << '\t' // 2
2214 << this->bIter_m << std::setw(pwi) << '\t' // 3
2215 << this->regrid_m << std::setw(pwi) << '\t' // 4
2216 << error << '\n'; // 5
2217 }
2218#if AMR_MG_TIMER
2219 IpplTimings::stopTimer(dumpTimer_m);
2220#endif
2221}
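Each call appends a single data row whose columns follow the SDDS header written above: the simulation time converted to ns, the number of multigrid iterations, the accumulated bottom-solver iterations, the regrid flag, and the error in the selected norm. A minimal sketch of reading one such row back after the SDDS header has been skipped; the struct and function names are illustrative, not part of OPAL:

#include <fstream>

struct SolverRow {       // illustrative container
    double t;            // time [ns]
    long   mgIter;       // multigrid iterations
    long   bottomIter;   // total bottom-solver iterations
    int    regrid;       // 1 if the step followed a regrid
    double error;        // residual in the chosen norm
};

bool readSolverRow(std::ifstream& in, SolverRow& row) {
    return static_cast<bool>(in >> row.t >> row.mgIter >> row.bottomIter
                                >> row.regrid >> row.error);
}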
2222
2223
2224#if AMR_MG_TIMER
2225void AmrMultiGrid::initTimer_m() {
2226 buildTimer_m = IpplTimings::getTimer("AMR MG matrix setup");
2227 restrictTimer_m = IpplTimings::getTimer("AMR MG restrict");
2228 smoothTimer_m = IpplTimings::getTimer("AMR MG smooth");
2229 interpTimer_m = IpplTimings::getTimer("AMR MG prolongate");
2230 efieldTimer_m = IpplTimings::getTimer("AMR MG e-field");
2231 averageTimer_m = IpplTimings::getTimer("AMR MG average down");
2232 bottomTimer_m = IpplTimings::getTimer("AMR MG bottom-solver");
2233 dumpTimer_m = IpplTimings::getTimer("AMR MG dump");
2234}
2235#endif
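All timers are registered with IpplTimings::getTimer() and then bracket their phase with startTimer()/stopTimer(), as writeSDDSData_m does with dumpTimer_m above. A minimal sketch of that pattern for a hypothetical phase, assuming the IpplTimings header is available under the usual include path:

#include "Utility/IpplTimings.h"    // assumed include path

void timedPhase() {
    // Register once (static) and reuse the TimerRef for every call.
    static IpplTimings::TimerRef phaseTimer = IpplTimings::getTimer("AMR MG example phase");
    IpplTimings::startTimer(phaseTimer);
    // ... work to be timed ...
    IpplTimings::stopTimer(phaseTimer);
}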
2236
2237
2238double AmrMultiGrid::getXRangeMin(unsigned short level) {
2239 return itsAmrObject_mp->Geom(level).ProbLo(0);
2240}
2241
2242
2243double AmrMultiGrid::getXRangeMax(unsigned short level) {
2244 return itsAmrObject_mp->Geom(level).ProbHi(0);
2245}
2246
2247
2248double AmrMultiGrid::getYRangeMin(unsigned short level) {
2249 return itsAmrObject_mp->Geom(level).ProbLo(1);
2250}
2251
2252
2253double AmrMultiGrid::getYRangeMax(unsigned short level) {
2254 return itsAmrObject_mp->Geom(level).ProbHi(1);
2255}
2256
2257
2258double AmrMultiGrid::getZRangeMin(unsigned short level) {
2259 return itsAmrObject_mp->Geom(level).ProbLo(2);
2260}
2261
2262
2263double AmrMultiGrid::getZRangeMax(unsigned short level) {
2264 return itsAmrObject_mp->Geom(level).ProbHi(2);
2265}
2266
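The range accessors above simply forward to the AMReX geometry of the requested level, so physical quantities such as the extent of the computational domain follow directly from them. A minimal sketch with an illustrative helper that is not part of the class:

#include <iostream>
#include "AmrMultiGrid.h"

// Illustrative helper: print the physical domain extent of a level.
void printDomainExtent(AmrMultiGrid& mg, unsigned short level = 0) {
    const double Lx = mg.getXRangeMax(level) - mg.getXRangeMin(level);
    const double Ly = mg.getYRangeMax(level) - mg.getYRangeMin(level);
    const double Lz = mg.getZRangeMax(level) - mg.getZRangeMin(level);
    std::cout << "domain extent: " << Lx << " x " << Ly << " x " << Lz << std::endl;
}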
2267
2268Inform& AmrMultiGrid::print(Inform& os) const {
2269 os << "* ********************* A M R M u l t i G r i d ********************** " << endl
2270 //FIXME
2271 << "* ******************************************************************** " << endl;
2272 return os;
2273}