#include <AMReX_ParallelDescriptor.H>
const std::string& bsolver,
const std::string& prec,
const bool& rebalance,
const std::string& reuse,
const std::string& bcx,
const std::string& bcy,
const std::string& bcz,
const std::string& smoother,
const std::size_t& nSweeps,
const std::string& interp,
const std::string& norm)
, comm_mp( new comm_t( amrex::ParallelDescriptor::Communicator() ) )
, fname_m(OpalData::getInstance()->getInputBasename() + std::string(".solver"))
, flag_m(std::ios::out)
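// Physical boundary type per dimension; presumably filled from the bcx/bcy/bcz
// input strings via convertToEnumBoundary_m() before being passed to
// initPhysicalBoundary_m().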
const Boundary bcs[AMREX_SPACEDIM] = {
if (std::filesystem::exists(fname_m)) {
unsigned short baseLevel,
unsigned short finestLevel,
bool reset = !prevAsGuess;
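// When the previous solution is not reused as initial guess, the potential on
// every level is zeroed below before the multigrid iteration starts.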
for (int lev = nlevel_m - 1; lev > -1; --lev) {
for (int lev = 0; lev < nlevel_m; ++lev) {
int ilev = lbase_m + lev;
phi[ilev]->setVal(0.0, phi[ilev]->nGrow());
throw OpalException("AmrMultiGrid::setMaxNumberOfIterations()",
"The max. number of iterations needs to be positive!");
for (unsigned int i = 0; i < AMREX_SPACEDIM; ++i) {
case Boundary::DIRICHLET:
case Boundary::PERIODIC:
throw OpalException("AmrMultiGrid::initPhysicalBoundary_m()",
"This type of boundary is not supported");
go_t tmp = bc_m[i]->getNumberOfPoints();
const amrex::Vector<AmrGeometry_t>& geom,
amrex::Periodicity period(AmrIntVect_t(D_DECL(0, 0, 0)));
for (int lev = 0; lev < nlevel_m; ++lev) {
rho[ilev]->boxArray(),
rho[ilev]->DistributionMap(),
mglevel_m[lev]->refmask->setVal(AmrMultiGridLevel_t::Refined::NO, 2);
mglevel_m[lev]->refmask->FillBoundary(period);
amrex::BoxArray ba = mglevel_m[lev]->grids;
mglevel_m[lev]->crsemask->setVal(AmrMultiGridLevel_t::Refined::NO, 2);
for (int lev = 1; lev < nlevel_m; ++lev) {
mglevel_m[lev]->crsemask->setVal(AmrMultiGridLevel_t::Refined::YES, 0);
mglevel_m[lev]->crsemask->setDomainBndry(AmrMultiGridLevel_t::Mask::PHYSBNDRY,
mglevel_m[lev]->crsemask->FillBoundary(period);
for (int lev = 0; lev < nlevel_m-1; ++lev) {
amrex::BoxArray ba = mglevel_m[lev]->grids;
ba = amrex::intersect(mglevel_m[lev+1]->grids, ba);
amrex::DistributionMapping dmap(ba, comm_mp->getSize());
refined.setVal(AmrMultiGridLevel_t::Refined::YES);
mglevel_m[lev]->refmask->copy(refined, 0, 0, 1, 0, 2);
mglevel_m[lev]->refmask->setDomainBndry(AmrMultiGridLevel_t::Mask::PHYSBNDRY,
mglevel_m[lev]->refmask->FillBoundary(period);
for (int lev = 0; lev < nlevel_m; ++lev) {
for (int lev = 0; lev < nlevel_m; ++lev)
std::vector<scalar_t> rhsNorms;
std::vector<scalar_t> resNorms;
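// Convergence test: each level's right-hand-side norm is scaled by the
// tolerance eps_m (lambda below); the iteration counts as converged once every
// level's residual norm lies below its scaled RHS norm (see isConverged_m).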
[this](double& val){ val *= eps_m; });
for (int lev = 0; lev < nlevel_m; ++lev) {
this->writeResidualNorm_m();
return std::accumulate(resNorms.begin(),
std::plus<scalar_t>());
std::vector<scalar_t>& resNorms)
return std::equal(resNorms.begin(), resNorms.end(),
std::less<scalar_t>());
Teuchos::RCP<vector_t>& r,
const Teuchos::RCP<vector_t>& b,
const Teuchos::RCP<vector_t>& x)
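// residual_m: composite residual r = b - A*x on this level. If a finer level
// exists (Bfine_p != null), the coarse-fine interface terms are added and the
// result is restricted to cells not covered by the finer level (UnCovered_p);
// otherwise the plain update r = b - A*x is used.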
if ( mglevel_m[level]->Bfine_p != Teuchos::null ) {
mglevel_m[level]->Awf_p->apply(*x, fine2crse,
if ( mglevel_m[level]->Bcrse_p != Teuchos::null ) {
Teuchos::RCP<vector_t> uncov_Ax = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, true) );
mglevel_m[level]->UnCovered_p->apply(fine2crse, *uncov_Ax);
Teuchos::RCP<vector_t> uncov_b = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, true) );
mglevel_m[level]->UnCovered_p->apply(*b, *uncov_b);
r->update(1.0, *uncov_b, -1.0, *uncov_Ax, 0.0);
Teuchos::RCP<vector_t> Ax = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, true) );
if ( mglevel_m[level]->Bcrse_p != Teuchos::null ) {
r->update(1.0, *b, -1.0, *Ax, 0.0);
Teuchos::RCP<vector_t> Ax = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, true) );
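// Downward leg of the V-cycle (relax_m): save the current potential, zero the
// error on the coarser level, recurse, and afterwards add the prolongated
// coarse-grid correction back onto phi (phi = phi_save + error), see below.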
Teuchos::RCP<vector_t> phi_save = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p) );
Tpetra::deep_copy(*phi_save, *mglevel_m[level]->phi_p);
mglevel_m[level-1]->error_p->putScalar(0.0);
Teuchos::RCP<vector_t> tmp = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p) );
Tpetra::deep_copy(*mglevel_m[level]->residual_p, *tmp);
Teuchos::RCP<vector_t> derror = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, true) );
mglevel_m[level]->error_p->update(1.0, *derror, 1.0);
mglevel_m[level]->phi_p->update(1.0, *phi_save, 1.0, *mglevel_m[level]->error_p, 0.0);
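// residual_no_fine_m: residual on a level ignoring coverage by finer levels,
// i.e. result = b - (Anf*rhs + Bcrse*crs_rhs); the coarse boundary term is
// only added when a coarser level exists.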
Teuchos::RCP<vector_t>& result,
const Teuchos::RCP<vector_t>& rhs,
const Teuchos::RCP<vector_t>& crs_rhs,
const Teuchos::RCP<vector_t>& b)
if ( mglevel_m[level]->Bcrse_p != Teuchos::null ) {
mglevel_m[level]->Bcrse_p->apply(*crs_rhs, crse2fine);
mglevel_m[level]->Anf_p->apply(*rhs, crse2fine,
result->update(1.0, *b, -1.0, crse2fine, 0.0);
void AmrMultiGrid::writeResidualNorm_m() {
out.open("residual.dat", std::ios::app);
for (int lev = 0; lev < nlevel_m; ++lev) {
out << std::setw(15) << std::right << tmp;
"This type of norm not supported.");
std::vector<scalar_t>& resNorms)
if ( comm_mp->getRank() == 0) {
out.open("residual.dat", std::ios::out);
for (int lev = 0; lev < nlevel_m; ++lev)
out << std::setw(14) << std::right << "level" << lev;
for (int lev = 0; lev < nlevel_m; ++lev) {
out << std::setw(15) << std::right << resNorms.back();
Teuchos::RCP<vector_t> efield_p = Teuchos::null;
for (int lev = nlevel_m - 1; lev > -1; --lev) {
for (int d = 0; d < AMREX_SPACEDIM; ++d) {
efield[ilev][d]->setVal(0.0, efield[ilev][d]->nGrow());
efield_p->putScalar(0.0);
const amrex::Vector<AmrField_u>& phi,
const bool& matrices)
const amrex::Vector<AmrField_u>& phi,
const bool& matrices)
D_DECL( invdx[0] * invdx[0],
invdx[2] * invdx[2] )
mfi.isValid(); ++mfi)
const box_t& tbx = mfi.tilebox();
const int* lo = tbx.loVect();
const int* hi = tbx.hiVect();
for (int i = lo[0]; i <= hi[0]; ++i) {
for (int j = lo[1]; j <= hi[1]; ++j) {
#if AMREX_SPACEDIM == 3
for (int k = lo[2]; k <= hi[2]; ++k) {
#if AMREX_SPACEDIM == 3
const amrex::Vector<AmrField_u>& phi,
const bool& matrices)
for (int lev = nlevel_m-1; lev < (int)smoother_m.size(); ++lev) {
for (int lev = 0; lev < nlevel_m; ++lev) {
this->open_m(lev, matrices);
D_DECL( invdx[0] * invdx[0],
invdx[2] * invdx[2] )
for (amrex::MFIter mfi(*mglevel_m[lev]->mask, true);
mfi.isValid(); ++mfi)
const box_t& tbx = mfi.tilebox();
const int* lo = tbx.loVect();
const int* hi = tbx.hiVect();
for (int i = lo[0]; i <= hi[0]; ++i) {
for (int j = lo[1]; j <= hi[1]; ++j) {
#if AMREX_SPACEDIM == 3
for (int k = lo[2]; k <= hi[2]; ++k) {
D_DECL(ii, jj, kk), rfab);
mglevel_m[lev]->rho_p->replaceGlobalValue(gidx, rhofab(iv, 0));
mglevel_m[lev]->phi_p->replaceGlobalValue(gidx, pfab(iv, 0));
#if AMREX_SPACEDIM == 3
if ( matrices && lev > lbase_m ) {
const bool& matrices)
mglevel_m[level]->Bcrse_p = Teuchos::rcp(
int nNeighbours = AMREX_D_TERM(2, * 2, * 2);
nNeighbours = 2 * AMREX_SPACEDIM * AMREX_D_TERM(2, * 2, * 2);
mglevel_m[level]->Bfine_p = Teuchos::rcp(
int nPhysBoundary = 2 * AMREX_SPACEDIM * nBcPoints_m;
int nIntBoundary = AMREX_SPACEDIM * interface_mp->getNumberOfPoints();
int nEntries = (AMREX_SPACEDIM << 1) + 2 + nPhysBoundary + nIntBoundary;
nEntries = (AMREX_SPACEDIM << 1) + 5 + nPhysBoundary + nIntBoundary;
mglevel_m[level]->UnCovered_p = Teuchos::rcp(
for (int d = 0; d < AMREX_SPACEDIM; ++d) {
const bool& matrices)
mglevel_m[level]->Anf_p->fillComplete();
for (int d = 0; d < AMREX_SPACEDIM; ++d)
mglevel_m[level]->G_p[d]->fillComplete();
mglevel_m[level]->Awf_p->fillComplete();
mglevel_m[level]->UnCovered_p->fillComplete();
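// Second-order Laplacian stencil (no-fine Poisson matrix): each interior
// neighbour contributes +1/dx_d^2 off-diagonal, the diagonal collects
// -2/dx_d^2 per dimension, and the mask decides whether a neighbour is instead
// folded into the physical boundary or the coarse-fine interface.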
for (int d = 0; d < AMREX_SPACEDIM; ++d) {
for (int shift = -1; shift <= 1; shift += 2) {
switch ( mfab(biv) )
case AmrMultiGridLevel_t::Mask::INTERIOR:
case AmrMultiGridLevel_t::Mask::COVERED:
map[mglevel_m[level]->serialize(biv)] += invdx2[d];
case AmrMultiGridLevel_t::Mask::BNDRY:
throw OpalException("AmrMultiGrid::buildNoFinePoissonMatrix_m()",
"Error in mask for level " + std::to_string(level) + "!");
case AmrMultiGridLevel_t::Mask::PHYSBNDRY:
mglevel_m[level]->applyBoundary(biv, d, map,
throw OpalException("AmrMultiGrid::buildNoFinePoissonMatrix_m()",
"Error in mask for level " + std::to_string(level) + "!");
map[gidx] += AMREX_D_TERM(- 2.0 * invdx2[0],
mglevel_m[level]->Anf_p->insertGlobalValues(gidx,
if ( rfab(iv) == AmrMultiGridLevel_t::Refined::YES )
for (int d = 0; d < AMREX_SPACEDIM; ++d) {
for (int shift = -1; shift <= 1; shift += 2) {
if ( rfab(biv) != AmrMultiGridLevel_t::Refined::YES )
switch ( mfab(biv) )
case AmrMultiGridLevel_t::Mask::COVERED:
case AmrMultiGridLevel_t::Mask::INTERIOR:
map[mglevel_m[level]->serialize(biv)] += invdx2[d];
map[gidx] -= invdx2[d];
case AmrMultiGridLevel_t::Mask::BNDRY:
throw OpalException("AmrMultiGrid::buildCompositePoissonMatrix_m()",
"Error in mask for level " + std::to_string(level) + "!");
map[gidx] -= invdx2[d];
case AmrMultiGridLevel_t::Mask::PHYSBNDRY:
mglevel_m[level]->applyBoundary(biv, d, map,
map[gidx] -= invdx2[d];
throw OpalException("AmrMultiGrid::buildCompositePoissonMatrix_m()",
"Error in mask for level " + std::to_string(level) + "!");
scalar_t invavg = AMREX_D_PICK(1.0, 0.5, 0.25);
scalar_t value = - invavg * invcdx[d] * invfdx[d];
for (int d1 = 0; d1 < 2; ++d1) {
#if AMREX_SPACEDIM == 3
for (int d2 = 0; d2 < 2; ++d2) {
fake[(d+1)%AMREX_SPACEDIM] = d1;
#if AMREX_SPACEDIM == 3
fake[(d+2)%AMREX_SPACEDIM] = d2;
#if AMREX_SPACEDIM == 3
mglevel_m[level]->Awf_p->insertGlobalValues(gidx,
mglevel_m[level]->UnCovered_p->insertGlobalValues(gidx,
D_DECL(const go_t& ii,
if ( rfab(iv) == AmrMultiGridLevel_t::Refined::NO || level == lfine_m )
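// Restriction operator: each coarse cell receives the plain average of its
// 2^D fine cells, i.e. every fine-cell weight is AMREX_D_PICK(0.5, 0.25, 0.125).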
indices.reserve(2 << (AMREX_SPACEDIM - 1));
values.reserve(2 << (AMREX_SPACEDIM - 1));
for (int iref = 0; iref < 2; ++iref) {
for (int jref = 0; jref < 2; ++jref) {
#if AMREX_SPACEDIM == 3
for (int kref = 0; kref < 2; ++kref) {
AmrIntVect_t riv(D_DECL(ii + iref, jj + jref, kk + kref));
values.push_back( AMREX_D_PICK(0.5, 0.25, 0.125) );
#if AMREX_SPACEDIM == 3
mglevel_m[level]->R_p->insertGlobalValues(gidx,
mglevel_m[level]->I_p->insertGlobalValues(gidx,
for (int d = 0; d < AMREX_SPACEDIM; ++d) {
for (int shift = -1; shift <= 1; shift += 2) {
if ( mfab(niv) == AmrMultiGridLevel_t::Mask::BNDRY )
civ.coarsen(mglevel_m[level]->refinement());
interface_mp->coarse(civ, map, invdx2[d], d, shift, cfab,
else if ( mfab(niv) == AmrMultiGridLevel_t::Mask::PHYSBNDRY ) {
throw OpalException("AmrMultiGrid::buildCrseBoundaryMatrix_m()",
"Fine meshes shouldn't be connected "
"to physical (i.e. mesh) boundary!");
mglevel_m[level]->Bcrse_p->insertGlobalValues(gidx,
if ( rfab(iv) == AmrMultiGridLevel_t::Refined::YES || level == lfine_m )
scalar_t invavg = AMREX_D_PICK(1, 0.5, 0.25);
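// Coarse-fine flux matching (fine boundary matrix): the fill lambda below
// accumulates, for every fine cell touching the interface, a contribution of
// +/- invavg * invcdx[d] * invfdx[d] into the row of the adjacent coarse cell,
// where invavg averages over the fine faces of that coarse face.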
auto fill = [&](umap_t& map,
D_DECL(int ii, int jj, int kk),
for (int iref = ii - begin[0]; iref <= ii + end[0]; ++iref) {
for (int jref = jj - begin[1]; jref <= jj + end[1]; ++jref) {
#if AMREX_SPACEDIM == 3
for (int kref = kk - begin[2]; kref <= kk + end[2]; ++kref) {
if ( (riv[d] >> 1) == iv[d] ) {
scalar_t value = - invavg * invcdx[d] * invfdx[d];
scalar_t value = invavg * invcdx[d] * invfdx[d];
map[mglevel_m[level+1]->serialize(riv)] += value;
#if AMREX_SPACEDIM == 3
for (int d = 0; d < AMREX_SPACEDIM; ++d) {
for (int shift = -1; shift <= 1; shift += 2) {
covered[d] += shift;
if ( rfab(covered) == AmrMultiGridLevel_t::Refined::YES &&
int begin[AMREX_SPACEDIM] = { D_DECL( int(d == 0), int(d == 1), int(d == 2) ) };
int end[AMREX_SPACEDIM] = { D_DECL( int(d != 0), int(d != 1), int(d != 2) ) };
int ii = iv[0] << 1;
int jj = iv[1] << 1;
#if AMREX_SPACEDIM == 3
int kk = iv[2] << 1;
fill(map, D_DECL(ii, jj, kk), &begin[0], &end[0], d, iv, shift);
int ii = covered[0] << 1;
int jj = covered[1] << 1;
#if AMREX_SPACEDIM == 3
int kk = covered[2] << 1;
fill(map, D_DECL(ii, jj, kk), &begin[0], &end[0], d, iv, shift);
mglevel_m[level]->Bfine_p->insertGlobalValues(gidx,
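// Gradient matrix: central difference 0.5 * (phi_{i+1} - phi_{i-1}) / dx in
// each direction; neighbours that are covered, on the coarse-fine interface,
// or on the physical boundary get their stencil entry modified below.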
case AmrMultiGridLevel_t::Mask::INTERIOR:
case AmrMultiGridLevel_t::Mask::COVERED:
map[mglevel_m[level]->serialize(iv)] -= shift * 0.5 * invdx[dir];
case AmrMultiGridLevel_t::Mask::BNDRY:
throw OpalException("AmrMultiGrid::buildGradientMatrix_m()",
"Error in mask for level " + std::to_string(level) + "!");
scalar_t value = - shift * 0.5 * invdx[dir];
map[mglevel_m[level]->serialize(tmp)] += 2.0 * value;
map[mglevel_m[level]->serialize(tmp)] -= value;
case AmrMultiGridLevel_t::Mask::PHYSBNDRY:
scalar_t value = - shift * 0.5 * invdx[dir];
mglevel_m[level]->applyBoundary(iv, dir,
for (int d = 0; d < AMREX_SPACEDIM; ++d) {
for (int shift = -1; shift <= 1; shift += 2) {
check(niv, mfab, d, shift);
mglevel_m[level]->G_p[d]->insertGlobalValues(gidx,
Teuchos::RCP<vector_t>& mv)
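// amrex2trilinos_m: copy one component of an AMReX MultiFab into a Tpetra
// vector, cell by cell, addressing entries through the level's serialized
// global index (trilinos2amrex_m below does the reverse copy).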
for (amrex::MFIter mfi(mf, true); mfi.isValid(); ++mfi) {
const amrex::Box& tbx = mfi.tilebox();
const amrex::FArrayBox& fab = mf[mfi];
const int* lo = tbx.loVect();
const int* hi = tbx.hiVect();
for (int i = lo[0]; i <= hi[0]; ++i) {
for (int j = lo[1]; j <= hi[1]; ++j) {
#if AMREX_SPACEDIM == 3
for (int k = lo[2]; k <= hi[2]; ++k) {
mv->replaceGlobalValue(gidx, fab(iv, comp));
#if AMREX_SPACEDIM == 3
const Teuchos::RCP<vector_t>& mv)
Teuchos::ArrayRCP<const amr::scalar_t> data = mv->get1dView();
for (amrex::MFIter mfi(mf, true); mfi.isValid(); ++mfi) {
const amrex::Box& tbx = mfi.tilebox();
amrex::FArrayBox& fab = mf[mfi];
const int* lo = tbx.loVect();
const int* hi = tbx.hiVect();
for (int i = lo[0]; i <= hi[0]; ++i) {
for (int j = lo[1]; j <= hi[1]; ++j) {
#if AMREX_SPACEDIM == 3
for (int k = lo[2]; k <= hi[2]; ++k) {
fab(iv, comp) = data[lidx];
#if AMREX_SPACEDIM == 3
indices.reserve(map.size());
values.reserve(map.size());
[&](const std::pair<const go_t, scalar_t>& entry)
if ( entry.first < 0 ) {
"Negative matrix index!");
indices.push_back(entry.first);
values.push_back(entry.second);
Teuchos::RCP<vector_t>& e,
Teuchos::RCP<vector_t>& r)
Teuchos::RCP<vector_t> tmp = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p) );
mglevel_m[level-1]->residual_p->putScalar(0.0);
fine2crse, Teuchos::NO_TRANS,
if ( mglevel_m[level-1]->Bcrse_p != Teuchos::null ) {
fine2crse, Teuchos::NO_TRANS,
Teuchos::RCP<vector_t> uncoveredRho = Teuchos::rcp( new vector_t(mglevel_m[level-1]->map_p, true) );
Teuchos::RCP<vector_t> tmp2 = Teuchos::rcp( new vector_t(mglevel_m[level-1]->map_p, true) );
mglevel_m[level-1]->UnCovered_p->apply(fine2crse, *tmp2);
mglevel_m[level-1]->residual_p->update(1.0, *uncoveredRho, -1.0, *tmp2, 1.0);
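// Recombine the coarse-level solution: cells covered by the finer level take
// the value restricted from the fine grid (phicrse), while uncovered cells
// keep their own value (uncov_phi); the two parts live on disjoint cells.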
Teuchos::RCP<vector_t> phicrse = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, false) );
Teuchos::RCP<vector_t> uncov_phi = Teuchos::rcp( new vector_t(mglevel_m[level]->map_p, true) );
mglevel_m[level]->phi_p->update(1.0, *phicrse, 1.0, *uncov_phi, 0.0);
case Interpolater::TRILINEAR:
case Interpolater::LAGRANGE:
"Not yet implemented.");
case Interpolater::PIECEWISE_CONST:
"No such interpolater available.");
switch ( interface ) {
case Interpolater::TRILINEAR:
case Interpolater::LAGRANGE:
case Interpolater::PIECEWISE_CONST:
"No such interpolater for the coarse-fine interface available.");
const bool& rebalance,
const std::string& reuse)
case BaseSolver::BICGSTAB:
case BaseSolver::MINRES:
case BaseSolver::PCPG:
case BaseSolver::CG:
case BaseSolver::GMRES:
case BaseSolver::STOCHASTIC_CG:
case BaseSolver::RECYCLING_CG:
case BaseSolver::RECYCLING_GMRES:
#ifdef HAVE_AMESOS2_KLU2
case BaseSolver::KLU2:
#ifdef HAVE_AMESOS2_SUPERLU
case BaseSolver::SUPERLU:
#ifdef HAVE_AMESOS2_UMFPACK
case BaseSolver::UMFPACK:
#ifdef HAVE_AMESOS2_PARDISO_MKL
case BaseSolver::PARDISO_MKL:
#ifdef HAVE_AMESOS2_MUMPS
case BaseSolver::MUMPS:
#ifdef HAVE_AMESOS2_LAPACK
case BaseSolver::LAPACK:
"No such bottom solver available.");
const std::string& reuse)
case Preconditioner::NONE:
"No such preconditioner available.");
std::map<std::string, Boundary> map;
map["DIRICHLET"] = Boundary::DIRICHLET;
map["OPEN"] = Boundary::OPEN;
map["PERIODIC"] = Boundary::PERIODIC;
auto boundary = map.find(bc);
if ( boundary == map.end() )
throw OpalException("AmrMultiGrid::convertToEnumBoundary_m()",
"No boundary type '" + bc + "'.");
return boundary->second;
std::map<std::string, Interpolater> map;
map["TRILINEAR"] = Interpolater::TRILINEAR;
map["LAGRANGE"] = Interpolater::LAGRANGE;
map["PC"] = Interpolater::PIECEWISE_CONST;
auto interpolater = map.find(interp);
if ( interpolater == map.end() )
throw OpalException("AmrMultiGrid::convertToEnumInterpolater_m()",
"No interpolater '" + interp + "'.");
return interpolater->second;
std::map<std::string, BaseSolver> map;
map["BICGSTAB"] = BaseSolver::BICGSTAB;
map["MINRES"] = BaseSolver::MINRES;
map["PCPG"] = BaseSolver::PCPG;
map["CG"] = BaseSolver::CG;
map["GMRES"] = BaseSolver::GMRES;
map["STOCHASTIC_CG"] = BaseSolver::STOCHASTIC_CG;
map["RECYCLING_CG"] = BaseSolver::RECYCLING_CG;
map["RECYCLING_GMRES"] = BaseSolver::RECYCLING_GMRES;
#ifdef HAVE_AMESOS2_KLU2
map["KLU2"] = BaseSolver::KLU2;
#ifdef HAVE_AMESOS2_SUPERLU
map["SUPERLU"] = BaseSolver::SUPERLU;
#ifdef HAVE_AMESOS2_UMFPACK
map["UMFPACK"] = BaseSolver::UMFPACK;
#ifdef HAVE_AMESOS2_PARDISO_MKL
map["PARDISO_MKL"] = BaseSolver::PARDISO_MKL;
#ifdef HAVE_AMESOS2_MUMPS
map["MUMPS"] = BaseSolver::MUMPS;
#ifdef HAVE_AMESOS2_LAPACK
map["LAPACK"] = BaseSolver::LAPACK;
auto solver = map.find(bsolver);
if ( solver == map.end() )
throw OpalException("AmrMultiGrid::convertToEnumBaseSolver_m()",
"No bottom solver '" + bsolver + "'.");
return solver->second;
std::map<std::string, Preconditioner> map;
map["NONE"] = Preconditioner::NONE;
auto precond = map.find(prec);
if ( precond == map.end() )
throw OpalException("AmrMultiGrid::convertToEnumPreconditioner_m()",
"No preconditioner '" + prec + "'.");
return precond->second;
std::map<std::string, Norm> map;
map["L1_NORM"] = Norm::L1;
map["L2_NORM"] = Norm::L2;
map["LINF_NORM"] = Norm::LINF;
auto n = map.find(norm);
if ( n == map.end() )
"No norm '" + norm + "'.");
std::string dateStr(simtimer.date());
std::string timeStr(simtimer.time());
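// SDDS header written below: one &description block, three &parameter blocks
// (processors, revision, flavor) and five &column blocks (t, mg_iter,
// bottom_iter, regrid, <norm name>), followed by ascii data mode.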
std::string indent(" ");
outfile << "&description\n"
<< "' " << dateStr << " " << timeStr << "\",\n"
<< indent << "contents=\"solver info\"\n"
outfile << "&parameter\n"
<< indent << "name=processors,\n"
<< indent << "type=long,\n"
<< indent << "description=\"Number of Cores used\"\n"
outfile << "&parameter\n"
<< indent << "name=revision,\n"
<< indent << "type=string,\n"
<< indent << "description=\"git revision of opal\"\n"
outfile << "&parameter\n"
<< indent << "name=flavor,\n"
<< indent << "type=string,\n"
<< indent << "description=\"OPAL flavor that wrote file\"\n"
outfile << "&column\n"
<< indent << "name=t,\n"
<< indent << "type=double,\n"
<< indent << "units=ns,\n"
<< indent << "description=\"1 Time\"\n"
outfile << "&column\n"
<< indent << "name=mg_iter,\n"
<< indent << "type=long,\n"
<< indent << "units=1,\n"
<< indent << "description=\"2 Number of Multigrid Iterations\"\n"
outfile << "&column\n"
<< indent << "name=bottom_iter,\n"
<< indent << "type=long,\n"
<< indent << "units=1,\n"
<< indent << "description=\"3 Total Number of Bottom Solver Iterations\"\n"
outfile << "&column\n"
<< indent << "name=regrid,\n"
<< indent << "type=bool,\n"
<< indent << "units=1,\n"
<< indent << "description=\"4 Regrid Step\"\n"
outfile << "&column\n"
<< indent << "name=" + snorm_m + ",\n"
<< indent << "type=double,\n"
<< indent << "units=1,\n"
<< indent << "description=\"5 Error\"\n"
<< indent << "mode=ascii,\n"
<< indent << "no_row_counts=1\n"
unsigned int pwi = 10;
std::ofstream outfile;
if ( comm_mp->getRank() == 0 ) {
outfile.precision(15);
outfile.setf(std::ios::scientific, std::ios::floatfield);
if ( flag_m == std::ios::out ) {
<< this->nIter_m << std::setw(pwi) << '\t'
<< this->bIter_m << std::setw(pwi) << '\t'
<< this->regrid_m << std::setw(pwi) << '\t'
void AmrMultiGrid::initTimer_m() {
os << "* ********************* A M R M u l t i G r i d ********************** " << endl
<< "* ******************************************************************** " << endl;