//
// Class MGPoissonSolver
//   This class contains methods for solving Poisson's equation for the
//   space charge portion of the calculation.
//
//   A smoothed-aggregation-based AMG-preconditioned iterative solver for space charge.
//   \see FFTPoissonSolver
//   \warning This solver is in an EXPERIMENTAL STAGE. For reliable simulations use the FFTPoissonSolver.
//
// Copyright (c) 2008,        Yves Ineichen, ETH Zürich,
//               2013 - 2015, Tülin Kaman, Paul Scherrer Institut, Villigen PSI, Switzerland
//               2017 - 2020, Paul Scherrer Institut, Villigen PSI, Switzerland
// All rights reserved
//
// Implemented as part of the master thesis
// "A Parallel Multigrid Solver for Beam Dynamics"
// and the paper
// "A fast parallel Poisson solver on irregular domains applied to beam dynamics simulations"
// (https://doi.org/10.1016/j.jcp.2010.02.022)
//
// In 2020, the code was ported to the second-generation Trilinos packages,
// i.e., Epetra --> Tpetra and ML --> MueLu. See also issue #507.
//
// This file is part of OPAL.
//
// OPAL is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// You should have received a copy of the GNU General Public License
// along with OPAL. If not, see <https://www.gnu.org/licenses/>.
//
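
// When DBG_STENCIL is defined, the assembled discretization matrix is written
// to "DiscrStencil.dat" in MatrixMarket format (see computePotential() below).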
//#define DBG_STENCIL

#include "MGPoissonSolver.h"

#include "ArbitraryDomain.h"
#include "EllipticDomain.h"
#include "BoxCornerDomain.h"
#include "RectangularDomain.h"

#include "Track/Track.h"
#include "Physics/Physics.h"
#include "Structure/BoundaryGeometry.h"
#include "Attributes/Attributes.h"
#include "Utilities/OpalException.h"
#include "Utilities/Options.h"

#include <Tpetra_Import.hpp>
#include <BelosTpetraAdapter.hpp>

#ifdef DBG_STENCIL
    #include "TpetraExt_MatrixMatrix.hpp"
#endif

#include "Teuchos_CommandLineProcessor.hpp"

#include "BelosLinearProblem.hpp"
#include "BelosRCGSolMgr.hpp"
#include "BelosBlockCGSolMgr.hpp"
#include "BelosBiCGStabSolMgr.hpp"
#include "BelosBlockGmresSolMgr.hpp"
#include "BelosGCRODRSolMgr.hpp"

#include <MueLu_CreateTpetraPreconditioner.hpp>

#include <algorithm>

using Teuchos::RCP;
using Teuchos::rcp;
using Physics::c;

MGPoissonSolver::MGPoissonSolver(PartBunch *beam,
                                 Mesh_t *mesh,
                                 FieldLayout_t *fl,
                                 std::vector<BoundaryGeometry *> geometries,
                                 std::string itsolver,
                                 std::string interpl,
                                 double tol,
                                 int maxiters,
                                 std::string precmode)
    : isMatrixfilled_m(false)
    , useLeftPrec_m(true)
    , geometries_m(geometries)
    , tol_m(tol)
    , maxiters_m(maxiters)
    , comm_mp(new Comm_t(Ippl::getComm()))
    , itsBunch_m(beam)
    , mesh_m(mesh)
    , layout_m(fl)
{
    domain_m = layout_m->getDomain();

    for (int i = 0; i < 3; i++) {
        hr_m[i] = mesh_m->get_meshSpacing(i);
        orig_nr_m[i] = domain_m[i].length();
    }

    precmode_m = STD_PREC;
    if (precmode == "HIERARCHY") precmode_m = REUSE_HIERARCHY;
    else if (precmode == "REUSE") precmode_m = REUSE_PREC;

    repartFreq_m = 1000;
    verbose_m = (Ippl::Info->getOutputLevel() > 3);

    // find the currently active geometry
    currentGeometry = geometries_m[0];
    if (currentGeometry->getFilename() == "") {
        if (currentGeometry->getTopology() == "ELLIPTIC") {
            bp_m = std::unique_ptr<IrregularDomain>(
                new EllipticDomain(currentGeometry, orig_nr_m, hr_m, interpl));

        } else if (currentGeometry->getTopology() == "BOXCORNER") {
            bp_m = std::unique_ptr<IrregularDomain>(
                new BoxCornerDomain(currentGeometry,
                                    orig_nr_m, hr_m, interpl));
            bp_m->compute(itsBunch_m->get_hr(), layout_m->getLocalNDIndex());
        } else if (currentGeometry->getTopology() == "RECTANGULAR") {
            bp_m = std::unique_ptr<IrregularDomain>(
                new RectangularDomain(currentGeometry,
                                      orig_nr_m, hr_m));
        }
    } else {
        NDIndex<3> localId = layout_m->getLocalNDIndex();
        if (localId[0].length() != domain_m[0].length() ||
            localId[1].length() != domain_m[1].length()) {
            throw OpalException("MGPoissonSolver::MGPoissonSolver",
                                "The class ArbitraryDomain only works with parallelization\n"
                                "in z-direction.\n"
                                "Please set PARFFTX=FALSE, PARFFTY=FALSE, PARFFTT=TRUE in\n"
                                "the definition of the field solver in the input file.\n");
        }
        Vector_t hr = (currentGeometry->getmaxcoords() -
                       currentGeometry->getmincoords()) / orig_nr_m;
        bp_m = std::unique_ptr<IrregularDomain>(
            new ArbitraryDomain(currentGeometry, orig_nr_m, hr, interpl));
    }

    map_p = Teuchos::null;
    A = Teuchos::null;
    LHS = Teuchos::null;
    RHS = Teuchos::null;
    prec_mp = Teuchos::null;

    numBlocks_m = Options::numBlocks;
    recycleBlocks_m = Options::recycleBlocks;
    nLHS_m = Options::nLHS;
    setupMueLuList();
    setupBelosList();
    problem_mp = rcp(new Belos::LinearProblem<TpetraScalar_t,
                                              TpetraMultiVector_t,
                                              TpetraOperator_t>());

    // setup Belos solver
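    // CG and GMRES switch to their recycling variants (RCG, GCRO-DR) as soon
    // as a Krylov recycling configuration (numBlocks_m, recycleBlocks_m) is
    // given; BiCGStab is run with a right preconditioner (useLeftPrec_m = false).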
    if (itsolver == "CG") {
        if (numBlocks_m == 0 || recycleBlocks_m == 0) {
            solver_mp = rcp(new Belos::BlockCGSolMgr<TpetraScalar_t,
                                                     TpetraMultiVector_t,
                                                     TpetraOperator_t>());
        } else {
            solver_mp = rcp(new Belos::RCGSolMgr<TpetraScalar_t,
                                                 TpetraMultiVector_t,
                                                 TpetraOperator_t>());
        }
    } else if (itsolver == "BICGSTAB") {
        useLeftPrec_m = false;
        solver_mp = rcp(new Belos::BiCGStabSolMgr<TpetraScalar_t,
                                                  TpetraMultiVector_t,
                                                  TpetraOperator_t>());
    } else if (itsolver == "GMRES") {
        if (numBlocks_m == 0 || recycleBlocks_m == 0) {
            solver_mp = rcp(new Belos::BlockGmresSolMgr<TpetraScalar_t,
                                                        TpetraMultiVector_t,
                                                        TpetraOperator_t>());
        } else {
            solver_mp = rcp(new Belos::GCRODRSolMgr<TpetraScalar_t,
                                                    TpetraMultiVector_t,
                                                    TpetraOperator_t>());
        }
    }

    solver_mp->setParameters(rcp(&belosList, false));

    // all timers used here
    FunctionTimer1_m = IpplTimings::getTimer("BGF-IndexCoordMap");
    FunctionTimer2_m = IpplTimings::getTimer("computeMap");
    FunctionTimer3_m = IpplTimings::getTimer("IPPL to RHS");
    FunctionTimer4_m = IpplTimings::getTimer("ComputeStencil");
    // NOTE: the labels of timers 5-7 are reconstructed placeholders
    FunctionTimer5_m = IpplTimings::getTimer("ComputePreconditioner");
    FunctionTimer6_m = IpplTimings::getTimer("SolverSetup");
    FunctionTimer7_m = IpplTimings::getTimer("Solve");
    FunctionTimer8_m = IpplTimings::getTimer("LHS to IPPL");
}

void MGPoissonSolver::deletePtr() {
    map_p = Teuchos::null;
    A = Teuchos::null;
    LHS = Teuchos::null;
    RHS = Teuchos::null;
    prec_mp = Teuchos::null;
    isMatrixfilled_m = false;
}

MGPoissonSolver::~MGPoissonSolver() {
    deletePtr();
    solver_mp = Teuchos::null;
    problem_mp = Teuchos::null;
}

void MGPoissonSolver::computePotential(Field_t& /*rho*/, Vector_t /*hr*/, double /*zshift*/) {
    throw OpalException("MGPoissonSolver", "not implemented yet");
}

void MGPoissonSolver::computeMap(NDIndex<3> localId) {
    // rebuild map, matrix and vectors only every repartFreq_m steps
    // (the exact guard condition is reconstructed)
    if (itsBunch_m->getLocalTrackStep() % repartFreq_m == 0) {
        deletePtr();
        IPPLToMap3D(localId);

        extrapolateLHS();
    }
}

void MGPoissonSolver::extrapolateLHS() {
    // Aitken-Neville:
    //   P_{i,0}(x) := y_i,  i = 0..n
    //   P_{i,k}(x) := [(x - x_i) P_{i+1,k-1}(x) - (x - x_{i+k}) P_{i,k-1}(x)] / (x_{i+k} - x_i),
    //   k = 1..n,  i = 0..n-k

    // we also have to redistribute LHS
    if (Teuchos::is_null(LHS)) {
        LHS = rcp(new TpetraVector_t(map_p));
        LHS->putScalar(1.0);
    } else {
        RCP<TpetraVector_t> tmplhs = rcp(new TpetraVector_t(map_p));
        Tpetra::Import<> importer(map_p, LHS->getMap());
        tmplhs->doImport(*LHS, importer, Tpetra::CombineMode::ADD);
        LHS = tmplhs;
    }

    // ...and all previously saved LHS
    std::deque<TpetraVector_t>::iterator it = OldLHS.begin();
    if (OldLHS.size() > 0) {
        int n = OldLHS.size();
        for (int i = 0; i < n; ++i) {
            TpetraVector_t tmplhs(map_p);
            Tpetra::Import<> importer(map_p, it->getMap());
            tmplhs.doImport(*it, importer, Tpetra::CombineMode::ADD);
            *it = tmplhs;
            ++it;
        }
    }

    // extrapolate the last OldLHS.size() solutions to get a new start vector
    it = OldLHS.begin();
    if (nLHS_m == 0 || OldLHS.size() == 0)
        LHS->putScalar(1.0);
    else if (OldLHS.size() == 1)
        *LHS = *it;
    else if (OldLHS.size() == 2) {
        // linear extrapolation (the increment is sequenced before the second
        // dereference to avoid unspecified evaluation order)
        TpetraVector_t& newest = *it;
        ++it;
        LHS->update(2.0, newest, -1.0, *it, 0.0);
    } else if (OldLHS.size() > 2) {
        int n = OldLHS.size();
        P_mp = rcp(new TpetraMultiVector_t(map_p, nLHS_m, false));
        for (int i = 0; i < n; ++i) {
            *(P_mp->getVectorNonConst(i)) = *it++;
        }
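        // In-place Neville recursion on equidistant abscissae x_j = j, where
        // P_0 (= OldLHS[0], push_front order) is the newest solution. The update
        //   P_i <- ((i+k+1) * P_i - (i+1) * P_{i+1}) / k
        // is the recurrence above evaluated at x = -1, i.e. it extrapolates
        // one step beyond the newest solution.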
        for (int k = 1; k < n; ++k) {
            for (int i = 0; i < n - k; ++i) {
                P_mp->getVectorNonConst(i)->update(-(i + 1) / (float)k,
                                                   *(P_mp->getVector(i + 1)),
                                                   (i + k + 1) / (float)k);
            }
        }
        *LHS = *(P_mp->getVector(0));
    } else
        throw OpalException("MGPoissonSolver",
                            "Invalid number of old LHS: " + std::to_string(OldLHS.size()));
}


// Given a charge-density field rho and a set of mesh spacings hr, compute the
// scalar potential by solving Poisson's equation and write it back into rho.
// XXX: use the matrix stencil in the computation directly (no Tpetra, define
// operators on the IPPL grid)
void MGPoissonSolver::computePotential(Field_t& rho, Vector_t hr) {

    Inform msg("OPAL ", INFORM_ALL_NODES);
    nr_m[0] = orig_nr_m[0];
    nr_m[1] = orig_nr_m[1];
    nr_m[2] = orig_nr_m[2];

    bp_m->setNr(nr_m);

    NDIndex<3> localId = layout_m->getLocalNDIndex();

    IpplTimings::startTimer(FunctionTimer1_m);
    // Compute boundary intersections (only do on the first step)
    if (!itsBunch_m->getLocalTrackStep())
        bp_m->compute(hr, localId);
    IpplTimings::stopTimer(FunctionTimer1_m);

    // Define the Map
    INFOMSG(level3 << "* Computing Map..." << endl);
    IpplTimings::startTimer(FunctionTimer2_m);
    computeMap(localId);
    IpplTimings::stopTimer(FunctionTimer2_m);
    INFOMSG(level3 << "* Done." << endl);

    // Allocate the RHS with the new Tpetra Map
    if (Teuchos::is_null(RHS))
        RHS = rcp(new TpetraVector_t(map_p));
    RHS->putScalar(0.0);

    // get charge densities from IPPL field and store in Tpetra vector (RHS)
    if (verbose_m) {
        Ippl::Comm->barrier();
        msg << "* Node:" << Ippl::myNode() << ", Filling RHS..." << endl;
        Ippl::Comm->barrier();
    }
    IpplTimings::startTimer(FunctionTimer3_m);
    int id = 0;
    double scaleFactor = itsBunch_m->getdT();

    if (verbose_m) {
        msg << "* Node:" << Ippl::myNode() << ", Rho for final element: "
            << rho[localId[0].last()][localId[1].last()][localId[2].last()].get()
            << endl;

        Ippl::Comm->barrier();
        msg << "* Node:" << Ippl::myNode() << ", Local nx*ny*nz = "
            << localId[2].last() * localId[0].last() * localId[1].last()
            << endl;
        msg << "* Node:" << Ippl::myNode()
            << ", Number of reserved local elements in RHS: "
            << RHS->getLocalLength() << endl;
        msg << "* Node:" << Ippl::myNode()
            << ", Number of reserved global elements in RHS: "
            << RHS->getGlobalLength() << endl;
        Ippl::Comm->barrier();
    }
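    // The discretized problem is A x = 4*pi*rho: the charge density is divided
    // by the time-step scale factor dT here and the solution is rescaled by dT
    // afterwards. Only grid points inside the irregular domain receive an
    // entry, addressed by the boundary-aware global index bp_m->getIdx().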
    for (int idz = localId[2].first(); idz <= localId[2].last(); idz++) {
        for (int idy = localId[1].first(); idy <= localId[1].last(); idy++) {
            for (int idx = localId[0].first(); idx <= localId[0].last(); idx++) {
                NDIndex<3> l(Index(idx, idx), Index(idy, idy), Index(idz, idz));
                if (bp_m->isInside(idx, idy, idz)) {
                    RHS->replaceGlobalValue(bp_m->getIdx(idx, idy, idz),
                                            4.0 * M_PI * rho.localElement(l) / scaleFactor);
                    ++id; // count local inside points for the verbose report
                }
            }
        }
    }
    IpplTimings::stopTimer(FunctionTimer3_m);
    if (verbose_m) {
        Ippl::Comm->barrier();
        msg << "* Node:" << Ippl::myNode()
            << ", Number of Local Inside Points " << id << endl;
        msg << "* Node:" << Ippl::myNode() << ", Done." << endl;
        Ippl::Comm->barrier();
    }
    // build discretization matrix
    INFOMSG(level3 << "* Building Discretization Matrix..." << endl);
    IpplTimings::startTimer(FunctionTimer4_m);
    if (Teuchos::is_null(A))
        A = rcp(new TpetraCrsMatrix_t(map_p, 7, Tpetra::StaticProfile));
    ComputeStencil(hr, RHS);
    IpplTimings::stopTimer(FunctionTimer4_m);
    INFOMSG(level3 << "* Done." << endl);

#ifdef DBG_STENCIL
    Tpetra::MatrixMarket::Writer<TpetraCrsMatrix_t>::writeSparseFile(
        "DiscrStencil.dat", A);
#endif

    INFOMSG(level3 << "* Computing Preconditioner..." << endl);
    IpplTimings::startTimer(FunctionTimer5_m);
    if (Teuchos::is_null(prec_mp)) {
        Teuchos::RCP<TpetraOperator_t> At = Teuchos::rcp_dynamic_cast<TpetraOperator_t>(A);
        prec_mp = MueLu::CreateTpetraPreconditioner(At, MueLuList_m);
    }

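    // STD_PREC rebuilds the AMG preconditioner from scratch on every solve;
    // REUSE_HIERARCHY re-uses the tentative prolongator ("tP") and rebuilds
    // the rest; REUSE_PREC re-uses the full preconditioner (cf. setupMueLuList()).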
    switch (precmode_m) {
        case REUSE_PREC:
        case REUSE_HIERARCHY: {
            MueLu::ReuseTpetraPreconditioner(A, *prec_mp);
            break;
        }
        case STD_PREC:
        default: {
            Teuchos::RCP<TpetraOperator_t> At = Teuchos::rcp_dynamic_cast<TpetraOperator_t>(A);
            prec_mp = MueLu::CreateTpetraPreconditioner(At, MueLuList_m);
            break;
        }
    }
    IpplTimings::stopTimer(FunctionTimer5_m);
    INFOMSG(level3 << "* Done." << endl);

    // setup preconditioned iterative solver
    // use old LHS solution as initial guess
    INFOMSG(level3 << "* Final Setup of Solver..." << endl);
    IpplTimings::startTimer(FunctionTimer6_m);
    problem_mp->setOperator(A);
    problem_mp->setLHS(LHS);
    problem_mp->setRHS(RHS);

    if (useLeftPrec_m)
        problem_mp->setLeftPrec(prec_mp);
    else
        problem_mp->setRightPrec(prec_mp);

    solver_mp->setProblem(problem_mp);
    if (!problem_mp->isProblemSet()) {
        if (problem_mp->setProblem() == false) {
            ERRORMSG("Belos::LinearProblem failed to set up correctly!" << endl);
        }
    }
    IpplTimings::stopTimer(FunctionTimer6_m);
    INFOMSG(level3 << "* Done." << endl);

    double time = MPI_Wtime();

    INFOMSG(level3 << "* Solving for Space Charge..." << endl);
    IpplTimings::startTimer(FunctionTimer7_m);
    solver_mp->solve();
    IpplTimings::stopTimer(FunctionTimer7_m);
    INFOMSG(level3 << "* Done." << endl);

    std::ofstream timings;
    if (true || verbose_m) {
        time = MPI_Wtime() - time;
        double minTime = 0, maxTime = 0, avgTime = 0;
        reduce(time, minTime, 1, std::less<double>());
        reduce(time, maxTime, 1, std::greater<double>());
        reduce(time, avgTime, 1, std::plus<double>());
        avgTime /= comm_mp->getSize();
        if (comm_mp->getRank() == 0) {
            char filename[50];
            snprintf(filename, sizeof(filename),
                     "timing_MX%d_MY%d_MZ%d_nProc%d_recB%d_numB%d_nLHS%d",
                     orig_nr_m[0], orig_nr_m[1], orig_nr_m[2],
                     comm_mp->getSize(), recycleBlocks_m, numBlocks_m, nLHS_m);

            timings.open(filename, std::ios::app);
            timings << solver_mp->getNumIters() << "\t"
                    //<< time << " " <<
                    << minTime << "\t"
                    << maxTime << "\t"
                    << avgTime << "\t"
                    << numBlocks_m << "\t"
                    << recycleBlocks_m << "\t"
                    << nLHS_m << "\t"
                    //<< OldLHS.size() << "\t" <<
                    << std::endl;

            timings.close();
        }
    }
    // Store new LHS in OldLHS
    if (nLHS_m > 1) OldLHS.push_front(*(LHS.get()));
    if (OldLHS.size() > nLHS_m) OldLHS.pop_back();

    // now transfer the solution back to the IPPL grid
    IpplTimings::startTimer(FunctionTimer8_m);
    id = 0;
    rho = 0.0;
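    // The local entries of LHS are laid out in the order in which the global
    // indices were pushed into the map in IPPLToMap3D(), so a running local
    // counter (id) walks the solution in grid order.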
    for (int idz = localId[2].first(); idz <= localId[2].last(); idz++) {
        for (int idy = localId[1].first(); idy <= localId[1].last(); idy++) {
            for (int idx = localId[0].first(); idx <= localId[0].last(); idx++) {
                NDIndex<3> l(Index(idx, idx), Index(idy, idy), Index(idz, idz));
                if (bp_m->isInside(idx, idy, idz))
                    rho.localElement(l) = LHS->getData()[id++] * scaleFactor;
            }
        }
    }
    IpplTimings::stopTimer(FunctionTimer8_m);

    if (itsBunch_m->getLocalTrackStep() + 1 == (long long)Track::block->localTimeSteps.front()) {
        A = Teuchos::null;
        LHS = Teuchos::null;
        RHS = Teuchos::null;
        prec_mp = Teuchos::null;
    }
}

void MGPoissonSolver::IPPLToMap3D(NDIndex<3> localId) {

    int NumMyElements = 0;
    std::vector<TpetraGlobalOrdinal_t> MyGlobalElements;

    for (int idz = localId[2].first(); idz <= localId[2].last(); idz++) {
        for (int idy = localId[1].first(); idy <= localId[1].last(); idy++) {
            for (int idx = localId[0].first(); idx <= localId[0].last(); idx++) {
                if (bp_m->isInside(idx, idy, idz)) {
                    MyGlobalElements.push_back(bp_m->getIdx(idx, idy, idz));
                    NumMyElements++;
                }
            }
        }
    }

    int indexbase = 0;
    map_p = Teuchos::rcp(new TpetraMap_t(Teuchos::OrdinalTraits<Tpetra::global_size_t>::invalid(),
                                         &MyGlobalElements[0], NumMyElements, indexbase, comm_mp));
}

void MGPoissonSolver::ComputeStencil(Vector_t /*hr*/, Teuchos::RCP<TpetraVector_t> RHS) {

    A->resumeFill();
    A->setAllToScalar(0.0);

    int NumMyElements = map_p->getNodeNumElements();
    auto MyGlobalElements = map_p->getMyGlobalIndices();

    std::vector<TpetraScalar_t> Values(6);
    std::vector<TpetraGlobalOrdinal_t> Indices(6);

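    // Assemble the boundary-aware 7-point stencil row by row: `value` carries
    // the stencil coefficients, `index` the neighbour indices; a neighbour
    // index of -1 marks a neighbour outside the domain, which contributes no
    // matrix column.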
    // stencil entries of the current row (type names as assumed from IrregularDomain)
    IrregularDomain::StencilValue_t value;
    IrregularDomain::StencilIndex_t index;

    for (int i = 0; i < NumMyElements; i++) {

        int NumEntries = 0;

        double scaleFactor = 1.0;

        bp_m->getBoundaryStencil(MyGlobalElements[i], value, scaleFactor);
        RHS->scale(scaleFactor);

        bp_m->getNeighbours(MyGlobalElements[i], index);
        if (index.east != -1) {
            Indices[NumEntries] = index.east;
            Values[NumEntries++] = value.east;
        }
        if (index.west != -1) {
            Indices[NumEntries] = index.west;
            Values[NumEntries++] = value.west;
        }
        if (index.south != -1) {
            Indices[NumEntries] = index.south;
            Values[NumEntries++] = value.south;
        }
        if (index.north != -1) {
            Indices[NumEntries] = index.north;
            Values[NumEntries++] = value.north;
        }
        if (index.front != -1) {
            Indices[NumEntries] = index.front;
            Values[NumEntries++] = value.front;
        }
        if (index.back != -1) {
            Indices[NumEntries] = index.back;
            Values[NumEntries++] = value.back;
        }

        // if the matrix has already been filled (fillComplete()) we can only
        // replace entries
        if (isMatrixfilled_m) {
            // off-diagonal entries
            A->replaceGlobalValues(MyGlobalElements[i], NumEntries, &Values[0], &Indices[0]);
            // diagonal entry
            A->replaceGlobalValues(MyGlobalElements[i], 1, &value.center, &MyGlobalElements[i]);
        } else {
            // off-diagonal entries
            A->insertGlobalValues(MyGlobalElements[i], NumEntries, &Values[0], &Indices[0]);
            // diagonal entry
            A->insertGlobalValues(MyGlobalElements[i], 1, &value.center, &MyGlobalElements[i]);
        }
    }

    RCP<ParameterList_t> params = Teuchos::parameterList();
    params->set("Optimize Storage", true);
    A->fillComplete(params);
    isMatrixfilled_m = true;
}

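// The reported imbalance per rank is 1 + |local - average| / average number of
// owned grid points; its min/max/avg is then reduced over all ranks.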
void MGPoissonSolver::printLoadBalanceStats() {

    // compute some load balance statistics
    size_t myNumPart = map_p->getNodeNumElements();
    size_t NumPart = map_p->getGlobalNumElements() * 1.0 / comm_mp->getSize();
    double imbalance = 1.0;
    if (myNumPart >= NumPart)
        imbalance += (myNumPart - NumPart) / (double)NumPart; // cast avoids integer division
    else
        imbalance += (NumPart - myNumPart) / (double)NumPart;

    double max = 0.0, min = 0.0, avg = 0.0;
    size_t minn = 0, maxn = 0;
    reduce(imbalance, min, 1, std::less<double>());
    reduce(imbalance, max, 1, std::greater<double>());
    reduce(imbalance, avg, 1, std::plus<double>());
    reduce(myNumPart, minn, 1, std::less<size_t>());
    reduce(myNumPart, maxn, 1, std::greater<size_t>());

    avg /= comm_mp->getSize();
    *gmsg << "LBAL min = " << min << ", max = " << max << ", avg = " << avg << endl;
    *gmsg << "min nr gridpoints = " << minn << ", max nr gridpoints = " << maxn << endl;
}

void MGPoissonSolver::setupBelosList() {
    belosList.set("Maximum Iterations", maxiters_m);
    belosList.set("Convergence Tolerance", tol_m);

    if (numBlocks_m != 0 && recycleBlocks_m != 0) { // only set if solver == RCGSolMgr
        belosList.set("Num Blocks", numBlocks_m);              // max number of blocks in Krylov space
        belosList.set("Num Recycled Blocks", recycleBlocks_m); // number of vectors in recycle space
    }
    if (verbose_m) {
        belosList.set("Verbosity", Belos::Errors + Belos::Warnings +
                                   Belos::TimingDetails + Belos::FinalSummary +
                                   Belos::StatusTestDetails);
        belosList.set("Output Frequency", 1);
    } else {
        belosList.set("Verbosity", Belos::Errors + Belos::Warnings);
    }
}

void MGPoissonSolver::setupMueLuList() {
    MueLuList_m.set("problem: type", "Poisson-3D");
    MueLuList_m.set("verbosity", "none");
    MueLuList_m.set("number of equations", 1);
    MueLuList_m.set("max levels", 8);
    MueLuList_m.set("cycle type", "V");

    // heuristic for the maximal coarse size, depending on the number of processors
    int coarsest_size = std::max(comm_mp->getSize() * 10, 1024);
    MueLuList_m.set("coarse: max size", coarsest_size);

    MueLuList_m.set("multigrid algorithm", "sa");
    MueLuList_m.set("sa: damping factor", 1.33);
    MueLuList_m.set("sa: use filtered matrix", true);
    MueLuList_m.set("filtered matrix: reuse eigenvalue", false);

    MueLuList_m.set("repartition: enable", false);
    MueLuList_m.set("repartition: rebalance P and R", false);
    MueLuList_m.set("repartition: partitioner", "zoltan2");
    MueLuList_m.set("repartition: min rows per proc", 800);
    MueLuList_m.set("repartition: start level", 2);

    MueLuList_m.set("smoother: type", "CHEBYSHEV");
    MueLuList_m.set("smoother: pre or post", "both");
    Teuchos::ParameterList smparms;
    smparms.set("chebyshev: degree", 3);
    smparms.set("chebyshev: assume matrix does not change", false);
    smparms.set("chebyshev: zero starting solution", true);
    smparms.set("relaxation: sweeps", 3);
    MueLuList_m.set("smoother: params", smparms);

    MueLuList_m.set("coarse: type", "KLU2");

    MueLuList_m.set("aggregation: type", "uncoupled");
    MueLuList_m.set("aggregation: min agg size", 3);
    MueLuList_m.set("aggregation: max agg size", 27);

    MueLuList_m.set("transpose: use implicit", false);

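    // MueLu "reuse: type" controls how much of the hierarchy survives between
    // solves: "none" rebuilds everything, "tP" keeps the tentative prolongator
    // (REUSE_HIERARCHY), "full" keeps the entire preconditioner (REUSE_PREC).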
    switch (precmode_m) {
        case REUSE_PREC:
            MueLuList_m.set("reuse: type", "full");
            break;
        case REUSE_HIERARCHY:
            MueLuList_m.set("sa: use filtered matrix", false);
            MueLuList_m.set("reuse: type", "tP");
            break;
        case STD_PREC:
        default:
            MueLuList_m.set("reuse: type", "none");
            break;
    }
}

Inform &MGPoissonSolver::print(Inform &os) const {
    os << "* *************** M G P o i s s o n S o l v e r ************************************ " << endl;
    os << "* h " << hr_m << '\n';
    os << "* ********************************************************************************** " << endl;
    return os;
}
Definition: IpplTimings.h:187