OPAL (Object Oriented Parallel Accelerator Library)  2021.1.99
OPAL
AmrMultiGridLevel.hpp
Go to the documentation of this file.
1 //
2 // Class AmrMultiGridLevel
3 // This class represents a single AMR level, i.e. it stores all matrices
4 // and vectors of a level.
5 //
6 // Copyright (c) 2017 - 2020, Matthias Frey, Paul Scherrer Institut, Villigen PSI, Switzerland
7 // All rights reserved
8 //
9 // Implemented as part of the PhD thesis
10 // "Precise Simulations of Multibunches in High Intensity Cyclotrons"
11 //
12 // This file is part of OPAL.
13 //
14 // OPAL is free software: you can redistribute it and/or modify
15 // it under the terms of the GNU General Public License as published by
16 // the Free Software Foundation, either version 3 of the License, or
17 // (at your option) any later version.
18 //
19 // You should have received a copy of the GNU General Public License
20 // along with OPAL. If not, see <https://www.gnu.org/licenses/>.
21 //
// Compile-time switch: if true, the AMReX cell size is used as-is; if false
// (the shipped default) the mesh spacing is scaled by meshScaling into the
// particle rest frame (see the constructor's dx_m / invdx_m initialization).
#define AMR_NO_SCALE false
23 
24 
/*!
 * Create a single AMR level: store grids, distribution mapping and geometry,
 * initialize all matrix/vector handles to Teuchos::null, compute the mesh
 * spacing in the particle rest frame, build the level mask and the Tpetra
 * map, and allocate the residual and error vectors.
 *
 * @param meshScaling scaling of the mesh into the particle rest frame
 * @param _grids      boxes of this level
 * @param _dmap       AMReX distribution mapping of this level
 * @param _geom       problem geometry of this level
 * @param rr          refinement ratio w.r.t. the next coarser level
 * @param bc          boundary condition per direction (array of size AMREX_SPACEDIM)
 * @param comm        communicator used to build the Tpetra map
 */
template <class MatrixType, class VectorType>
AmrMultiGridLevel<MatrixType,
                  VectorType>::AmrMultiGridLevel(const Vector_t& meshScaling,
                                                 const amrex::BoxArray& _grids,
                                                 const amrex::DistributionMapping& _dmap,
                                                 const AmrGeometry_t& _geom,
                                                 const AmrIntVect_t& rr,
                                                 const boundary_t* bc,
                                                 const Teuchos::RCP<comm_t>& comm)
    : grids(_grids),
      dmap(_dmap),
      geom(_geom),
      map_p(Teuchos::null),
      Anf_p(Teuchos::null),
      R_p(Teuchos::null),
      I_p(Teuchos::null),
      Bcrse_p(Teuchos::null),
      Bfine_p(Teuchos::null),
      Awf_p(Teuchos::null),
      rho_p(Teuchos::null),
      phi_p(Teuchos::null),
      residual_p(Teuchos::null),
      error_p(Teuchos::null),
      UnCovered_p(Teuchos::null),
      refmask(nullptr),
      crsemask(nullptr),
      rr_m(rr)
{
    for (int j = 0; j < AMREX_SPACEDIM; ++j) {
        // gradient matrices are built later
        G_p[j] = Teuchos::null;

        // number of grid points of this level in direction j
        nr_m[j] = _geom.Domain().length(j);

#if AMR_NO_SCALE
        // mesh spacing in particle rest frame
        dx_m[j] = geom.CellSize(j);
        invdx_m[j] = geom.InvCellSize(j);
#else
        // mesh spacing in particle rest frame
        dx_m[j] = meshScaling[j] * geom.CellSize(j);
        // NOTE(review): dx_m is meshScaling * CellSize, so its algebraic
        // inverse would be InvCellSize / meshScaling; multiplying here yields
        // meshScaling / CellSize instead — confirm the intended semantics
        // of invdx_m against its consumers.
        invdx_m[j] = meshScaling[j] * geom.InvCellSize(j);
#endif

        bc_mp[j] = bc[j];
    }

    // classify every cell of the level (interior / boundary / covered ...)
    this->buildLevelMask();

    // Tpetra map assigning the serialized cell indices to processes
    this->buildMap(comm);


    // second ctor argument is Tpetra's zeroOut flag: false = leave the
    // vectors uninitialized (they are overwritten before use)
    residual_p = Teuchos::rcp( new vector_t(map_p, false) );
    error_p = Teuchos::rcp( new vector_t(map_p, false) );
}
79 
80 
81 template <class MatrixType, class VectorType>
83 {
84  map_p = Teuchos::null;
85 
86  Anf_p = Teuchos::null;
87  R_p = Teuchos::null;
88  I_p = Teuchos::null;
89  Bcrse_p = Teuchos::null;
90  Bfine_p = Teuchos::null;
91  Awf_p = Teuchos::null;
92 
93  for (int j = 0; j < AMREX_SPACEDIM; ++j)
94  G_p[j] = Teuchos::null;
95 
96  UnCovered_p = Teuchos::null;
97 
98  rho_p = Teuchos::null;
99  phi_p = Teuchos::null;
100  residual_p = Teuchos::null;
101  error_p = Teuchos::null;
102 }
103 
104 
105 template <class MatrixType, class VectorType>
108 #if AMREX_SPACEDIM == 3
109  return iv[0] + (iv[1] + nr_m[1] * iv[2]) * nr_m[0];
110 #else
111  return iv[0] + iv[1] * nr_m[0];
112 #endif
113 }
114 
115 
116 template <class MatrixType, class VectorType>
118  // it doesn't matter with which direction we check, since it checks all
119  return bc_mp[0]->isBoundary(iv, &nr_m[0]);
120 }
121 
122 
123 template <class MatrixType, class VectorType>
125  umap_t& map,
126  const scalar_t& value)
127 {
128  bool applied = false;
129  for (int d = 0; d < AMREX_SPACEDIM; ++d) {
130  if ( bc_mp[d]->isBoundary(iv, d, &nr_m[0]) ) {
131  applied = true;
132  bc_mp[d]->apply(iv, d, map, value, this, &nr_m[0]);
133  }
134  }
135  return applied;
136 }
137 
138 
139 template <class MatrixType, class VectorType>
141  const basefab_t& fab,
142  umap_t& map,
143  const scalar_t& value)
144 {
145  if ( fab(iv) != Mask::PHYSBNDRY )
146  return false;
147 
148  bool applied = false;
149  for (int d = 0; d < AMREX_SPACEDIM; ++d) {
150  if ( bc_mp[d]->isBoundary(iv, d, &nr_m[0]) ) {
151  applied = true;
152  bc_mp[d]->apply(iv, d, map, value, this, &nr_m[0]);
153  }
154  }
155  return applied;
156 }
157 
158 
159 template <class MatrixType, class VectorType>
161  const lo_t& dir,
162  umap_t& map,
163  const scalar_t& value)
164 {
165  bc_mp[dir]->apply(iv, dir, map, value, this, &nr_m[0]);
166 }
167 
168 
169 template <class MatrixType, class VectorType>
171  amrex::Periodicity period(AmrIntVect_t(D_DECL(0, 0, 0)));
172  mask.reset(new mask_t(grids, dmap, 1, 1));
173  mask->BuildMask(geom.Domain(), period,
174  Mask::COVERED, Mask::BNDRY,
175  Mask::PHYSBNDRY, Mask::INTERIOR);
176  mask->FillBoundary(period);
177 }
178 
179 
180 template <class MatrixType, class VectorType>
182  return rr_m;
183 }
184 
185 
186 template <class MatrixType, class VectorType>
188  return dx_m;
189 }
190 
191 
// Per-direction cell-size accessor (particle rest frame).
// NOTE(review): the declarator line of this member was lost in the
// extraction of this file and its exact return type (by value vs. const
// reference) is not recoverable from the member index — restore it from
// the class declaration before compiling.
template <class MatrixType, class VectorType>
    return dx_m[dir];
}
196 
197 
198 template <class MatrixType, class VectorType>
200  return invdx_m;
201 }
202 
203 
// Per-direction inverse cell-size accessor (particle rest frame).
// NOTE(review): the declarator line of this member was lost in the
// extraction of this file and its exact return type (by value vs. const
// reference) is not recoverable from the member index — restore it from
// the class declaration before compiling.
template <class MatrixType, class VectorType>
    return invdx_m[dir];
}
208 
209 
210 template <class MatrixType, class VectorType>
212  return ( iv.allGT(AmrIntVect_t(D_DECL(-1, -1, -1))) &&
213  iv.allLT(AmrIntVect_t(D_DECL(nr_m[0], nr_m[1], nr_m[2]))) );
214 }
215 
216 
217 template <class MatrixType, class VectorType>
218 void AmrMultiGridLevel<MatrixType, VectorType>::buildMap(const Teuchos::RCP<comm_t>& comm)
219 {
220 
221  go_t localNumElements = 0;
222 
223  Teuchos::Array<go_t> globalindices;
224 
225  for (amrex::MFIter mfi(grids, dmap, true); mfi.isValid(); ++mfi) {
226  const amrex::Box& tbx = mfi.tilebox();
227  const int* lo = tbx.loVect();
228  const int* hi = tbx.hiVect();
229 
230  for (int i = lo[0]; i <= hi[0]; ++i) {
231  for (int j = lo[1]; j <= hi[1]; ++j) {
232 #if AMREX_SPACEDIM == 3
233  for (int k = lo[2]; k <= hi[2]; ++k) {
234 #endif
235  AmrIntVect_t iv(D_DECL(i, j, k));
236 
237  go_t globalidx = serialize(iv);
238 
239  globalindices.push_back(globalidx);
240 
241  ++localNumElements;
242 #if AMREX_SPACEDIM == 3
243  }
244 #endif
245  }
246  }
247  }
248 
249  /*
250  * create map that specifies which processor gets which data
251  */
252 
253  // get smallest global index of this level
254  amrex::Box bx = grids.minimalBox();
255  const int* lo = bx.loVect();
256  AmrIntVect_t lowcorner(D_DECL(lo[0], lo[1], lo[2]));
257 
258  // where to start indexing
259  go_t baseIndex = serialize(lowcorner);
260 
261  // numGlobalElements == N
262  go_t N = grids.numPts();
263 
264  map_p = Teuchos::rcp( new dmap_t(N, globalindices, baseIndex, comm) );
265 }
void serialize(Param_t params, std::ostringstream &os)
serializes params using Boost archive
Definition: MPIHelper.cpp:32
Tpetra::Map< local_ordinal_t, global_ordinal_t, node_t > dmap_t
amrex::IntVect AmrIntVect_t
Definition: AmrDefs.h:48
double scalar_t
Teuchos::RCP< matrix_t > G_p[AMREX_SPACEDIM]
gradient matrices in x, y, and z to compute electric field
std::unordered_map< go_t, scalar_t > umap_t
amrex::FabArray< basefab_t > mask_t
amr::local_ordinal_t lo_t
scalar_t invdx_m[AMREX_SPACEDIM]
inverse cell size in particle rest frame
bool isValid(const AmrIntVect_t &iv) const
bool applyBoundary(const AmrIntVect_t &iv, umap_t &map, const scalar_t &value)
amrex::BaseFab< int > basefab_t
Teuchos::RCP< vector_t > residual_p
residual over all cells
Teuchos::RCP< vector_t > error_p
error over all cells
amr::AmrIntVect_t AmrIntVect_t
bool isBoundary(const AmrIntVect_t &iv) const
amr::AmrGeometry_t AmrGeometry_t
amr::scalar_t scalar_t
const AmrIntVect_t & refinement() const
go_t nr_m[AMREX_SPACEDIM]
number of grid points
const AmrGeometry_t & geom
geometry of this problem
const scalar_t * invCellSize() const
amr::global_ordinal_t go_t
boundary_t bc_mp[AMREX_SPACEDIM]
boundary conditions
std::shared_ptr< AmrBoundary< AmrMultiGridLevel< MatrixType, VectorType > > > boundary_t
const scalar_t * cellSize() const
scalar_t dx_m[AMREX_SPACEDIM]
cell size in particle rest frame
go_t serialize(const AmrIntVect_t &iv) const
void buildMap(const Teuchos::RCP< comm_t > &comm)
Teuchos::RCP< dmap_t > map_p
Tpetra core map.