OPAL (Object Oriented Parallel Accelerator Library) 2022.1
OPAL
HashPairBuilderPeriodicParallel.h
Go to the documentation of this file.
1// -*- C++ -*-
2/***************************************************************************
3 *
4 * The IPPL Framework
5 *
6 * HashPairBuilderPeriodicParallel follows the Hockney and Eastwood approach to efficiently
7 * find particle pairs. In this version of the code a local Chaining mesh per processor is used to avoid looping
8 * empty buckets.
9 *
10 * Visit http://people.web.psi.ch/adelmann/ for more details
11 *
12 ***************************************************************************/
13
14
15
16#ifndef HASH_PAIR_BUILDER_PERIODIC_PARALLEL_H
17#define HASH_PAIR_BUILDER_PERIODIC_PARALLEL_H
18
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <limits>
#include <set>
#include <vector>
23
24template<class PBase>
26{
27public:
28 enum { Dim = PBase::Dim };
29 typedef typename PBase::Position_t Position_t;
30
31 HashPairBuilderPeriodicParallel(PBase &p) : particles(p) { hr_m = p.get_hr();}
32
33 template<class Pred, class OP>
34 void for_each(const Pred& pred, const OP &op,Vektor<double,3> extend_l, Vektor<double,3> extend_r )
35 {
36 const std::size_t END = std::numeric_limits<std::size_t>::max();
37 std::size_t size = particles.getLocalNum()+particles.getGhostNum();
38
39 // Inform dmsg("debug_msg:");
40 // dmsg << "We use parallel hash pair builder small chaining mesh ****************************" << endl;
41
42 //compute which dimensions are really serial process neighbors itself in this direction
43 Vektor<bool,3> parallel_dims(0,0,0);
44
45 NDIndex<3> globDomain = particles.getFieldLayout().getDomain();
46 NDIndex<3> locDomain = particles.getFieldLayout().getLocalNDIndex();
47
48 parallel_dims[0] = !(globDomain[0]==locDomain[0]);
49 parallel_dims[1] = !(globDomain[1]==locDomain[1]);
50 parallel_dims[2] = !(globDomain[2]==locDomain[2]);
51
52 Vektor<double,3> period;
53 period=extend_r-extend_l;
54
56 Vektor<double,3> extend_l_local, extend_r_local, domain_width_local;
57 for (unsigned i=0; i<3; ++i) {
58 extend_l_local[i] = locDomain[i].first()*hr_m[i]+extend_l[i];
59 extend_r_local[i] = extend_l[i]+(locDomain[i].last()+1)*hr_m[i];
60 domain_width_local[i] = extend_r_local[i]-extend_l_local[i];
61 }
62
63 //make sure that the chaining mesh covers the whole domain and has a gridwidth > r_cut
64 buckets_per_dim[0]=floor(domain_width_local[0]/pred.getRange(0));
65 buckets_per_dim[1]=floor(domain_width_local[1]/pred.getRange(1));
66 buckets_per_dim[2]=floor(domain_width_local[2]/pred.getRange(2));
67
68 for (unsigned dim = 0; dim<3; ++dim)
69 h_chaining[dim] = domain_width_local[dim]/buckets_per_dim[dim];
70
71 //extend the chaining mesh by one layer of chaining cells in each dimension
72 rmin_m = extend_l_local-h_chaining;
73 rmax_m = extend_r_local+h_chaining;
75
76 //dmsg << "local domain iwdth = " << domain_width_local << endl;
77 //dmsg << "local extends : " << extend_l_local << "\t" << extend_r_local << endl;
78 //dmsg << "local extends with chaining: " << rmin_m << "\t" << rmax_m << endl;
79 //dmsg << "buckets per dim = " << buckets_per_dim << endl;
80 //dmsg << "h_chaining = " << h_chaining << endl;
81
82 std::size_t Nbucket = buckets_per_dim[0]*buckets_per_dim[1]*buckets_per_dim[2];
83
84 std::size_t *buckets = new size_t[Nbucket]; //index of first particle in this bucket
85 std::size_t *next = new size_t[size]; //index of next particle in this bucket. END indicates last particle of bucket
86 std::fill(buckets, buckets+Nbucket, END);
87 std::fill(next, next+size, END);
88
89 //in 3D we interact with 14 neighboring cells (including self cell interaction)
90 unsigned neigh = 14;
91
92 int offset[14][3] = {{ 1, 1, 1}, { 0, 1, 1}, {-1, 1, 1},
93 { 1, 0, 1}, { 0, 0, 1}, {-1, 0, 1},
94 { 1,-1, 1}, { 0,-1, 1}, {-1,-1, 1},
95 { 1, 1, 0}, { 0, 1, 0}, {-1, 1, 0},
96 { 1, 0, 0}, { 0, 0, 0}};
97
98 //assign all particles to a bucket
99 for(std::size_t i = 0;i<size;++i)
100 {
101 unsigned bucket_id = get_bucket_id(i,pred);
102 //dmsg << "we got bucket id = " << bucket_id << endl;
103 next[i] = buckets[bucket_id];
104 buckets[bucket_id] = i;
105 }
106
107 double part_count = 0;
108 //loop over all buckets
109 for (int bx=0+int(!parallel_dims[0]); bx<buckets_per_dim[0]-int(!parallel_dims[0]); ++bx) {
110 for (int by=0+int(!parallel_dims[1]); by<buckets_per_dim[1]-int(!parallel_dims[1]); ++by) {
111 for (int bz=0+int(!parallel_dims[2]); bz<buckets_per_dim[2]-int(!parallel_dims[2]); ++bz) {
112 unsigned bucket_id_self = bz*buckets_per_dim[1]*buckets_per_dim[0]+by*buckets_per_dim[0]+bx;
113 //compute index of neighboring bucket to interact with
114 for (unsigned n=0; n<neigh;++n){
115 int bx_neigh, by_neigh, bz_neigh;
116 Vektor<double,3> shift(0,0,0);
117
118 bx_neigh = bx+offset[n][0];
119 //if we are serial in x-dimension we have no cached ghost particles. The local positions get periodically shifted
120 if (!parallel_dims[0]) {
121 if (bx_neigh == 0) {
122 //bucket in -x direction exceed domain boundary
123 bx_neigh+=(buckets_per_dim[0]-2);//consider last bucket in +x instead
124 shift[0] = -period[0];//shift particles in negative x direction by domain size
125 }
126 else if (bx_neigh == (buckets_per_dim[0]-1)) {
127 //bucket in +x direction exceeds domain boundary
128 bx_neigh -= (buckets_per_dim[0]-2);//consider first bucket in +x instead
129 shift[0] = period[0];//shift particles in positive x direction by domain size
130 }
131 }
132 //do the same for y and z direction:
133 by_neigh = by+offset[n][1];
134 if (!parallel_dims[1]) {
135 if (by_neigh == 0) {
136 by_neigh+=(buckets_per_dim[1]-2);
137 shift[1] = -period[0];
138 }
139 else if (by_neigh == (buckets_per_dim[1]-1)) {
140 by_neigh -=(buckets_per_dim[1]-2);
141 shift[1] = period[1];
142 }
143 }
144
145 bz_neigh = bz+offset[n][2];
146 if (!parallel_dims[2]) {
147 if (bz_neigh == 0) {
148 bz_neigh+=(buckets_per_dim[2]-2);
149 shift[2] = -period[2];
150 }
151 else if (bz_neigh == (buckets_per_dim[2]-1)) {
152 bz_neigh -=(buckets_per_dim[2]-2);
153 shift[2] = period[2];
154 }
155 }
156
157 if (bx_neigh >= 0 && bx_neigh<buckets_per_dim[0] &&
158 by_neigh >= 0 && by_neigh<buckets_per_dim[1] &&
159 bz_neigh >= 0 && bz_neigh<buckets_per_dim[2]) {
160
161 unsigned bucket_id_neigh =
162 bz_neigh*buckets_per_dim[1]*buckets_per_dim[0]+by_neigh*buckets_per_dim[0]+bx_neigh;
163
164 //i is index of particle considered in active cahining cell, j is index of neighbor particle considered
165 std::size_t i = buckets[bucket_id_self];
166 std::size_t j;
167
168 //loop over all particles in self cell
169 //self offset avoids double counting in self cell
170 int self_offset = 0;
171 part_count = 0;
172 while (i != END) {
173 part_count++;
174 j = buckets[bucket_id_neigh];
175 //increase offset by number of processed particles in self cell
176 for (int o=0;o<self_offset;o++){
177 j = next[j];
178 }
179 //loop over all particles in neighbor cell
180 while(j != END) {
181 if(pred(particles.R[i], particles.R[j]+shift)) {
182 if (i!=j)
183 op(i, j, particles, shift);
184 }
185 j = next[j];
186 }
187 i = next[i];
188 //adjust self_offset
189 if (bucket_id_self==bucket_id_neigh)
190 self_offset++;
191 else
192 self_offset=0;
193 }
194 }
195 }
196
197 }
198 }
199 }
200
201 delete[] buckets;
202 delete[] next;
203 }
204private:
205
206 //returns the bucket id of particle i
207 template<class Pred>
208 int get_bucket_id(int i, const Pred& /*pred*/)
209 {
210 // Inform dmsg("debug_msg:");
211
212 Vektor<int,3> loc;
213 for (unsigned d=0; d<3; ++d)
214 loc[d] = (particles.R[i][d]-rmin_m[d])/h_chaining[d];
215 int bucket_id = loc[2]*buckets_per_dim[1]*buckets_per_dim[0]+loc[1]*buckets_per_dim[0]+loc[0];
216 // dmsg << "bucket id of particle " << i << "with coords " << particles.R[i] << " = [" << loc[0] << "," << loc[1] << "," << loc[2] << "] => bucket id = " << bucket_id << endl;
217 // dmsg << particles.R[i][0] << "," << particles.R[i][1] << "," << particles.R[i][2] << "," << bucket_id << endl;
218 return bucket_id;
219 }
220
221 PBase &particles;
227};
228
229
230#endif
const unsigned Dim
T::PETE_Expr_t::PETE_Return_t max(const PETE_Expr< T > &expr, NDIndex< D > &loc)
Definition: ReductionLoc.h:84
PETE_TUTree< FnFloor, typename T::PETE_Expr_t > floor(const PETE_Expr< T > &l)
Definition: PETE.h:733
Position_t
Definition: Types.h:43
void for_each(const Pred &pred, const OP &op, Vektor< double, 3 > extend_l, Vektor< double, 3 > extend_r)