src/expde/grid/gpar.cc

//    expde: expression templates for partial differential equations.
//    Copyright (C) 2001  Christoph Pflaum
//    This program is free software; you can redistribute it and/or modify
//    it under the terms of the GNU General Public License as published by
//    the Free Software Foundation; either version 2 of the License, or
//    (at your option) any later version.
//
//    This program is distributed in the hope that it will be useful,
//    but WITHOUT ANY WARRANTY; without even the implied warranty of
//    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
//    GNU General Public License for more details.
//
//                 SEE  Notice1.doc made by
//                 LAWRENCE LIVERMORE NATIONAL LABORATORY
//

// ------------------------------------------------------------
//
// gparallel.cc
//
// parallel grid generation Part 1.
//
// ------------------------------------------------------------


// Include level 0:
#ifdef COMP_GNUOLD
 #include <iostream.h>
 #include <fstream.h>
 #include <time.h>
 #include <math.h>
#else
 #include <iostream>
 #include <fstream>
 #include <ctime>
 #include <cmath>
#endif


// Include level 0:
#include "../parser.h"

// Include level 1:
#include "../paramete.h"
#include "../abbrevi.h"
#include "../math_lib/math_lib.h"

// Include level 2:
#include "../basic/basic.h"

// Include level 3:
#include "../domain/domain.h"

// Include level 4:
#include "../formulas/boundy.h"
#include "../formulas/loc_sten.h"

// Include level 5:
#include "gpar.h"
#include "parallel.h"
#include "mgcoeff.h"
#include "sto_man.h"
#include "gridbase.h"
#include "grid.h"
// ------------------------------------------------------------
// 1.  Send neighbour fine boundary cells
// 2.  Send neighbour cells all levels
// 3.  Send neighbour points direct
// 4.  Send neighbour points all levels
// 5.  Send multi grid points
// 6.  member functions for parallel edge correction
// 7.  Lists
// ------------------------------------------------------------

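// Overview: each exchange below loops over the 26 neighbour directions of a
// processor block: d = 0..5 are faces (dir_3D), d = 6..17 are edges
// (Edges_cell), d = 18..25 are corners (dir_sons).  Every step first
// exchanges a message count and then a buffer of fixed-size integer
// records, each time with a matched MPI_Irecv/MPI_Isend pair completed by
// MPI_Waitall, so neither partner has to post its calls in a particular
// order.
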

// ------------------------------------------------------------
// 1. Send neighbour boundary cells
// ------------------------------------------------------------


// Step II.9 of grid generation:

bool Grid_base::Send_boundary_cell_in_direction(Point_hashtable0* poi,
                                                Index3D next_index) {
  Index3D I, Icell;
  int co;
  
  I = poi->ind;
  if(poi->isCell()) {
    if(poi->typ==fine_bo_cell) {
      // first part: for send on levels
      Icell = I;
      for(co=0;co<8;++co) {
        // study corners of cell
        if(Icell.neighbour((dir_sons)co) << next_index)
          return true;
      }
      // second part: for restriction
      Icell = I.father();
      for(co=0;co<8;++co) {
        // study corners of cell
        if(Icell.neighbour((dir_sons)co) << next_index) {
          return true;
        }
      }
    }
  }
  return false;
}

void Grid_base::Send_boundary_cells_parallel() {
  int k,l,d;
  Index3D next, I, I_edge, Icorner;
  Index3D next_index;

  int number_bo_send, number_bo_rec, num_message, num, sp;
  IndexEdge edge;

  int* send_info;
  int* receive_info;

  MPI_Request req[2];
  MPI_Status status[2];

  int rank_source, rank_destination;

  for(d=0;d<26;++d) {
    // 0. index of the receiving next processor
    next_index = Give_next_on_level(d,my_index);

    // 1. part: basic information about send and receive
    if(d<6) {
      rank_source      = give_next_rank_source((dir_3D)d,n_parallel-1);
      rank_destination = give_next_rank_destination((dir_3D)d,n_parallel-1);
    }
    else if(d<18) {
      rank_source      = give_next_rank_source((Edges_cell)(d-6),n_parallel-1);
      rank_destination = give_next_rank_destination((Edges_cell)(d-6),
                                                    n_parallel-1);
    }
    else {
      rank_source      = give_next_rank_source((dir_sons)(d-18),n_parallel-1);
      rank_destination = give_next_rank_destination((dir_sons)(d-18),
                                                    n_parallel-1);
    }

    // 2. part: count the number of boundary cells to be sent
    number_bo_send = 0;
    iterate_hash0 {
      if(Send_boundary_cell_in_direction(point0,next_index)) {
        ++number_bo_send;
      }
    }

    // 3. part: exchange number_bo_send
    number_bo_rec = 0;
    num_message = 0;
    if(rank_source != -1) {
      MPI_Irecv(&number_bo_rec,1,MPI_INT,rank_source,1,comm,
                &req[num_message]);
      ++num_message;
    }
    if(rank_destination != -1) {
      MPI_Isend(&number_bo_send,1,MPI_INT,rank_destination,1,comm,
                &req[num_message]);
      ++num_message;
    }
    MPI_Waitall(num_message,req,status);
    
    // 4. part: build the information about boundary cells + edges
    // How the information is stored:
    // Index of cell          edges (Edges_cell)  corners  >
    // ind_x, ind_y, ind_z,    ...              ,   ...    >   5 int
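    // (Edge k of the cell is packed MSB-first into the fourth int, i.e.
    //  into bit 11-k; corner k is packed into bit 7-k of the fifth int.
    //  The decode loops in part 6 therefore run k = 11..0 and k = 7..0
    //  while shifting right.)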
    send_info = Give_buffer_send_info(5*number_bo_send);
    receive_info = Give_buffer_receive_info(5*number_bo_rec);
    
    num = 0;
    iterate_hash0 {
      if(Send_boundary_cell_in_direction(point0,next_index)) {
        // cell index
        I = point0->ind;
        send_info[num*5]   = I.I_x().get();
        send_info[num*5+1] = I.I_y().get();
        send_info[num*5+2] = I.I_z().get();
        // info edges
        sp = 0;
        for(k=0;k<12;++k) {
          edge = I.edge((Edges_cell)k);
          if(Give_edge_typ(edge.I()) == edge_interior) {
            // edge inside the domain
            sp = (sp << 1) + 1;
          }
          else {
            // edge not inside the domain
            sp = (sp << 1);
          }
        }
        send_info[num*5+3] = sp;
        // info corners
        sp = 0;
        for(k=0;k<8;++k) {
          Icorner = I.neighbour((dir_sons)k);
          if(Give_type(Icorner) == nearb) {
            // corner inside the domain
            sp = (sp << 1) + 1;
          }
          else {
            // corner not inside the domain
            sp = (sp << 1);
          }
        }
        send_info[num*5+4] = sp;
        num++;
      }
    }

    // 5. part: send boundary cells + edge info
    num_message = 0;
    if(rank_source != -1 && number_bo_rec>0) {
      MPI_Irecv(receive_info,5*number_bo_rec,MPI_INT,rank_source,2,comm,
                &req[num_message]);
      ++num_message;
    }
    if(rank_destination != -1 && number_bo_send>0) {
      MPI_Isend(send_info,5*number_bo_send,MPI_INT,rank_destination,2,comm,
                &req[num_message]);
      ++num_message;
    }
    MPI_Waitall(num_message,req,status);
    
    // 6. part: set boundary cells + edge info
    for(num=0;num<number_bo_rec;++num) {
      I = Index3D(receive_info[num*5],
                  receive_info[num*5+1],
                  receive_info[num*5+2]);
      if(developer_version)
        if(I.Cell_index()==false)
          cout << "\n Mistake in Send_boundary_cells_parallel() " << endl;
      // Add fine bo cell
      Add_cell(I);
      Set_cell_typ(I,fine_bo_cell);
      // Add neighbours of fine bo cell
      for(k=0;k<8;++k) {
        Add_point(I.neighbour((dir_sons)k));
      }
      // Add edges of fine bo cell (and set the type of their corners)
      sp = receive_info[num*5+3];
      for(k=11;k>=0;--k) {
        if((sp&1)==1) {
          // edge inside the domain
          I_edge = I.I_edge((Edges_cell)k);
          Add_edge(I_edge);
          Set_edge_typ(I_edge, edge_interior);
          
          // important for boundary cells in the second layer
          for(l=0;l<2;++l) {
            next = I_edge.corner_of_edge_index((dir1D)l);
            if(Give_type(next)<interior)
              Set_point_typ(next, parallel_p);
          }
        }
        sp = (sp >> 1);
      }
      // Add corners of fine bo cell
      sp = receive_info[num*5+4];
      for(k=7;k>=0;--k) {
        if((sp&1)==1) {
          // corner inside the domain
          Icorner = I.neighbour((dir_sons)k);
          Add_point(Icorner);
          if(Give_type(Icorner)<interior)
            Set_point_typ(Icorner, parallel_p);
        }
        sp = (sp >> 1);
      }
    }
  }
}
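
// Minimal sketch (illustration only, not part of the build) of the
// count-then-payload handshake used by all of the Send_* routines in this
// file, assuming partner ranks src/dst (-1 = no partner) and a record size
// of R ints per item:
//
//   int n_send = ..., n_recv = 0, m = 0;
//   MPI_Request req[2];  MPI_Status st[2];
//   if(src != -1) MPI_Irecv(&n_recv, 1, MPI_INT, src, tag, comm, &req[m++]);
//   if(dst != -1) MPI_Isend(&n_send, 1, MPI_INT, dst, tag, comm, &req[m++]);
//   MPI_Waitall(m, req, st);              // both counts are now known
//   m = 0;
//   if(src != -1 && n_recv > 0)
//     MPI_Irecv(rbuf, R*n_recv, MPI_INT, src, tag+1, comm, &req[m++]);
//   if(dst != -1 && n_send > 0)
//     MPI_Isend(sbuf, R*n_send, MPI_INT, dst, tag+1, comm, &req[m++]);
//   MPI_Waitall(m, req, st);              // payload exchanged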




// ------------------------------------------------------------
// 2. Send neighbour cells all levels
// ------------------------------------------------------------

// Step II.9 of grid generation:

inline bool Grid_base::Send_cell_in_direction(Point_hashtable0* poi,
                                              int t,
                                              Index3D my_lev_index,
                                              Index3D next_index) {
  Index3D I;
  I = poi->ind;

  // (The level, type, and ownership filtering formerly done here is now
  //  performed by the caller, which collects the candidate cells once in
  //  an array; see Send_cells_parallel below.)
  int co;
  for(co=0;co<8;++co) {
    // study corners of cell
    if(I.neighbour((dir_sons)co) << next_index) {
      poi->will_be_sent(true);
      return true;
    }
  }

  poi->will_be_sent(false);
  return false;
}



void Grid_base::Send_cells_parallel(int level) {
  // here level is the level of the cells minus 1
  Index3D I, next_index;
  Index3D my_lev_index;

  int number_bo_send, number_bo_rec, num_message, num;

  int d;

  int* send_info;
  int* receive_info;
  
  MPI_Request req[2];
  MPI_Status status[2];
  
  int rank_source, rank_destination;


  // -1. set my_lev_index
  my_lev_index = Give_my_level_index(level+1);

  // construct an extra array for the invariant points that are needed
  int lenght_invar_array = 0;
  iterate_hash0 {
    if(point0->isCell()) {
      if(point0->Give_Tiefe() == level+1) {
        if(point0->ind < my_lev_index) {
          if(point0->typ==int_cell || point0->typ==bo_cell) {
            lenght_invar_array++;
          }
        }
      }
    }
  }
  Point_hashtable0** invariant_array = new Point_hashtable0*[lenght_invar_array];
  int array_i = 0;
  iterate_hash0 {
    if(point0->isCell()) {
      if(point0->Give_Tiefe() == level+1) {
        if(point0->ind < my_lev_index) {
          if(point0->typ==int_cell || point0->typ==bo_cell) {
            invariant_array[array_i] = point0;
            array_i++;
          }
        }
      }
    }
  }
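  // (This candidate set does not depend on the direction d, so it is
  //  collected once; the 26-direction loop below then only re-tests each
  //  candidate's corners against the current neighbour instead of scanning
  //  the whole hash table 26 times.)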


  for(d=0;d<26;++d) {
    // 0. index of the receiving next processor
    next_index = Give_next_on_level(d,my_lev_index);

    // 1. part: basic information about send and receive
    if(d<6) {
      rank_source      = give_next_rank_source((dir_3D)d,level);
      rank_destination = give_next_rank_destination((dir_3D)d,level);
    }
    else if(d<18) {
      rank_source      = give_next_rank_source((Edges_cell)(d-6),level);
      rank_destination = give_next_rank_destination((Edges_cell)(d-6),level);
    }
    else {
      rank_source      = give_next_rank_source((dir_sons)(d-18),level);
      rank_destination = give_next_rank_destination((dir_sons)(d-18),level);
    }

    // 2. part: count the number of cells to be sent
    number_bo_send = 0;
    for(int i = 0;i < lenght_invar_array;i++) {
      if(Send_cell_in_direction(invariant_array[i],level,my_lev_index,next_index))
        ++number_bo_send;
    }

    // 3. part: exchange number_bo_send
    number_bo_rec = 0;
    num_message = 0;
    if(rank_source != -1) {
      MPI_Irecv(&number_bo_rec,1,MPI_INT,rank_source,3,comm,
                &req[num_message]);
      ++num_message;
    }
    if(rank_destination != -1) {
      MPI_Isend(&number_bo_send,1,MPI_INT,rank_destination,3,comm,
                &req[num_message]);
      ++num_message;
    }
    MPI_Waitall(num_message,req,status);

    // 4. part: build the information about cells + types
    // How the information is stored:
    // Index of cell          type  >
    // ind_x, ind_y, ind_z,    ...  >   4 int
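    // (e.g. a bo_cell at index (ix,iy,iz) is sent as the four ints
    //  { ix, iy, iz, (int)bo_cell }.)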
    send_info = Give_buffer_send_info(4*number_bo_send);
    receive_info = Give_buffer_receive_info(4*number_bo_rec);

    num = 0;
    for(int i = 0;i < lenght_invar_array;i++) {
      point0 = invariant_array[i];
      if(point0->doSend()) {
        I = point0->ind;
        // cell index
        send_info[num*4]   = I.I_x().get();
        send_info[num*4+1] = I.I_y().get();
        send_info[num*4+2] = I.I_z().get();
        // type
        send_info[num*4+3] = (int)point0->typ;
        
        ++num;
      }
    }
    
    // 5. part: send cells + type
    num_message = 0;
    if(rank_source != -1 && number_bo_rec>0) {
      MPI_Irecv(receive_info,4*number_bo_rec,MPI_INT,rank_source,4,comm,
                &req[num_message]);
      ++num_message;
    }
    if(rank_destination != -1 && number_bo_send>0) {
      MPI_Isend(send_info,4*number_bo_send,MPI_INT,rank_destination,4,comm,
                &req[num_message]);
      ++num_message;
    }
    MPI_Waitall(num_message,req,status);
    
    // 6. part: set cells + type
    for(num=0;num<number_bo_rec;++num) {
      I = Index3D(receive_info[num*4],
                  receive_info[num*4+1],
                  receive_info[num*4+2]);
      if(developer_version)
        if(I.Cell_index()==false)
          cout << "\n Mistake in Send_cells_parallel " << endl;
      // Add cell
      Add_cell(I);
      Set_cell_typ(I,(Celltype)receive_info[num*4+3]);
    }
  }
  delete [] invariant_array;   // array delete: allocated with new[]
}


// ------------------------------------------------------------
// 3. Send neighbour points direct
// ------------------------------------------------------------


// Test whether the point poi with index poi->ind is a ghost point for the
// processor with index next_index. To do so, shift poi->ind by "h" in the
// direction d in which the next processor lies.
bool Grid_base::Send_point_direct_in_direction(int d,
                                               Point_hashtable1* poi,
                                               Index3D next_index) {
  Index3D I;
  Index3D nextS;

  if(poi->typ!=exterior) {
    I = poi->ind;
    /*
     * Berkeley Feb 2004 and change of 4.6.2003:
     *      if(I < my_index) {
     */
    if(I << my_index) {
      if(d<6) {
        nextS = I.next((dir_3D)d,max_level);
      }
      else if(d<18) {
        nextS = I.next((Edges_cell)(d-6),max_level);
      }
      else
        nextS = I.next((dir_sons)(d-18),max_level);
      
      if(nextS < next_index)
        return true;
    }
  }
  return false;
}

// used only on the finest level
void Grid_base::Send_grid_points_direct_parallel(Pointtype typ) {
  int d;
  Pointtype type_gotten;
  Index3D next_index, I;
  int number_bo_send, number_bo_rec, num_message, num;

  int* send_info;
  int* receive_info;

  MPI_Request req[2];
  MPI_Status status[2];

  int rank_source, rank_destination;
 
  for(d=0;d<26;++d) {
    // 0. index of the receiving next processor
    next_index = Give_next_on_level(d,my_index);

    // 1. part: basic information about send and receive
    if(d<6) {
      rank_source      = give_next_rank_source((dir_3D)d,n_parallel-1);
      rank_destination = give_next_rank_destination((dir_3D)d,n_parallel-1);
    }
    else if(d<18) {
      rank_source      = give_next_rank_source((Edges_cell)(d-6),n_parallel-1);
      rank_destination = give_next_rank_destination((Edges_cell)(d-6),
                                                    n_parallel-1);
    }
    else {
      rank_source      = give_next_rank_source((dir_sons)(d-18),n_parallel-1);
      rank_destination = give_next_rank_destination((dir_sons)(d-18),
                                                    n_parallel-1);
    }

    // 2. part: count the number of points to be sent
    number_bo_send = 0;
    if(rank_destination != -1) {
      iterate_hash1 {
        if(Send_point_direct_in_direction(d, point1,next_index))
          ++number_bo_send;
      }
    }
    
    // 3. part: exchange number_bo_send
    number_bo_rec = 0;
    num_message = 0;
    if(rank_source != -1) {
      MPI_Irecv(&number_bo_rec,1,MPI_INT,rank_source,160+d,comm,
                &req[num_message]);
      ++num_message;
    }
    if(rank_destination != -1) {
      MPI_Isend(&number_bo_send,1,MPI_INT,rank_destination,160+d,comm,
                &req[num_message]);
      ++num_message;
    }
    MPI_Waitall(num_message,req,status);
  
    // 4. part: build the information about the points to be sent
    // How the information is stored:
    // Index of point
    // ind_x, ind_y, ind_z, typ     >   4 int
    send_info = Give_buffer_send_info(4*number_bo_send);
    receive_info = Give_buffer_receive_info(4*number_bo_rec);
    
    num = 0;
    if(rank_destination != -1) {
      iterate_hash1 {
        if(Send_point_direct_in_direction(d, point1, next_index)) {
          I = point1->ind;
          
          send_info[num*4]   = I.I_x().get();
          send_info[num*4+1] = I.I_y().get();
          send_info[num*4+2] = I.I_z().get();
          send_info[num*4+3] = point1->typ;
          num++;
        }
      }
    }
  
    // 5. part: send points
    num_message = 0;
    if(rank_source != -1 && number_bo_rec>0) {
      MPI_Irecv(receive_info,4*number_bo_rec,MPI_INT,rank_source,180+d,comm,
                &req[num_message]);
      ++num_message;
    }
    if(rank_destination != -1 && number_bo_send>0) {
      MPI_Isend(send_info,4*number_bo_send,MPI_INT,rank_destination,180+d,comm,
                &req[num_message]);
      ++num_message;
    }
    MPI_Waitall(num_message,req,status);
    
    // 6. part: set points
    for(num=0;num<number_bo_rec;++num) {
      I = Index3D(receive_info[num*4],
                  receive_info[num*4+1],
                  receive_info[num*4+2]);

      type_gotten = (Pointtype)receive_info[num*4+3];
      // Add point
      Add_point(I); // if a new point was added, it gets type exterior
      if(Give_type(I)==exterior) {
        if(type_gotten==typ)
          Set_point_typ(I, typ);
        else
          Set_point_typ(I, parallel_p);
      }
    }
  }
}


// ------------------------------------------------------------
// 4. Send neighbour points all levels
// ------------------------------------------------------------

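// Test whether point poi must be sent in direction d on level t: it is sent
// if it already lies in the region of the neighbour (next_index), or if its
// neighbour index at level t in direction d does.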
bool Grid_base::Send_coarse_point_direct_in_direction(int d,
                                                      Point_hashtable1* poi,
                                                      int t,
                                                      Index3D my_lev_index,
                                                      Index3D next_index) {
  Index3D I;
  Index3D nextS;
  
  if(poi->typ!=exterior) {
    I = poi->ind;
    // here << for multigrid points
    if(poi->Give_Tiefe() <= t) {
      if(I << my_lev_index) {
        if(I << next_index)
          return true;
        
        if(d<6) {
          nextS = I.next((dir_3D)d,t);
        }
        else if(d<18) {
          nextS = I.next((Edges_cell)(d-6),t);
        }
        else
          nextS = I.next((dir_sons)(d-18),t);
        
        // here << for multigrid points
        if(nextS << next_index)
          return true;
      }
    }
  }
  return false;
}


void Grid_base::Send_coarse_grid_points_parallel() {
  int l;

  // set my_finest_parallel_level for finest points: Max_level()
  iterate_hash1 {
    point1->my_finest_parallel_level = Max_level();
  }

  // here set my_finest_parallel_level for new points: l
  for(l=Max_level()-1;l>=my_coarsest_level;--l) {
    Send_coarse_grid_points_parallel(l);
  }
}

void Grid_base::Send_coarse_grid_points_parallel(int level) {
  int d;
  Pointtype type_gotten;
  Pointtype type_old;

  Index3D I, next_index;
  int number_bo_send, number_bo_rec, num_message, num;
  Index3D my_lev_index;

  int* send_info;
  int* receive_info;

  MPI_Request req[2];
  MPI_Status status[2];

  int rank_source, rank_destination;

  // -1. set my_lev_index
  my_lev_index = Give_my_level_index(level+1);

  for(d=0;d<26;++d) {
    // 0. index of the receiving next processor
    next_index = Give_next_on_level(d,my_lev_index);

    // 1. part: basic information about send and receive
    if(d<6) {
      rank_source      = give_next_rank_source((dir_3D)d,level);
      rank_destination = give_next_rank_destination((dir_3D)d,level);
    }
    else if(d<18) {
      rank_source      = give_next_rank_source((Edges_cell)(d-6),level);
      rank_destination = give_next_rank_destination((Edges_cell)(d-6),level);
    }
    else {
      rank_source      = give_next_rank_source((dir_sons)(d-18),level);
      rank_destination = give_next_rank_destination((dir_sons)(d-18),level);
    }

    // 2. part: count the number of points to be sent
    number_bo_send = 0;
    if(rank_destination != -1) {
      iterate_hash1 {
        if(Send_coarse_point_direct_in_direction(d, point1,level,
                                                 my_lev_index,next_index))
          ++number_bo_send;
      }
    }
    
    // 3. part: exchange number_bo_send
    number_bo_rec = 0;
    num_message = 0;
    if(rank_source != -1) {
      MPI_Irecv(&number_bo_rec,1,MPI_INT,rank_source,100+d,comm,
                &req[num_message]);
      ++num_message;
    }
    if(rank_destination != -1) {
      MPI_Isend(&number_bo_send,1,MPI_INT,rank_destination,100+d,comm,
                &req[num_message]);
      ++num_message;
    }
    MPI_Waitall(num_message,req,status);
  
    // 4. part: build the information about the points to be sent
    // How the information is stored:
    // Index of point
    // ind_x, ind_y, ind_z, typ     >   4 int
    send_info = Give_buffer_send_info(4*number_bo_send);
    receive_info = Give_buffer_receive_info(4*number_bo_rec);
    
    num = 0;
    if(rank_destination != -1) {
      iterate_hash1 {
        if(Send_coarse_point_direct_in_direction(d, point1,level,
                                                 my_lev_index,next_index)) {
          I = point1->ind;
          
          send_info[num*4]   = I.I_x().get();
          send_info[num*4+1] = I.I_y().get();
          send_info[num*4+2] = I.I_z().get();
          send_info[num*4+3] = point1->typ;
          num++;
        }
      }
    }
  
    // 5. part: send points
    num_message = 0;
    if(rank_source != -1 && number_bo_rec>0) {
      MPI_Irecv(receive_info,4*number_bo_rec,MPI_INT,rank_source,120+d,comm,
                &req[num_message]);
      ++num_message;
    }
    if(rank_destination != -1 && number_bo_send>0) {
      MPI_Isend(send_info,4*number_bo_send,MPI_INT,rank_destination,120+d,comm,
                &req[num_message]);
      ++num_message;
    }
    MPI_Waitall(num_message,req,status);
    
    // 6. part: set points
    for(num=0;num<number_bo_rec;++num) {
      I = Index3D(receive_info[num*4],
                  receive_info[num*4+1],
                  receive_info[num*4+2]);

      type_gotten = (Pointtype)receive_info[num*4+3];
      // Add point
      Add_point(I); // if a new point was added, it gets type exterior
      type_old = Give_type(I);
      if(type_old==exterior || type_old==multigrid) {
        if(type_gotten==multigrid)
          Set_point_typ(I,level, multigrid);
        else
          Set_point_typ(I,level, parallel_p);
      }
    }
  }
}



// ------------------------------------------------------------
// 5. Multigrid points
// ------------------------------------------------------------

bool Grid_base::Send_multi_grid_point_in_direction(int d,
                                                   Point_hashtable1* poi,
                                                   int t,
                                                   Index3D my_lev_index,
                                                   Index3D next_index) {
  Index3D I;
  Index3D nextS;

  if(poi->typ==multigrid) {
    I = poi->ind;
    if(poi->Give_Tiefe() <= t) {
      if(I << my_lev_index) {
        if(I << next_index)
          return true;
        
        if(d<6) {
          nextS = I.next((dir_3D)d,t);
        }
        else if(d<18) {
          nextS = I.next((Edges_cell)(d-6),t);
        }
        else
          nextS = I.next((dir_sons)(d-18),t);
        
        if(nextS << next_index)
          return true;
      }
    }
  }

  return false;
}


void Grid_base::Send_multi_grid_points_parallel(int level) {
  int d;
  int finest_level;
  Index3D I, next_index;
  int number_bo_send, number_bo_rec, num_message, num;
  Index3D my_lev_index;

  int* send_info;
  int* receive_info;

  MPI_Request req[2];
  MPI_Status status[2];

  int rank_source, rank_destination;

  // 0. set my_lev_index
  my_lev_index = Give_my_level_index(level+2);

  for(d=0;d<26;++d) {
    // 1. part: basic information about send and receive
    if(d<6) {
      rank_source      = give_next_rank_source((dir_3D)d,level+1);
      rank_destination = give_next_rank_destination((dir_3D)d,level+1);
    }
    else if(d<18) {
      rank_source      = give_next_rank_source((Edges_cell)(d-6),level+1);
      rank_destination = give_next_rank_destination((Edges_cell)(d-6),level+1);
    }
    else {
      rank_source      = give_next_rank_source((dir_sons)(d-18),level+1);
      rank_destination = give_next_rank_destination((dir_sons)(d-18),level+1);
    }
    
    // 2. part: index of the receiving next processor and corrected ranks
    next_index = Give_level_index_of(rank_destination,level+1);

    // periodic case (new):
    if(next_index == Index3D(3,3,3))
      rank_destination = -1;

    if(Give_my_coarsest_level()>=level+1)
      rank_source      = -1;

    // 3. part: count the number of points to be sent
    number_bo_send = 0;
    if(rank_destination != -1) {
      iterate_hash1 {
        if(Send_multi_grid_point_in_direction(d, point1,level,
                                              my_lev_index, next_index))
          ++number_bo_send;
      }
    }
    
    // 4. part: exchange number_bo_send
    number_bo_rec = 0;
    num_message = 0;
    if(rank_source != -1) {
      MPI_Irecv(&number_bo_rec,1,MPI_INT,rank_source,140+d,comm,
                &req[num_message]);
      ++num_message;
    }
    if(rank_destination != -1) {
      MPI_Isend(&number_bo_send,1,MPI_INT,rank_destination,140+d,comm,
                &req[num_message]);
      ++num_message;
    }
    MPI_Waitall(num_message,req,status);
  
    // 5. part: build the information about the points to be sent
    // How the information is stored:
    // Index of point
    // ind_x, ind_y, ind_z, finest_level     >   4 int
    send_info = Give_buffer_send_info(4*number_bo_send);
    receive_info = Give_buffer_receive_info(4*number_bo_rec);
    
    num = 0;
    if(rank_destination != -1) {
      iterate_hash1 {
        if(Send_multi_grid_point_in_direction(d, point1,level,
                                              my_lev_index, next_index)) {
          I = point1->ind;
          
          send_info[num*4]   = I.I_x().get();
          send_info[num*4+1] = I.I_y().get();
          send_info[num*4+2] = I.I_z().get();
          send_info[num*4+3] = point1->finest_level;
          num++;
        }
      }
    }
  
    // 6. part: send points
    num_message = 0;
    if(rank_source != -1 && number_bo_rec>0) {
      MPI_Irecv(receive_info,4*number_bo_rec,MPI_INT,rank_source,160+d,comm,
                &req[num_message]);
      ++num_message;
    }
    if(rank_destination != -1 && number_bo_send>0) {
      MPI_Isend(send_info,4*number_bo_send,MPI_INT,rank_destination,160+d,comm,
                &req[num_message]);
      ++num_message;
    }
    MPI_Waitall(num_message,req,status);
    
    // 7. part: set points
    for(num=0;num<number_bo_rec;++num) {
      I = Index3D(receive_info[num*4],
                  receive_info[num*4+1],
                  receive_info[num*4+2]);

      finest_level = receive_info[num*4+3];  // (no Pointtype cast: this field holds a level)
      // Add point
      Add_point(I); // if a new point was added, it gets type exterior
      if(Give_type(I)==exterior)
        Set_point_typ(I,multigrid);
      Add_multigrid_point(I, finest_level);
    }
  }
}




// ------------------------------------------------------------
// 6. member functions for parallel edge correction
// ------------------------------------------------------------

void Grid_base::Start_for_face_correction_parallel() {
  int i;
  for(i=0;i<18;++i) {
    edges_to_be_sent[i] = new int[30];
    lenght_edges_to_be_sent[i] = 10;
    number_edges_to_be_sent[i] = 0;
  }
  receive_buffer = new int[30];
  length_receive_buffer = 30;
  direction_and_number_process = new int[2*give_number_of_processes()];
}
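// (Each edge record is 3 ints, so the capacity lenght_edges_to_be_sent[i]
//  = 10 edges corresponds to the 30 ints allocated above.)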

void Grid_base::End_for_face_correction_parallel() {
  int i;
  for(i=0;i<18;++i) {
    delete [] edges_to_be_sent[i];   // array delete: allocated with new[]
  }
  delete [] direction_and_number_process;
  delete [] receive_buffer;
}

void Grid_base::Test_array(int i) {
  int k;
  int *sp;
  if(number_edges_to_be_sent[i] >= lenght_edges_to_be_sent[i]) {
    lenght_edges_to_be_sent[i] = lenght_edges_to_be_sent[i] * 2;
    sp = new int[3*lenght_edges_to_be_sent[i]];
    for(k=0;k<lenght_edges_to_be_sent[i]/2*3;++k) {
      sp[k] = edges_to_be_sent[i][k];
    }
    delete [] edges_to_be_sent[i];   // array delete: allocated with new[]
    edges_to_be_sent[i] = sp;
  }
}
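// (Test_array grows the buffer geometrically: the capacity doubles and the
//  old contents, lenght_edges_to_be_sent[i]/2 edges of 3 ints each, are
//  copied over, giving amortised constant cost per appended edge.)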


void Grid_base::put_for_send_edge(Index3D I) {
  int i;
  if(developer_version) {
    if(!I.Edge_index()) {
      cout << "\n Error in Grid_base::put_for_send_edge!" << endl;
      return;
    }
  }
  for(i=0;i<6;++i) {
    if(my_index.surface((dir_3D)i)>>I) {
      Test_array(i);
      edges_to_be_sent[i][3*number_edges_to_be_sent[i]]   = I.I_x().get();
      edges_to_be_sent[i][3*number_edges_to_be_sent[i]+1] = I.I_y().get();
      edges_to_be_sent[i][3*number_edges_to_be_sent[i]+2] = I.I_z().get();
      ++number_edges_to_be_sent[i];
    }
  }
  for(i=6;i<18;++i) {
    if(my_index.edge((Edges_cell)(i-6))>>I) {
      Test_array(i);
      edges_to_be_sent[i][3*number_edges_to_be_sent[i]]   = I.I_x().get();
      edges_to_be_sent[i][3*number_edges_to_be_sent[i]+1] = I.I_y().get();
      edges_to_be_sent[i][3*number_edges_to_be_sent[i]+2] = I.I_z().get();
      ++number_edges_to_be_sent[i];
    }
  }
}

int *Grid_base::Give_receive_buffer(int l) {
  if(length_receive_buffer<l) {
    delete [] receive_buffer;   // array delete: allocated with new[]
    receive_buffer = new int[l];
    length_receive_buffer = l;  // track the new capacity
  }
  return receive_buffer;
}

bool Grid_base::must_edges_be_send() {
  int i, I_must_send, global_must_send;

  // 1. step: must I send?
  I_must_send = false;
  // Attention: only the 6 face directions are checked here,
  // not all 18 directions!
  for(i=0;i<6;++i) {
    if(number_edges_to_be_sent[i]>0) I_must_send=true;
  }
 
  // 2. step: send-receive
  MPI_Allreduce(&I_must_send,&global_must_send,1,MPI_INT,MPI_LOR,comm);

  return global_must_send;
}
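// (must_edges_be_send() is a global convergence test: the MPI_Allreduce
//  with MPI_LOR yields true on every process as soon as any process still
//  has edges queued, so all processes agree on whether another
//  send_edges() round is needed.)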

// used only on the finest level
bool Grid_base::send_edges() {
  int d;
  bool edges_where_added;
  Index3D I;
  int number_bo_send, number_bo_rec, num_message, num;

  int* send_info;
  int* receive_info;

  MPI_Request req[2];
  MPI_Status status[2];

  int rank_source, rank_destination;
 
  edges_where_added = false;
  for(d=0;d<18;++d) {
    // 1. part: basic information about send and receive
    if(d<6) {
      rank_source      = give_next_rank_source((dir_3D)d,n_parallel-1);
      rank_destination = give_next_rank_destination((dir_3D)d,n_parallel-1);
    }
    else {
      rank_source      = give_next_rank_source((Edges_cell)(d-6));
      rank_destination = give_next_rank_destination((Edges_cell)(d-6));
    }
    // 2.1 part: count the number of edges to be sent
    number_bo_send = number_edges_to_be_sent[d];
  
    // 2.2 part: exchange number_bo_send
    number_bo_rec = 0;
    num_message = 0;
    if(rank_source != -1) {
      MPI_Irecv(&number_bo_rec,1,MPI_INT,rank_source,11,comm,
                &req[num_message]);
      ++num_message;
    }
    if(rank_destination != -1) {
      MPI_Isend(&number_bo_send,1,MPI_INT,rank_destination,11,comm,
                &req[num_message]);
      ++num_message;
    }
    MPI_Waitall(num_message,req,status);

    if(number_bo_rec>0) edges_where_added = true;

    // 3. part: build the information about the edges to be sent
    // How the information is stored:
    // Index of edge
    // ind_x, ind_y, ind_z     >   3 int
    send_info = edges_to_be_sent[d];
    receive_info = Give_receive_buffer(3*number_bo_rec);

    // 4. part: send edges
    num_message = 0;
    if(rank_source != -1 && number_bo_rec>0) {
      MPI_Irecv(receive_info,3*number_bo_rec,MPI_INT,rank_source,12,comm,
                &req[num_message]);
      ++num_message;
    }
    if(rank_destination != -1 && number_bo_send>0) {
      MPI_Isend(send_info,3*number_bo_send,MPI_INT,rank_destination,12,comm,
                &req[num_message]);
      ++num_message;
    }
    MPI_Waitall(num_message,req,status);
  
    // 5. part: add the received edges
    for(num=0;num<number_bo_rec;++num) {
      I = Index3D(receive_info[num*3],
                  receive_info[num*3+1],
                  receive_info[num*3+2]);
      Add_edge(I);
    }

    // 6. part: reset the counter
    number_edges_to_be_sent[d]=0;
  }

  return edges_where_added;
}

// ------------------------------------------------------------
// 7. Lists
// ------------------------------------------------------------
void Grid::Add_list_parallel(Index3D I) {
  Index3D Inext;
  int t;
  int my_finest_parallel_level;

  t = I.Tiefe();
  if(t<my_coarsest_level-1)
    t = my_coarsest_level-1;
  // the bound my_coarsest_level-1 above is necessary for the prolongation
  my_finest_parallel_level = Give_my_finest_parallel_level(I);

  for(;t<=my_finest_parallel_level;++t) {
    if(developer_version)
      if(t >= Max_number_levels) {
        cout << " \n level too large in Add_list_parallel " << endl;
      }
    start_P_parallel[t] = new P_parallel(I,start_P_parallel[t]);
    ++number_parallel_points[t];
    
    Add_double(I,t,Give_max_num_var());
  }
}
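
// (start_P_parallel[t] is the head of a per-level singly linked list of
//  parallel points; new entries are prepended in O(1), and
//  number_parallel_points[t] tracks the list length.)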
