examples/ml_example_epetra_preconditioner_Maxwell.cpp

/*****************************************************************************/
/* Copyright 2002, Sandia Corporation. The United States Government retains  */
/* a nonexclusive license in this software as prescribed in AL 88-1 and AL   */
/* 91-7. Export of this program may require a license from the United States */
/* Government.                                                               */
/*****************************************************************************/

/*****************************************************************************
 * Sample driver for the Maxwell equation AMG solver in the ML package. The
 * software is tested by setting up a 2-dimensional uniform grid example on
 * a square. For details about the problem at hand, please refer to file
 * ml_simple_max.c, of which this file is the C++ counterpart.
 *
 * This file shows how to use the class
 * ML_Epetra::MultiLevelPreconditioner to solve this formulation of the
 * Maxwell equations. The class takes care of building the node and edge
 * hierarchies, defining the Hiptmair smoother, and setting up the coarse
 * solver. More information about MultiLevelPreconditioner can be found in
 * the ML user's guide.
 *****************************************************************************/

#ifndef HAVE_CONFIG_H
#define HAVE_CONFIG_H
#endif

#ifdef HAVE_MPI
#include "mpi.h"
#endif
#include "Epetra_Map.h"
#include "Epetra_Vector.h"
#include "Epetra_CrsMatrix.h"
#include "Epetra_LinearProblem.h"
#include "AztecOO.h"
#include "ml_epetra_utils.h"
#include "ml_epetra_preconditioner.h"
#include "Teuchos_ParameterList.hpp"

/*****************************************************************************/
/* All functions/structures starting with the word 'user' denote things that */
/* are not in ML and are specific to this particular example.                */
/* User defined structure holding information on how the PDE is partitioned  */
/* over the processor system.                                                */
/*****************************************************************************/
struct user_partition {
  int *my_global_ids;      /* my_global_ids[i]: id of ith local unknown.     */
  int *needed_external_ids;/* global ids of ghost unknowns.                  */
  int Nlocal;              /* Number of local unknowns.                      */
  int Nglobal;             /* Number of global unknowns.                     */
  int *my_local_ids;       /* my_local_ids[i]: local id of ith global unknown*/
  int mypid;               /* processor id                                   */
  int nprocs;              /* total number of processors.                    */
  int Nghost;              /* number of ghost variables on processor.        */
};
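
/* Illustration (added, not part of the original driver): with the AZ_linear
   decomposition used below and, say, Nglobal = 8 unknowns on 2 processors,
   processor 0 owns global ids {0,1,2,3} (Nlocal = 4) and processor 1 owns
   {4,5,6,7}; the ghost ids referenced by the local matrix rows are collected
   in needed_external_ids by the AZ_transform step in user_Ke_build() and
   user_Kn_build().                                                          */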

/*****************************************************************************/
/* User defined structure for performing a matrix-vector product and for     */
/* getting a row of the T matrix (null space).                               */
/*****************************************************************************/
struct user_Tmat_data {
  struct user_partition *edge;
  struct user_partition *node;
  ML_Operator *Kn;
};

#ifdef ML_MPI
#define COMMUNICATOR MPI_COMM_WORLD
#else
#define COMMUNICATOR AZ_NOT_MPI
#endif

/*****************************************************************************/
/* Function definitions.                                                     */
/*****************************************************************************/
extern void user_partition_edges(struct user_partition *,
                                 struct user_partition *);
extern void user_partition_nodes(struct user_partition *Partition);
extern AZ_MATRIX   *user_Ke_build(struct user_partition *);
extern AZ_MATRIX   *user_Kn_build(struct user_partition *);
extern ML_Operator *user_T_build (struct user_partition *,
                                  struct user_partition *, ML_Operator *, ML_Comm *);
int main(int argc, char *argv[])
{
  int    Nnodes=72*72;              /* Total number of nodes in the problem.*/
                                    /* 'Nnodes' must be a perfect square.   */

  struct       user_partition Edge_Partition = {NULL, NULL,0,0,NULL,0,0,0},
                                Node_Partition = {NULL, NULL,0,0,NULL,0,0,0};

  int          proc_config[AZ_PROC_SIZE];

  /* get processor information (id & # of procs). */

  // Create communicators. In this example Epetra communicators are not
  // required (they are automatically defined in
  // ML_Operator2EpetraCrsMatrix()), but other codes may need to define an
  // Epetra_MpiComm or Epetra_SerialComm explicitly.

#ifdef ML_MPI
  MPI_Init(&argc,&argv);
#endif
  AZ_set_proc_config(proc_config, COMMUNICATOR);
  ML_Comm * comm;
  ML_Comm_Create( &comm );
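
  // A minimal sketch (not needed by this example) of how an Epetra
  // communicator could be created explicitly at this point; it assumes
  // that Epetra_MpiComm.h / Epetra_SerialComm.h have been included.
  //
  // #ifdef ML_MPI
  //   Epetra_MpiComm EpetraComm(MPI_COMM_WORLD);
  // #else
  //   Epetra_SerialComm EpetraComm;
  // #endif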

  // ================================= //
  // C R E A T E   P A R T I T I O N S //
  // ================================= //

  /* Set the # of global nodes/edges and partition both the edges and the */
  /* nodes over the processors. NOTE: if an edge is assigned to a         */
  /* processor, at least one of its nodes must also be assigned to that   */
  /* processor.                                                           */

  Node_Partition.Nglobal = Nnodes;
  Edge_Partition.Nglobal = Node_Partition.Nglobal*2;
  /* In this simple example the number of edges is 2X the */
  /* number of nodes. This will not generally be true.    */

  user_partition_nodes(&Node_Partition);
  user_partition_edges(&Edge_Partition, &Node_Partition);
  /* IMPORTANT: An edge can be assigned to a processor only if at     */
  /*            least one of its nodes is assigned to that processor. */

  // ================================================= //
  // M A T R I X   C O N S T R U C T I O N   P H A S E //
  // ================================================= //

  // Matrix creation proceeds as follows:
  // - as this example is derived from ml_simple_max.c, the matrices are
  //   first created as Aztec matrices (as done in ml_simple_max.c);
  // - then, they are converted to ML_Operator matrices;
  // - finally, the ML_Operator matrices are converted to Epetra_CrsMatrix
  //   (derived from Epetra_RowMatrix).
  //
  // The first conversion simply defines some wrappers, so only a minimal
  // additional amount of memory is allocated for the new ML_Operator's.
  // The Epetra_CrsMatrix's, instead, are created using the function
  // ML_Operator2EpetraCrsMatrix(). After a call to
  // ML_Operator2EpetraCrsMatrix(), two matrices exist: the original
  // ML_Operator and the Epetra matrix. This function supports
  // rectangular matrices as well (as required by Epetra_T).
  //
  // As the goal of this example is to show how
  // ML_Epetra::MultiLevelPreconditioner works with Epetra matrices, it is
  // not important here that some more memory is allocated in the
  // conversion. NOTE THAT ML_Epetra::MultiLevelPreconditioner DOES NOT
  // REQUIRE THE INPUT MATRICES TO BE DERIVED FROM ANY ML_OPERATOR OR
  // SIMILAR: ANY Epetra_RowMatrix CAN BE USED AS INPUT.

  AZ_MATRIX * AZ_Ke = user_Ke_build(&Edge_Partition);
  AZ_MATRIX * AZ_Kn = user_Kn_build(&Node_Partition);

  // convert (put wrappers) from Aztec matrices to ML_Operator's

  ML_Operator * ML_Ke, * ML_Kn, * ML_Tmat;

  ML_Ke = ML_Operator_Create( comm );
  ML_Kn = ML_Operator_Create( comm );

  AZ_convert_aztec_matrix_2ml_matrix(AZ_Ke,ML_Ke,proc_config);
  AZ_convert_aztec_matrix_2ml_matrix(AZ_Kn,ML_Kn,proc_config);

  ML_Tmat = user_T_build(&Edge_Partition, &Node_Partition,
                      ML_Kn, comm);

  // convert to Epetra_CrsMatrix

  Epetra_CrsMatrix * Epetra_Kn, * Epetra_Ke, * Epetra_T;

  int MaxNumNonzeros;
  double CPUTime;

  ML_Operator2EpetraCrsMatrix(ML_Ke,Epetra_Ke,
                              MaxNumNonzeros,
                              true,CPUTime);

  ML_Operator2EpetraCrsMatrix(ML_Kn,
                              Epetra_Kn,MaxNumNonzeros,
                              true,CPUTime);

  ML_Operator2EpetraCrsMatrix(ML_Tmat,Epetra_T,MaxNumNonzeros,
                              true,CPUTime);

  // ==================================================== //
  // S E T U P   O F    M L   P R E C O N D I T I O N E R //
  // ==================================================== //

  Teuchos::ParameterList MLList;
  ML_Epetra::SetDefaults("maxwell", MLList);

  MLList.set("aggregation: type", "Uncoupled");
  MLList.set("coarse: max size", 30);
  MLList.set("aggregation: threshold", 0.0);

  MLList.set("coarse: type", "Amesos_KLU");

  // This creates the multilevel hierarchy, sets the smoother to
  // Hiptmair, prepares the coarse solver, etc.

  ML_Epetra::MultiLevelPreconditioner * MLPrec =
    new ML_Epetra::MultiLevelPreconditioner(*Epetra_Ke, *Epetra_T, *Epetra_Kn,
                                            MLList);
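
  // Optional sanity check (a sketch, commented out): a Teuchos parameter
  // list can report entries that were set but never queried, which helps
  // catch misspelled option names. Requires <iostream>.
  //
  // MLList.unused(std::cout);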

  // ========================================================= //
  // D E F I N I T I O N   O F   A Z T E C O O   P R O B L E M //
  // ========================================================= //

  // Create the left-hand side and right-hand side, and populate them with
  // random and constant values, respectively. Both vectors are defined on
  // the domain map of the edge matrix.
  // Epetra_Vectors can also be created in View mode, wrapping user-supplied
  // double arrays.

  Epetra_Vector LHS(Epetra_Ke->DomainMap()); LHS.Random();
  Epetra_Vector RHS(Epetra_Ke->DomainMap()); RHS.PutScalar(1.0);
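
  // A View-mode construction would look like the following sketch, where
  // 'user_values' is a hypothetical, user-owned array with one entry per
  // local edge (it is not defined in this example):
  //
  // Epetra_Vector LHS_view(View, Epetra_Ke->DomainMap(), user_values);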

  // for AztecOO, we need an Epetra_LinearProblem
  Epetra_LinearProblem Problem(Epetra_Ke,&LHS,&RHS);
  // AztecOO linear problem
  AztecOO solver(Problem);
  // set MLPrec as the preconditioning operator for the AztecOO linear problem
  solver.SetPrecOperator(MLPrec);

  // a few options for AztecOO
  solver.SetAztecOption(AZ_solver, AZ_cg_condnum);
  solver.SetAztecOption(AZ_output, 32);

  // solve with at most 500 iterations and a tolerance of 1e-8
  solver.Iterate(500, 1e-8);
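
  // Optional diagnostics (a sketch, commented out): after Iterate(), the
  // AztecOO object can report the iteration count and the true residual.
  //
  // int    Iters        = solver.NumIters();
  // double TrueResidual = solver.TrueResidual();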

  // =============== //
  // C L E A N   U P //
  // =============== //

  delete MLPrec;    // destroy phase prints out some information
  delete Epetra_Kn;
  delete Epetra_Ke;
  delete Epetra_T;

  ML_Operator_Destroy( &ML_Ke );
  ML_Operator_Destroy( &ML_Kn );
  ML_Comm_Destroy( &comm );

  if (Edge_Partition.my_local_ids != NULL) free(Edge_Partition.my_local_ids);
  if (Node_Partition.my_local_ids != NULL) free(Node_Partition.my_local_ids);
  if (Node_Partition.my_global_ids != NULL) free(Node_Partition.my_global_ids);
  if (Edge_Partition.my_global_ids != NULL) free(Edge_Partition.my_global_ids);
  if (Node_Partition.needed_external_ids != NULL)
    free(Node_Partition.needed_external_ids);
  if (Edge_Partition.needed_external_ids != NULL)
    free(Edge_Partition.needed_external_ids);

  if (AZ_Ke!= NULL) {
    AZ_free(AZ_Ke->bindx);
    AZ_free(AZ_Ke->val);
    AZ_free(AZ_Ke->data_org);
    AZ_matrix_destroy(&AZ_Ke);
  }
  if (AZ_Kn!= NULL) {
    AZ_free(AZ_Kn->bindx);
    AZ_free(AZ_Kn->val);
    AZ_free(AZ_Kn->data_org);
    AZ_matrix_destroy(&AZ_Kn);
  }

  ML_Operator_Destroy(&ML_Tmat);
#ifdef ML_MPI
  MPI_Finalize();
#endif

  return 0;

}

#define HORIZONTAL 1
#define VERTICAL   2
extern int invindex(int index, int *i, int *j, int n, int *hor_or_vert);
extern int northwest(int i, int j, int n);
extern int southeast(int i, int j, int n);
extern int southwest(int i, int j, int n);
extern int north(int i, int j, int n);
extern int south(int i, int j, int n);
extern int east(int i, int j, int n);
extern int west(int i, int j, int n);

/********************************************************************/
/* Given the (i,j)th cell return the node indices defining the cell */
/*                                                                  */
/*                          NW-------NE                             */
/*                          |         |                             */
/*                          |         |                             */
/*                          |         |                             */
/*                          SW--------SE                            */
/*                                                                  */
/********************************************************************/

int northwest(int i, int j, int n) { return(i + (j)*n); }
int southwest(int i, int j, int n) { if (j == 0) j = n;
                                       return(i + (j-1)*n);}
int southeast(int i, int j, int n)  { if (j == 0) j = n;
                                       return(i+1 + (j-1)*n);}

/********************************************************************/
/* Given the (i,j)th cell return the edge indices defining the cell */
/*                                                                  */
/*                          .--north--.                             */
/*                          |         |                             */
/*                        west       east                           */
/*                          |         |                             */
/*                          .--south--.                             */
/*                                                                  */
/********************************************************************/

int north(int i, int j, int n) { return(i + (j)*n);}
int south(int i, int j, int n) { return(i + (j-1)*n);}
int west(int i, int j, int n)  { return(i+(j)*n+n*n -1);}
int east(int i, int j, int n)  { return(i+1+(j)*n+n*n -1);}

/* Given the index of either a 'south' or 'west' edge, return the */
/* cell coordinates (i,j) as well as the orientation (horizontal  */
/* or vertical) of the edge.                                      */
int invindex(int index, int *i, int *j, int n, int *hor_or_vert)
{
  *hor_or_vert = HORIZONTAL;
  if (index >= n*n) {
    *hor_or_vert = VERTICAL;
    index -= n*n;
  }
  *i = (index%n);
  *j = (index - (*i))/n;
  if ( *hor_or_vert == HORIZONTAL)   (*j) =  ((*j)+1)%n;
  if ( *hor_or_vert == VERTICAL)     (*i) =  ((*i)+1)%n;
  return 1;
}
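
/* Worked example (added for illustration): with n = 3, the vertical edge
   west(2,0,3) = 2 + 0*3 + 3*3 - 1 = 10. invindex(10,...) sees 10 >= n*n,
   marks the edge VERTICAL and subtracts n*n to get 1, so i = 1, j = 0;
   the vertical correction then sets i = (1+1)%3 = 2, recovering
   i = 2, j = 0, VERTICAL.                                                */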

/* Assign unknowns to processors */
void user_partition_nodes(struct user_partition *Partition)
{
  int    proc_config[AZ_PROC_SIZE];

  AZ_set_proc_config(proc_config, COMMUNICATOR);

  AZ_input_update(NULL,&(Partition->Nlocal), &(Partition->my_global_ids),
                  proc_config, Partition->Nglobal, 1, AZ_linear);
  Partition->Nghost = 0;
}
/* Assign unknowns to processors */
void user_partition_edges(struct user_partition *Partition,
                          struct user_partition *NodePartition)
{
  int i;

  /* Edge partition is derived from node partition */

  Partition->Nlocal = NodePartition->Nlocal*2;
  Partition->my_global_ids = (int *) malloc(sizeof(int)*Partition->Nlocal);

  for (i = 0; i < NodePartition->Nlocal; i++)
    (Partition->my_global_ids)[i] = (NodePartition->my_global_ids)[i];
  for (i = 0; i < NodePartition->Nlocal; i++)
    (Partition->my_global_ids)[i+NodePartition->Nlocal] =
               (NodePartition->my_global_ids)[i] + NodePartition->Nglobal;

}
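
/* Illustration (added, not part of the original driver): if
   NodePartition->Nglobal = 16 and a processor owns nodes {2, 3}, it is
   assigned the horizontal edges {2, 3} and the vertical edges
   {2+16, 3+16} = {18, 19}.                                              */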

/********************************************************************/
/* Set up Ke_mat                                                    */
/*  1) First set it up as an Aztec DMSR matrix                      */
/*     This stores the diagonal of row j in Ke_val[j] and stores a  */
/*     pointer to row j's off-diagonal nonzeros in Ke_bindx[j] with */
/*     column/nonzero entries in Ke_bindx[Ke_bindx[j]:Ke_bindx[j+1]-1] */
/*     and Ke_val[Ke_bindx[j]:Ke_bindx[j+1]-1].                     */
/*  2) call AZ_transform to convert global column indices to local  */
/*     indices and to set up Aztec's communication structure.       */
/*  3) Stuff the arrays into an Aztec matrix.                       */
/*------------------------------------------------------------------*/
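
/* Worked DMSR example (added for illustration): the 3x3 tridiagonal
   matrix [2 -1 0; -1 2 -1; 0 -1 2] is stored with N = 3 as
     bindx = {4, 5, 7, 8,  1,  0,  2,  1}
     val   = {2, 2, 2, *, -1, -1, -1, -1}
   bindx[0] = N+1 = 4; val[0..N-1] holds the diagonal; row j's
   off-diagonals sit in positions bindx[j]..bindx[j+1]-1 of both arrays. */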

AZ_MATRIX *user_Ke_build(struct user_partition *Edge_Partition)
{
  double dcenter, doff, sigma = .0001;
  int ii,jj, horv, i, nx, global_id, nz_ptr, Nlocal_edges;

  /* Aztec matrix and temp variables */

  int       *Ke_bindx, *Ke_data_org = NULL;
  double    *Ke_val;
  AZ_MATRIX *Ke_mat;
  int       proc_config[AZ_PROC_SIZE], *cpntr = NULL;
  int       *reordered_glob_edges = NULL, *reordered_edge_externs = NULL;

  Nlocal_edges = Edge_Partition->Nlocal;
  nx = (int) sqrt( ((double) Edge_Partition->Nglobal/2) + .00001);

  Ke_bindx = (int    *) malloc((7*Nlocal_edges+1)*sizeof(int));
  Ke_val   = (double *) malloc((7*Nlocal_edges+1)*sizeof(double));
  Ke_bindx[0] = Nlocal_edges+1;

  dcenter  = 2 + 2.*sigma/((double) ( 3 * nx * nx));
  doff = -1 + sigma/((double) ( 6 * nx * nx));

  /* Create a DMSR matrix with global column indices */

  for (i = 0; i < Nlocal_edges; i++) {
    global_id = (Edge_Partition->my_global_ids)[i];
    invindex(global_id, &ii, &jj, nx, &horv);
    nz_ptr = Ke_bindx[i];

    Ke_val[i] = dcenter;

    if (horv == HORIZONTAL) {
      if (jj != 0) {
        Ke_bindx[nz_ptr] = north(ii,jj,nx);     Ke_val[nz_ptr++] = doff;
        Ke_bindx[nz_ptr] = east(ii,jj,nx);      Ke_val[nz_ptr++] = -1.;
        if (ii != 0) {Ke_bindx[nz_ptr]=west(ii,jj,nx); Ke_val[nz_ptr++]= 1.;}
        jj--;
      }
      else {
        Ke_val[i] = 1. +  2.*sigma/((double) ( 3 * nx * nx));
        jj = nx-1;
      }
      Ke_bindx[nz_ptr] = east(ii,jj,nx);      Ke_val[nz_ptr++] = 1.;
      if (ii != 0){ Ke_bindx[nz_ptr]=west(ii,jj,nx);  Ke_val[nz_ptr++]=-1.;}
      if (jj != 0){ Ke_bindx[nz_ptr]=south(ii,jj,nx); Ke_val[nz_ptr++]=doff;}
    }
    else {
      if (ii != 0) {
        Ke_bindx[nz_ptr] = north(ii,jj,nx);     Ke_val[nz_ptr++] = -1.;
        Ke_bindx[nz_ptr] = east(ii,jj,nx);      Ke_val[nz_ptr++] = doff;
        if (jj != 0) {Ke_bindx[nz_ptr]=south(ii,jj,nx); Ke_val[nz_ptr++]=1.;}
        ii--;
      }
      else {
        Ke_val[i]  = 1 +  2.*sigma/((double) ( 3 * nx * nx));
        ii = nx-1;
      }
      Ke_bindx[nz_ptr] = north(ii,jj,nx);     Ke_val[nz_ptr++] = 1.;
      if (ii != 0) {Ke_bindx[nz_ptr]=west(ii,jj,nx);  Ke_val[nz_ptr++]=doff;}
      if (jj != 0) {Ke_bindx[nz_ptr]=south(ii,jj,nx); Ke_val[nz_ptr++]=-1.;}
    }
    Ke_bindx[i+1] = nz_ptr;
  }

  /* Transform the global Aztec matrix into a local Aztec matrix. That is,   */
  /* replace global column indices by local indices and set up communication */
  /* data structure 'Ke_data_org' that will be used for matvec's.            */

  AZ_set_proc_config(proc_config, COMMUNICATOR);

  AZ_transform_norowreordering(proc_config, &(Edge_Partition->needed_external_ids),
                               Ke_bindx, Ke_val, Edge_Partition->my_global_ids,
                               &reordered_glob_edges, &reordered_edge_externs,
                               &Ke_data_org, Nlocal_edges, 0, 0, 0,
                               &cpntr,         AZ_MSR_MATRIX);
  AZ_free(reordered_glob_edges);
  AZ_free(reordered_edge_externs);
  Edge_Partition->Nghost = Ke_data_org[AZ_N_external];

  /* Convert old style Aztec matrix to newer style Aztec matrix */

  Ke_mat = AZ_matrix_create( Nlocal_edges );
  AZ_set_MSR(Ke_mat, Ke_bindx, Ke_val, Ke_data_org, 0, NULL, AZ_LOCAL);

  return(Ke_mat);
}

int *reordered_node_externs = NULL;  /* Aztec thing */

/********************************************************************/
/* Set up Kn_mat                                                    */
/*  1) First set it up as an Aztec DMSR matrix                      */
/*     This stores the diagonal of row j in Kn_val[j] and stores a  */
/*     pointer to row j's off-diagonal nonzeros in Kn_bindx[j] with */
/*     column/nonzero entries in Kn_bindx[Kn_bindx[j]:Kn_bindx[j+1]-1] */
/*     and Kn_val[Kn_bindx[j]:Kn_bindx[j+1]-1].                     */
/*  2) call AZ_transform to convert global column indices to local  */
/*     indices and to set up Aztec's communication structure.       */
/*  3) Stuff the arrays into an Aztec matrix.                       */
/*------------------------------------------------------------------*/

AZ_MATRIX *user_Kn_build(struct user_partition *Node_Partition)

{
  int *Kn_bindx;
  double *Kn_val;
  int    proc_config[AZ_PROC_SIZE];
  AZ_MATRIX *Kn_mat;
  int    *reordered_glob_nodes = NULL, *cpntr = NULL, *Kn_data_org = NULL;
  int i, ii, jj, nx, gid, Nlocal_nodes, nz_ptr;

  Nlocal_nodes = Node_Partition->Nlocal;
  Kn_bindx = (int    *) malloc((27*Nlocal_nodes+5)*sizeof(int));
  Kn_val   = (double *) malloc((27*Nlocal_nodes+5)*sizeof(double));
  Kn_bindx[0] = Nlocal_nodes+1;

  nx = (int) sqrt( ((double) Node_Partition->Nglobal) + .00001);

  for (i = 0; i < Nlocal_nodes; i++) {
    gid = (Node_Partition->my_global_ids)[i];

    nz_ptr = Kn_bindx[i];
    ii = gid%nx;
    jj = (gid - ii)/nx;

    if (ii != nx-1) { Kn_bindx[nz_ptr] = gid+ 1; Kn_val[nz_ptr++] = -1.;}
    if (jj != nx-1) { Kn_bindx[nz_ptr] = gid+nx; Kn_val[nz_ptr++] = -1.;}
    if (jj !=    0) { Kn_bindx[nz_ptr] = gid-nx; Kn_val[nz_ptr++] = -1.;}
    if (ii !=    0) { Kn_bindx[nz_ptr] = gid- 1; Kn_val[nz_ptr++] = -1.;}

    if ((ii != nx-1) && (jj !=    0))
      {Kn_bindx[nz_ptr] = gid-nx+1; Kn_val[nz_ptr++] = -1.;}
    if ((ii != nx-1) && (jj != nx-1))
      {Kn_bindx[nz_ptr] = gid+nx+1; Kn_val[nz_ptr++] = -1.;}
    if ((ii !=    0) && (jj != nx-1))
      {Kn_bindx[nz_ptr] = gid+nx-1; Kn_val[nz_ptr++] = -1.;}
    if ((ii !=    0) && (jj !=    0))
      {Kn_bindx[nz_ptr] = gid-nx-1; Kn_val[nz_ptr++] = -1.;}
    Kn_val[i] = (double) (nz_ptr - Kn_bindx[i]);
    Kn_bindx[i+1] = nz_ptr;
  }

  /* Transform the global Aztec matrix into a local Aztec matrix. That is,   */
  /* replace global column indices by local indices and set up communication */
  /* data structure 'Kn_data_org' that will be used for matvec's.            */

  AZ_set_proc_config(proc_config, COMMUNICATOR);

  AZ_transform_norowreordering(proc_config,&(Node_Partition->needed_external_ids),
                               Kn_bindx, Kn_val, Node_Partition->my_global_ids,
                               &reordered_glob_nodes, &reordered_node_externs,
                               &Kn_data_org, Nlocal_nodes, 0, 0, 0,
                               &cpntr, AZ_MSR_MATRIX);
  Node_Partition->Nghost = Kn_data_org[AZ_N_external];
  AZ_free(reordered_glob_nodes);

  /* Convert old style Aztec matrix to newer style Aztec matrix */

  Kn_mat = AZ_matrix_create( Nlocal_nodes );
  AZ_set_MSR(Kn_mat, Kn_bindx, Kn_val, Kn_data_org, 0, NULL, AZ_LOCAL);

  return(Kn_mat);
}

/********************************************************************/
/* Set up T_mat                                                     */
/*  1) First set it up as a DMSR matrix. This is a bit awkward      */
/*     because a rectangular matrix doesn't have a diagonal; DMSR   */
/*     stores the diagonal of row j in Tmat_val[j] and stores a     */
/*     pointer to row j's off-diagonal nonzeros in Tmat_bindx[j]    */
/*     with column/nonzero entries in                               */
/*     Tmat_bindx[Tmat_bindx[j]:Tmat_bindx[j+1]-1]                  */
/*     and Tmat_val[Tmat_bindx[j]:Tmat_bindx[j+1]-1].               */
/*  2) Convert the matrix to CSR format.                            */
/*  3) Call a modified Aztec routine to convert global columns to   */
/*     local indices and to make an ML matrix out of it.            */
/*  4) Since the above routine does not compute a communication data*/
/*     structure, we clone Kn_mat's communication structure (i.e.   */
/*     we assume that Tmat and Kn_mat have the same communication   */
/*     pattern).                                                    */
/*------------------------------------------------------------------*/
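
/* Note (added): each row of T has at most two nonzeros, +1 and -1, at
   the two nodes bounding the edge, so T acts as the discrete gradient
   (node-to-edge incidence) operator that MultiLevelPreconditioner uses
   to build the node hierarchy and the Hiptmair smoother.               */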

ML_Operator *user_T_build(struct user_partition *Edge_Partition,
                      struct user_partition *Node_Partition,
                      ML_Operator *Kn_mat, ML_Comm *comm)
{

  int nx, i, ii, jj, horv, Ncols, Nexterns;
  int *Tmat_bindx;
  double *Tmat_val;
  ML_Operator *Tmat;
  struct ML_CSR_MSRdata *csr_data;
  struct aztec_context *aztec_context;
  int global_id;
  int Nlocal_nodes, Nlocal_edges;
  int nz_ptr;

  Nlocal_nodes = Node_Partition->Nlocal;
  Nlocal_edges = Edge_Partition->Nlocal;

  nx = (int) sqrt( ((double) Node_Partition->Nglobal) + .00001);

  Tmat_bindx = (int    *) malloc((3*Nlocal_edges+1)*sizeof(int));
  Tmat_val   = (double *) malloc((3*Nlocal_edges+1)*sizeof(double));

  Tmat_bindx[0] = Nlocal_edges + 1;
  for (i = 0; i < Nlocal_edges; i++) {
    global_id = (Edge_Partition->my_global_ids)[i];
    Tmat_val[i] = 0.0;

    invindex(global_id, &ii, &jj, nx, &horv);
    nz_ptr = Tmat_bindx[i];

    ii--;

    if (horv == HORIZONTAL) {
      if(ii != -1) {
        Tmat_bindx[nz_ptr] = southwest(ii,jj,nx); Tmat_val[nz_ptr++] = -1.;
      }
      Tmat_bindx[nz_ptr]   = southeast(ii,jj,nx); Tmat_val[nz_ptr++] =  1.;
    }
    else {
      if (ii == -1) ii = nx-1;
      Tmat_bindx[nz_ptr] = northwest(ii,jj,nx); Tmat_val[nz_ptr++] = -1.;
      if (jj != 0) {
        Tmat_bindx[nz_ptr] = southwest(ii,jj,nx); Tmat_val[nz_ptr++] =  1.;}
    }
    Tmat_bindx[i+1] = nz_ptr;

  }

  /* Convert the MSR matrix to a CSR matrix. Then use a modified Aztec */
  /* routine to convert the global CSR matrix to a local ML matrix     */
  /* Since this routine does not compute the communication structure,  */
  /* we assume that it is identical to Kn's and just clone it.         */

  csr_data = (struct ML_CSR_MSRdata *) ML_allocate(sizeof(struct ML_CSR_MSRdata));
  csr_data->columns = Tmat_bindx;
  csr_data->values  = Tmat_val;
  ML_MSR2CSR(csr_data, Nlocal_edges, &Ncols);
  aztec_context = (struct aztec_context *) Kn_mat->data;
  Nexterns = (aztec_context->Amat->data_org)[AZ_N_external];

  /*  ML_Comm_Create( &comm); */

  AZ_Tmat_transform2ml(Nexterns,   Node_Partition->needed_external_ids,
                       reordered_node_externs,
                       Tmat_bindx, Tmat_val, csr_data->rowptr, Nlocal_nodes,
                       Node_Partition->my_global_ids,
                       comm, Nlocal_edges, &Tmat);
  ML_free(csr_data);
  Tmat->data_destroy = ML_CSR_MSRdata_Destroy;
  ML_CommInfoOP_Clone(&(Tmat->getrow->pre_comm), Kn_mat->getrow->pre_comm);

  return(Tmat);
}
