Actual source code: mpimatmatmult.c
petsc-3.14.0 2020-09-29
2: /*
3: Defines matrix-matrix product routines for pairs of MPIAIJ matrices
4: C = A * B
5: */
6: #include <../src/mat/impls/aij/seq/aij.h>
7: #include <../src/mat/utils/freespace.h>
8: #include <../src/mat/impls/aij/mpi/mpiaij.h>
9: #include <petscbt.h>
10: #include <../src/mat/impls/dense/mpi/mpidense.h>
11: #include <petsc/private/vecimpl.h>
12: #include <petsc/private/vecscatterimpl.h>
14: #if defined(PETSC_HAVE_HYPRE)
15: PETSC_INTERN PetscErrorCode MatMatMultSymbolic_AIJ_AIJ_wHYPRE(Mat,Mat,PetscReal,Mat);
16: #endif
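/* A minimal caller-side sketch (not part of this file) showing how these kernels are
   typically reached through the MatProduct interface; the algorithm strings are the ones
   tested in MatProductSymbolic_AB_MPIAIJ_MPIAIJ() below:

       Mat C;
       MatProductCreate(A,B,NULL,&C);
       MatProductSetType(C,MATPRODUCT_AB);
       MatProductSetAlgorithm(C,"scalable");    (or "nonscalable", "seqmpi", "hypre")
       MatProductSetFromOptions(C);
       MatProductSymbolic(C);                   (dispatches to the symbolic routines below)
       MatProductNumeric(C);

   or, equivalently, MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C). */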
18: PETSC_INTERN PetscErrorCode MatProductSymbolic_AB_MPIAIJ_MPIAIJ(Mat C)
19: {
20: PetscErrorCode ierr;
21: Mat_Product *product = C->product;
22: Mat A=product->A,B=product->B;
23: MatProductAlgorithm alg=product->alg;
24: PetscReal fill=product->fill;
25: PetscBool flg;
28: /* scalable */
29: PetscStrcmp(alg,"scalable",&flg);
30: if (flg) {
31: MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);
32: return(0);
33: }
35: /* nonscalable */
36: PetscStrcmp(alg,"nonscalable",&flg);
37: if (flg) {
38: MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A,B,fill,C);
39: return(0);
40: }
42: /* seqmpi */
43: PetscStrcmp(alg,"seqmpi",&flg);
44: if (flg) {
45: MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(A,B,fill,C);
46: return(0);
47: }
49: #if defined(PETSC_HAVE_HYPRE)
50: PetscStrcmp(alg,"hypre",&flg);
51: if (flg) {
52: MatMatMultSymbolic_AIJ_AIJ_wHYPRE(A,B,fill,C);
53: return(0);
54: }
55: #endif
56: SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_SUP,"Mat Product Algorithm is not supported");
57: }
59: PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(void *data)
60: {
62: Mat_APMPI *ptap = (Mat_APMPI*)data;
65: PetscFree2(ptap->startsj_s,ptap->startsj_r);
66: PetscFree(ptap->bufa);
67: MatDestroy(&ptap->P_loc);
68: MatDestroy(&ptap->P_oth);
69: MatDestroy(&ptap->Pt);
70: PetscFree(ptap->api);
71: PetscFree(ptap->apj);
72: PetscFree(ptap->apa);
73: PetscFree(ptap);
74: return(0);
75: }
77: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat A,Mat P,Mat C)
78: {
80: Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data;
81: Mat_SeqAIJ *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
82: Mat_SeqAIJ *cd =(Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data;
83: PetscScalar *cda=cd->a,*coa=co->a;
84: Mat_SeqAIJ *p_loc,*p_oth;
85: PetscScalar *apa,*ca;
86: PetscInt cm =C->rmap->n;
87: Mat_APMPI *ptap;
88: PetscInt *api,*apj,*apJ,i,k;
89: PetscInt cstart=C->cmap->rstart;
90: PetscInt cdnz,conz,k0,k1;
91: MPI_Comm comm;
92: PetscMPIInt size;
95: MatCheckProduct(C,3);
96: ptap = (Mat_APMPI*)C->product->data;
97: if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be computed. Missing data");
98: PetscObjectGetComm((PetscObject)A,&comm);
99: MPI_Comm_size(comm,&size);
101: if (!ptap->P_oth && size>1) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"AP cannot be reused. Do not call MatProductClear()");
103: /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
104: /*-----------------------------------------------------*/
105: /* update numerical values of P_oth and P_loc */
106: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
107: MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);
109: /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
110: /*----------------------------------------------------------*/
111: /* get data from symbolic products */
112: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
113: p_oth = NULL;
114: if (size >1) {
115: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
116: }
118: /* get apa for storing dense row A[i,:]*P */
119: apa = ptap->apa;
121: api = ptap->api;
122: apj = ptap->apj;
123: for (i=0; i<cm; i++) {
124: /* compute apa = A[i,:]*P */
125: AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
127: /* set values in C */
128: apJ = apj + api[i];
129: cdnz = cd->i[i+1] - cd->i[i];
130: conz = co->i[i+1] - co->i[i];
132: /* 1st off-diagonal part of C */
133: ca = coa + co->i[i];
134: k = 0;
135: for (k0=0; k0<conz; k0++) {
136: if (apJ[k] >= cstart) break;
137: ca[k0] = apa[apJ[k]];
138: apa[apJ[k++]] = 0.0;
139: }
141: /* diagonal part of C */
142: ca = cda + cd->i[i];
143: for (k1=0; k1<cdnz; k1++) {
144: ca[k1] = apa[apJ[k]];
145: apa[apJ[k++]] = 0.0;
146: }
148: /* 2nd off-diagonal part of C */
149: ca = coa + co->i[i];
150: for (; k0<conz; k0++) {
151: ca[k0] = apa[apJ[k]];
152: apa[apJ[k++]] = 0.0;
153: }
154: }
155: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
156: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
157: return(0);
158: }
160: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat A,Mat P,PetscReal fill,Mat C)
161: {
162: PetscErrorCode ierr;
163: MPI_Comm comm;
164: PetscMPIInt size;
165: Mat_APMPI *ptap;
166: PetscFreeSpaceList free_space=NULL,current_space=NULL;
167: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data;
168: Mat_SeqAIJ *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
169: PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
170: PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart;
171: PetscInt *lnk,i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi;
172: PetscInt am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n;
173: PetscBT lnkbt;
174: PetscReal afill;
175: MatType mtype;
178: MatCheckProduct(C,4);
179: if (C->product->data) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Extra product struct not empty");
180: PetscObjectGetComm((PetscObject)A,&comm);
181: MPI_Comm_size(comm,&size);
183: /* create struct Mat_APMPI and attach it to C later */
184: PetscNew(&ptap);
186: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
187: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
189: /* get P_loc by taking all local rows of P */
190: MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);
192: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
193: pi_loc = p_loc->i; pj_loc = p_loc->j;
194: if (size > 1) {
195: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
196: pi_oth = p_oth->i; pj_oth = p_oth->j;
197: } else {
198: p_oth = NULL;
199: pi_oth = NULL; pj_oth = NULL;
200: }
202: /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
203: /*-------------------------------------------------------------------*/
204: PetscMalloc1(am+2,&api);
205: ptap->api = api;
206: api[0] = 0;
208: /* create and initialize a linked list */
209: PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);
211: /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
212: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space);
213: current_space = free_space;
215: MatPreallocateInitialize(comm,am,pn,dnz,onz);
216: for (i=0; i<am; i++) {
217: /* diagonal portion of A */
218: nzi = adi[i+1] - adi[i];
219: for (j=0; j<nzi; j++) {
220: row = *adj++;
221: pnz = pi_loc[row+1] - pi_loc[row];
222: Jptr = pj_loc + pi_loc[row];
223: /* add non-zero cols of P into the sorted linked list lnk */
224: PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);
225: }
226: /* off-diagonal portion of A */
227: nzi = aoi[i+1] - aoi[i];
228: for (j=0; j<nzi; j++) {
229: row = *aoj++;
230: pnz = pi_oth[row+1] - pi_oth[row];
231: Jptr = pj_oth + pi_oth[row];
232: PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);
233: }
235: apnz = lnk[0];
236: api[i+1] = api[i] + apnz;
238: /* if free space is not available, double the total space in the list */
239: if (current_space->local_remaining<apnz) {
240: PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);
241: nspacedouble++;
242: }
244: /* Copy data into free space, then initialize lnk */
245: PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);
246: MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);
248: current_space->array += apnz;
249: current_space->local_used += apnz;
250: current_space->local_remaining -= apnz;
251: }
253: /* Allocate space for apj, initialize apj, and */
254: /* destroy list of free space and other temporary array(s) */
255: PetscMalloc1(api[am]+1,&ptap->apj);
256: apj = ptap->apj;
257: PetscFreeSpaceContiguous(&free_space,ptap->apj);
258: PetscLLDestroy(lnk,lnkbt);
260: /* malloc apa to store dense row A[i,:]*P */
261: PetscCalloc1(pN,&ptap->apa);
263: /* set and assemble symbolic parallel matrix C */
264: /*---------------------------------------------*/
265: MatSetSizes(C,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);
266: MatSetBlockSizesFromMats(C,A,P);
268: MatGetType(A,&mtype);
269: MatSetType(C,mtype);
270: MatMPIAIJSetPreallocation(C,0,dnz,0,onz);
272: MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api);
273: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
274: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
275: MatPreallocateFinalize(dnz,onz);
277: C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
278: C->ops->productnumeric = MatProductNumeric_AB;
280: /* attach the supporting struct to C for reuse */
281: C->product->data = ptap;
282: C->product->destroy = MatDestroy_MPIAIJ_MatMatMult;
284: /* set MatInfo */
285: afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
286: if (afill < 1.0) afill = 1.0;
287: C->info.mallocs = nspacedouble;
288: C->info.fill_ratio_given = fill;
289: C->info.fill_ratio_needed = afill;
291: #if defined(PETSC_USE_INFO)
292: if (api[am]) {
293: PetscInfo3(C,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
294: PetscInfo1(C,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);
295: } else {
296: PetscInfo(C,"Empty matrix product\n");
297: }
298: #endif
299: return(0);
300: }
302: /* ------------------------------------------------------- */
303: static PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat,Mat,PetscReal,Mat);
304: static PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat,Mat,Mat);
306: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AB(Mat C)
307: {
308: Mat_Product *product = C->product;
309: Mat A = product->A,B=product->B;
312: if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend)
313: SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
315: C->ops->matmultsymbolic = MatMatMultSymbolic_MPIAIJ_MPIDense;
316: C->ops->productsymbolic = MatProductSymbolic_AB;
317: return(0);
318: }
319: /* -------------------------------------------------------------------- */
320: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(Mat C)
321: {
322: Mat_Product *product = C->product;
323: Mat A = product->A,B=product->B;
326: if (A->rmap->rstart != B->rmap->rstart || A->rmap->rend != B->rmap->rend)
327: SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->rmap->rstart,A->rmap->rend,B->rmap->rstart,B->rmap->rend);
329: C->ops->transposematmultsymbolic = MatTransposeMatMultSymbolic_MPIAIJ_MPIDense;
330: C->ops->productsymbolic = MatProductSymbolic_AtB;
331: return(0);
332: }
334: /* --------------------------------------------------------------------- */
335: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense(Mat C)
336: {
338: Mat_Product *product = C->product;
341: switch (product->type) {
342: case MATPRODUCT_AB:
343: MatProductSetFromOptions_MPIAIJ_MPIDense_AB(C);
344: break;
345: case MATPRODUCT_AtB:
346: MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(C);
347: break;
348: default:
349: break;
350: }
351: return(0);
352: }
353: /* ------------------------------------------------------- */
355: typedef struct {
356: Mat workB,workB1;
357: MPI_Request *rwaits,*swaits;
358: PetscInt nsends,nrecvs;
359: MPI_Datatype *stype,*rtype;
360: PetscInt blda;
361: } MPIAIJ_MPIDense;
363: PetscErrorCode MatMPIAIJ_MPIDenseDestroy(void *ctx)
364: {
365: MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*)ctx;
366: PetscErrorCode ierr;
367: PetscInt i;
370: MatDestroy(&contents->workB);
371: MatDestroy(&contents->workB1);
372: for (i=0; i<contents->nsends; i++) {
373: MPI_Type_free(&contents->stype[i]);
374: }
375: for (i=0; i<contents->nrecvs; i++) {
376: MPI_Type_free(&contents->rtype[i]);
377: }
378: PetscFree4(contents->stype,contents->rtype,contents->rwaits,contents->swaits);
379: PetscFree(contents);
380: return(0);
381: }
383: static PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat C)
384: {
385: PetscErrorCode ierr;
386: Mat_MPIAIJ *aij=(Mat_MPIAIJ*)A->data;
387: PetscInt nz=aij->B->cmap->n,nsends,nrecvs,i,nrows_to,j,blda,clda;
388: MPIAIJ_MPIDense *contents;
389: VecScatter ctx=aij->Mvctx;
390: PetscInt Am=A->rmap->n,Bm=B->rmap->n,BN=B->cmap->N,Bbn,Bbn1,bs,nrows_from,numBb;
391: MPI_Comm comm;
392: MPI_Datatype type1,*stype,*rtype;
393: const PetscInt *sindices,*sstarts,*rstarts;
394: PetscMPIInt *disp;
395: PetscBool cisdense;
398: MatCheckProduct(C,4);
399: if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data not empty");
400: PetscObjectGetComm((PetscObject)A,&comm);
401: PetscObjectBaseTypeCompare((PetscObject)C,MATMPIDENSE,&cisdense);
402: if (!cisdense) {
403: MatSetType(C,((PetscObject)B)->type_name);
404: }
405: MatSetSizes(C,Am,B->cmap->n,A->rmap->N,BN);
406: MatSetBlockSizesFromMats(C,A,B);
407: MatSetUp(C);
408: MatDenseGetLDA(B,&blda);
409: MatDenseGetLDA(C,&clda);
410: PetscNew(&contents);
412: VecScatterGetRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,NULL,NULL);
413: VecScatterGetRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,NULL,NULL);
415: /* Create column block of B and C for memory scalability when BN is too large */
416: /* Estimate Bbn, column size of Bb */
417: if (nz) {
418: Bbn1 = 2*Am*BN/nz;
419: } else Bbn1 = BN;
421: bs = PetscAbs(B->cmap->bs);
422: Bbn1 = Bbn1/bs *bs; /* Bbn1 is a multiple of bs */
423: if (Bbn1 > BN) Bbn1 = BN;
424: MPI_Allreduce(&Bbn1,&Bbn,1,MPIU_INT,MPI_MAX,comm);
426: /* Enable runtime option for Bbn */
427: PetscOptionsBegin(comm,((PetscObject)C)->prefix,"MatMatMult","Mat");
428: PetscOptionsInt("-matmatmult_Bbn","Number of columns in Bb","MatMatMult",Bbn,&Bbn,NULL);
429: PetscOptionsEnd();
430: Bbn = PetscMin(Bbn,BN);
432: if (Bbn > 0 && Bbn < BN) {
433: numBb = BN/Bbn;
434: Bbn1 = BN - numBb*Bbn;
435: } else numBb = 0;
437: if (numBb) {
438: PetscInfo3(C,"use Bb, BN=%D, Bbn=%D; numBb=%D\n",BN,Bbn,numBb);
439: if (Bbn1) { /* Create workB1 for the remaining columns */
440: PetscInfo2(C,"use Bb1, BN=%D, Bbn1=%D\n",BN,Bbn1);
441: /* Create work matrix used to store off processor rows of B needed for local product */
442: MatCreateSeqDense(PETSC_COMM_SELF,nz,Bbn1,NULL,&contents->workB1);
443: } else contents->workB1 = NULL;
444: }
446: /* Create work matrix used to store off processor rows of B needed for local product */
447: MatCreateSeqDense(PETSC_COMM_SELF,nz,Bbn,NULL,&contents->workB);
449: /* Use MPI derived data type to reduce memory required by the send/recv buffers */
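/* Layout sketch (following the construction below): B is stored column-major with leading
   dimension blda, so stype[i] selects the rows sindices[sstarts[i]..sstarts[i+1]) within one
   column and is resized to an extent of blda scalars; sending ncols copies of it then walks
   across the columns of B without packing an intermediate buffer.  Similarly, rtype[i] is one
   contiguous block of nrows_from scalars resized to an extent of nz scalars (the leading
   dimension of workB), so received columns land directly in place in workB. */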
450: PetscMalloc4(nsends,&stype,nrecvs,&rtype,nrecvs,&contents->rwaits,nsends,&contents->swaits);
451: contents->stype = stype;
452: contents->nsends = nsends;
454: contents->rtype = rtype;
455: contents->nrecvs = nrecvs;
456: contents->blda = blda;
458: PetscMalloc1(Bm+1,&disp);
459: for (i=0; i<nsends; i++) {
460: nrows_to = sstarts[i+1]-sstarts[i];
461: for (j=0; j<nrows_to; j++){
462: disp[j] = sindices[sstarts[i]+j]; /* rowB to be sent */
463: }
464: MPI_Type_create_indexed_block(nrows_to,1,(const PetscMPIInt *)disp,MPIU_SCALAR,&type1);
466: MPI_Type_create_resized(type1,0,blda*sizeof(PetscScalar),&stype[i]);
467: MPI_Type_commit(&stype[i]);
468: MPI_Type_free(&type1);
469: }
471: for (i=0; i<nrecvs; i++) {
472: /* received values from a process form a (nrows_from x Bbn) row block in workB (column-wise) */
473: nrows_from = rstarts[i+1]-rstarts[i];
474: disp[0] = 0;
475: MPI_Type_create_indexed_block(1, nrows_from, (const PetscMPIInt *)disp, MPIU_SCALAR, &type1);
476: MPI_Type_create_resized(type1, 0, nz*sizeof(PetscScalar), &rtype[i]);
477: MPI_Type_commit(&rtype[i]);
478: MPI_Type_free(&type1);
479: }
481: PetscFree(disp);
482: VecScatterRestoreRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,NULL,NULL);
483: VecScatterRestoreRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,NULL,NULL);
484: MatSetOption(C,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
485: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
486: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
488: C->product->data = contents;
489: C->product->destroy = MatMPIAIJ_MPIDenseDestroy;
490: C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense;
491: return(0);
492: }
494: PETSC_INTERN PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat,Mat,Mat,const PetscBool);
495: /*
496: Performs an efficient scatter on the rows of B needed by this process; this is
497: a modification of the VecScatterBegin_() routines.
499: Input: Bbidx = 0: B = Bb
500: = 1: B = Bb1, see MatMatMultSymbolic_MPIAIJ_MPIDense()
501: */
502: PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,PetscInt Bbidx,Mat C,Mat *outworkB)
503: {
504: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data;
505: PetscErrorCode ierr;
506: const PetscScalar *b;
507: PetscScalar *rvalues;
508: VecScatter ctx = aij->Mvctx;
509: const PetscInt *sindices,*sstarts,*rstarts;
510: const PetscMPIInt *sprocs,*rprocs;
511: PetscInt i,nsends,nrecvs;
512: MPI_Request *swaits,*rwaits;
513: MPI_Comm comm;
514: PetscMPIInt tag=((PetscObject)ctx)->tag,ncols=B->cmap->N,nrows=aij->B->cmap->n,nsends_mpi,nrecvs_mpi;
515: MPIAIJ_MPIDense *contents;
516: Mat workB;
517: MPI_Datatype *stype,*rtype;
518: PetscInt blda;
521: MatCheckProduct(C,4);
522: if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty");
523: contents = (MPIAIJ_MPIDense*)C->product->data;
524: VecScatterGetRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,&sprocs,NULL/*bs*/);
525: VecScatterGetRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,&rprocs,NULL/*bs*/);
526: PetscMPIIntCast(nsends,&nsends_mpi);
527: PetscMPIIntCast(nrecvs,&nrecvs_mpi);
528: if (Bbidx == 0) {
529: workB = *outworkB = contents->workB;
530: } else {
531: workB = *outworkB = contents->workB1;
532: }
533: if (nrows != workB->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Number of rows of workB %D not equal to columns of aij->B %D",workB->rmap->n,nrows);
534: swaits = contents->swaits;
535: rwaits = contents->rwaits;
537: MatDenseGetArrayRead(B,&b);
538: MatDenseGetLDA(B,&blda);
539: if (blda != contents->blda) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Cannot reuse an input matrix with lda %D != %D",blda,contents->blda);
540: MatDenseGetArray(workB,&rvalues);
542: /* Post recv, use MPI derived data type to save memory */
543: PetscObjectGetComm((PetscObject)C,&comm);
544: rtype = contents->rtype;
545: for (i=0; i<nrecvs; i++) {
546: MPI_Irecv(rvalues+(rstarts[i]-rstarts[0]),ncols,rtype[i],rprocs[i],tag,comm,rwaits+i);
547: }
549: stype = contents->stype;
550: for (i=0; i<nsends; i++) {
551: MPI_Isend(b,ncols,stype[i],sprocs[i],tag,comm,swaits+i);
552: }
554: if (nrecvs) {MPI_Waitall(nrecvs_mpi,rwaits,MPI_STATUSES_IGNORE);}
555: if (nsends) {MPI_Waitall(nsends_mpi,swaits,MPI_STATUSES_IGNORE);}
557: VecScatterRestoreRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,&sprocs,NULL);
558: VecScatterRestoreRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,&rprocs,NULL);
559: MatDenseRestoreArrayRead(B,&b);
560: MatDenseRestoreArray(workB,&rvalues);
561: return(0);
562: }
564: static PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C)
565: {
566: PetscErrorCode ierr;
567: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data;
568: Mat_MPIDense *bdense = (Mat_MPIDense*)B->data;
569: Mat_MPIDense *cdense = (Mat_MPIDense*)C->data;
570: Mat workB;
571: MPIAIJ_MPIDense *contents;
574: MatCheckProduct(C,3);
575: if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty");
576: contents = (MPIAIJ_MPIDense*)C->product->data;
577: /* diagonal block of A times all local rows of B */
578: /* TODO: this calls a symbolic multiplication every time, which could be avoided */
579: MatMatMult(aij->A,bdense->A,MAT_REUSE_MATRIX,PETSC_DEFAULT,&cdense->A);
580: if (contents->workB->cmap->n == B->cmap->N) {
581: /* get off processor parts of B needed to complete C=A*B */
582: MatMPIDenseScatter(A,B,0,C,&workB);
584: /* off-diagonal block of A times nonlocal rows of B */
585: MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A,PETSC_TRUE);
586: } else {
587: Mat Bb,Cb;
588: PetscInt BN=B->cmap->N,n=contents->workB->cmap->n,i;
590: for (i=0; i<BN; i+=n) {
591: MatDenseGetSubMatrix(B,i,PetscMin(i+n,BN),&Bb);
592: MatDenseGetSubMatrix(C,i,PetscMin(i+n,BN),&Cb);
594: /* get off processor parts of B needed to complete C=A*B */
595: MatMPIDenseScatter(A,Bb,i+n>BN,C,&workB);
597: /* off-diagonal block of A times nonlocal rows of B */
598: cdense = (Mat_MPIDense*)Cb->data;
599: MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A,PETSC_TRUE);
601: MatDenseRestoreSubMatrix(B,&Bb);
602: MatDenseRestoreSubMatrix(C,&Cb);
603: }
604: }
605: return(0);
606: }
608: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
609: {
611: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data;
612: Mat_SeqAIJ *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
613: Mat_SeqAIJ *cd = (Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data;
614: PetscInt *adi = ad->i,*adj,*aoi=ao->i,*aoj;
615: PetscScalar *ada,*aoa,*cda=cd->a,*coa=co->a;
616: Mat_SeqAIJ *p_loc,*p_oth;
617: PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pj;
618: PetscScalar *pa_loc,*pa_oth,*pa,valtmp,*ca;
619: PetscInt cm = C->rmap->n,anz,pnz;
620: Mat_APMPI *ptap;
621: PetscScalar *apa_sparse;
622: PetscInt *api,*apj,*apJ,i,j,k,row;
623: PetscInt cstart = C->cmap->rstart;
624: PetscInt cdnz,conz,k0,k1,nextp;
625: MPI_Comm comm;
626: PetscMPIInt size;
629: MatCheckProduct(C,3);
630: ptap = (Mat_APMPI*)C->product->data;
631: if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be computed. Missing data");
632: PetscObjectGetComm((PetscObject)C,&comm);
633: MPI_Comm_size(comm,&size);
634: if (!ptap->P_oth && size>1) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"AP cannot be reused. Do not call MatProductClear()");
636: apa_sparse = ptap->apa;
638: /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
639: /*-----------------------------------------------------*/
640: /* update numerical values of P_oth and P_loc */
641: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
642: MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);
644: /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
645: /*----------------------------------------------------------*/
646: /* get data from symbolic products */
647: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
648: pi_loc = p_loc->i; pj_loc = p_loc->j; pa_loc = p_loc->a;
649: if (size >1) {
650: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
651: pi_oth = p_oth->i; pj_oth = p_oth->j; pa_oth = p_oth->a;
652: } else {
653: p_oth = NULL; pi_oth = NULL; pj_oth = NULL; pa_oth = NULL;
654: }
656: api = ptap->api;
657: apj = ptap->apj;
658: for (i=0; i<cm; i++) {
659: apJ = apj + api[i];
661: /* diagonal portion of A */
662: anz = adi[i+1] - adi[i];
663: adj = ad->j + adi[i];
664: ada = ad->a + adi[i];
665: for (j=0; j<anz; j++) {
666: row = adj[j];
667: pnz = pi_loc[row+1] - pi_loc[row];
668: pj = pj_loc + pi_loc[row];
669: pa = pa_loc + pi_loc[row];
670: /* perform sparse axpy */
671: valtmp = ada[j];
672: nextp = 0;
673: for (k=0; nextp<pnz; k++) {
674: if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
675: apa_sparse[k] += valtmp*pa[nextp++];
676: }
677: }
678: PetscLogFlops(2.0*pnz);
679: }
681: /* off-diagonal portion of A */
682: anz = aoi[i+1] - aoi[i];
683: aoj = ao->j + aoi[i];
684: aoa = ao->a + aoi[i];
685: for (j=0; j<anz; j++) {
686: row = aoj[j];
687: pnz = pi_oth[row+1] - pi_oth[row];
688: pj = pj_oth + pi_oth[row];
689: pa = pa_oth + pi_oth[row];
690: /* perform sparse axpy */
691: valtmp = aoa[j];
692: nextp = 0;
693: for (k=0; nextp<pnz; k++) {
694: if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
695: apa_sparse[k] += valtmp*pa[nextp++];
696: }
697: }
698: PetscLogFlops(2.0*pnz);
699: }
701: /* set values in C */
702: cdnz = cd->i[i+1] - cd->i[i];
703: conz = co->i[i+1] - co->i[i];
705: /* 1st off-diagonal part of C */
706: ca = coa + co->i[i];
707: k = 0;
708: for (k0=0; k0<conz; k0++) {
709: if (apJ[k] >= cstart) break;
710: ca[k0] = apa_sparse[k];
711: apa_sparse[k] = 0.0;
712: k++;
713: }
715: /* diagonal part of C */
716: ca = cda + cd->i[i];
717: for (k1=0; k1<cdnz; k1++) {
718: ca[k1] = apa_sparse[k];
719: apa_sparse[k] = 0.0;
720: k++;
721: }
723: /* 2nd off-diagonal part of C */
724: ca = coa + co->i[i];
725: for (; k0<conz; k0++) {
726: ca[k0] = apa_sparse[k];
727: apa_sparse[k] = 0.0;
728: k++;
729: }
730: }
731: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
732: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
733: return(0);
734: }
736: /* same as MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(), except using LLCondensed to avoid O(BN) memory requirement */
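/* Memory note: the nonscalable symbolic routine above keeps a dense work row ptap->apa of
   length pN (the global number of columns of P) on every process, whereas this version keeps
   a condensed linked list of roughly pn entries plus an apa buffer of only apnz_max entries
   (the longest row of A*P encountered), so the per-process footprint does not grow with the
   global column count. */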
737: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat C)
738: {
739: PetscErrorCode ierr;
740: MPI_Comm comm;
741: PetscMPIInt size;
742: Mat_APMPI *ptap;
743: PetscFreeSpaceList free_space = NULL,current_space=NULL;
744: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
745: Mat_SeqAIJ *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
746: PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
747: PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart;
748: PetscInt i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi,*lnk,apnz_max=0;
749: PetscInt am=A->rmap->n,pn=P->cmap->n,pm=P->rmap->n,lsize=pn+20;
750: PetscReal afill;
751: MatType mtype;
754: MatCheckProduct(C,4);
755: if (C->product->data) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Extra product struct not empty");
756: PetscObjectGetComm((PetscObject)A,&comm);
757: MPI_Comm_size(comm,&size);
759: /* create struct Mat_APMPI and attach it to C later */
760: PetscNew(&ptap);
762: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
763: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
765: /* get P_loc by taking all local rows of P */
766: MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);
768: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
769: pi_loc = p_loc->i; pj_loc = p_loc->j;
770: if (size > 1) {
771: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
772: pi_oth = p_oth->i; pj_oth = p_oth->j;
773: } else {
774: p_oth = NULL;
775: pi_oth = NULL; pj_oth = NULL;
776: }
778: /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
779: /*-------------------------------------------------------------------*/
780: PetscMalloc1(am+2,&api);
781: ptap->api = api;
782: api[0] = 0;
784: PetscLLCondensedCreate_Scalable(lsize,&lnk);
786: /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
787: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space);
788: current_space = free_space;
789: MatPreallocateInitialize(comm,am,pn,dnz,onz);
790: for (i=0; i<am; i++) {
791: /* diagonal portion of A */
792: nzi = adi[i+1] - adi[i];
793: for (j=0; j<nzi; j++) {
794: row = *adj++;
795: pnz = pi_loc[row+1] - pi_loc[row];
796: Jptr = pj_loc + pi_loc[row];
797: /* Expand list if it is not long enough */
798: if (pnz+apnz_max > lsize) {
799: lsize = pnz+apnz_max;
800: PetscLLCondensedExpand_Scalable(lsize, &lnk);
801: }
802: /* add non-zero cols of P into the sorted linked list lnk */
803: PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);
804: apnz = *lnk; /* The first element in the list is the number of items in the list */
805: api[i+1] = api[i] + apnz;
806: if (apnz > apnz_max) apnz_max = apnz;
807: }
808: /* off-diagonal portion of A */
809: nzi = aoi[i+1] - aoi[i];
810: for (j=0; j<nzi; j++) {
811: row = *aoj++;
812: pnz = pi_oth[row+1] - pi_oth[row];
813: Jptr = pj_oth + pi_oth[row];
814: /* Expand list if it is not long enough */
815: if (pnz+apnz_max > lsize) {
816: lsize = pnz + apnz_max;
817: PetscLLCondensedExpand_Scalable(lsize, &lnk);
818: }
819: /* add non-zero cols of P into the sorted linked list lnk */
820: PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);
821: apnz = *lnk; /* The first element in the list is the number of items in the list */
822: api[i+1] = api[i] + apnz;
823: if (apnz > apnz_max) apnz_max = apnz;
824: }
825: apnz = *lnk;
826: api[i+1] = api[i] + apnz;
827: if (apnz > apnz_max) apnz_max = apnz;
829: /* if free space is not available, double the total space in the list */
830: if (current_space->local_remaining<apnz) {
831: PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);
832: nspacedouble++;
833: }
835: /* Copy data into free space, then initialize lnk */
836: PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);
837: MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);
839: current_space->array += apnz;
840: current_space->local_used += apnz;
841: current_space->local_remaining -= apnz;
842: }
844: /* Allocate space for apj, initialize apj, and */
845: /* destroy list of free space and other temporary array(s) */
846: PetscMalloc1(api[am]+1,&ptap->apj);
847: apj = ptap->apj;
848: PetscFreeSpaceContiguous(&free_space,ptap->apj);
849: PetscLLCondensedDestroy_Scalable(lnk);
851: /* create and assemble symbolic parallel matrix C */
852: /*----------------------------------------------------*/
853: MatSetSizes(C,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);
854: MatSetBlockSizesFromMats(C,A,P);
855: MatGetType(A,&mtype);
856: MatSetType(C,mtype);
857: MatMPIAIJSetPreallocation(C,0,dnz,0,onz);
859: /* malloc apa for assembling C */
860: PetscCalloc1(apnz_max,&ptap->apa);
862: MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api);
863: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
864: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
865: MatPreallocateFinalize(dnz,onz);
867: C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ;
868: C->ops->productnumeric = MatProductNumeric_AB;
870: /* attach the supporting struct to C for reuse */
871: C->product->data = ptap;
872: C->product->destroy = MatDestroy_MPIAIJ_MatMatMult;
874: /* set MatInfo */
875: afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
876: if (afill < 1.0) afill = 1.0;
877: C->info.mallocs = nspacedouble;
878: C->info.fill_ratio_given = fill;
879: C->info.fill_ratio_needed = afill;
881: #if defined(PETSC_USE_INFO)
882: if (api[am]) {
883: PetscInfo3(C,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
884: PetscInfo1(C,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);
885: } else {
886: PetscInfo(C,"Empty matrix product\n");
887: }
888: #endif
889: return(0);
890: }
892: /* This function is needed for the seqMPI matrix-matrix multiplication. */
893: /* Three sorted input arrays are merged into one sorted output array; the */
894: /* size of the output array is returned in *size4. Duplicate entries only show up once. */
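/* For example, in1 = {1,4}, in2 = {2,4}, in3 = {4,7} produces out = {1,2,4,7} and *size4 = 4. */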
895: static void Merge3SortedArrays(PetscInt size1, PetscInt *in1,
896: PetscInt size2, PetscInt *in2,
897: PetscInt size3, PetscInt *in3,
898: PetscInt *size4, PetscInt *out)
899: {
900: int i = 0, j = 0, k = 0, l = 0;
902: /* Traverse all three arrays */
903: while (i<size1 && j<size2 && k<size3) {
904: if (in1[i] < in2[j] && in1[i] < in3[k]) {
905: out[l++] = in1[i++];
906: }
907: else if (in2[j] < in1[i] && in2[j] < in3[k]) {
908: out[l++] = in2[j++];
909: }
910: else if (in3[k] < in1[i] && in3[k] < in2[j]) {
911: out[l++] = in3[k++];
912: }
913: else if (in1[i] == in2[j] && in1[i] < in3[k]) {
914: out[l++] = in1[i];
915: i++, j++;
916: }
917: else if (in1[i] == in3[k] && in1[i] < in2[j]) {
918: out[l++] = in1[i];
919: i++, k++;
920: }
921: else if (in3[k] == in2[j] && in2[j] < in1[i]) {
922: out[l++] = in2[j];
923: k++, j++;
924: }
925: else if (in1[i] == in2[j] && in1[i] == in3[k]) {
926: out[l++] = in1[i];
927: i++, j++, k++;
928: }
929: }
931: /* Traverse two remaining arrays */
932: while (i<size1 && j<size2) {
933: if (in1[i] < in2[j]) {
934: out[l++] = in1[i++];
935: }
936: else if (in1[i] > in2[j]) {
937: out[l++] = in2[j++];
938: }
939: else {
940: out[l++] = in1[i];
941: i++, j++;
942: }
943: }
945: while (i<size1 && k<size3) {
946: if (in1[i] < in3[k]) {
947: out[l++] = in1[i++];
948: }
949: else if (in1[i] > in3[k]) {
950: out[l++] = in3[k++];
951: }
952: else {
953: out[l++] = in1[i];
954: i++, k++;
955: }
956: }
958: while (k<size3 && j<size2) {
959: if (in3[k] < in2[j]) {
960: out[l++] = in3[k++];
961: }
962: else if (in3[k] > in2[j]) {
963: out[l++] = in2[j++];
964: }
965: else {
966: out[l++] = in3[k];
967: k++, j++;
968: }
969: }
971: /* Traverse one remaining array */
972: while (i<size1) out[l++] = in1[i++];
973: while (j<size2) out[l++] = in2[j++];
974: while (k<size3) out[l++] = in3[k++];
976: *size4 = l;
977: }
979: /* This matrix-matrix multiplication algorithm divides the multiplication into three multiplications and */
980: /* adds up the products. Two of these three multiplications are performed with existing (sequential) */
981: /* matrix-matrix multiplications. */
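/* In terms of the stored blocks, the locally owned rows of C = A*P are obtained as
       C_loc = Ad*Pd + Ad*Po + Ao*P_oth,
   where Ad/Ao are the diagonal/off-diagonal blocks of A, Pd/Po those of the locally owned
   rows of P, and P_oth holds the remote rows of P needed by Ao.  Ad*Pd and Ao*P_oth reuse the
   sequential symbolic kernels; Ad*Po is handled directly with the condensed linked list. */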
982: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(Mat A, Mat P, PetscReal fill, Mat C)
983: {
984: PetscErrorCode ierr;
985: MPI_Comm comm;
986: PetscMPIInt size;
987: Mat_APMPI *ptap;
988: PetscFreeSpaceList free_space_diag=NULL, current_space=NULL;
989: Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data;
990: Mat_SeqAIJ *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc;
991: Mat_MPIAIJ *p =(Mat_MPIAIJ*)P->data;
992: Mat_SeqAIJ *adpd_seq, *p_off, *aopoth_seq;
993: PetscInt adponz, adpdnz;
994: PetscInt *pi_loc,*dnz,*onz;
995: PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,rstart=A->rmap->rstart;
996: PetscInt *lnk,i, i1=0,pnz,row,*adpoi,*adpoj, *api, *adpoJ, *aopJ, *apJ,*Jptr, aopnz, nspacedouble=0,j,nzi,
997: *apj,apnz, *adpdi, *adpdj, *adpdJ, *poff_i, *poff_j, *j_temp, *aopothi, *aopothj;
998: PetscInt am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n, p_colstart, p_colend;
999: PetscBT lnkbt;
1000: PetscReal afill;
1001: PetscMPIInt rank;
1002: Mat adpd, aopoth;
1003: MatType mtype;
1004: const char *prefix;
1007: MatCheckProduct(C,4);
1008: if (C->product->data) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Extra product struct not empty");
1009: PetscObjectGetComm((PetscObject)A,&comm);
1010: MPI_Comm_size(comm,&size);
1011: MPI_Comm_rank(comm, &rank);
1012: MatGetOwnershipRangeColumn(P, &p_colstart, &p_colend);
1014: /* create struct Mat_APMPI and attach it to C later */
1015: PetscNew(&ptap);
1017: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
1018: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
1020: /* get P_loc by taking all local rows of P */
1021: MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);
1024: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
1025: pi_loc = p_loc->i;
1027: /* Allocate memory for the i arrays of the matrices A*P, A_diag*P_off and A_offd * P */
1028: PetscMalloc1(am+2,&api);
1029: PetscMalloc1(am+2,&adpoi);
1031: adpoi[0] = 0;
1032: ptap->api = api;
1033: api[0] = 0;
1035: /* create and initialize a linked list, will be used for both A_diag * P_loc_off and A_offd * P_oth */
1036: PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);
1037: MatPreallocateInitialize(comm,am,pn,dnz,onz);
1039: /* Symbolic calc of A_loc_diag * P_loc_diag */
1040: MatGetOptionsPrefix(A,&prefix);
1041: MatProductCreate(a->A,p->A,NULL,&adpd);
1042: MatGetOptionsPrefix(A,&prefix);
1043: MatSetOptionsPrefix(adpd,prefix);
1044: MatAppendOptionsPrefix(adpd,"inner_diag_");
1046: MatProductSetType(adpd,MATPRODUCT_AB);
1047: MatProductSetAlgorithm(adpd,"sorted");
1048: MatProductSetFill(adpd,fill);
1049: MatProductSetFromOptions(adpd);
1050: MatProductSymbolic(adpd);
1052: adpd_seq = (Mat_SeqAIJ*)((adpd)->data);
1053: adpdi = adpd_seq->i; adpdj = adpd_seq->j;
1054: p_off = (Mat_SeqAIJ*)((p->B)->data);
1055: poff_i = p_off->i; poff_j = p_off->j;
1057: /* j_temp stores indices of a result row before they are added to the linked list */
1058: PetscMalloc1(pN+2,&j_temp);
1061: /* Symbolic calc of the A_diag * p_loc_off */
1062: /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
1063: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space_diag);
1064: current_space = free_space_diag;
1066: for (i=0; i<am; i++) {
1067: /* A_diag * P_loc_off */
1068: nzi = adi[i+1] - adi[i];
1069: for (j=0; j<nzi; j++) {
1070: row = *adj++;
1071: pnz = poff_i[row+1] - poff_i[row];
1072: Jptr = poff_j + poff_i[row];
1073: for (i1 = 0; i1 < pnz; i1++) {
1074: j_temp[i1] = p->garray[Jptr[i1]];
1075: }
1076: /* add non-zero cols of P into the sorted linked list lnk */
1077: PetscLLCondensedAddSorted(pnz,j_temp,lnk,lnkbt);
1078: }
1080: adponz = lnk[0];
1081: adpoi[i+1] = adpoi[i] + adponz;
1083: /* if free space is not available, double the total space in the list */
1084: if (current_space->local_remaining<adponz) {
1085: PetscFreeSpaceGet(PetscIntSumTruncate(adponz,current_space->total_array_size),&current_space);
1086: nspacedouble++;
1087: }
1089: /* Copy data into free space, then initialize lnk */
1090: PetscLLCondensedClean(pN,adponz,current_space->array,lnk,lnkbt);
1092: current_space->array += adponz;
1093: current_space->local_used += adponz;
1094: current_space->local_remaining -= adponz;
1095: }
1097: /* Symbolic calc of A_off * P_oth */
1098: MatSetOptionsPrefix(a->B,prefix);
1099: MatAppendOptionsPrefix(a->B,"inner_offdiag_");
1100: MatCreate(PETSC_COMM_SELF,&aopoth);
1101: MatMatMultSymbolic_SeqAIJ_SeqAIJ(a->B, ptap->P_oth, fill, aopoth);
1102: aopoth_seq = (Mat_SeqAIJ*)((aopoth)->data);
1103: aopothi = aopoth_seq->i; aopothj = aopoth_seq->j;
1105: /* Allocate space for apj, adpj, aopj, ... */
1106: /* destroy lists of free space and other temporary array(s) */
1108: PetscMalloc1(aopothi[am] + adpoi[am] + adpdi[am]+2, &ptap->apj);
1109: PetscMalloc1(adpoi[am]+2, &adpoj);
1111: /* Copy from linked list to j-array */
1112: PetscFreeSpaceContiguous(&free_space_diag,adpoj);
1113: PetscLLDestroy(lnk,lnkbt);
1115: adpoJ = adpoj;
1116: adpdJ = adpdj;
1117: aopJ = aopothj;
1118: apj = ptap->apj;
1119: apJ = apj; /* still empty */
1121: /* Merge j-arrays of A_off * P, A_diag * P_loc_off, and */
1122: /* A_diag * P_loc_diag to get A*P */
1123: for (i = 0; i < am; i++) {
1124: aopnz = aopothi[i+1] - aopothi[i];
1125: adponz = adpoi[i+1] - adpoi[i];
1126: adpdnz = adpdi[i+1] - adpdi[i];
1128: /* Correct indices from A_diag*P_diag */
1129: for (i1 = 0; i1 < adpdnz; i1++) {
1130: adpdJ[i1] += p_colstart;
1131: }
1132: /* Merge j-arrays of A_diag * P_loc_off and A_diag * P_loc_diag and A_off * P_oth */
1133: Merge3SortedArrays(adponz, adpoJ, adpdnz, adpdJ, aopnz, aopJ, &apnz, apJ);
1134: MatPreallocateSet(i+rstart, apnz, apJ, dnz, onz);
1136: aopJ += aopnz;
1137: adpoJ += adponz;
1138: adpdJ += adpdnz;
1139: apJ += apnz;
1140: api[i+1] = api[i] + apnz;
1141: }
1143: /* malloc apa to store dense row A[i,:]*P */
1144: PetscCalloc1(pN+2,&ptap->apa);
1146: /* create and assemble symbolic parallel matrix C */
1147: MatSetSizes(C,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);
1148: MatSetBlockSizesFromMats(C,A,P);
1149: MatGetType(A,&mtype);
1150: MatSetType(C,mtype);
1151: MatMPIAIJSetPreallocation(C,0,dnz,0,onz);
1154: MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api);
1155: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
1156: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
1157: MatPreallocateFinalize(dnz,onz);
1160: C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
1161: C->ops->productnumeric = MatProductNumeric_AB;
1163: /* attach the supporting struct to C for reuse */
1164: C->product->data = ptap;
1165: C->product->destroy = MatDestroy_MPIAIJ_MatMatMult;
1167: /* set MatInfo */
1168: afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
1169: if (afill < 1.0) afill = 1.0;
1170: C->info.mallocs = nspacedouble;
1171: C->info.fill_ratio_given = fill;
1172: C->info.fill_ratio_needed = afill;
1174: #if defined(PETSC_USE_INFO)
1175: if (api[am]) {
1176: PetscInfo3(C,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
1177: PetscInfo1(C,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);
1178: } else {
1179: PetscInfo(C,"Empty matrix product\n");
1180: }
1181: #endif
1183: MatDestroy(&aopoth);
1184: MatDestroy(&adpd);
1185: PetscFree(j_temp);
1186: PetscFree(adpoj);
1187: PetscFree(adpoi);
1188: return(0);
1189: }
1191: /*-------------------------------------------------------------------------*/
1192: /* This routine only works when scall=MAT_REUSE_MATRIX! */
1193: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult(Mat P,Mat A,Mat C)
1194: {
1196: Mat_APMPI *ptap;
1197: Mat Pt;
1200: MatCheckProduct(C,3);
1201: ptap = (Mat_APMPI*)C->product->data;
1202: if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be computed. Missing data");
1203: if (!ptap->Pt) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtA cannot be reused. Do not call MatProductClear()");
1205: Pt = ptap->Pt;
1206: MatTranspose(P,MAT_REUSE_MATRIX,&Pt);
1207: MatMatMultNumeric_MPIAIJ_MPIAIJ(Pt,A,C);
1208: return(0);
1209: }
1211: /* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ() */
1212: PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat P,Mat A,PetscReal fill,Mat C)
1213: {
1214: PetscErrorCode ierr;
1215: Mat_APMPI *ptap;
1216: Mat_MPIAIJ *p=(Mat_MPIAIJ*)P->data;
1217: MPI_Comm comm;
1218: PetscMPIInt size,rank;
1219: PetscFreeSpaceList free_space=NULL,current_space=NULL;
1220: PetscInt pn=P->cmap->n,aN=A->cmap->N,an=A->cmap->n;
1221: PetscInt *lnk,i,k,nsend;
1222: PetscBT lnkbt;
1223: PetscMPIInt tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
1224: PetscInt **buf_rj,**buf_ri,**buf_ri_k;
1225: PetscInt len,proc,*dnz,*onz,*owners,nzi;
1226: PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
1227: MPI_Request *swaits,*rwaits;
1228: MPI_Status *sstatus,rstatus;
1229: PetscLayout rowmap;
1230: PetscInt *owners_co,*coi,*coj; /* i and j array of (p->B)^T*A - used in the communication */
1231: PetscMPIInt *len_r,*id_r; /* lengths and source ranks of the messages to be received */
1232: PetscInt *Jptr,*prmap=p->garray,con,j,Crmax;
1233: Mat_SeqAIJ *a_loc,*c_loc,*c_oth;
1234: PetscTable ta;
1235: MatType mtype;
1236: const char *prefix;
1239: PetscObjectGetComm((PetscObject)A,&comm);
1240: MPI_Comm_size(comm,&size);
1241: MPI_Comm_rank(comm,&rank);
1243: /* create symbolic parallel matrix C */
1244: MatGetType(A,&mtype);
1245: MatSetType(C,mtype);
1247: C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
1249: /* create struct Mat_APMPI and attach it to C later */
1250: PetscNew(&ptap);
1251: ptap->reuse = MAT_INITIAL_MATRIX;
1253: /* (0) compute Rd = Pd^T, Ro = Po^T */
1254: /* --------------------------------- */
1255: MatTranspose_SeqAIJ(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);
1256: MatTranspose_SeqAIJ(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);
1258: /* (1) compute symbolic A_loc */
1259: /* ---------------------------*/
1260: MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&ptap->A_loc);
1262: /* (2-1) compute symbolic C_oth = Ro*A_loc */
1263: /* ------------------------------------ */
1264: MatGetOptionsPrefix(A,&prefix);
1265: MatSetOptionsPrefix(ptap->Ro,prefix);
1266: MatAppendOptionsPrefix(ptap->Ro,"inner_offdiag_");
1267: MatCreate(PETSC_COMM_SELF,&ptap->C_oth);
1268: MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->A_loc,fill,ptap->C_oth);
1270: /* (3) send coj of C_oth to other processors */
1271: /* ------------------------------------------ */
1272: /* determine row ownership */
1273: PetscLayoutCreate(comm,&rowmap);
1274: rowmap->n = pn;
1275: rowmap->bs = 1;
1276: PetscLayoutSetUp(rowmap);
1277: owners = rowmap->range;
1279: /* determine the number of messages to send, their lengths */
1280: PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);
1281: PetscArrayzero(len_s,size);
1282: PetscArrayzero(len_si,size);
1284: c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
1285: coi = c_oth->i; coj = c_oth->j;
1286: con = ptap->C_oth->rmap->n;
1287: proc = 0;
1288: for (i=0; i<con; i++) {
1289: while (prmap[i] >= owners[proc+1]) proc++;
1290: len_si[proc]++; /* num of rows in Co(=Pt*A) to be sent to [proc] */
1291: len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
1292: }
1294: len = 0; /* max length of buf_si[], see (4) */
1295: owners_co[0] = 0;
1296: nsend = 0;
1297: for (proc=0; proc<size; proc++) {
1298: owners_co[proc+1] = owners_co[proc] + len_si[proc];
1299: if (len_s[proc]) {
1300: nsend++;
1301: len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
1302: len += len_si[proc];
1303: }
1304: }
1306: /* determine the number and length of messages to receive for coi and coj */
1307: PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);
1308: PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);
1310: /* post the Irecv and Isend of coj */
1311: PetscCommGetNewTag(comm,&tagj);
1312: PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);
1313: PetscMalloc1(nsend+1,&swaits);
1314: for (proc=0, k=0; proc<size; proc++) {
1315: if (!len_s[proc]) continue;
1316: i = owners_co[proc];
1317: MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);
1318: k++;
1319: }
1321: /* (2-2) compute symbolic C_loc = Rd*A_loc */
1322: /* ---------------------------------------- */
1323: MatSetOptionsPrefix(ptap->Rd,prefix);
1324: MatAppendOptionsPrefix(ptap->Rd,"inner_diag_");
1325: MatCreate(PETSC_COMM_SELF,&ptap->C_loc);
1326: MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->A_loc,fill,ptap->C_loc);
1327: c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;
1329: /* receives of coj are complete */
1330: for (i=0; i<nrecv; i++) {
1331: MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);
1332: }
1333: PetscFree(rwaits);
1334: if (nsend) {MPI_Waitall(nsend,swaits,sstatus);}
1336: /* add received column indices into ta to update Crmax */
1337: a_loc = (Mat_SeqAIJ*)(ptap->A_loc)->data;
1339: /* create and initialize a linked list */
1340: PetscTableCreate(an,aN,&ta); /* for computing Crmax */
1341: MatRowMergeMax_SeqAIJ(a_loc,ptap->A_loc->rmap->N,ta);
1343: for (k=0; k<nrecv; k++) {/* k-th received message */
1344: Jptr = buf_rj[k];
1345: for (j=0; j<len_r[k]; j++) {
1346: PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);
1347: }
1348: }
1349: PetscTableGetCount(ta,&Crmax);
1350: PetscTableDestroy(&ta);
1352: /* (4) send and recv coi */
1353: /*-----------------------*/
1354: PetscCommGetNewTag(comm,&tagi);
1355: PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);
1356: PetscMalloc1(len+1,&buf_s);
1357: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
1358: for (proc=0,k=0; proc<size; proc++) {
1359: if (!len_s[proc]) continue;
1360: /* form outgoing message for i-structure:
1361: buf_si[0]: nrows to be sent
1362:      [1:nrows]: row index (local to the receiving process)
1363: [nrows+1:2*nrows+1]: i-structure index
1364: */
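/* (worked example, for illustration: two rows with receiver-local indices 2 and 5, carrying
   3 and 1 nonzeros, give buf_si = {2, 2,5, 0,3,4} of length 2*(nrows+1) = len_si[proc]) */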
1365: /*-------------------------------------------*/
1366: nrows = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
1367: buf_si_i = buf_si + nrows+1;
1368: buf_si[0] = nrows;
1369: buf_si_i[0] = 0;
1370: nrows = 0;
1371: for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
1372: nzi = coi[i+1] - coi[i];
1373: buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
1374: buf_si[nrows+1] = prmap[i] -owners[proc]; /* local row index */
1375: nrows++;
1376: }
1377: MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);
1378: k++;
1379: buf_si += len_si[proc];
1380: }
1381: for (i=0; i<nrecv; i++) {
1382: MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);
1383: }
1384: PetscFree(rwaits);
1385: if (nsend) {MPI_Waitall(nsend,swaits,sstatus);}
1387: PetscFree4(len_s,len_si,sstatus,owners_co);
1388: PetscFree(len_ri);
1389: PetscFree(swaits);
1390: PetscFree(buf_s);
1392: /* (5) compute the local portion of C */
1393: /* ------------------------------------------ */
1394: /* set initial free space to be Crmax, sufficient for holding nonzeros in each row of C */
1395: PetscFreeSpaceGet(Crmax,&free_space);
1396: current_space = free_space;
1398: PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);
1399: for (k=0; k<nrecv; k++) {
1400: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1401: nrows = *buf_ri_k[k];
1402: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */
1403: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
1404: }
1406: MatPreallocateInitialize(comm,pn,an,dnz,onz);
1407: PetscLLCondensedCreate(Crmax,aN,&lnk,&lnkbt);
1408: for (i=0; i<pn; i++) {
1409: /* add C_loc into C */
1410: nzi = c_loc->i[i+1] - c_loc->i[i];
1411: Jptr = c_loc->j + c_loc->i[i];
1412: PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);
1414: /* add received col data into lnk */
1415: for (k=0; k<nrecv; k++) { /* k-th received message */
1416: if (i == *nextrow[k]) { /* i-th row */
1417: nzi = *(nextci[k]+1) - *nextci[k];
1418: Jptr = buf_rj[k] + *nextci[k];
1419: PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);
1420: nextrow[k]++; nextci[k]++;
1421: }
1422: }
1423: nzi = lnk[0];
1425: /* copy data into free space, then initialize lnk */
1426: PetscLLCondensedClean(aN,nzi,current_space->array,lnk,lnkbt);
1427: MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);
1428: }
1429: PetscFree3(buf_ri_k,nextrow,nextci);
1430: PetscLLDestroy(lnk,lnkbt);
1431: PetscFreeSpaceDestroy(free_space);
1433: /* local sizes and preallocation */
1434: MatSetSizes(C,pn,an,PETSC_DETERMINE,PETSC_DETERMINE);
1435: if (P->cmap->bs > 0) {PetscLayoutSetBlockSize(C->rmap,P->cmap->bs);}
1436: if (A->cmap->bs > 0) {PetscLayoutSetBlockSize(C->cmap,A->cmap->bs);}
1437: MatMPIAIJSetPreallocation(C,0,dnz,0,onz);
1438: MatPreallocateFinalize(dnz,onz);
1440: /* members in merge */
1441: PetscFree(id_r);
1442: PetscFree(len_r);
1443: PetscFree(buf_ri[0]);
1444: PetscFree(buf_ri);
1445: PetscFree(buf_rj[0]);
1446: PetscFree(buf_rj);
1447: PetscLayoutDestroy(&rowmap);
1449: /* attach the supporting struct to C for reuse */
1450: C->product->data = ptap;
1451: C->product->destroy = MatDestroy_MPIAIJ_PtAP;
1453: /* C is not ready for use - assembly will be done by MatProductNumeric() */
1454: C->assembled = PETSC_FALSE;
1455: return(0);
1456: }
1458: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat P,Mat A,Mat C)
1459: {
1460: PetscErrorCode ierr;
1461: Mat_MPIAIJ *p=(Mat_MPIAIJ*)P->data;
1462: Mat_SeqAIJ *c_seq;
1463: Mat_APMPI *ptap;
1464: Mat A_loc,C_loc,C_oth;
1465: PetscInt i,rstart,rend,cm,ncols,row;
1466: const PetscInt *cols;
1467: const PetscScalar *vals;
1470: MatCheckProduct(C,3);
1471: ptap = (Mat_APMPI*)C->product->data;
1472: if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be computed. Missing data");
1473: if (!ptap->A_loc) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtA cannot be reused. Do not call MatProductClear()");
1474: MatZeroEntries(C);
1476: if (ptap->reuse == MAT_REUSE_MATRIX) {
1477: /* These matrices are obtained in MatTransposeMatMultSymbolic() */
1478: /* 1) get R = Pd^T, Ro = Po^T */
1479: /*----------------------------*/
1480: MatTranspose_SeqAIJ(p->A,MAT_REUSE_MATRIX,&ptap->Rd);
1481: MatTranspose_SeqAIJ(p->B,MAT_REUSE_MATRIX,&ptap->Ro);
1483: /* 2) compute numeric A_loc */
1484: /*--------------------------*/
1485: MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&ptap->A_loc);
1486: }
1488: /* 3) C_loc = Rd*A_loc, C_oth = Ro*A_loc */
1489: A_loc = ptap->A_loc;
1490: ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,A_loc,ptap->C_loc);
1491: ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,A_loc,ptap->C_oth);
1492: C_loc = ptap->C_loc;
1493: C_oth = ptap->C_oth;
1495: /* add C_loc and Co to C */
1496: MatGetOwnershipRange(C,&rstart,&rend);
1498: /* C_loc -> C */
1499: cm = C_loc->rmap->N;
1500: c_seq = (Mat_SeqAIJ*)C_loc->data;
1501: cols = c_seq->j;
1502: vals = c_seq->a;
1503: for (i=0; i<cm; i++) {
1504: ncols = c_seq->i[i+1] - c_seq->i[i];
1505: row = rstart + i;
1506: MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);
1507: cols += ncols; vals += ncols;
1508: }
1510: /* Co -> C, off-processor part */
1511: cm = C_oth->rmap->N;
1512: c_seq = (Mat_SeqAIJ*)C_oth->data;
1513: cols = c_seq->j;
1514: vals = c_seq->a;
1515: for (i=0; i<cm; i++) {
1516: ncols = c_seq->i[i+1] - c_seq->i[i];
1517: row = p->garray[i];
1518: MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);
1519: cols += ncols; vals += ncols;
1520: }
1521: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
1522: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
1524: ptap->reuse = MAT_REUSE_MATRIX;
1525: return(0);
1526: }
1528: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat P,Mat A,Mat C)
1529: {
1530: PetscErrorCode ierr;
1531: Mat_Merge_SeqsToMPI *merge;
1532: Mat_MPIAIJ *p =(Mat_MPIAIJ*)P->data;
1533: Mat_SeqAIJ *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
1534: Mat_APMPI *ptap;
1535: PetscInt *adj;
1536: PetscInt i,j,k,anz,pnz,row,*cj,nexta;
1537: MatScalar *ada,*ca,valtmp;
1538: PetscInt am=A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
1539: MPI_Comm comm;
1540: PetscMPIInt size,rank,taga,*len_s;
1541: PetscInt *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
1542: PetscInt **buf_ri,**buf_rj;
1543: PetscInt cnz=0,*bj_i,*bi,*bj,bnz,nextcj; /* bi,bj,ba: local array of C(mpi mat) */
1544: MPI_Request *s_waits,*r_waits;
1545: MPI_Status *status;
1546: MatScalar **abuf_r,*ba_i,*pA,*coa,*ba;
1547: PetscInt *ai,*aj,*coi,*coj,*poJ,*pdJ;
1548: Mat A_loc;
1549: Mat_SeqAIJ *a_loc;
1552: MatCheckProduct(C,3);
1553: ptap = (Mat_APMPI*)C->product->data;
1554: if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be computed. Missing data");
1555: if (!ptap->A_loc) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtA cannot be reused. Do not call MatProductClear()");
1556: PetscObjectGetComm((PetscObject)C,&comm);
1557: MPI_Comm_size(comm,&size);
1558: MPI_Comm_rank(comm,&rank);
1560: merge = ptap->merge;
1562: /* 2) compute numeric C_seq = P_loc^T*A_loc */
1563: /*------------------------------------------*/
1564: /* get data from symbolic products */
1565: coi = merge->coi; coj = merge->coj;
1566: PetscCalloc1(coi[pon]+1,&coa);
1567: bi = merge->bi; bj = merge->bj;
1568: owners = merge->rowmap->range;
1569: PetscCalloc1(bi[cm]+1,&ba);
1571: /* get A_loc by taking all local rows of A */
1572: A_loc = ptap->A_loc;
1573: MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);
1574: a_loc = (Mat_SeqAIJ*)(A_loc)->data;
1575: ai = a_loc->i;
1576: aj = a_loc->j;
1578: for (i=0; i<am; i++) {
1579: anz = ai[i+1] - ai[i];
1580: adj = aj + ai[i];
1581: ada = a_loc->a + ai[i];
1583: /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */
1584: /*-------------------------------------------------------------*/
1585: /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */
1586: pnz = po->i[i+1] - po->i[i];
1587: poJ = po->j + po->i[i];
1588: pA = po->a + po->i[i];
1589: for (j=0; j<pnz; j++) {
1590: row = poJ[j];
1591: cj = coj + coi[row];
1592: ca = coa + coi[row];
1593: /* perform sparse axpy */
1594: nexta = 0;
1595: valtmp = pA[j];
1596: for (k=0; nexta<anz; k++) {
1597: if (cj[k] == adj[nexta]) {
1598: ca[k] += valtmp*ada[nexta];
1599: nexta++;
1600: }
1601: }
1602: PetscLogFlops(2.0*anz);
1603: }
1605: /* put the value into Cd (diagonal part) */
1606: pnz = pd->i[i+1] - pd->i[i];
1607: pdJ = pd->j + pd->i[i];
1608: pA = pd->a + pd->i[i];
1609: for (j=0; j<pnz; j++) {
1610: row = pdJ[j];
1611: cj = bj + bi[row];
1612: ca = ba + bi[row];
1613: /* perform sparse axpy */
1614: nexta = 0;
1615: valtmp = pA[j];
1616: for (k=0; nexta<anz; k++) {
1617: if (cj[k] == adj[nexta]) {
1618: ca[k] += valtmp*ada[nexta];
1619: nexta++;
1620: }
1621: }
1622: PetscLogFlops(2.0*anz);
1623: }
1624: }
1626: /* 3) send and recv matrix values coa */
1627: /*------------------------------------*/
1628: buf_ri = merge->buf_ri;
1629: buf_rj = merge->buf_rj;
1630: len_s = merge->len_s;
1631: PetscCommGetNewTag(comm,&taga);
1632: PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);
1634: PetscMalloc2(merge->nsend+1,&s_waits,size,&status);
1635: for (proc=0,k=0; proc<size; proc++) {
1636: if (!len_s[proc]) continue;
1637: i = merge->owners_co[proc];
1638: MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);
1639: k++;
1640: }
1641: if (merge->nrecv) {MPI_Waitall(merge->nrecv,r_waits,status);}
1642: if (merge->nsend) {MPI_Waitall(merge->nsend,s_waits,status);}
1644: PetscFree2(s_waits,status);
1645: PetscFree(r_waits);
1646: PetscFree(coa);
1648: /* 4) insert local Cseq and received values into Cmpi */
1649: /*----------------------------------------------------*/
1650: PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);
1651: for (k=0; k<merge->nrecv; k++) {
1652: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
1653: nrows = *(buf_ri_k[k]);
1654: nextrow[k] = buf_ri_k[k]+1; /* next row number of k-th received i-structure */
1655: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
1656: }
1658: for (i=0; i<cm; i++) {
1659: row = owners[rank] + i; /* global row index of C_seq */
1660: bj_i = bj + bi[i]; /* col indices of the i-th row of C */
1661: ba_i = ba + bi[i];
1662: bnz = bi[i+1] - bi[i];
1663: /* add received vals into ba */
1664: for (k=0; k<merge->nrecv; k++) { /* k-th received message */
1665: /* i-th row */
1666: if (i == *nextrow[k]) {
1667: cnz = *(nextci[k]+1) - *nextci[k];
1668: cj = buf_rj[k] + *(nextci[k]);
1669: ca = abuf_r[k] + *(nextci[k]);
1670: nextcj = 0;
1671: for (j=0; nextcj<cnz; j++) {
1672: if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
1673: ba_i[j] += ca[nextcj++];
1674: }
1675: }
1676: nextrow[k]++; nextci[k]++;
1677: PetscLogFlops(2.0*cnz);
1678: }
1679: }
1680: MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);
1681: }
1682: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
1683: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
1685: PetscFree(ba);
1686: PetscFree(abuf_r[0]);
1687: PetscFree(abuf_r);
1688: PetscFree3(buf_ri_k,nextrow,nextci);
1689: return(0);
1690: }
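
Both sparse-axpy loops above rely on the column indices cj of the target row being a sorted superset of the column indices adj of A_loc[i,:], so one forward scan per row suffices. A small standalone sketch of that merge (illustrative only; the function and argument names are not from this file, and a bound on k is added for safety):

#include <petscsys.h>

/* Add val*arow[] into crow[]: acols[] (length anz) must be a sorted subset of
   ccols[] (length cnz), which is the invariant guaranteed by the symbolic phase. */
void SparseRowAXPY(PetscInt anz,const PetscInt *acols,const PetscScalar *arow,
                   PetscScalar val,PetscInt cnz,const PetscInt *ccols,PetscScalar *crow)
{
  PetscInt k,nexta = 0;

  for (k=0; k<cnz && nexta<anz; k++) {
    if (ccols[k] == acols[nexta]) {
      crow[k] += val*arow[nexta];
      nexta++;
    }
  }
}
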
1692: PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat P,Mat A,PetscReal fill,Mat C)
1693: {
1694: PetscErrorCode ierr;
1695: Mat A_loc,POt,PDt;
1696: Mat_APMPI *ptap;
1697: PetscFreeSpaceList free_space=NULL,current_space=NULL;
1698: Mat_MPIAIJ *p=(Mat_MPIAIJ*)P->data,*a=(Mat_MPIAIJ*)A->data;
1699: PetscInt *pdti,*pdtj,*poti,*potj,*ptJ;
1700: PetscInt nnz;
1701: PetscInt *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
1702: PetscInt am =A->rmap->n,pn=P->cmap->n;
1703: MPI_Comm comm;
1704: PetscMPIInt size,rank,tagi,tagj,*len_si,*len_s,*len_ri;
1705: PetscInt **buf_rj,**buf_ri,**buf_ri_k;
1706: PetscInt len,proc,*dnz,*onz,*owners;
1707: PetscInt nzi,*bi,*bj;
1708: PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
1709: MPI_Request *swaits,*rwaits;
1710: MPI_Status *sstatus,rstatus;
1711: Mat_Merge_SeqsToMPI *merge;
1712: PetscInt *ai,*aj,*Jptr,anz,*prmap=p->garray,pon,nspacedouble=0,j;
1713: PetscReal afill =1.0,afill_tmp;
1714: PetscInt rstart = P->cmap->rstart,rmax,aN=A->cmap->N,Armax;
1715: Mat_SeqAIJ *a_loc,*pdt,*pot;
1716: PetscTable ta;
1717: MatType mtype;
1720: PetscObjectGetComm((PetscObject)A,&comm);
1721: /* check if matrix local sizes are compatible */
1722: if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D, %D) != P (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
1724: MPI_Comm_size(comm,&size);
1725: MPI_Comm_rank(comm,&rank);
1727: /* create struct Mat_APMPI and attach it to C later */
1728: PetscNew(&ptap);
1730: /* get A_loc by taking all local rows of A */
1731: MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);
1733: ptap->A_loc = A_loc;
1734: a_loc = (Mat_SeqAIJ*)(A_loc)->data;
1735: ai = a_loc->i;
1736: aj = a_loc->j;
1738: /* determine symbolic Co=(p->B)^T*A - send to others */
1739: /*----------------------------------------------------*/
1740: MatTransposeSymbolic_SeqAIJ(p->A,&PDt);
1741: pdt = (Mat_SeqAIJ*)PDt->data;
1742: pdti = pdt->i; pdtj = pdt->j;
1744: MatTransposeSymbolic_SeqAIJ(p->B,&POt);
1745: pot = (Mat_SeqAIJ*)POt->data;
1746: poti = pot->i; potj = pot->j;
1748: /* then, compute symbolic Co = (p->B)^T*A */
1749: pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors
1750: >= (num of nonzero rows of C_seq) - pn */
1751: PetscMalloc1(pon+1,&coi);
1752: coi[0] = 0;
1754: /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */
1755: nnz = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(poti[pon],ai[am]));
1756: PetscFreeSpaceGet(nnz,&free_space);
1757: current_space = free_space;
1759: /* create and initialize a linked list */
1760: PetscTableCreate(A->cmap->n + a->B->cmap->N,aN,&ta);
1761: MatRowMergeMax_SeqAIJ(a_loc,am,ta);
1762: PetscTableGetCount(ta,&Armax);
1764: PetscLLCondensedCreate_Scalable(Armax,&lnk);
1766: for (i=0; i<pon; i++) {
1767: pnz = poti[i+1] - poti[i];
1768: ptJ = potj + poti[i];
1769: for (j=0; j<pnz; j++) {
1770: row = ptJ[j]; /* row of A_loc == col of Pot */
1771: anz = ai[row+1] - ai[row];
1772: Jptr = aj + ai[row];
1773: /* add non-zero cols of A_loc[row,:] into the sorted linked list lnk */
1774: PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);
1775: }
1776: nnz = lnk[0];
1778: /* If free space is not available, double the total space in the list */
1779: if (current_space->local_remaining<nnz) {
1780: PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);
1781: nspacedouble++;
1782: }
1784: /* copy data into free space, then initialize lnk */
1785: PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);
1787: current_space->array += nnz;
1788: current_space->local_used += nnz;
1789: current_space->local_remaining -= nnz;
1791: coi[i+1] = coi[i] + nnz;
1792: }
1794: PetscMalloc1(coi[pon]+1,&coj);
1795: PetscFreeSpaceContiguous(&free_space,coj);
1796: PetscLLCondensedDestroy_Scalable(lnk); /* must destroy to get a new one for C */
1798: afill_tmp = (PetscReal)coi[pon]/(poti[pon] + ai[am]+1);
1799: if (afill_tmp > afill) afill = afill_tmp;
1801: /* send j-array (coj) of Co to other processors */
1802: /*----------------------------------------------*/
1803: /* determine row ownership */
1804: PetscNew(&merge);
1805: PetscLayoutCreate(comm,&merge->rowmap);
1807: merge->rowmap->n = pn;
1808: merge->rowmap->bs = 1;
1810: PetscLayoutSetUp(merge->rowmap);
1811: owners = merge->rowmap->range;
1813: /* determine the number of messages to send, their lengths */
1814: PetscCalloc1(size,&len_si);
1815: PetscCalloc1(size,&merge->len_s);
1817: len_s = merge->len_s;
1818: merge->nsend = 0;
1820: PetscMalloc1(size+2,&owners_co);
1822: proc = 0;
1823: for (i=0; i<pon; i++) {
1824: while (prmap[i] >= owners[proc+1]) proc++;
1825: len_si[proc]++; /* num of rows in Co to be sent to [proc] */
1826: len_s[proc] += coi[i+1] - coi[i];
1827: }
1829: len = 0; /* max length of buf_si[] */
1830: owners_co[0] = 0;
1831: for (proc=0; proc<size; proc++) {
1832: owners_co[proc+1] = owners_co[proc] + len_si[proc];
1833: if (len_si[proc]) {
1834: merge->nsend++;
1835: len_si[proc] = 2*(len_si[proc] + 1);
1836: len += len_si[proc];
1837: }
1838: }
1840: /* determine the number and length of messages to receive for coi and coj */
1841: PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);
1842: PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);
1844: /* post the Irecv and Isend of coj */
1845: PetscCommGetNewTag(comm,&tagj);
1846: PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);
1847: PetscMalloc1(merge->nsend+1,&swaits);
1848: for (proc=0, k=0; proc<size; proc++) {
1849: if (!len_s[proc]) continue;
1850: i = owners_co[proc];
1851: MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);
1852: k++;
1853: }
1855: /* receives and sends of coj are complete */
1856: PetscMalloc1(size,&sstatus);
1857: for (i=0; i<merge->nrecv; i++) {
1858: PetscMPIInt icompleted;
1859: MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);
1860: }
1861: PetscFree(rwaits);
1862: if (merge->nsend) {MPI_Waitall(merge->nsend,swaits,sstatus);}
1864: /* add received column indices into table to update Armax */
1865: /* Armax can be as large as aN if a P[row,:] is dense, see src/ksp/ksp/tutorials/ex56.c! */
1866: for (k=0; k<merge->nrecv; k++) {/* k-th received message */
1867: Jptr = buf_rj[k];
1868: for (j=0; j<merge->len_r[k]; j++) {
1869: PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);
1870: }
1871: }
1872: PetscTableGetCount(ta,&Armax);
1873: /* printf("Armax %d, an %d + Bn %d = %d, aN %d\n",Armax,A->cmap->n,a->B->cmap->N,A->cmap->n+a->B->cmap->N,aN); */
1875: /* send and recv coi */
1876: /*-------------------*/
1877: PetscCommGetNewTag(comm,&tagi);
1878: PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);
1879: PetscMalloc1(len+1,&buf_s);
1880: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
1881: for (proc=0,k=0; proc<size; proc++) {
1882: if (!len_s[proc]) continue;
1883: /* form outgoing message for i-structure:
1884: buf_si[0]: nrows to be sent
1885: [1:nrows]: row index (local to the destination process)
1886: [nrows+1:2*nrows+1]: i-structure index
1887: */
1888: /*-------------------------------------------*/
1889: nrows = len_si[proc]/2 - 1;
1890: buf_si_i = buf_si + nrows+1;
1891: buf_si[0] = nrows;
1892: buf_si_i[0] = 0;
1893: nrows = 0;
1894: for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
1895: nzi = coi[i+1] - coi[i];
1896: buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
1897: buf_si[nrows+1] = prmap[i] -owners[proc]; /* local row index */
1898: nrows++;
1899: }
1900: MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);
1901: k++;
1902: buf_si += len_si[proc];
1903: }
1904: i = merge->nrecv;
1905: while (i--) {
1906: PetscMPIInt icompleted;
1907: MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);
1908: }
1909: PetscFree(rwaits);
1910: if (merge->nsend) {MPI_Waitall(merge->nsend,swaits,sstatus);}
1911: PetscFree(len_si);
1912: PetscFree(len_ri);
1913: PetscFree(swaits);
1914: PetscFree(sstatus);
1915: PetscFree(buf_s);
1917: /* compute the local portion of C (mpi mat) */
1918: /*------------------------------------------*/
1919: /* allocate bi array and free space for accumulating nonzero column info */
1920: PetscMalloc1(pn+1,&bi);
1921: bi[0] = 0;
1923: /* set initial free space to be fill*(nnz(P) + nnz(A)) */
1924: nnz = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(pdti[pn],PetscIntSumTruncate(poti[pon],ai[am])));
1925: PetscFreeSpaceGet(nnz,&free_space);
1926: current_space = free_space;
1928: PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);
1929: for (k=0; k<merge->nrecv; k++) {
1930: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
1931: nrows = *buf_ri_k[k];
1932: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th received i-structure */
1933: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
1934: }
1936: PetscLLCondensedCreate_Scalable(Armax,&lnk);
1937: MatPreallocateInitialize(comm,pn,A->cmap->n,dnz,onz);
1938: rmax = 0;
1939: for (i=0; i<pn; i++) {
1940: /* add the nonzero structure of pdt[i,:]*A into lnk */
1941: pnz = pdti[i+1] - pdti[i];
1942: ptJ = pdtj + pdti[i];
1943: for (j=0; j<pnz; j++) {
1944: row = ptJ[j]; /* row of A == col of Pt */
1945: anz = ai[row+1] - ai[row];
1946: Jptr = aj + ai[row];
1947: /* add non-zero cols of A[row,:] into the sorted linked list lnk */
1948: PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);
1949: }
1951: /* add received col data into lnk */
1952: for (k=0; k<merge->nrecv; k++) { /* k-th received message */
1953: if (i == *nextrow[k]) { /* i-th row */
1954: nzi = *(nextci[k]+1) - *nextci[k];
1955: Jptr = buf_rj[k] + *nextci[k];
1956: PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);
1957: nextrow[k]++; nextci[k]++;
1958: }
1959: }
1960: nnz = lnk[0];
1962: /* if free space is not available, make more free space */
1963: if (current_space->local_remaining<nnz) {
1964: PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);
1965: nspacedouble++;
1966: }
1967: /* copy data into free space, then initialize lnk */
1968: PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);
1969: MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);
1971: current_space->array += nnz;
1972: current_space->local_used += nnz;
1973: current_space->local_remaining -= nnz;
1975: bi[i+1] = bi[i] + nnz;
1976: if (nnz > rmax) rmax = nnz;
1977: }
1978: PetscFree3(buf_ri_k,nextrow,nextci);
1980: PetscMalloc1(bi[pn]+1,&bj);
1981: PetscFreeSpaceContiguous(&free_space,bj);
1982: afill_tmp = (PetscReal)bi[pn]/(pdti[pn] + poti[pon] + ai[am]+1);
1983: if (afill_tmp > afill) afill = afill_tmp;
1984: PetscLLCondensedDestroy_Scalable(lnk);
1985: PetscTableDestroy(&ta);
1987: MatDestroy(&POt);
1988: MatDestroy(&PDt);
1990: /* create symbolic parallel matrix C - it cannot be assembled in the numeric phase */
1991: /*-------------------------------------------------------------------------------*/
1992: MatSetSizes(C,pn,A->cmap->n,PETSC_DETERMINE,PETSC_DETERMINE);
1993: MatSetBlockSizes(C,PetscAbs(P->cmap->bs),PetscAbs(A->cmap->bs));
1994: MatGetType(A,&mtype);
1995: MatSetType(C,mtype);
1996: MatMPIAIJSetPreallocation(C,0,dnz,0,onz);
1997: MatPreallocateFinalize(dnz,onz);
1998: MatSetBlockSize(C,1);
1999: for (i=0; i<pn; i++) {
2000: row = i + rstart;
2001: nnz = bi[i+1] - bi[i];
2002: Jptr = bj + bi[i];
2003: MatSetValues(C,1,&row,nnz,Jptr,NULL,INSERT_VALUES);
2004: }
2005: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
2006: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
2007: merge->bi = bi;
2008: merge->bj = bj;
2009: merge->coi = coi;
2010: merge->coj = coj;
2011: merge->buf_ri = buf_ri;
2012: merge->buf_rj = buf_rj;
2013: merge->owners_co = owners_co;
2015: /* attach the supporting struct to C for reuse */
2016: C->product->data = ptap;
2017: C->product->destroy = MatDestroy_MPIAIJ_PtAP;
2018: ptap->merge = merge;
2020: C->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ;
2022: #if defined(PETSC_USE_INFO)
2023: if (bi[pn] != 0) {
2024: PetscInfo3(C,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
2025: PetscInfo1(C,"Use MatTransposeMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);
2026: } else {
2027: PetscInfo(C,"Empty matrix product\n");
2028: }
2029: #endif
2030: return(0);
2031: }
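
From user code, this symbolic routine and its numeric counterpart are normally reached through the MatProduct interface. A minimal sketch (assumes P and A are assembled MPIAIJ matrices with matching row layouts; the helper name ComputePtA is illustrative):

#include <petscmat.h>

/* C = P^T * A through the MatProduct interface */
PetscErrorCode ComputePtA(Mat P,Mat A,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  ierr = MatProductCreate(P,A,NULL,C);CHKERRQ(ierr);
  ierr = MatProductSetType(*C,MATPRODUCT_AtB);CHKERRQ(ierr);
  ierr = MatProductSetAlgorithm(*C,"scalable");CHKERRQ(ierr);  /* for MPIAIJ operands this selects the routine above (see the AtB dispatch below) */
  ierr = MatProductSetFill(*C,PETSC_DEFAULT);CHKERRQ(ierr);
  ierr = MatProductSetFromOptions(*C);CHKERRQ(ierr);
  ierr = MatProductSymbolic(*C);CHKERRQ(ierr);   /* builds the merge/ptap data attached to C */
  ierr = MatProductNumeric(*C);CHKERRQ(ierr);    /* can be called again after the values of A or P change */
  PetscFunctionReturn(0);
}
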
2033: /* ---------------------------------------------------------------- */
2034: static PetscErrorCode MatProductSymbolic_AtB_MPIAIJ_MPIAIJ(Mat C)
2035: {
2037: Mat_Product *product = C->product;
2038: Mat A=product->A,B=product->B;
2039: PetscReal fill=product->fill;
2040: PetscBool flg;
2043: /* scalable */
2044: PetscStrcmp(product->alg,"scalable",&flg);
2045: if (flg) {
2046: MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);
2047: goto next;
2048: }
2050: /* nonscalable */
2051: PetscStrcmp(product->alg,"nonscalable",&flg);
2052: if (flg) {
2053: MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A,B,fill,C);
2054: goto next;
2055: }
2057: /* matmatmult */
2058: PetscStrcmp(product->alg,"at*b",&flg);
2059: if (flg) {
2060: Mat At;
2061: Mat_APMPI *ptap;
2063: MatTranspose(A,MAT_INITIAL_MATRIX,&At);
2064: MatMatMultSymbolic_MPIAIJ_MPIAIJ(At,B,fill,C);
2065: ptap = (Mat_APMPI*)C->product->data;
2066: if (ptap) {
2067: ptap->Pt = At;
2068: C->product->destroy = MatDestroy_MPIAIJ_PtAP;
2069: }
2070: C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult;
2071: goto next;
2072: }
2074: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatProduct type is not supported");
2076: next:
2077: C->ops->productnumeric = MatProductNumeric_AtB;
2078: return(0);
2079: }
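
The "at*b" branch above transposes A explicitly, reuses the AB symbolic kernel, and stashes the transpose in ptap->Pt so repeated numeric products can reuse it. The same fallback, without the caching, can be written with public calls; a sketch (the helper name is illustrative):

#include <petscmat.h>

/* C = A^T * B via an explicit transpose, mirroring the "at*b" algorithm above */
PetscErrorCode AtBViaExplicitTranspose(Mat A,Mat B,Mat *C)
{
  Mat            At;
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  ierr = MatTranspose(A,MAT_INITIAL_MATRIX,&At);CHKERRQ(ierr);
  ierr = MatMatMult(At,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,C);CHKERRQ(ierr);
  ierr = MatDestroy(&At);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

Unlike the branch above, this sketch discards At, so each repeated product pays for a fresh transpose.
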
2081: /* ---------------------------------------------------------------- */
2082: /* Set options for MatMatMultxxx_MPIAIJ_MPIAIJ */
2083: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AB(Mat C)
2084: {
2086: Mat_Product *product = C->product;
2087: Mat A=product->A,B=product->B;
2088: #if defined(PETSC_HAVE_HYPRE)
2089: const char *algTypes[4] = {"scalable","nonscalable","seqmpi","hypre"};
2090: PetscInt nalg = 4;
2091: #else
2092: const char *algTypes[3] = {"scalable","nonscalable","seqmpi"};
2093: PetscInt nalg = 3;
2094: #endif
2095: PetscInt alg = 1; /* set nonscalable algorithm as default */
2096: PetscBool flg;
2097: MPI_Comm comm;
2100: /* Check matrix local sizes */
2101: PetscObjectGetComm((PetscObject)C,&comm);
2102: if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
2104: /* Set "nonscalable" as default algorithm */
2105: PetscStrcmp(C->product->alg,"default",&flg);
2106: if (flg) {
2107: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2109: /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2110: if (B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
2111: MatInfo Ainfo,Binfo;
2112: PetscInt nz_local;
2113: PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;
2115: MatGetInfo(A,MAT_LOCAL,&Ainfo);
2116: MatGetInfo(B,MAT_LOCAL,&Binfo);
2117: nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);
2119: if (B->cmap->N > product->fill*nz_local) alg_scalable_loc = PETSC_TRUE;
2120: MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);
2122: if (alg_scalable) {
2123: alg = 0; /* the scalable algorithm can be about 50% slower than the nonscalable algorithm */
2124: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2125: PetscInfo2(B,"Use scalable algorithm, BN %D, fill*nz_allocated %g\n",B->cmap->N,product->fill*nz_local);
2126: }
2127: }
2128: }
2130: /* Get runtime option */
2131: if (product->api_user) {
2132: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatMatMult","Mat");
2133: PetscOptionsEList("-matmatmult_via","Algorithmic approach","MatMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);
2134: PetscOptionsEnd();
2135: } else {
2136: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_AB","Mat");
2137: PetscOptionsEList("-matproduct_ab_via","Algorithmic approach","MatMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);
2138: PetscOptionsEnd();
2139: }
2140: if (flg) {
2141: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2142: }
2144: C->ops->productsymbolic = MatProductSymbolic_AB_MPIAIJ_MPIAIJ;
2145: return(0);
2146: }
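
The two option names registered above pick an entry of the same algTypes table at run time, e.g. -matmatmult_via scalable with the MatMatMult() API or -matproduct_ab_via seqmpi with the MatProduct API. A sketch of the call these options affect (the helper name is illustrative):

#include <petscmat.h>

/* C = A*B; the algorithm is taken from -matmatmult_via (or -matproduct_ab_via) if given */
PetscErrorCode ComputeAB(Mat A,Mat B,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  ierr = MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
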
2148: /* Set options for MatTransposeMatMultXXX_MPIAIJ_MPIAIJ */
2149: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AtB(Mat C)
2150: {
2152: Mat_Product *product = C->product;
2153: Mat A=product->A,B=product->B;
2154: const char *algTypes[3] = {"scalable","nonscalable","at*b"};
2155: PetscInt nalg = 3;
2156: PetscInt alg = 1; /* set default algorithm */
2157: PetscBool flg;
2158: MPI_Comm comm;
2161: /* Check matrix local sizes */
2162: PetscObjectGetComm((PetscObject)C,&comm);
2163: if (A->rmap->rstart != B->rmap->rstart || A->rmap->rend != B->rmap->rend) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D, %D) != B (%D,%D)",A->rmap->rstart,A->rmap->rend,B->rmap->rstart,B->rmap->rend);
2165: /* Set default algorithm */
2166: PetscStrcmp(C->product->alg,"default",&flg);
2167: if (flg) {
2168: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2169: }
2171: /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2172: if (alg && B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
2173: MatInfo Ainfo,Binfo;
2174: PetscInt nz_local;
2175: PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;
2177: MatGetInfo(A,MAT_LOCAL,&Ainfo);
2178: MatGetInfo(B,MAT_LOCAL,&Binfo);
2179: nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);
2181: if (B->cmap->N > product->fill*nz_local) alg_scalable_loc = PETSC_TRUE;
2182: MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);
2184: if (alg_scalable) {
2185: alg = 0; /* the scalable algorithm can be about 50% slower than the nonscalable algorithm */
2186: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2187: PetscInfo2(B,"Use scalable algorithm, BN %D, fill*nz_allocated %g\n",B->cmap->N,product->fill*nz_local);
2188: }
2189: }
2191: /* Get runtime option */
2192: if (product->api_user) {
2193: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatTransposeMatMult","Mat");
2194: PetscOptionsEList("-mattransposematmult_via","Algorithmic approach","MatTransposeMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);
2195: PetscOptionsEnd();
2196: } else {
2197: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_AtB","Mat");
2198: PetscOptionsEList("-matproduct_atb_via","Algorithmic approach","MatTransposeMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);
2199: PetscOptionsEnd();
2200: }
2201: if (flg) {
2202: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2203: }
2205: C->ops->productsymbolic = MatProductSymbolic_AtB_MPIAIJ_MPIAIJ;
2206: return(0);
2207: }
2209: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_PtAP(Mat C)
2210: {
2212: Mat_Product *product = C->product;
2213: Mat A=product->A,P=product->B;
2214: MPI_Comm comm;
2215: PetscBool flg;
2216: PetscInt alg=1; /* set default algorithm */
2217: #if !defined(PETSC_HAVE_HYPRE)
2218: const char *algTypes[4] = {"scalable","nonscalable","allatonce","allatonce_merged"};
2219: PetscInt nalg=4;
2220: #else
2221: const char *algTypes[5] = {"scalable","nonscalable","allatonce","allatonce_merged","hypre"};
2222: PetscInt nalg=5;
2223: #endif
2224: PetscInt pN=P->cmap->N;
2227: /* Check matrix local sizes */
2228: PetscObjectGetComm((PetscObject)C,&comm);
2229: if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
2230: if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
2232: /* Set "nonscalable" as default algorithm */
2233: PetscStrcmp(C->product->alg,"default",&flg);
2234: if (flg) {
2235: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2237: /* Switch to "scalable" as default if pN and the local nonzeros of A and P are large */
2238: if (pN > 100000) {
2239: MatInfo Ainfo,Pinfo;
2240: PetscInt nz_local;
2241: PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;
2243: MatGetInfo(A,MAT_LOCAL,&Ainfo);
2244: MatGetInfo(P,MAT_LOCAL,&Pinfo);
2245: nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);
2247: if (pN > product->fill*nz_local) alg_scalable_loc = PETSC_TRUE;
2248: MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);
2250: if (alg_scalable) {
2251: alg = 0; /* the scalable algorithm can be about 50% slower than the nonscalable algorithm */
2252: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2253: }
2254: }
2255: }
2257: /* Get runtime option */
2258: if (product->api_user) {
2259: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatPtAP","Mat");
2260: PetscOptionsEList("-matptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);
2261: PetscOptionsEnd();
2262: } else {
2263: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_PtAP","Mat");
2264: PetscOptionsEList("-matproduct_ptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);
2265: PetscOptionsEnd();
2266: }
2267: if (flg) {
2268: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2269: }
2271: C->ops->productsymbolic = MatProductSymbolic_PtAP_MPIAIJ_MPIAIJ;
2272: return(0);
2273: }
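
For PtAP the selection above is reachable both through MatPtAP() (option -matptap_via) and through the MatProduct API (option -matproduct_ptap_via). A minimal sketch (assumes A and P are assembled and conforming; the helper name and the fill estimate are placeholders):

#include <petscmat.h>

/* C = P^T * A * P, e.g. a Galerkin coarse operator */
PetscErrorCode GalerkinProduct(Mat A,Mat P,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,C);CHKERRQ(ierr);  /* fill estimate 2.0 is only a placeholder */
  PetscFunctionReturn(0);
}
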
2275: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_RARt(Mat C)
2276: {
2277: Mat_Product *product = C->product;
2278: Mat A = product->A,R=product->B;
2281: /* Check matrix local sizes */
2282: if (A->cmap->n != R->cmap->n || A->rmap->n != R->cmap->n) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A local (%D, %D), R local (%D,%D)",A->rmap->n,A->cmap->n,R->rmap->n,R->cmap->n);
2284: C->ops->productsymbolic = MatProductSymbolic_RARt_MPIAIJ_MPIAIJ;
2285: return(0);
2286: }
2288: /*
2289: Set options for ABC = A*B*C = A*(B*C); the algorithm for ABC must be chosen from the AB algorithms
2290: */
2291: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_ABC(Mat C)
2292: {
2294: Mat_Product *product = C->product;
2295: PetscBool flg = PETSC_FALSE;
2296: PetscInt alg = 1; /* default algorithm */
2297: const char *algTypes[3] = {"scalable","nonscalable","seqmpi"};
2298: PetscInt nalg = 3;
2301: /* Set default algorithm */
2302: PetscStrcmp(C->product->alg,"default",&flg);
2303: if (flg) {
2304: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2305: }
2307: /* Get runtime option */
2308: if (product->api_user) {
2309: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatMatMatMult","Mat");
2310: PetscOptionsEList("-matmatmatmult_via","Algorithmic approach","MatMatMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);
2311: PetscOptionsEnd();
2312: } else {
2313: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_ABC","Mat");
2314: PetscOptionsEList("-matproduct_abc_via","Algorithmic approach","MatProduct_ABC",algTypes,nalg,algTypes[alg],&alg,&flg);
2315: PetscOptionsEnd();
2316: }
2317: if (flg) {
2318: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2319: }
2321: C->ops->matmatmultsymbolic = MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ;
2322: C->ops->productsymbolic = MatProductSymbolic_ABC;
2323: return(0);
2324: }
2326: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ(Mat C)
2327: {
2329: Mat_Product *product = C->product;
2332: switch (product->type) {
2333: case MATPRODUCT_AB:
2334: MatProductSetFromOptions_MPIAIJ_AB(C);
2335: break;
2336: case MATPRODUCT_AtB:
2337: MatProductSetFromOptions_MPIAIJ_AtB(C);
2338: break;
2339: case MATPRODUCT_PtAP:
2340: MatProductSetFromOptions_MPIAIJ_PtAP(C);
2341: break;
2342: case MATPRODUCT_RARt:
2343: MatProductSetFromOptions_MPIAIJ_RARt(C);
2344: break;
2345: case MATPRODUCT_ABC:
2346: MatProductSetFromOptions_MPIAIJ_ABC(C);
2347: break;
2348: default:
2349: break;
2350: }
2351: return(0);
2352: }
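
Each product type handled in the switch above is reached by the same user-level sequence; only the MatProductType changes. A sketch (the helper name is illustrative; MATPRODUCT_ABC additionally requires the third matrix to be passed to MatProductCreate()):

#include <petscmat.h>

/* generic driver: D = product(A,B) of the requested type */
PetscErrorCode SetupProduct(Mat A,Mat B,MatProductType ptype,Mat *D)
{
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  ierr = MatProductCreate(A,B,NULL,D);CHKERRQ(ierr);
  ierr = MatProductSetType(*D,ptype);CHKERRQ(ierr);      /* MATPRODUCT_AB, _AtB, _PtAP, or _RARt */
  ierr = MatProductSetFromOptions(*D);CHKERRQ(ierr);     /* for MPIAIJ operands this lands in the dispatcher above */
  ierr = MatProductSymbolic(*D);CHKERRQ(ierr);
  ierr = MatProductNumeric(*D);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
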