Actual source code: neprefine.c
slepc-3.16.0 2021-09-30
1: /*
2: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3: SLEPc - Scalable Library for Eigenvalue Problem Computations
4: Copyright (c) 2002-2021, Universitat Politecnica de Valencia, Spain
6: This file is part of SLEPc.
7: SLEPc is distributed under a 2-clause BSD license (see LICENSE).
8: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
9: */
10: /*
11: Newton refinement for NEP, simple version
12: */
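/*
   Each approximate eigenpair (lambda,v) of T(lambda)*v = 0, with
   T(lambda) = sum_i f_i(lambda)*A_i, is improved with Newton iterations on a
   bordered correction equation coupling T(lambda), T'(lambda)*v and a
   normalization condition on v. In the EXPLICIT scheme this system is
   assembled as the (n+1)x(n+1) matrix

      [ T(lambda)   T'(lambda)*v ]
      [    v^H           0       ]

   whereas MBE solves it by mixed block elimination with T(lambda) and its
   transpose, and SCHUR applies a shell matrix carrying a rank-one
   correction. After each solve the updates are v <- (v-dv)/||v-dv|| and
   lambda <- lambda-dlambda.
*/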
14: #include <slepc/private/nepimpl.h>
15: #include <slepcblaslapack.h>
17: #define NREF_MAXIT 10
19: typedef struct {
20: VecScatter *scatter_id,nst;
21: Mat *A;
22: Vec nv,vg,v,w;
23: FN *fn;
24: } NEPSimpNRefctx;
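/*
   Context of the shell matrix used by the SCHUR scheme. Its action,
   implemented in MatMult_FS() below, is y = M1*x - (m3/M4)*(M3^H*x)*M2,
   i.e. M1 = T(lambda) corrected with a rank-one term built from
   M2 = T'(lambda)*v and M3 = v.
*/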
26: typedef struct {
27: Mat M1;
28: Vec M2,M3;
29: PetscScalar M4,m3;
30: } NEP_REFINE_MATSHELL;
32: static PetscErrorCode MatMult_FS(Mat M,Vec x,Vec y)
33: {
34: PetscErrorCode ierr;
35: NEP_REFINE_MATSHELL *ctx;
36: PetscScalar t;
39: MatShellGetContext(M,&ctx);
40: VecDot(x,ctx->M3,&t);
41: t *= ctx->m3/ctx->M4;
42: MatMult(ctx->M1,x,y);
43: VecAXPY(y,-t,ctx->M2);
44: return(0);
45: }
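/*
   Set up the refinement context. If the refinement communicator is split
   (npart>1), redundant copies of the problem matrices and FN objects are
   created on every subcommunicator, together with the scatters that move
   eigenvectors between the parent communicator and the subcommunicators.
   For the EXPLICIT scheme an additional scatter (nst) gathers a full vector
   onto the last process, which owns the extra row of the bordered matrix.
*/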
47: static PetscErrorCode NEPSimpleNRefSetUp(NEP nep,NEPSimpNRefctx **ctx_)
48: {
50: PetscInt i,si,j,n0,m0,nloc,*idx1,*idx2,ne;
51: IS is1,is2;
52: NEPSimpNRefctx *ctx;
53: Vec v;
54: PetscMPIInt rank,size;
57: PetscCalloc1(1,ctx_);
58: ctx = *ctx_;
59: if (nep->npart==1) {
60: ctx->A = nep->A;
61: ctx->fn = nep->f;
62: nep->refinesubc = NULL;
63: ctx->scatter_id = NULL;
64: } else {
65: PetscMalloc2(nep->nt,&ctx->A,nep->npart,&ctx->scatter_id);
67: /* Duplicate matrices */
68: for (i=0;i<nep->nt;i++) {
69: MatCreateRedundantMatrix(nep->A[i],0,PetscSubcommChild(nep->refinesubc),MAT_INITIAL_MATRIX,&ctx->A[i]);
70: PetscLogObjectParent((PetscObject)nep,(PetscObject)ctx->A[i]);
71: }
72: MatCreateVecs(ctx->A[0],&ctx->v,NULL);
73: PetscLogObjectParent((PetscObject)nep,(PetscObject)ctx->v);
75: /* Duplicate FNs */
76: PetscMalloc1(nep->nt,&ctx->fn);
77: for (i=0;i<nep->nt;i++) {
78: FNDuplicate(nep->f[i],PetscSubcommChild(nep->refinesubc),&ctx->fn[i]);
79: }
81: /* Create scatters for sending vectors to each subcommunicator */
82: BVGetColumn(nep->V,0,&v);
83: VecGetOwnershipRange(v,&n0,&m0);
84: BVRestoreColumn(nep->V,0,&v);
85: VecGetLocalSize(ctx->v,&nloc);
86: PetscMalloc2(m0-n0,&idx1,m0-n0,&idx2);
87: VecCreateMPI(PetscObjectComm((PetscObject)nep),nloc,PETSC_DECIDE,&ctx->vg);
88: for (si=0;si<nep->npart;si++) {
89: j = 0;
90: for (i=n0;i<m0;i++) {
91: idx1[j] = i;
92: idx2[j++] = i+nep->n*si;
93: }
94: ISCreateGeneral(PetscObjectComm((PetscObject)nep),(m0-n0),idx1,PETSC_COPY_VALUES,&is1);
95: ISCreateGeneral(PetscObjectComm((PetscObject)nep),(m0-n0),idx2,PETSC_COPY_VALUES,&is2);
96: BVGetColumn(nep->V,0,&v);
97: VecScatterCreate(v,is1,ctx->vg,is2,&ctx->scatter_id[si]);
98: BVRestoreColumn(nep->V,0,&v);
99: ISDestroy(&is1);
100: ISDestroy(&is2);
101: }
102: PetscFree2(idx1,idx2);
103: }
104: if (nep->scheme==NEP_REFINE_SCHEME_EXPLICIT) {
105: MPI_Comm_rank(PetscObjectComm((PetscObject)ctx->A[0]),&rank);
106: MPI_Comm_size(PetscObjectComm((PetscObject)ctx->A[0]),&size);
107: if (size>1) {
108: if (nep->npart==1) {
109: BVGetColumn(nep->V,0,&v);
110: } else v = ctx->v;
111: VecGetOwnershipRange(v,&n0,&m0);
112: ne = (rank == size-1)?nep->n:0;
113: VecCreateMPI(PetscObjectComm((PetscObject)ctx->A[0]),ne,PETSC_DECIDE,&ctx->nv);
114: PetscMalloc1(m0-n0,&idx1);
115: for (i=n0;i<m0;i++) {
116: idx1[i-n0] = i;
117: }
118: ISCreateGeneral(PetscObjectComm((PetscObject)ctx->A[0]),(m0-n0),idx1,PETSC_COPY_VALUES,&is1);
119: VecScatterCreate(v,is1,ctx->nv,is1,&ctx->nst);
120: if (nep->npart==1) {
121: BVRestoreColumn(nep->V,0,&v);
122: }
123: PetscFree(idx1);
124: ISDestroy(&is1);
125: }
126: }
 return(0);
127: }
129: /*
130: Gather the eigenpair idx from the subcommunicator with color sc
131: */
132: static PetscErrorCode NEPSimpleNRefGatherEigenpair(NEP nep,NEPSimpNRefctx *ctx,PetscInt sc,PetscInt idx,PetscInt *fail)
133: {
135: PetscMPIInt nproc,p;
136: MPI_Comm comm=((PetscObject)nep)->comm;
137: Vec v;
138: PetscScalar *array;
141: MPI_Comm_size(comm,&nproc);
142: p = (nproc/nep->npart)*(sc+1)+PetscMin(sc+1,nproc%nep->npart)-1;
143: if (nep->npart>1) {
144: /* Communicate whether the refinement was successful */
145: MPI_Bcast(fail,1,MPIU_INT,p,comm);
146: if (!(*fail)) {
147: /* The root process p of subcommunicator sc broadcasts the eigenvalue */
148: MPI_Bcast(&nep->eigr[idx],1,MPIU_SCALAR,p,comm);
149: /* Gather nep->V[idx] from the subcommunicator sc */
150: BVGetColumn(nep->V,idx,&v);
151: if (nep->refinesubc->color==sc) {
152: VecGetArray(ctx->v,&array);
153: VecPlaceArray(ctx->vg,array);
154: }
155: VecScatterBegin(ctx->scatter_id[sc],ctx->vg,v,INSERT_VALUES,SCATTER_REVERSE);
156: VecScatterEnd(ctx->scatter_id[sc],ctx->vg,v,INSERT_VALUES,SCATTER_REVERSE);
157: if (nep->refinesubc->color==sc) {
158: VecResetArray(ctx->vg);
159: VecRestoreArray(ctx->v,&array);
160: }
161: BVRestoreColumn(nep->V,idx,&v);
162: }
163: } else {
164: if (nep->scheme==NEP_REFINE_SCHEME_EXPLICIT && !(*fail)) {
165: MPI_Bcast(&nep->eigr[idx],1,MPIU_SCALAR,p,comm);
166: }
167: }
168: return(0);
169: }
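/*
   Scatter the idx-th column of nep->V from the parent communicator to the
   subcommunicator with color sc (the reverse operation of the gather above)
*/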
171: static PetscErrorCode NEPSimpleNRefScatterEigenvector(NEP nep,NEPSimpNRefctx *ctx,PetscInt sc,PetscInt idx)
172: {
174: Vec v;
175: PetscScalar *array;
178: if (nep->npart>1) {
179: BVGetColumn(nep->V,idx,&v);
180: if (nep->refinesubc->color==sc) {
181: VecGetArray(ctx->v,&array);
182: VecPlaceArray(ctx->vg,array);
183: }
184: VecScatterBegin(ctx->scatter_id[sc],v,ctx->vg,INSERT_VALUES,SCATTER_FORWARD);
185: VecScatterEnd(ctx->scatter_id[sc],v,ctx->vg,INSERT_VALUES,SCATTER_FORWARD);
186: if (nep->refinesubc->color==sc) {
187: VecResetArray(ctx->vg);
188: VecRestoreArray(ctx->v,&array);
189: }
190: BVRestoreColumn(nep->V,idx,&v);
191: }
192: return(0);
193: }
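/*
   Build (when ini is true) or update the operators of the correction
   equation for eigenpair idx: M = T(lambda) = sum_i f_i(lambda)*A_i and
   w = T'(lambda)*v. Depending on the scheme, T and the preconditioner P are
   then the assembled bordered matrix (EXPLICIT), M itself (MBE), or the
   shell matrix together with a diagonally corrected copy of M (SCHUR).
*/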
195: static PetscErrorCode NEPSimpleNRefSetUpSystem(NEP nep,NEPSimpNRefctx *ctx,Mat *A,PetscInt idx,Mat *Mt,Mat *T,Mat *P,PetscBool ini,Vec t,Vec v)
196: {
197: PetscErrorCode ierr;
198: PetscInt i,st,ml,m0,n0,m1,mg;
199: PetscInt *dnz,*onz,ncols,*cols2=NULL,*nnz,nt=nep->nt;
200: PetscScalar zero=0.0,*coeffs,*coeffs2;
201: PetscMPIInt rank,size;
202: MPI_Comm comm;
203: const PetscInt *cols;
204: const PetscScalar *vals,*array;
205: NEP_REFINE_MATSHELL *fctx;
206: Vec w=ctx->w;
207: Mat M;
210: PetscMalloc2(nt,&coeffs,nt,&coeffs2);
211: switch (nep->scheme) {
212: case NEP_REFINE_SCHEME_SCHUR:
213: if (ini) {
214: PetscCalloc1(1,&fctx);
215: MatGetSize(A[0],&m0,&n0);
216: MatCreateShell(PetscObjectComm((PetscObject)A[0]),PETSC_DECIDE,PETSC_DECIDE,m0,n0,fctx,T);
217: MatShellSetOperation(*T,MATOP_MULT,(void(*)(void))MatMult_FS);
218: } else {
219: MatShellGetContext(*T,&fctx);
220: }
221: M=fctx->M1;
222: break;
223: case NEP_REFINE_SCHEME_MBE:
224: M=*T;
225: break;
226: case NEP_REFINE_SCHEME_EXPLICIT:
227: M=*Mt;
228: break;
229: }
230: if (ini) {
231: MatDuplicate(A[0],MAT_COPY_VALUES,&M);
232: } else {
233: MatCopy(A[0],M,DIFFERENT_NONZERO_PATTERN);
234: }
235: for (i=0;i<nt;i++) {
236: FNEvaluateFunction(ctx->fn[i],nep->eigr[idx],coeffs+i);
237: }
238: if (coeffs[0]!=1.0) {
239: MatScale(M,coeffs[0]);
240: }
241: for (i=1;i<nt;i++) {
242: MatAXPY(M,coeffs[i],A[i],(ini)?nep->mstr:SUBSET_NONZERO_PATTERN);
243: }
244: for (i=0;i<nt;i++) {
245: FNEvaluateDerivative(ctx->fn[i],nep->eigr[idx],coeffs2+i);
246: }
247: st = 0;
248: for (i=0;i<nt && PetscAbsScalar(coeffs2[i])==0.0;i++) st++;
249: MatMult(A[st],v,w);
250: if (coeffs2[st]!=1.0) {
251: VecScale(w,coeffs2[st]);
252: }
253: for (i=st+1;i<nt;i++) {
254: MatMult(A[i],v,t);
255: VecAXPY(w,coeffs2[i],t);
256: }
258: switch (nep->scheme) {
259: case NEP_REFINE_SCHEME_EXPLICIT:
260: comm = PetscObjectComm((PetscObject)A[0]);
261: MPI_Comm_rank(comm,&rank);
262: MPI_Comm_size(comm,&size);
263: MatGetSize(M,&mg,NULL);
264: MatGetOwnershipRange(M,&m0,&m1);
265: if (ini) {
266: MatCreate(comm,T);
267: MatGetLocalSize(M,&ml,NULL);
268: if (rank==size-1) ml++;
269: MatSetSizes(*T,ml,ml,mg+1,mg+1);
270: MatSetFromOptions(*T);
271: MatSetUp(*T);
272: /* Preallocate T (the bordered matrix, with one extra row and column) */
273: if (size>1) {
274: MatPreallocateInitialize(comm,ml,ml,dnz,onz);
275: for (i=m0;i<m1;i++) {
276: MatGetRow(M,i,&ncols,&cols,NULL);
277: MatPreallocateSet(i,ncols,cols,dnz,onz);
278: MatPreallocateSet(i,1,&mg,dnz,onz);
279: MatRestoreRow(M,i,&ncols,&cols,NULL);
280: }
281: if (rank==size-1) {
282: PetscCalloc1(mg+1,&cols2);
283: for (i=0;i<mg+1;i++) cols2[i]=i;
284: MatPreallocateSet(m1,mg+1,cols2,dnz,onz);
285: PetscFree(cols2);
286: }
287: MatMPIAIJSetPreallocation(*T,0,dnz,0,onz);
288: MatPreallocateFinalize(dnz,onz);
289: } else {
290: PetscCalloc1(mg+1,&nnz);
291: for (i=0;i<mg;i++) {
292: MatGetRow(M,i,&ncols,NULL,NULL);
293: nnz[i] = ncols+1;
294: MatRestoreRow(M,i,&ncols,NULL,NULL);
295: }
296: nnz[mg] = mg+1;
297: MatSeqAIJSetPreallocation(*T,0,nnz);
298: PetscFree(nnz);
299: }
300: *Mt = M;
301: *P = *T;
302: }
304: /* Set the values of the bordered matrix T */
305: VecGetArrayRead(w,&array);
306: for (i=m0;i<m1;i++) {
307: MatGetRow(M,i,&ncols,&cols,&vals);
308: MatSetValues(*T,1,&i,ncols,cols,vals,INSERT_VALUES);
309: MatRestoreRow(M,i,&ncols,&cols,&vals);
310: MatSetValues(*T,1,&i,1,&mg,array+i-m0,INSERT_VALUES);
311: }
312: VecRestoreArrayRead(w,&array);
313: VecConjugate(v);
314: MPI_Comm_size(PetscObjectComm((PetscObject)A[0]),&size);
315: MPI_Comm_rank(PetscObjectComm((PetscObject)A[0]),&rank);
316: if (size>1) {
317: if (rank==size-1) {
318: PetscMalloc1(nep->n,&cols2);
319: for (i=0;i<nep->n;i++) cols2[i]=i;
320: }
321: VecScatterBegin(ctx->nst,v,ctx->nv,INSERT_VALUES,SCATTER_FORWARD);
322: VecScatterEnd(ctx->nst,v,ctx->nv,INSERT_VALUES,SCATTER_FORWARD);
323: VecGetArrayRead(ctx->nv,&array);
324: if (rank==size-1) {
325: MatSetValues(*T,1,&mg,nep->n,cols2,array,INSERT_VALUES);
326: MatSetValues(*T,1,&mg,1,&mg,&zero,INSERT_VALUES);
327: }
328: VecRestoreArrayRead(ctx->nv,&array);
329: } else {
330: PetscMalloc1(m1-m0,&cols2);
331: for (i=0;i<m1-m0;i++) cols2[i]=m0+i;
332: VecGetArrayRead(v,&array);
333: MatSetValues(*T,1,&mg,m1-m0,cols2,array,INSERT_VALUES);
334: MatSetValues(*T,1,&mg,1,&mg,&zero,INSERT_VALUES);
335: VecRestoreArrayRead(v,&array);
336: }
337: VecConjugate(v);
338: MatAssemblyBegin(*T,MAT_FINAL_ASSEMBLY);
339: MatAssemblyEnd(*T,MAT_FINAL_ASSEMBLY);
340: PetscFree(cols2);
341: break;
342: case NEP_REFINE_SCHEME_SCHUR:
343: fctx->M2 = ctx->w;
344: fctx->M3 = v;
345: fctx->m3 = 1.0+PetscConj(nep->eigr[idx])*nep->eigr[idx];
346: fctx->M4 = PetscConj(nep->eigr[idx]);
347: fctx->M1 = M;
348: if (ini) {
349: MatDuplicate(M,MAT_COPY_VALUES,P);
350: } else {
351: MatCopy(M,*P,SAME_NONZERO_PATTERN);
352: }
353: if (fctx->M4!=0.0) {
354: VecConjugate(v);
355: VecPointwiseMult(t,v,w);
356: VecConjugate(v);
357: VecScale(t,-fctx->m3/fctx->M4);
358: MatDiagonalSet(*P,t,ADD_VALUES);
359: }
360: break;
361: case NEP_REFINE_SCHEME_MBE:
362: *T = M;
363: *P = M;
364: break;
365: }
366: PetscFree2(coeffs,coeffs2);
367: return(0);
368: }
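/*
   Refine the first k eigenpairs stored in the NEP object, performing at most
   *maxits Newton steps per eigenpair (NREF_MAXIT if maxits is NULL) and
   stopping earlier when the relative error drops below tol. If the
   refinement communicator is split into several partitions, each
   subcommunicator refines a different eigenpair at a time.
*/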
370: PetscErrorCode NEPNewtonRefinementSimple(NEP nep,PetscInt *maxits,PetscReal tol,PetscInt k)
371: {
372: PetscErrorCode ierr;
373: PetscInt i,n,its,idx=0,*idx_sc,*its_sc,color,*fail_sc;
374: PetscMPIInt rank,size;
375: Mat Mt=NULL,T=NULL,P=NULL;
376: MPI_Comm comm;
377: Vec r,v,dv,rr=NULL,dvv=NULL,t[2];
378: const PetscScalar *array;
379: PetscScalar *array2,deig=0.0,tt[2],ttt;
380: PetscReal norm,error;
381: PetscBool ini=PETSC_TRUE,sc_pend,solved=PETSC_FALSE;
382: NEPSimpNRefctx *ctx;
383: NEP_REFINE_MATSHELL *fctx=NULL;
384: KSPConvergedReason reason;
387: PetscLogEventBegin(NEP_Refine,nep,0,0,0);
388: NEPSimpleNRefSetUp(nep,&ctx);
389: its = (maxits)?*maxits:NREF_MAXIT;
390: comm = (nep->npart==1)?PetscObjectComm((PetscObject)nep):PetscSubcommChild(nep->refinesubc);
391: if (!nep->refineksp) { NEPRefineGetKSP(nep,&nep->refineksp); }
392: if (nep->npart==1) {
393: BVGetColumn(nep->V,0,&v);
394: } else v = ctx->v;
395: VecDuplicate(v,&ctx->w);
396: VecDuplicate(v,&r);
397: VecDuplicate(v,&dv);
398: VecDuplicate(v,&t[0]);
399: VecDuplicate(v,&t[1]);
400: if (nep->npart==1) { BVRestoreColumn(nep->V,0,&v); }
401: MPI_Comm_size(comm,&size);
402: MPI_Comm_rank(comm,&rank);
403: VecGetLocalSize(r,&n);
404: PetscMalloc3(nep->npart,&idx_sc,nep->npart,&its_sc,nep->npart,&fail_sc);
405: for (i=0;i<nep->npart;i++) fail_sc[i] = 0;
406: for (i=0;i<nep->npart;i++) its_sc[i] = 0;
407: color = (nep->npart==1)?0:nep->refinesubc->color;
409: /* Loop performing iterative refinements */
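/*
   Each partition (color) tracks the eigenpair it is currently refining
   (idx_sc), the number of Newton steps spent on it (its_sc) and whether the
   last linear solve failed (fail_sc). An eigenpair is released as soon as it
   converges, fails or exhausts its iterations, and the next index is taken;
   the loop finishes when all indices have reached k.
*/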
410: while (!solved) {
411: for (i=0;i<nep->npart;i++) {
412: sc_pend = PETSC_TRUE;
413: if (its_sc[i]==0) {
414: idx_sc[i] = idx++;
415: if (idx_sc[i]>=k) {
416: sc_pend = PETSC_FALSE;
417: } else {
418: NEPSimpleNRefScatterEigenvector(nep,ctx,i,idx_sc[i]);
419: }
420: } else { /* Gather the eigenpair from subcommunicator i */
421: NEPSimpleNRefGatherEigenpair(nep,ctx,i,idx_sc[i],&fail_sc[i]);
422: }
423: while (sc_pend) {
424: if (!fail_sc[i]) {
425: NEPComputeError(nep,idx_sc[i],NEP_ERROR_RELATIVE,&error);
426: }
427: if (error<=tol || its_sc[i]>=its || fail_sc[i]) {
428: idx_sc[i] = idx++;
429: its_sc[i] = 0;
430: fail_sc[i] = 0;
431: if (idx_sc[i]<k) { NEPSimpleNRefScatterEigenvector(nep,ctx,i,idx_sc[i]); }
432: } else {
433: sc_pend = PETSC_FALSE;
434: its_sc[i]++;
435: }
436: if (idx_sc[i]>=k) sc_pend = PETSC_FALSE;
437: }
438: }
439: solved = PETSC_TRUE;
440: for (i=0;i<nep->npart&&solved;i++) solved = PetscNot(idx_sc[i]<k);
441: if (idx_sc[color]<k) {
442: #if !defined(PETSC_USE_COMPLEX)
443: if (nep->eigi[idx_sc[color]]!=0.0) SETERRQ(PetscObjectComm((PetscObject)nep),PETSC_ERR_SUP,"Simple Refinement not implemented in real scalar for complex eigenvalues");
444: #endif
445: if (nep->npart==1) {
446: BVGetColumn(nep->V,idx_sc[color],&v);
447: } else v = ctx->v;
448: NEPSimpleNRefSetUpSystem(nep,ctx,ctx->A,idx_sc[color],&Mt,&T,&P,ini,t[0],v);
449: KSPSetOperators(nep->refineksp,T,P);
450: if (ini) {
451: KSPSetFromOptions(nep->refineksp);
452: if (nep->scheme==NEP_REFINE_SCHEME_EXPLICIT) {
453: MatCreateVecs(T,&dvv,NULL);
454: VecDuplicate(dvv,&rr);
455: }
456: ini = PETSC_FALSE;
457: }
458: switch (nep->scheme) {
459: case NEP_REFINE_SCHEME_EXPLICIT:
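/* The right-hand side is [T(lambda)*v; 0]; the solution dvv stacks the
   eigenvector correction dv and, in its last entry (owned by the last
   process), the eigenvalue correction dlambda */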
460: MatMult(Mt,v,r);
461: VecGetArrayRead(r,&array);
462: if (rank==size-1) {
463: VecGetArray(rr,&array2);
464: PetscArraycpy(array2,array,n);
465: array2[n] = 0.0;
466: VecRestoreArray(rr,&array2);
467: } else {
468: VecPlaceArray(rr,array);
469: }
470: KSPSolve(nep->refineksp,rr,dvv);
471: KSPGetConvergedReason(nep->refineksp,&reason);
472: if (reason>0) {
473: if (rank != size-1) {
474: VecResetArray(rr);
475: }
476: VecRestoreArrayRead(r,&array);
477: VecGetArrayRead(dvv,&array);
478: VecPlaceArray(dv,array);
479: VecAXPY(v,-1.0,dv);
480: VecNorm(v,NORM_2,&norm);
481: VecScale(v,1.0/norm);
482: VecResetArray(dv);
483: if (rank==size-1) nep->eigr[idx_sc[color]] -= array[n];
484: VecRestoreArrayRead(dvv,&array);
485: } else fail_sc[color] = 1;
486: break;
487: case NEP_REFINE_SCHEME_MBE:
488: MatMult(T,v,r);
489: /* Mixed block elimination */
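/* The bordered system is solved without assembling it: one solve with the
   transpose of T gives the left bordering vector t[0], a solve with T
   applied to w = T'(lambda)*v gives t[1], and a final solve yields dv;
   the eigenvalue correction is deig = tt[0]+tt[1] */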
490: VecConjugate(v);
491: KSPSolveTranspose(nep->refineksp,v,t[0]);
492: KSPGetConvergedReason(nep->refineksp,&reason);
493: if (reason>0) {
494: VecConjugate(t[0]);
495: VecDot(ctx->w,t[0],&tt[0]);
496: KSPSolve(nep->refineksp,ctx->w,t[1]);
497: KSPGetConvergedReason(nep->refineksp,&reason);
498: if (reason>0) {
499: VecDot(t[1],v,&tt[1]);
500: VecDot(r,t[0],&ttt);
501: tt[0] = ttt/tt[0];
502: VecAXPY(r,-tt[0],ctx->w);
503: KSPSolve(nep->refineksp,r,dv);
504: KSPGetConvergedReason(nep->refineksp,&reason);
505: if (reason>0) {
506: VecDot(dv,v,&ttt);
507: tt[1] = ttt/tt[1];
508: VecAXPY(dv,-tt[1],t[1]);
509: deig = tt[0]+tt[1];
510: }
511: }
512: VecConjugate(v);
513: VecAXPY(v,-1.0,dv);
514: VecNorm(v,NORM_2,&norm);
515: VecScale(v,1.0/norm);
516: nep->eigr[idx_sc[color]] -= deig;
517: fail_sc[color] = 0;
518: } else {
519: VecConjugate(v);
520: fail_sc[color] = 1;
521: }
522: break;
523: case NEP_REFINE_SCHEME_SCHUR:
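/* The correction equation is solved with the rank-one corrected shell
   matrix T; the eigenvalue update is recovered as deig = -(m3/M4)*(v^H*dv) */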
524: fail_sc[color] = 1;
525: MatShellGetContext(T,&fctx);
526: if (fctx->M4!=0.0) {
527: MatMult(fctx->M1,v,r);
528: KSPSolve(nep->refineksp,r,dv);
529: KSPGetConvergedReason(nep->refineksp,&reason);
530: if (reason>0) {
531: VecDot(dv,v,&deig);
532: deig *= -fctx->m3/fctx->M4;
533: VecAXPY(v,-1.0,dv);
534: VecNorm(v,NORM_2,&norm);
535: VecScale(v,1.0/norm);
536: nep->eigr[idx_sc[color]] -= deig;
537: fail_sc[color] = 0;
538: }
539: }
540: break;
541: }
542: if (nep->npart==1) { BVRestoreColumn(nep->V,idx_sc[color],&v); }
543: }
544: }
545: VecDestroy(&t[0]);
546: VecDestroy(&t[1]);
547: VecDestroy(&dv);
548: VecDestroy(&ctx->w);
549: VecDestroy(&r);
550: PetscFree3(idx_sc,its_sc,fail_sc);
551: VecScatterDestroy(&ctx->nst);
552: if (nep->npart>1) {
553: VecDestroy(&ctx->vg);
554: VecDestroy(&ctx->v);
555: for (i=0;i<nep->nt;i++) {
556: MatDestroy(&ctx->A[i]);
557: }
558: for (i=0;i<nep->npart;i++) {
559: VecScatterDestroy(&ctx->scatter_id[i]);
560: }
561: PetscFree2(ctx->A,ctx->scatter_id);
562: }
563: if (fctx && nep->scheme==NEP_REFINE_SCHEME_SCHUR) {
564: MatDestroy(&P);
565: MatDestroy(&fctx->M1);
566: PetscFree(fctx);
567: }
568: if (nep->scheme==NEP_REFINE_SCHEME_EXPLICIT) {
569: MatDestroy(&Mt);
570: VecDestroy(&dvv);
571: VecDestroy(&rr);
572: VecDestroy(&ctx->nv);
573: if (nep->npart>1) {
574: for (i=0;i<nep->nt;i++) { FNDestroy(&ctx->fn[i]); }
575: PetscFree(ctx->fn);
576: }
577: }
578: if (nep->scheme==NEP_REFINE_SCHEME_MBE) {
579: if (nep->npart>1) {
580: for (i=0;i<nep->nt;i++) { FNDestroy(&ctx->fn[i]); }
581: PetscFree(ctx->fn);
582: }
583: }
584: MatDestroy(&T);
585: PetscFree(ctx);
586: PetscLogEventEnd(NEP_Refine,nep,0,0,0);
587: return(0);
588: }