submodule(solver3d_m) solver3d_CERFACS_s
  !! Iterative MPI solver based on CERFACS library
  !!
  !! https://www.cerfacs.fr/algor/Softs/GMRES/
  !!
  !! V. Fraysse, L. Giraud, S. Gratton, and J. Langou,
  !! A set of GMRES routines for real and complex arithmetics on high
  !! performance computers,
  !! CERFACS Technical Report TR/PA/03/3, public domain software available
  !! on www.cerfacs/algor/Softs, 2003

  implicit none

contains

  module subroutine create_CERFACS(self, comm, krylov_method, &
                                   ndim_loc, resmax, &
                                   matvec, precondl, precondr, &
                                   maxiter, nrestart, &
                                   dbgout)
    class(solver3d_CERFACS_t), intent(inout) :: self
    integer, intent(in) :: comm
    character(len=*), intent(in) :: krylov_method
    integer, intent(in) :: ndim_loc
    real(FP), intent(in) :: resmax
    procedure(matvec_interface) :: matvec
    procedure(precond_interface), optional :: precondl
    procedure(precond_interface), optional :: precondr
    integer, intent(in), optional :: maxiter
    integer, intent(in), optional :: nrestart
    integer, intent(in), optional :: dbgout

    integer :: ierr
    integer :: nrestart_loc

    ! Set debug output level
    self%outi = 0
    if (present(dbgout)) then
      if (is_master()) then
        self%outi = dbgout
      endif
      if (dbgout >= 3) then
        self%outi = dbgout ! every rank writes
      endif
    endif

    ! Krylov method (only RGMRES allowed)
    select case(krylov_method)
    case('RGMRES')
      if (self%outi >= 1) then
        write(get_stdout(),*) 'Krylov method: ', krylov_method
      endif
    case default
      call handle_error('Krylov method not valid for CERFACS solver, &
                        &available methods are [RGMRES]', &
                        PARALLAX_ERR_SOLVER3D, __LINE__, __FILE__, &
                        additional_info=error_info_t(krylov_method))
    end select

    ! Set solver parameters
    call init_dgmres(self%icntl, self%cntl)
    self%icntl(1) = get_stderr()  ! unit for error messages
    self%icntl(2) = get_stderr()  ! unit for warnings
    if (self%outi >= 1) then
      self%icntl(3) = get_stdout()  ! unit for convergence history
    else
      self%icntl(3) = 0
    endif

    self%matvec => matvec

    ! Set preconditioner: icntl(4) selects none/left/right/both
    if (present(precondl) .and. present(precondr)) then
      self%icntl(4) = 3
      self%precondl => precondl
      self%precondr => precondr
    elseif (present(precondl)) then
      self%icntl(4) = 1
      self%precondl => precondl
    elseif (present(precondr)) then
      self%icntl(4) = 2
      self%precondr => precondr
    else
      self%icntl(4) = 0
    endif

    self%icntl(6) = 1 ! provide initial guess

    ! nrestart is needed to size the workspace, so it must not be
    ! dereferenced when absent; fall back to a restart length of 30
    ! (an assumed default, not a value mandated by the CERFACS library)
    nrestart_loc = 30
    if (present(nrestart)) nrestart_loc = nrestart
    self%nrestart = nrestart_loc

    ! Total iteration budget; keep the init_dgmres default when maxiter
    ! is not supplied
    if (present(maxiter)) then
      self%icntl(7) = maxiter * nrestart_loc
    endif

    ! Set tolerances
    self%cntl(1) = resmax

    ! Dimension of problem
    self%ndim_loc = ndim_loc
    call MPI_allreduce(ndim_loc, self%ndim_glob, 1, MPI_INTEGER, &
                       MPI_SUM, comm, ierr)

    ! Length of work array
    self%lwork = nrestart_loc * nrestart_loc &
               + nrestart_loc * (ndim_loc + 5) &
               + 5 * ndim_loc + 2
  end subroutine

  module subroutine solve_CERFACS(self, comm, rhs, sol, res, info, res_true)
    class(solver3d_CERFACS_t), intent(inout) :: self
    integer, intent(in) :: comm
    real(FP), dimension(self%ndim_loc), intent(in) :: rhs
    real(FP), dimension(self%ndim_loc), intent(inout) :: sol
    real(FP), intent(out) :: res
    integer, intent(out) :: info
    real(FP), intent(out), optional :: res_true

    integer :: i, ierr
    integer, dimension(5) :: irc
    integer, dimension(3) :: iinfo
    real(FP), dimension(3) :: rinfo
    integer, dimension(1) :: idummy
    real(FP), dimension(self%lwork) :: work

    ! Load initial guess and rhs into work
    !$omp parallel default(none) private(i) shared(self, sol, rhs, work)
    !$omp do
    do i = 1, self%ndim_loc
      work(i) = sol(i)
      work(i+self%ndim_loc) = rhs(i)
    enddo
    !$omp end do
    !$omp end parallel
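    ! Reverse communication protocol of drive_dgmres, as handled below:
    ! the driver returns a request code in irc(1) together with offsets
    ! into work (x at work(irc(2)), y at work(irc(3)), z at work(irc(4))):
    !   irc(1) = 0 : done, inspect iinfo(1) for success/failure
    !   irc(1) = 1 : compute z = A * x
    !   irc(1) = 2 : compute z = M_left^{-1} * x
    !   irc(1) = 3 : compute z = M_right^{-1} * x
    !   irc(1) = 4 : compute irc(5) dot products z = x^T * y, then
    !                sum the partial results across all ranks of comm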
    ! Reverse communication interface
100 call drive_dgmres(self%ndim_glob, self%ndim_loc, self%nrestart, &
                      self%lwork, work, irc, self%icntl, self%cntl, &
                      iinfo, rinfo)

    select case(irc(1))
    case(0)
      ! Normal exit
      if (iinfo(1) == 0) then
        info = iinfo(2)
        res = rinfo(1)
        if (present(res_true)) then
          res_true = rinfo(2)
        endif
        if (self%outi > 0) then
          write(get_stdout(),*) '3d solver (CERFACS) successful'
          write(get_stdout(), 303) info, res, rinfo(2)
303       FORMAT(3X,'niter =', I6, &
                 '; res (preconditioned) = ', ES14.6E3, &
                 '; res (unpreconditioned) = ', ES14.6E3)
        endif
      else
        info = iinfo(1)
        res = FP_NAN
        if (present(res_true)) then
          res_true = FP_NAN
        endif
        write(get_stdout(),*) '3d solver (CERFACS) failed, info = ', info
      endif

      ! Fetch solution from work
      !$omp parallel default(none) private(i) shared(self, sol, work)
      !$omp do
      do i = 1, self%ndim_loc
        sol(i) = work(i)
      enddo
      !$omp end do
      !$omp end parallel
      return

    case(1)
      ! Perform the matrix-vector product z = A * x
      call self%matvec(work(irc(2)), work(irc(4)), idummy)
      goto 100

    case(2)
      ! Perform the left preconditioning z = M_left^{-1} * x
      call self%precondl(work(irc(2)), work(irc(4)), idummy)
      goto 100

    case(3)
      ! Perform the right preconditioning z = M_right^{-1} * x
      call self%precondr(work(irc(2)), work(irc(4)), idummy)
      goto 100

    case(4)
      ! Perform irc(5) local dot products z = x^T * y via dgemv, then
      ! reduce the partial sums over all ranks
      call dgemv('C', self%ndim_loc, irc(5), 1.0_FP, work(irc(2)), &
                 self%ndim_loc, work(irc(3)), 1, 0.0_FP, &
                 work(irc(4)), 1)
      call MPI_allreduce(MPI_IN_PLACE, work(irc(4)), irc(5), MPI_FP, &
                         MPI_SUM, comm, ierr)
      goto 100

    case default
      call handle_error('irc not valid', &
                        PARALLAX_ERR_SOLVER3D, __LINE__, __FILE__)
    end select
  end subroutine

  module subroutine destructor_CERFACS(self)
    type(solver3d_CERFACS_t), intent(inout) :: self

    self%matvec => null()
    self%precondl => null()
    self%precondr => null()
  end subroutine

end submodule
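
! Usage sketch (illustrative only): the calling code, `my_matvec`, and
! `my_precond` below are hypothetical and not part of this submodule;
! the actual public or type-bound names are defined in the parent
! module solver3d_m and may differ.
!
!   type(solver3d_CERFACS_t) :: solver
!   real(FP) :: rhs(n_loc), sol(n_loc), res
!   integer  :: info
!
!   call create_CERFACS(solver, MPI_COMM_WORLD, 'RGMRES', n_loc, &
!                       1.0e-8_FP, my_matvec, precondl=my_precond, &
!                       maxiter=100, nrestart=30)
!   sol = 0.0_FP   ! initial guess (used, since icntl(6) = 1)
!   call solve_CERFACS(solver, MPI_COMM_WORLD, rhs, sol, res, info)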