module auxiliaries_test_diffusion_m
    !! Auxiliary routines for test_diffusion program
    use MPI
    use comm_handling_m, only: get_communicator
    use precision_m, only : FP, MPI_FP
    use constants_m, only : TWO_PI
    use csrmat_m, only : csrmat_t, csr_transpose, csr_times_vec
    use equilibrium_m, only : equilibrium_t
    use mesh_cart_m, only : mesh_cart_t
    use fieldline_tracer_m, only : trace
    use mpi_mapping_auxiliaries_m, only : getdata_fwdbwdplane
    implicit none

    public :: complete_support_map
    public :: parallel_gradient_supp
    public :: parallel_divergence_supp
    public :: parallel_diffusion_supp
    public :: parallel_diffusion_shortley_weller
    public :: fci_norms

contains

    subroutine complete_support_map(comm, equi, mesh, mesh_staggered, &
                                    vol, qsupp_fwd, qsupp_bwd, &
                                    psupp_fwd, psupp_bwd)
        !! Completes the operators for the support operator method
        !! - Turns map matrices qsupp into parallel gradient matrices
        !! - Establishes parallel divergence matrices psupp
        integer, intent(in) :: comm
        !! MPI communicator
        class(equilibrium_t), intent(inout) :: equi
        !! Equilibrium
        type(mesh_cart_t), intent(in) :: mesh
        !! Mesh
        type(mesh_cart_t), intent(in) :: mesh_staggered
        !! Mesh at toroidally staggered (fwd) position
        real(FP), dimension(mesh%get_n_points()), intent(in) :: vol
        !! Flux box volumes
        type(csrmat_t), intent(inout) :: qsupp_fwd
        !! Map matrix on input, parallel gradient matrix on output
        !! (fwd direction)
        type(csrmat_t), intent(inout) :: qsupp_bwd
        !! Map matrix on input, parallel gradient matrix on output
        !! (bwd direction)
        type(csrmat_t), allocatable, intent(out) :: psupp_fwd
        !! Parallel divergence matrix (fwd direction)
        type(csrmat_t), allocatable, intent(out) :: psupp_bwd
        !! Parallel divergence matrix (bwd direction)

        type(mesh_cart_t), allocatable :: mesh_wrk
        integer :: l, k, j
        real(FP) :: phi_init, x, y, dphi_half, ds_a, ds_b, vol_a, vol_b
        real(FP), dimension(mesh_staggered%get_n_points()) :: ds_stag, &
                vol_stag
        real(FP), allocatable, dimension(:) :: vol_stag_bwd
        type(csrmat_t), allocatable :: wrk_csr


        ! Further auxiliary quantities defined on forward staggered grid
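        ! From each staggered point, trace half a toroidal step forward and
        ! backward, i.e. to the two neighbouring full planes:
        !   ds_stag(l)  - total arc length of the field-line segment between them
        !   vol_stag(l) - flux-box volume centred on the staggered point,
        !                 ~ (fwd + bwd integrated flux expansion) * h**2 * B_tor,
        !                 with h the in-plane grid spacing (interpretation of
        !                 fluxexp assumed from its use here).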
        phi_init = mesh_staggered%get_phi()
        dphi_half = mesh_staggered%get_phi() - mesh%get_phi()

        do l = 1, mesh_staggered%get_n_points()
            x = mesh_staggered%get_x(l)
            y = mesh_staggered%get_y(l)

            call trace(x, y, phi_init, dphi_half, equi, &
                       arclen=ds_a, fluxexp=vol_a)
            call trace(x, y, phi_init, -dphi_half, equi, &
                       arclen=ds_b, fluxexp=vol_b)

            ds_stag(l) = ds_a + ds_b
            vol_stag(l) = (vol_a + vol_b) * &
                mesh_staggered%get_spacing_f()**2 * equi%btor(x, y, phi_init)
        enddo

        ! Further auxiliary quantities defined on backward staggered grid
        allocate(mesh_wrk)
        call mesh_staggered%exchange_mesh_mpi(get_communicator(), &
                                              step=-1, &
                                              mesh_received=mesh_wrk)
        allocate(vol_stag_bwd(mesh_wrk%get_n_points()))
        phi_init = mesh_wrk%get_phi()
        do l = 1, mesh_wrk%get_n_points()
            x = mesh_wrk%get_x(l)
            y = mesh_wrk%get_y(l)

            call trace(x, y, phi_init, dphi_half, equi, fluxexp=vol_a)

            call trace(x, y, phi_init, -dphi_half, equi, fluxexp=vol_b)

            vol_stag_bwd(l) = (vol_a + vol_b) * &
                    mesh_wrk%get_spacing_f()**2 * &
                    equi%btor(x, y, phi_init)
        enddo
        deallocate(mesh_wrk)

        ! Factors for qsupp matrices
        ! to convert them into parallel gradient matrices
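        ! After this rescaling (division by ds_stag and sign flip of the bwd
        ! matrix), the pair acts as a centred parallel gradient at the
        ! staggered point:
        !   (qsupp_fwd*u_fwd + qsupp_bwd*u_bwd)(l)
        !       ~ ( u(fwd footpoint) - u(bwd footpoint) ) / ds_stag(l)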
        do l = 1, mesh_staggered%get_n_points()

            do k = qsupp_fwd%i(l), qsupp_fwd%i(l+1) - 1
                qsupp_fwd%val(k) = qsupp_fwd%val(k) / ds_stag(l)
            enddo

            do k = qsupp_bwd%i(l), qsupp_bwd%i(l+1) - 1
                qsupp_bwd%val(k) = -qsupp_bwd%val(k) / ds_stag(l)
            enddo
        enddo

        ! Compute parallel divergence matrices P = - 1/vol * Qsupp^T * vol_stag
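        ! Support-operator construction: the parallel divergence is the
        ! negative, volume-weighted transpose of the parallel gradient,
        !   P = -(1/vol) * Q^T * vol_stag,
        ! so that the composed diffusion P*Q is (up to boundary terms)
        ! self-adjoint in the vol-weighted inner product. The fwd gradient
        ! matrix is exchanged across planes (step=-1) first, because
        ! transposition swaps the roles of source and target mesh.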
        call getdata_fwdbwdplane(comm, step=-1, &
                                 acsr_send=qsupp_fwd, acsr_recv=wrk_csr)

        call csr_transpose(wrk_csr, psupp_bwd)
        deallocate(wrk_csr)
        call csr_transpose(qsupp_bwd, psupp_fwd)

        do l = 1, mesh%get_n_points()
            do k = psupp_fwd%i(l), psupp_fwd%i(l+1) - 1
                j = psupp_fwd%j(k)
                psupp_fwd%val(k) = -1.0_FP / vol(l) * psupp_fwd%val(k) &
                                   * vol_stag(j)
            enddo

            do k = psupp_bwd%i(l), psupp_bwd%i(l+1) - 1
                j = psupp_bwd%j(k)
                psupp_bwd%val(k) = -1.0_FP / vol(l) * psupp_bwd%val(k) &
                                   * vol_stag_bwd(j)
            enddo
        enddo

    end subroutine

    subroutine parallel_diffusion_shortley_weller(comm, mesh, &
                                                  qnaive_fwd, qnaive_bwd, &
                                                  ds_fwd, ds_bwd, u, du)
        !! Parallel diffusion according to Shortley-Weller scheme
        integer, intent(in) :: comm
        !! MPI communicator
        type(mesh_cart_t), intent(in) :: mesh
        !! Mesh
        type(csrmat_t), intent(in) :: qnaive_fwd
        !! Map matrix (fwd direction)
        type(csrmat_t), intent(in) :: qnaive_bwd
        !! Map matrix (bwd direction)
        real(FP), dimension(mesh%get_n_points()), intent(in) :: &
                ds_fwd
        !! Distances along magnetic field line to fwd mesh
        real(FP), dimension(mesh%get_n_points()), intent(in) :: &
                ds_bwd
        !! Distances along magnetic field line to bwd mesh
        real(FP), dimension(mesh%get_n_points()), intent(in) :: u
        !! Variable u
        real(FP), dimension(mesh%get_n_points()), intent(out) :: du
        !! Parallel diffusion of variable u

        integer:: n_fetch, l
        real(FP), allocatable, dimension(:) :: u_fetch
        real(FP), dimension(mesh%get_n_points()) :: uwrk1, uwrk2
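        ! Shortley-Weller second derivative along the field line with unequal
        ! arc lengths h_p and h_m to the two neighbouring planes (a sketch of
        ! the underlying stencil, matching the loop below):
        !   d2u/ds2 ~ 2*u_p/(h_p*(h_p+h_m)) + 2*u_m/(h_m*(h_p+h_m))
        !             - 2*u/(h_p*h_m)
        ! uwrk1 and uwrk2 hold u interpolated at the fwd and bwd footpoints,
        ! obtained by applying the map matrices to data fetched from the
        ! neighbouring planes.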

        call getdata_fwdbwdplane(comm, 1, mesh%get_n_points(), u, &
                                 n_fetch, u_fetch)
        !$omp parallel
        call csr_times_vec(qnaive_fwd, u_fetch, uwrk1)
        !$omp end parallel
        call getdata_fwdbwdplane(comm, -1, mesh%get_n_points(), u, &
                                 n_fetch, u_fetch)
        !$omp parallel private(l)
        call csr_times_vec(qnaive_bwd, u_fetch, uwrk2)
        !$omp do
        do l = 1, mesh%get_n_points()
            du(l) = 2 * uwrk1(l) / (ds_bwd(l) * (ds_bwd(l) + ds_fwd(l))) &
                  + 2 * uwrk2(l) / (ds_fwd(l) * (ds_bwd(l) + ds_fwd(l))) &
                  - 2 * u(l) / (ds_fwd(l) * ds_bwd(l))
        end do
        !$omp end do
        !$omp end parallel

        deallocate(u_fetch)

    end subroutine

    subroutine parallel_gradient_supp(comm, mesh, mesh_staggered, &
                                      qsupp_fwd, qsupp_bwd, u, du)
        !! Parallel gradient (from full to staggered mesh)
        integer, intent(in) :: comm
        !! MPI communicator
        type(mesh_cart_t), intent(in) :: mesh
        !! Mesh
        type(mesh_cart_t), intent(in) :: mesh_staggered
        !! Mesh at toroidally staggered (fwd) position
        type(csrmat_t), intent(in) :: qsupp_fwd
        !! Parallel gradient matrix (fwd direction)
        type(csrmat_t), intent(in) :: qsupp_bwd
        !! Parallel gradient matrix (bwd direction)
        real(FP), dimension(mesh%get_n_points()), intent(in) :: u
        !! Variable u
        real(FP), dimension(mesh_staggered%get_n_points()), intent(out) :: du
        !! Parallel gradient of variable u on staggered grid

        integer:: n_fetch, l
        real(FP), allocatable, dimension(:) :: u_fetch
        real(FP), dimension(mesh_staggered%get_n_points()) :: uwrk1, uwrk2
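        ! The 1/ds scaling and the bwd sign flip were already folded into
        ! qsupp_fwd/qsupp_bwd by complete_support_map, so the gradient at each
        ! staggered point is just the sum of the two matrix-vector products:
        ! fwd-plane data is fetched via MPI, the bwd contribution uses the
        ! local plane.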

        call getdata_fwdbwdplane(comm, 1, mesh%get_n_points(), u, &
                                 n_fetch, u_fetch)
        !$omp parallel private(l)
        call csr_times_vec(qsupp_fwd, u_fetch, uwrk1)
        call csr_times_vec(qsupp_bwd, u, uwrk2)
        !$omp do
        do l = 1, mesh_staggered%get_n_points()
            du(l) = uwrk1(l) + uwrk2(l)
        end do
        !$omp end do
        !$omp end parallel

        deallocate(u_fetch)

    end subroutine

    subroutine parallel_divergence_supp(comm, mesh, mesh_staggered, &
                                        psupp_fwd, psupp_bwd, &
                                        u, du)
        !! Parallel divergence (from staggered to full mesh)
        integer, intent(in) :: comm
        !! MPI communicator
        type(mesh_cart_t), intent(in) :: mesh
        !! Mesh
        type(mesh_cart_t), intent(in) :: mesh_staggered
        !! Mesh at toroidally staggered (fwd) position
        type(csrmat_t), intent(in) :: psupp_fwd
        !! Parallel divergence matrix (fwd direction)
        type(csrmat_t), intent(in) :: psupp_bwd
        !! Parallel divergence matrix (bwd direction)
        real(FP), dimension(mesh_staggered%get_n_points()), intent(in) :: u
        !! Variable u on staggered grid
        real(FP), dimension(mesh%get_n_points()), intent(out) :: du
        !! Parallel divergence of u on full grid

        integer:: n_fetch, l
        real(FP), allocatable, dimension(:) :: u_fetch
        real(FP), dimension(mesh%get_n_points()) :: uwrk1, uwrk2
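        ! psupp_fwd/psupp_bwd already carry the -1/vol and vol_stag weights
        ! (see complete_support_map). The bwd contribution needs the staggered
        ! field from the plane one step behind, hence the fetch with step=-1.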

        call getdata_fwdbwdplane(comm, -1, mesh_staggered%get_n_points(), u, &
                                 n_fetch, u_fetch)
        !$omp parallel
        call csr_times_vec(psupp_bwd, u_fetch, uwrk1)
        call csr_times_vec(psupp_fwd, u, uwrk2)
        !$omp do
        do l = 1, mesh%get_n_points()
            du(l) = uwrk1(l) + uwrk2(l)
        end do
        !$omp end do
        !$omp end parallel

        deallocate(u_fetch)

    end subroutine

    subroutine parallel_diffusion_supp(comm, mesh, mesh_staggered, &
                                       qsupp_fwd, qsupp_bwd, &
                                       psupp_fwd, psupp_bwd, u, du)
        !! Parallel diffusion (support operator scheme)
        integer, intent(in) :: comm
        !! MPI communicator
        type(mesh_cart_t), intent(in) :: mesh
        !! Mesh
        type(mesh_cart_t), intent(in) :: mesh_staggered
        !! Mesh at toroidally staggered (fwd) position
        type(csrmat_t), intent(in) :: qsupp_fwd
        !! Parallel gradient matrix (fwd direction)
        type(csrmat_t), intent(in) :: qsupp_bwd
        !! Parallel gradient matrix (bwd direction)
        type(csrmat_t), intent(in) :: psupp_fwd
        !! Parallel divergence matrix (fwd direction)
        type(csrmat_t), intent(in) :: psupp_bwd
        !! Parallel divergence matrix (bwd direction)
        real(FP), dimension(mesh%get_n_points()), intent(in) :: u
        !! Variable u
        real(FP), dimension(mesh%get_n_points()), intent(out) :: du
        !! Parallel diffusion

        real(FP), dimension(mesh_staggered%get_n_points()) :: pargrad
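        ! Discrete analogue of d/ds (du/ds): first the support-operator
        ! gradient onto the staggered mesh, then the matching divergence back
        ! onto the full mesh.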

        call parallel_gradient_supp(comm, mesh, mesh_staggered, &
                                    qsupp_fwd, qsupp_bwd, u, pargrad)
        call parallel_divergence_supp(comm,  mesh, mesh_staggered, &
                                      psupp_fwd, psupp_bwd, pargrad, du)

    end subroutine


    subroutine fci_norms(comm, mesh, vol, u, nrm1, nrm2, nrm_max)
        !! Computes the L1, L2 and Lsup norms of the solution
        !! Integration is weighted by the FCI flux-box volumes
        integer, intent(in) :: comm
        !! MPI communicator
        type(mesh_cart_t), intent(in) :: mesh
        !! Mesh
        real(FP), dimension(mesh%get_n_points()), intent(in) :: vol
        !! Flux box volumes
        real(FP), dimension(mesh%get_n_points()), intent(in) :: u
        !! Variable u
        real(FP), intent(out) :: nrm1
        !! L1 Norm
        real(FP), intent(out) :: nrm2
        !! L2 Norm
        real(FP), intent(out) :: nrm_max
        !! Supremum norm

        integer :: l, ierr
        real(FP) :: vol_tot
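        ! As implemented (volume-weighted over the local flux boxes, then
        ! reduced over all planes):
        !   nrm1    = sum(u*vol) / sum(vol)   (note: no abs(u), i.e. the
        !             volume-weighted mean, which doubles as a conservation check)
        !   nrm2    = sqrt( sum(u**2*vol) / sum(vol) )
        !   nrm_max = max(abs(u))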

        vol_tot = 0.0_FP
        nrm1 = 0.0_FP
        nrm2 = 0.0_FP
        nrm_max = 0.0_FP
        !$omp parallel private(l) reduction(+:nrm1, nrm2, vol_tot) &
        !$omp reduction(max: nrm_max)
        !$omp do
        do l = 1, mesh%get_n_points()
            vol_tot = vol_tot + vol(l)
            nrm1 = nrm1 + u(l) * vol(l)
            nrm2 = nrm2 + u(l)**2 * vol(l)
            nrm_max = max(nrm_max, abs(u(l)))
        enddo
        !$omp end do
        !$omp end parallel

        call MPI_allreduce(MPI_IN_PLACE, vol_tot, 1, &
                           MPI_FP, MPI_SUM, comm, ierr)
        call MPI_allreduce(MPI_IN_PLACE, nrm1, 1, &
                           MPI_FP, MPI_SUM, comm, ierr)
        call MPI_allreduce(MPI_IN_PLACE, nrm2, 1, &
                           MPI_FP, MPI_SUM, comm, ierr)
        call MPI_allreduce(MPI_IN_PLACE, nrm_max, 1, &
                           MPI_FP, MPI_MAX, comm, ierr)

        nrm1 = nrm1 / vol_tot
        nrm2 = sqrt(nrm2 / vol_tot)

    end subroutine

end module

module snapshots_test_diffusion_m
    !! Auxiliary module for test_diffusion for writing snapshots to file
    use netcdf
    use precision_m, only : FP
    use error_handling_m, only : handle_error_netcdf
    use mesh_cart_m, only : mesh_cart_t
    implicit none

    integer, private, save :: nsnaps
    integer, private, save :: nf90_id
    integer, private, save :: iddim_n_points, iddim_snaps
    integer, private, save :: id_tau, id_u_naive, id_u_supp

    public :: initialize_snapsfile
    public :: write_to_snapsfile
    public :: finalize_snapsfile
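    ! Typical call sequence (one file per MPI rank), e.g. as used by
    ! test_diffusion below:
    !   call initialize_snapsfile(mesh, 'snaps_00000.nc')
    !   call write_to_snapsfile(mesh, tau, u_naive, u_supp)   ! once per snapshot
    !   call finalize_snapsfile()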

contains

    subroutine initialize_snapsfile(mesh, filename)
        !! Creates a snapshot file and defines its dimensions and variables
        type(mesh_cart_t), intent(in) :: mesh
        !! Mesh
        character(len=*), intent(in) :: filename
        !! Filename of netcdf file

        integer :: nf90_stat

        ! create file
        nf90_stat = nf90_create(filename, NF90_NETCDF4, nf90_id )
        call handle_error_netcdf(nf90_stat, __LINE__, __FILE__)

        ! define dimensions
        nf90_stat = nf90_def_dim(nf90_id, 'n_points', &
                                 mesh%get_n_points(), iddim_n_points)
        call handle_error_netcdf(nf90_stat, __LINE__, __FILE__)

        nf90_stat = nf90_def_dim(nf90_id, 'snaps', NF90_UNLIMITED, iddim_snaps)
        call handle_error_netcdf(nf90_stat, __LINE__, __FILE__)

        ! Define variables
        nf90_stat = nf90_def_var(nf90_id, 'tau', NF90_DOUBLE, &
                                 iddim_snaps, id_tau)
        call handle_error_netcdf(nf90_stat, __LINE__, __FILE__)

        nf90_stat = nf90_def_var(nf90_id, 'u_naive', NF90_DOUBLE, &
                                 (/iddim_n_points, iddim_snaps/), id_u_naive)
        call handle_error_netcdf(nf90_stat, __LINE__, __FILE__)

        nf90_stat = nf90_def_var(nf90_id, 'u_supp', NF90_DOUBLE, &
                                 (/iddim_n_points, iddim_snaps/), id_u_supp)
        call handle_error_netcdf(nf90_stat, __LINE__, __FILE__)

        ! end definition phase
        nf90_stat = nf90_enddef(nf90_id)
        call handle_error_netcdf(nf90_stat, __LINE__, __FILE__)

        nsnaps = 0

    end subroutine

    subroutine write_to_snapsfile(mesh, tau, u_naive, u_supp)
        !! Writes variables to snapsfile
        type(mesh_cart_t), intent(in) :: mesh
        !! Mesh
        real(FP), intent(in) :: tau
        !! Time
        real(FP), dimension(mesh%get_n_points()), intent(in) :: u_naive
        !! Variable of naive solution
        real(FP), dimension(mesh%get_n_points()), intent(in) :: u_supp
        !! Variable of support operator solution

        integer :: offset_1d(1), offset_2d(2)

        integer :: nf90_stat

        nsnaps = nsnaps + 1

        offset_1d = (/nsnaps/)
        nf90_stat = nf90_put_var(nf90_id, id_tau, tau, offset_1d )
        call handle_error_netcdf(nf90_stat, __LINE__, __FILE__)

        offset_2d = (/1, nsnaps/)
        nf90_stat = nf90_put_var(nf90_id, id_u_naive, u_naive, offset_2d )
        call handle_error_netcdf(nf90_stat, __LINE__, __FILE__)

        nf90_stat = nf90_put_var(nf90_id, id_u_supp, u_supp, offset_2d )
        call handle_error_netcdf(nf90_stat, __LINE__, __FILE__)

    end subroutine

    subroutine finalize_snapsfile()
        !! Closes snapsfile

        integer :: nf90_stat

        nf90_stat = nf90_close(nf90_id)
        call handle_error_netcdf(nf90_stat, __LINE__, __FILE__)

    end subroutine

end module

program test_diffusion
    !! This program solves the heat equation with the methods provided by
    !! PARALLAX. It serves as an example program using the functionality
    !! provided by PARALLAX as well as an integration test and a diagnostic
    !! tool.
    use MPI
    use iso_fortran_env
    !$ use omp_lib
    use netcdf
    use parallax_build_info_m, only: git_hash, git_date
    use perf_m, only : perf_start, perf_stop, perf_print
    use precision_m, only : FP
    use error_handling_m, only : handle_error, handle_error_netcdf, error_info_t
    use status_codes_m, only : PARALLAX_WRN_GENERAL, PARALLAX_ERR_PARAMETERS
    use screen_io_m, only: get_stdout
    use comm_handling_m, only: is_master, get_communicator
    use constants_m, only : TWO_PI
    use equilibrium_m, only : equilibrium_t
    use equilibrium_factory_m, only: create_equilibrium, &
                                     get_equilibrium_identifier
    use mesh_cart_m, only : mesh_cart_t
    use fieldline_tracer_m, only : trace
    use map_factory_m, only : create_map_matrix
    use elementary_functions_m, only : gaussian
    use csrmat_m, only : csrmat_t
    use auxiliaries_test_diffusion_m, only : complete_support_map, &
        parallel_diffusion_shortley_weller, parallel_diffusion_supp, fci_norms
    use snapshots_test_diffusion_m, only : initialize_snapsfile, &
        write_to_snapsfile, finalize_snapsfile
#ifdef ENABLE_VTK
    use vtk_fortran, only : vtk_file
    use vis_vtk3d_m, only : write_vtk_mesh
#endif
    implicit none

    logical :: omp_on
    integer :: ierr, comm, rank, nprocs, num_threads
    integer :: io_error, nf90_stat, nf90_id
    character(len=150) :: errmsg
    character(len=5) :: rank_c

    class(equilibrium_t), allocatable :: equi
    type(mesh_cart_t) :: mesh
    ! Mesh at toroidal position phi_k
    type(mesh_cart_t) :: mesh_staggered
    ! Mesh at forward staggered toroidal position phi_{k+1/2} = phi_{k}+dphi/2
    type(mesh_cart_t), allocatable :: mesh_target
    ! Target meshes, used temporarily to establish map

    integer :: eq_id, l, t, isnaps, funit
    real(FP) :: x, y, phi, dphi, tau
    real(FP) :: vol_a, vol_b
    real(FP), allocatable, dimension(:) :: ds_fwd, ds_bwd, vol
    type(csrmat_t) :: qnaive_bwd, qnaive_fwd
    type(csrmat_t) :: qsupp_bwd, qsupp_fwd
    type(csrmat_t), allocatable :: psupp_fwd, psupp_bwd
    real(FP), allocatable, dimension(:) :: u_naive, u_supp, du
    real(FP) ::  nrm1_naive, nrm2_naive, nrm_max_naive
    real(FP) ::  nrm1_supp, nrm2_supp, nrm_max_supp
#ifdef ENABLE_VTK
    type(vtk_file) :: vtk2d, vtk3d
    integer :: vtk_stat
#endif

    ! Parameters controlling the case
    logical :: run_default_case = .false.
    character(len=64) :: geometry = 'CIRCULAR'
    integer :: nplanes = 4
    logical :: write_case_to_files = .false.
    logical :: write_vtk_files = .false.
    logical :: write_log_file = .false.

    ! Grid (within plane) parameters
    real(FP) :: spacing_f = 4.0E-3_FP
    integer :: size_neighbor = 2
    integer :: size_ghost_layer = 2
    integer :: reorder_size = 8
    logical :: extend_beyond_wall = .false.

    ! Parameters for map
    integer :: intorder = 3
    integer :: xorder = 1
    logical :: use_fixed_stencil = .false.
    logical :: use_gauss_quadrature = .false.

    ! Parameters for initial state
    real(FP) :: xc_gauss = 0.3_FP
    real(FP) :: yc_gauss = 0.0_FP
    real(FP) :: wx_gauss = 0.025_FP
    real(FP) :: wy_gauss = 0.025_FP

    ! Time-stepping parameters
    real(FP) :: dtau = 1.0E-2_FP
    integer :: nsnaps = 10
    integer :: nt_per_snaps = 100

    namelist / case_params / geometry, nplanes, write_case_to_files, &
                             write_vtk_files, write_log_file
    namelist / grid_params / spacing_f, size_neighbor, size_ghost_layer, &
                             reorder_size, extend_beyond_wall
    namelist / map_params / intorder, xorder, use_fixed_stencil, &
                            use_gauss_quadrature
    namelist / init_state_params / xc_gauss, yc_gauss, wx_gauss, wy_gauss
    namelist / tstep_params / dtau, nsnaps, nt_per_snaps

    ! Initialize MPI
    call MPI_init(ierr)
    call MPI_comm_rank(get_communicator(), rank, ierr)
    call MPI_comm_size(get_communicator(), nprocs, ierr)
    write(rank_c,'(I5.5)')rank

    ! Get OMP information
    omp_on = .false.
    num_threads = 0
    !$omp parallel
    !$omp single
    !$ omp_on = .true.
    !$ num_threads = omp_get_num_threads()
    !$omp end single
    !$omp end parallel

    ! Write out MPI and OMP information
    if (is_master()) then
        write(get_stdout(),*)''
        write(get_stdout(),*)'Running test_diffusion'
        write(get_stdout(),*)'git hash: ', git_hash
        write(get_stdout(),*)'git date: ', git_date
        write(get_stdout(),*)''
        write(get_stdout(),*)''
        write(get_stdout(),*)'This file was compiled by ', &
                       compiler_version(), ' using the options ', &
                       compiler_options()
        write(get_stdout(),*)''
        write(get_stdout(),*)''
        write(get_stdout(),*)'Number of MPI-processes = ',nprocs
        if (omp_on) then
            write(get_stdout(),*)'OMP switched on:'
            write(get_stdout(),*)'    Number of OMP-threads = ',num_threads
        else
            write(get_stdout(),*)'OMP switched off'
        endif
        write(get_stdout(),*)''
    endif

    ! Read and process input parameters
    open(newunit=funit, file='params.in', status='old', action='read', &
         iostat=io_error)
    if (io_error /= 0) then
        run_default_case = .true.
        call handle_error('No parameter file provided, running default case', &
                           PARALLAX_WRN_GENERAL, __LINE__, __FILE__)
    else
        read(funit, nml=case_params, iostat=io_error, iomsg=errmsg)
        if (io_error /= 0) then
            call handle_error(errmsg, PARALLAX_ERR_PARAMETERS, &
                              __LINE__, __FILE__)
        endif

        rewind(funit)
        read(funit, nml=grid_params, iostat=io_error, iomsg=errmsg)
        if (io_error /= 0) then
            call handle_error(errmsg, PARALLAX_ERR_PARAMETERS, &
                              __LINE__, __FILE__)
        endif

        rewind(funit)
        read(funit, nml=map_params, iostat=io_error, iomsg=errmsg)
        if (io_error /= 0) then
            call handle_error(errmsg, PARALLAX_ERR_PARAMETERS, &
                              __LINE__, __FILE__)
        endif

        rewind(funit)
        read(funit, nml=init_state_params, iostat=io_error, iomsg=errmsg)
        if (io_error /= 0) then
            call handle_error(errmsg, PARALLAX_ERR_PARAMETERS, &
                              __LINE__, __FILE__)
        endif

        rewind(funit)
        read(funit, nml=tstep_params, iostat=io_error, iomsg=errmsg)
        if (io_error /= 0) then
            call handle_error(errmsg, PARALLAX_ERR_PARAMETERS, &
                              __LINE__, __FILE__)
        endif

        close(funit)
    endif

    if (is_master()) then
        write(get_stdout(),nml=case_params)
        write(get_stdout(),nml=grid_params)
        write(get_stdout(),nml=map_params)
        write(get_stdout(),nml=init_state_params)
        write(get_stdout(),nml=tstep_params)
    endif

    ! Check if the number of planes agrees with the number of MPI processes
    if (nplanes /= nprocs) then
        call handle_error('Number of MPI processes does not match nplanes', &
                          PARALLAX_ERR_PARAMETERS, __LINE__, __FILE__, &
                          additional_info=error_info_t('nplanes, nprocs :', &
                                                       [nplanes, nprocs]))
    endif

    ! Create equilibrium
    eq_id = get_equilibrium_identifier(geometry)
    if (run_default_case) then
        write(get_stdout(),*)'Using default equilibrium'
        call create_equilibrium(equi, eq_id)
    else
        call create_equilibrium(equi, eq_id, 'params.in')
    endif

    ! Create mesh and staggered mesh
    phi = TWO_PI * rank / (1.0_FP * nprocs)
    dphi = TWO_PI / nprocs

    call mesh%init(equi, phi, lvl=1, &
                   spacing_f=spacing_f, size_neighbor=2, &
                   size_ghost_layer=1, extend_beyond_wall=extend_beyond_wall, &
                   dbgout=1)
    call mesh%reorder(reorder_size, dbgout=1)

    call mesh_staggered%init(equi, phi+dphi/2.0_FP, lvl=1, &
                   spacing_f=spacing_f, size_neighbor=2, &
                   size_ghost_layer=1, extend_beyond_wall=extend_beyond_wall, &
                   dbgout=1)
    call mesh_staggered%reorder(reorder_size, dbgout=1)

    ! Write mesh
    if (write_case_to_files) then
        nf90_stat = nf90_create('mesh_'//rank_c//'.nc', NF90_NETCDF4, nf90_id)
        call handle_error_netcdf(nf90_stat, __LINE__, __FILE__)
        call mesh%write_netcdf(nf90_id)
        nf90_stat = nf90_close(nf90_id)
        call handle_error_netcdf(nf90_stat, __LINE__, __FILE__)
    endif

    ! Compute flux box volumes
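    ! vol(l) approximates the volume of the flux box around point l, spanning
    ! half a toroidal step in each direction:
    !   vol ~ (fwd + bwd integrated flux expansion) * spacing_f**2 * B_tor
    ! (interpretation of the fluxexp argument assumed from its use here).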
    allocate(vol(mesh%get_n_points()))
    do l = 1, mesh%get_n_points()
        x = mesh%get_x(l)
        y = mesh%get_y(l)
        ! Flux box volumes
        call trace(x_init=x, y_init=y, phi_init=phi, &
            dphi=dphi/2.0_FP, equi=equi, fluxexp=vol_a)

        call trace(x_init=x, y_init=y, phi_init=phi, &
            dphi=-dphi/2.0_FP, equi=equi, fluxexp=vol_b)

        vol(l) = (vol_a + vol_b) * mesh%get_spacing_f()**2 * &
            equi%btor(x, y, phi)
    enddo

    ! Compute map matrices for naive scheme
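    ! The naive map matrices interpolate u from the neighbouring plane onto the
    ! footpoints of field lines traced one full toroidal step from this plane;
    ! the returned arclength arrays hold the corresponding field-line distances
    ! used in the Shortley-Weller stencil.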
    call perf_start('map_create_naive')
    allocate(ds_fwd(mesh%get_n_points()))
    allocate(ds_bwd(mesh%get_n_points()))
    allocate(mesh_target)
    comm = get_communicator()
    call mesh%exchange_mesh_mpi(comm, &
                                step=-1, &
                                mesh_received=mesh_target)
    call create_map_matrix(qnaive_bwd, comm, equi, &
            mesh, mesh_target, phi_direction=-1, &
            intorder=intorder, arclength_array=ds_fwd, dbgout=1)
    deallocate(mesh_target)

    allocate(mesh_target)
    call mesh%exchange_mesh_mpi(comm, &
                                step=1, &
                                mesh_received=mesh_target)
    call create_map_matrix(qnaive_fwd, comm, equi, &
            mesh, mesh_target, phi_direction=1, &
            intorder=intorder, arclength_array=ds_bwd, dbgout=1)
    deallocate(mesh_target)
    call perf_stop('map_create_naive')

    ! Compute map matrices for support scheme
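    ! The support maps target the staggered mesh and are assembled as flux-box
    ! surface integrals (flux_box_surface_integral=.true.);
    ! complete_support_map below rescales them into parallel gradients and
    ! builds the matching divergence matrices.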
    call perf_start('map_create_supp')
    allocate(mesh_target)
    call mesh%exchange_mesh_mpi(comm, &
                                step=1, &
                                mesh_received=mesh_target)
    call create_map_matrix(qsupp_fwd, comm, equi, &
            mesh_staggered, mesh_target, phi_direction=1, &
            intorder=intorder, xorder=xorder, &
            flux_box_surface_integral=.true., &
            use_fixed_stencil=use_fixed_stencil, &
            use_gauss_quadrature=use_gauss_quadrature, dbgout=1)
    deallocate(mesh_target)

    call create_map_matrix(qsupp_bwd, comm, equi, &
            mesh_staggered, mesh, phi_direction=-1, &
            intorder=intorder, xorder=xorder, &
            flux_box_surface_integral=.true., &
            use_fixed_stencil=use_fixed_stencil, &
            use_gauss_quadrature=use_gauss_quadrature, dbgout=1)

    call complete_support_map(comm, equi, mesh, mesh_staggered, &
                                vol, qsupp_fwd, qsupp_bwd, &
                                psupp_fwd, psupp_bwd)
    call perf_stop('map_create_supp')

    ! If enabled create VTK meshes for 3D visualisation
#ifdef ENABLE_VTK
    if (write_vtk_files) then
        vtk_stat = vtk2d%initialize(format='binary', &
                                    filename='vtk2d_'//rank_c//'.vtu', &
                                    mesh_topology='UnstructuredGrid')
        vtk_stat = vtk3d%initialize(format='binary', &
                                    filename='vtk3d_'//rank_c//'.vtu', &
                                    mesh_topology='UnstructuredGrid')

        call write_vtk_mesh(equi, mesh, phi, dphi/2.0_FP, dphi/2.0_FP, &
                            vtk2d, vtk3d, quadratic_hex = .false., dbgout = 1)

        vtk_stat = vtk3d%xml_writer%write_piece()
        vtk_stat = vtk3d%finalize()

        vtk_stat = vtk2d%xml_writer%write_piece()
        vtk_stat = vtk2d%finalize()
    endif
#else
    if (write_vtk_files) then
        call handle_error('Requesting vtk output, but code was compiled &
                          &without VTK. Recompile with &
                          &-DPARALLAX_ENABLE_VTK=ON', &
                          PARALLAX_WRN_GENERAL, __LINE__, __FILE__)
    endif
#endif


    ! Allocate and initialize the fields
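    ! Initial condition: a Gaussian blob on the plane of rank 0 and zero on all
    ! other planes, with the amplitude scaled by nplanes (presumably so that
    ! the toroidal average is a unit-amplitude Gaussian). Both schemes start
    ! from the same state.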
    allocate(u_naive(mesh%get_n_points()))
    allocate(u_supp(mesh%get_n_points()))
    !$omp parallel default(none) private(l, x, y) &
    !$omp shared(mesh, rank, nplanes, xc_gauss, yc_gauss, wx_gauss, wy_gauss, &
    !$omp        u_naive, u_supp)
    !$omp do
    do l= 1, mesh%get_n_points()
        x = mesh%get_x(l)
        y = mesh%get_y(l)
        if (rank == 0) then
            u_naive(l) = nplanes * gaussian(xc_gauss, wx_gauss, x) * &
                                   gaussian(yc_gauss, wy_gauss, y)
        else
            u_naive(l) = 0.0_FP
        endif
        u_supp(l) = u_naive(l)
    enddo
    !$omp end do
    !$omp end parallel

    ! Setup snapshot and log file
    tau = 0.0_FP
    if (write_case_to_files) then
        call initialize_snapsfile(mesh, 'snaps_'//rank_c//'.nc')
        call write_to_snapsfile(mesh, tau, u_naive, u_supp)
    endif

    ! Writes the log to job.out for the integration test in the CI pipeline
    if (write_log_file .and. is_master()) then
        open(newunit=funit, file='job.out', action='write', status='replace', &
             iostat=io_error)
        if (io_error /= 0) then
            call handle_error("Creating job.out file failed", &
                              PARALLAX_WRN_GENERAL, __LINE__, __FILE__)
        endif
    endif

    ! Timestepping
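    ! Explicit (forward) Euler step for both schemes, u <- u + dtau * D_par u.
    ! Being explicit, dtau has to be chosen small enough for the parallel
    ! diffusion operator to remain stable (no adaptive control here).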
    allocate(du(mesh%get_n_points()))
    do isnaps = 1, nsnaps
        call perf_start('timeloop')
        do t = 1, nt_per_snaps

            call perf_start('../shortley_weller')
            call parallel_diffusion_shortley_weller(comm, &
                    mesh, qnaive_fwd, qnaive_bwd, ds_fwd, ds_bwd, &
                    u_naive, du)
            !$omp parallel do default(none) shared(mesh, dtau, u_naive, du)
            do l = 1, mesh%get_n_points()
                u_naive(l) = u_naive(l) + dtau * du(l)
            end do
            !$omp end parallel do
            call perf_stop('../shortley_weller')

            call perf_start('../support')
            call parallel_diffusion_supp(comm, &
                    mesh, mesh_staggered, &
                    qsupp_fwd, qsupp_bwd, psupp_fwd, psupp_bwd, &
                    u_supp, du)
            !$omp parallel do default(none) shared(mesh, dtau, u_supp, du)
            do l = 1, mesh%get_n_points()
                u_supp(l) = u_supp(l) + dtau * du(l)
            end do
            !$omp end parallel do
            call perf_stop('../support')

            tau = tau + dtau
        enddo
        call perf_stop('timeloop')

        ! Compute norms
        call fci_norms(comm, mesh, vol, u_naive, &
                       nrm1_naive, nrm2_naive, nrm_max_naive)
        call fci_norms(comm, mesh, vol, u_supp, &
                       nrm1_supp, nrm2_supp, nrm_max_supp)

        if (is_master()) write(get_stdout(), 433) isnaps, tau, &
                nrm1_naive, nrm2_naive, nrm_max_naive, &
                nrm1_supp, nrm2_supp, nrm_max_supp
        433 FORMAT('----------------------------------------------------', / &
                   3X, 'isnaps = ', I4, ';', 5X, 'tau = ', ES14.6E3, / &
                   3X, 'nrm1_naive = ', ES14.6E3, 3X, 'nrm2_naive = ', ES14.6E3, &
                   3X, 'nrm_max_naive = ', ES14.6E3, / &
                   3X, 'nrm1_supp = ', ES14.6E3, 3X, 'nrm2_supp = ', ES14.6E3, &
                   3X, 'nrm_max_supp = ', ES14.6E3, / &
                   '----------------------------------------------------')

        if (write_case_to_files) then
            call write_to_snapsfile(mesh, tau, u_naive, u_supp)
        endif

        if (write_log_file .and. is_master()) then
            write(funit, 300)isnaps
            write(funit, 301)tau
            write(funit, 301)nrm1_naive
            write(funit, 301)nrm2_naive
            write(funit, 301)nrm_max_naive
            write(funit, 301)nrm1_supp
            write(funit, 301)nrm2_supp
            write(funit, 301)nrm_max_supp
            300 FORMAT(I8)
            301 FORMAT(ES22.14E3)
        endif
    enddo

    if (write_case_to_files) then
        call finalize_snapsfile()
    endif

    if (write_log_file .and. is_master()) then
        close(funit)
    endif

    call perf_print()

    call MPI_finalize(ierr)

end program