diff --git a/src/finalize_global_grid.jl b/src/finalize_global_grid.jl
index 3c3ffb7..a39e47d 100644
--- a/src/finalize_global_grid.jl
+++ b/src/finalize_global_grid.jl
@@ -11,11 +11,11 @@ Finalize the global grid (and also MPI by default).
 
 # Arguments
 !!! note "Advanced keyword arguments"
-    - `finalize_MPI::Bool=true`: whether to finalize MPI (`true`) or not (`false`).
+    - `finalize_MPI::Bool=(MPI.Initialized() && !MPI.Finalized())`: whether to finalize MPI (`true`) or not (`false`).
 
 See also: [`init_global_grid`](@ref)
 """
-function finalize_global_grid(;finalize_MPI::Bool=true)
+function finalize_global_grid(;finalize_MPI::Bool=(MPI.Initialized() && !MPI.Finalized()))
     check_initialized();
     free_gather_buffer();
     free_update_halo_buffers();
diff --git a/src/init_global_grid.jl b/src/init_global_grid.jl
index 2224131..40c960e 100644
--- a/src/init_global_grid.jl
+++ b/src/init_global_grid.jl
@@ -19,7 +19,7 @@ Initialize a Cartesian grid of MPI processes (and also MPI itself by default) de
     - `disp::Integer=1`: the displacement argument to `MPI.Cart_shift` in order to determine the neighbors.
     - `reorder::Integer=1`: the reorder argument to `MPI.Cart_create` in order to create the Cartesian process topology.
     - `comm::MPI.Comm=MPI.COMM_WORLD`: the input communicator argument to `MPI.Cart_create` in order to create the Cartesian process topology.
-    - `init_MPI::Bool=true`: whether to initialize MPI (`true`) or not (`false`).
+    - `init_MPI::Bool=!MPI.Initialized()`: whether to initialize MPI (`true`) or not (`false`).
     - `select_device::Bool=true`: whether to automatically select the device (GPU) (`true`) or not (`false`) if CUDA is functional. If `true`, it selects the device corresponding to the node-local MPI rank. This method of device selection suits both single and multi-device compute nodes and is recommended in general. It is also the default method of device selection of the *function* [`select_device`](@ref).
 
 For more information, refer to the documentation of MPI.jl / MPI.
@@ -39,7 +39,7 @@ Initialize a Cartesian grid of MPI processes (and also MPI itself by default) de
 
 See also: [`finalize_global_grid`](@ref), [`select_device`](@ref)
 """
-function init_global_grid(nx::Integer, ny::Integer, nz::Integer; dimx::Integer=0, dimy::Integer=0, dimz::Integer=0, periodx::Integer=0, periody::Integer=0, periodz::Integer=0, overlapx::Integer=2, overlapy::Integer=2, overlapz::Integer=2, disp::Integer=1, reorder::Integer=1, comm::MPI.Comm=MPI.COMM_WORLD, init_MPI::Bool=true, select_device::Bool=true, quiet::Bool=false)
+function init_global_grid(nx::Integer, ny::Integer, nz::Integer; dimx::Integer=0, dimy::Integer=0, dimz::Integer=0, periodx::Integer=0, periody::Integer=0, periodz::Integer=0, overlapx::Integer=2, overlapy::Integer=2, overlapz::Integer=2, disp::Integer=1, reorder::Integer=1, comm::MPI.Comm=MPI.COMM_WORLD, init_MPI::Bool=!MPI.Initialized(), select_device::Bool=true, quiet::Bool=false)
     if grid_is_initialized() error("The global grid has already been initialized.") end
     nxyz = [nx, ny, nz];
     dims = [dimx, dimy, dimz];
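
In effect, both entry points now detect the MPI state instead of assuming they own it: `init_global_grid` skips `MPI.Init()` when MPI is already initialized, and `finalize_global_grid` only triggers `MPI.Finalize()` when MPI is initialized and not yet finalized. A minimal usage sketch of the new defaults follows (not part of the diff; the grid size and the destructured return values are illustrative):

```julia
# Sketch of a driver that owns the MPI lifecycle itself (illustrative only).
using MPI, ImplicitGlobalGrid

MPI.Init()    # MPI is initialized by the caller ...

# ... so init_MPI defaults to !MPI.Initialized() == false and no second
# MPI.Init() is attempted.
me, dims, nprocs = init_global_grid(64, 64, 64)

# ... computation on the implicit global grid ...

# finalize_MPI defaults to MPI.Initialized() && !MPI.Finalized() == true here,
# so this call also finalizes MPI; pass finalize_MPI=false to keep MPI alive
# and call MPI.Finalize() yourself.
finalize_global_grid()
```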