|
integer(mpiint) function | mpi_gbl::mpi_mod_comm (comm_opt) |
| Comm or default comm. More...
|
|
subroutine | mpi_gbl::mpi_mod_rank (rank, comm) |
| Wrapper around MPI_Comm_rank. More...
|
|
subroutine | mpi_gbl::mpi_mod_barrier (error, comm) |
| Interface to the routine MPI_BARRIER. More...
|
|
subroutine | mpi_gbl::mpi_xermsg (mod_name, routine_name, err_msg, err, level) |
| Analogue of the xermsg routine. This routine uses xermsg to first print out the error message and then either aborts the program or exits the routine depending on the level of the message (warning/error) which is defined in the same way as in xermsg. More...
|
|
subroutine | mpi_gbl::check_mpi_running |
| This is a lightweight routine which aborts the program if MPI is not found running. More...
|
|
subroutine | mpi_gbl::mpi_mod_print_info (u) |
| Display information about current MPI setup. More...
|
|
subroutine | mpi_gbl::mpi_mod_start (do_stdout, allow_shared_memory) |
| Initializes MPI, assigns the rank for each process and finds out how many processors we're using. It also maps the current floating point precision (kind=cfp) to the corresponding MPI numeric type. This routine also sets the unit for standard output that each rank will use (the value of stdout). In case of serial run the value stdout is not set here and is left to the default value input_unit as specified in the module const. More...
|
|
subroutine | mpi_gbl::setup_intranode_local_mem_communicator |
| It is not used anywhere in the code yet but it might become useful later on. It creates intra-node communicators without the memory sharing capability. More...
|
|
logical function | mpi_gbl::strings_are_same (str1, str2, length) |
|
subroutine | mpi_gbl::mpi_mod_finalize |
| Terminates the MPI session and stops the program. It is a blocking routine. More...
|
|
subroutine | mpi_gbl::mpi_mod_create_block_array (this, n, elemtype) |
| Set up a new MPI type describing a long array composed of blocks. More...
|
|
subroutine | mpi_gbl::mpi_mod_free_block_array (this) |
| Release MPI handle of the block array type. More...
|
|
subroutine | mpi_gbl::mpi_mod_bcast_logical (val, from, comm) |
|
subroutine | mpi_gbl::mpi_mod_bcast_int32 (val, from, comm) |
|
subroutine | mpi_gbl::mpi_mod_bcast_int64 (val, from, comm) |
|
subroutine | mpi_gbl::mpi_mod_bcast_int32_array (val, from, comm) |
|
subroutine | mpi_gbl::mpi_mod_bcast_int64_array (val, from, comm) |
|
subroutine | mpi_gbl::mpi_mod_bcast_int32_3d_array (val, from, comm) |
|
subroutine | mpi_gbl::mpi_mod_bcast_int64_3d_array (val, from, comm) |
|
subroutine | mpi_gbl::mpi_mod_bcast_wp (val, from, comm) |
|
subroutine | mpi_gbl::mpi_mod_bcast_ep (val, from, comm) |
|
subroutine | mpi_gbl::mpi_mod_bcast_wp_array (val, from, comm) |
|
subroutine | mpi_gbl::mpi_mod_bcast_ep_array (val, from, comm) |
|
subroutine | mpi_gbl::mpi_mod_bcast_wp_3d_array (val, from, comm) |
|
subroutine | mpi_gbl::mpi_mod_bcast_ep_3d_array (val, from, comm) |
|
subroutine | mpi_gbl::mpi_mod_bcast_character (val, from, comm) |
|
subroutine | mpi_gbl::mpi_mod_bcast_character_array (val, from, comm) |
|
subroutine | mpi_gbl::mpi_mod_send_wp_array (to, buffer, tag, n, comm) |
|
subroutine | mpi_gbl::mpi_mod_send_ep_array (to, buffer, tag, n, comm) |
|
subroutine | mpi_gbl::mpi_mod_send_int32_array (to, buffer, tag, n, comm) |
|
subroutine | mpi_gbl::mpi_mod_send_int64_array (to, buffer, tag, n, comm) |
|
subroutine | mpi_gbl::mpi_mod_isend_int32_array (to, buffer, tag, n, comm) |
|
subroutine | mpi_gbl::mpi_mod_isend_int64_array (to, buffer, tag, n, comm) |
|
subroutine | mpi_gbl::mpi_mod_recv_wp_array (from, tag, buffer, n, comm) |
|
subroutine | mpi_gbl::mpi_mod_recv_ep_array (from, tag, buffer, n, comm) |
|
subroutine | mpi_gbl::mpi_mod_recv_int32_array (from, tag, buffer, n, comm) |
|
subroutine | mpi_gbl::mpi_mod_recv_int64_array (from, tag, buffer, n, comm) |
|
subroutine | mpi_gbl::mpi_mod_send_wp_array_dynamic (to, buffer, tag, comm) |
|
subroutine | mpi_gbl::mpi_mod_send_ep_array_dynamic (to, buffer, tag, comm) |
|
subroutine | mpi_gbl::mpi_mod_recv_wp_array_dynamic (from, tag, buffer, n, comm) |
|
subroutine | mpi_gbl::mpi_mod_recv_ep_array_dynamic (from, tag, buffer, n, comm) |
|
subroutine | mpi_gbl::mpi_mod_file_open_read (filename, fh, ierr, comm) |
|
subroutine | mpi_gbl::mpi_mod_file_set_view_wp (fh, disp, ierr, wp_dummy) |
| Sets view for a file containing floating point numbers with kind=wp. More...
|
|
subroutine | mpi_gbl::mpi_mod_file_set_view_ep (fh, disp, ierr, ep_dummy) |
| Sets view for a file containing floating point numbers with kind=ep. More...
|
|
subroutine | mpi_gbl::mpi_mod_file_read_wp (fh, buffer, buflen, ierr) |
|
subroutine | mpi_gbl::mpi_mod_file_read_ep (fh, buffer, buflen, ierr) |
|
subroutine | mpi_gbl::mpi_mod_file_close (fh, ierr) |
|
subroutine | mpi_gbl::mpi_reduce_inplace_sum_wp (buffer, nelem) |
|
subroutine | mpi_gbl::mpi_reduce_inplace_sum_ep (buffer, nelem) |
|
subroutine | mpi_gbl::mpi_reduce_sum_wp (src, dest, nelem) |
|
subroutine | mpi_gbl::mpi_reduce_sum_ep (src, dest, nelem) |
|
subroutine | mpi_gbl::mpi_reduceall_sum_wp (src, dest, nelem, comm) |
|
subroutine | mpi_gbl::mpi_reduceall_sum_ep (src, dest, nelem, comm) |
|
subroutine | mpi_gbl::mpi_reduceall_max_int32 (src, dest, comm_opt) |
| Choose largest elements among processes from 32-bit integer arrays. More...
|
|
subroutine | mpi_gbl::mpi_reduceall_max_int64 (src, dest, comm_opt) |
| Choose largest elements among processes from 64-bit integer arrays. More...
|
|
subroutine | mpi_gbl::mpi_reduceall_min_int32 (src, dest, comm_opt) |
| Choose smallest elements among processes from 32-bit integer arrays. More...
|
|
subroutine | mpi_gbl::mpi_reduceall_min_int64 (src, dest, comm_opt) |
| Choose smallest elements among processes from 64-bit integer arrays. More...
|
|
subroutine | mpi_gbl::naive_mpi_reduce_inplace_sum_wp (buffer, nelem, split) |
|
subroutine | mpi_gbl::naive_mpi_reduce_inplace_sum_ep (buffer, nelem, split) |
|
subroutine | mpi_gbl::mpi_reduceall_inplace_sum_wp (buffer, nelem, comm) |
|
subroutine | mpi_gbl::mpi_reduceall_inplace_sum_ep (buffer, nelem, comm) |
|
subroutine | mpi_gbl::mpi_mod_rotate_arrays_around_ring_wp (elem_count, int_array, wp_array, max_num_elements, comm_opt) |
| This will rotate a combination of the number of elements, an integer array and float array once around in a ring formation. More...
|
|
subroutine | mpi_gbl::mpi_mod_rotate_arrays_around_ring_ep (elem_count, int_array, ep_array, max_num_elements, comm_opt) |
| This will rotate a combination of the number of elements, an integer array and float array once around in a ring formation. More...
|
|
subroutine | mpi_gbl::mpi_mod_rotate_wp_arrays_around_ring (elem_count, wp_array, max_num_elements, communicator) |
| This will rotate a combination of the number of elements, an integer array and float array once around in a ring formation. More...
|
|
subroutine | mpi_gbl::mpi_mod_rotate_ep_arrays_around_ring (elem_count, ep_array, max_num_elements, communicator) |
| This will rotate a combination of the number of elements, an integer array and float array once around in a ring formation. More...
|
|
subroutine | mpi_gbl::mpi_mod_rotate_int_arrays_around_ring (elem_count, int_array, max_num_elements, communicator) |
| This will rotate a combination of the number of elements, an integer array and float array once around in a ring formation. More...
|
|
subroutine | mpi_gbl::mpi_mod_allgather_int32 (send, receive, comm) |
| All gather for one integer sent from every process. More...
|
|
subroutine | mpi_gbl::mpi_mod_allgather_int64 (send, receive, comm) |
|
subroutine | mpi_gbl::mpi_mod_allgatherv_in_place_int32_array (sendcount, receive, comm) |
|
subroutine | mpi_gbl::mpi_mod_allgatherv_in_place_int64_array (sendcount, receive, comm) |
|
subroutine | mpi_gbl::mpi_mod_allgatherv_in_place_real64_array (sendcount, receive, comm) |
|
subroutine | mpi_gbl::mpi_mod_allgatherv_in_place_real128_array (sendcount, receive, comm) |
|
subroutine | mpi_gbl::mpi_mod_allgather_character (send, receive, comm) |
| All gather for one string of length MPI_MAX_PROCESSOR_NAME sent from every process. More...
|
|
subroutine | mpi_gbl::mpi_mod_gatherv_int32 (sendbuf, recvbuf, recvcounts, displs, to, comm) |
| Gather for 32-bit integer array sent from every process and received on a given rank. More...
|
|
subroutine | mpi_gbl::mpi_mod_gatherv_int64 (sendbuf, recvbuf, recvcounts, displs, to, comm) |
| Gather for 64-bit integer array sent from every process and received on a given rank. More...
|
|
subroutine | mpi_gbl::mpi_mod_gatherv_real64 (sendbuf, recvbuf, recvcounts, displs, to, comm) |
| Gather for 64-bit float array sent from every process and received on a given rank. More...
|
|
subroutine | mpi_gbl::mpi_mod_gatherv_real128 (sendbuf, recvbuf, recvcounts, displs, to, comm) |
| Gather for 128-bit float array sent from every process and received on a given rank. More...
|
|
real(wp) function | mpi_gbl::mpi_mod_wtime () |
| Interface to the routine MPI_WTIME. More...
|
|
subroutine | mpi_gbl::mpi_mod_file_open_write (filename, fh, ierr, comm) |
|
subroutine | mpi_gbl::mpi_mod_file_write_int32 (fh, n) |
| Let master process write one 4-byte integer to stream file. More...
|
|
subroutine | mpi_gbl::mpi_mod_file_write_array1d_int32 (fh, array, length) |
| Let master process write array of 4-byte integers to stream file. More...
|
|
subroutine | mpi_gbl::mpi_mod_file_write_array2d_int32 (fh, array, length1, length2) |
| Let master process write 2d array of 4-byte integers to stream file. More...
|
|
subroutine | mpi_gbl::mpi_mod_file_write_real64 (fh, x) |
| Let master process write an 8-byte real to stream file. More...
|
|
subroutine | mpi_gbl::mpi_mod_file_write_array1d_real64 (fh, array, length) |
| Let master process write array of 8-byte reals to stream file. More...
|
|
subroutine | mpi_gbl::mpi_mod_file_write_array2d_real64 (fh, array, length1, length2) |
| Let master process write 2d array of 8-byte reals to stream file. More...
|
|
subroutine | mpi_gbl::mpi_mod_file_write_array3d_real64 (fh, array, length1, length2, length3) |
| Let master process write 3d array of 8-byte reals to stream file. More...
|
|
subroutine | mpi_gbl::mpi_mod_file_set_size (fh, sz) |
| Collectively resize file. More...
|
|
subroutine | mpi_gbl::mpi_mod_file_write_darray2d_real64 (fh, m, n, nprow, npcol, rb, cb, locA, myrows, mycols, comm) |
| Write block-cyclically distributed matrix to a stream file. More...
|
|
subroutine | mpi_gbl::mpi_mod_scan_sum_int32 (src, dst, comm_opt) |
| Wrapper around MPI_Scan. More...
|
|
subroutine | mpi_gbl::mpi_mod_scan_sum_int64 (src, dst, comm_opt) |
| Wrapper around MPI_Scan. More...
|
|
|
integer, parameter, public | mpi_gbl::mpiint = kind(0) |
|
integer, parameter, public | mpi_gbl::mpiaddr = longint |
|
integer, parameter, public | mpi_gbl::mpicnt = longint |
|
integer, parameter, public | mpi_gbl::mpiofs = longint |
|
logical, public, protected | mpi_gbl::mpi_running = .false. |
| This can be set only once at the beginning of the program run by a call to MPI_MOD_START. The value of this variable is accessed by MPI_MOD_CHECK which aborts the program if MPI is not running. This variable stays .false. if the library was compiled without MPI support. More...
|
|
logical, public, protected | mpi_gbl::mpi_started = .false. |
| This is similar to mpi_running, but will be always set to true once MPI_MOD_START is called, whether or not the library was built with MPI support. So, while mpi_running can be used to determine whether the program is running under MPI, the variable mpi_started indicates whether the call to MPI_MOD_START had been already done (even in serial build). More...
|
|
integer(kind=mpiint), public, protected | mpi_gbl::nprocs = -1 |
| Total number of processes. Set by mpi_mod_start. More...
|
|
integer(kind=mpiint), public, protected | mpi_gbl::myrank = -1 |
| The local process number (rank). Set by mpi_mod_start. More...
|
|
integer(kind=mpiint), public, protected | mpi_gbl::local_nprocs = 1 |
| Total number of processes on the node this task is bound to. Set by mpi_mod_start. More...
|
|
integer(kind=mpiint), public, protected | mpi_gbl::local_rank = 0 |
| The local process number (rank) on the node this task is bound to. Set by mpi_mod_start. More...
|
|
logical, public, protected | mpi_gbl::shared_enabled = .false. |
| The routine mpi_start creates the shared_communicator which groups all tasks sitting on the same node. If we're using MPI 3.0 standard the shared_communicator enables creation of shared memory areas. In this case shared_enabled is set to .true. If we're not using MPI 3.0 then shared_enabled is set to .false. More...
|
|
integer(kind=mpiint), parameter, public | mpi_gbl::master = 0 |
| ID of the master process. More...
|
|
integer, public, protected | mpi_gbl::max_data_count = huge(dummy_32bit_integer) |
| Largest integer (e.g. number of elements) supported by the standard MPI 32-bit integer API. More...
|
|
character(len=mpi_max_processor_name), parameter, public, protected | mpi_gbl::procname = "N/A" |
| Name of the processor on which the current process is running. More...
|
|
integer(kind=mpiint), public, protected | mpi_gbl::shared_communicator = -1 |
| Intra-node communicator created by mpi_mod_start. More...
|
|