MPI-SCATCI 2.0
An MPI version of SCATCI
Loading...
Searching...
No Matches
Dispatcher_module.F90
Go to the documentation of this file.
1! Copyright 2019
2!
3! For a comprehensive list of the developers that contributed to these codes
4! see the UK-AMOR website.
5!
6! This file is part of UKRmol-in (UKRmol+ suite).
7!
8! UKRmol-in is free software: you can redistribute it and/or modify
9! it under the terms of the GNU General Public License as published by
10! the Free Software Foundation, either version 3 of the License, or
11! (at your option) any later version.
12!
13! UKRmol-in is distributed in the hope that it will be useful,
14! but WITHOUT ANY WARRANTY; without even the implied warranty of
15! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16! GNU General Public License for more details.
17!
18! You should have received a copy of the GNU General Public License
19! along with UKRmol-in (in source/COPYING). Alternatively, you can also visit
20! <https://www.gnu.org/licenses/>.
21
22!> \brief Dispatcher module
23!> \authors A Al-Refaie
24!> \date 2017
25!>
26!> This module provides the automatic selection of the correct classes needed by MPI SCATCI.
27!> These classes are the Integrals, Matrices and Diagonalizers
28!>
29!> \note 30/01/2017 - Ahmed Al-Refaie: Initial revision.
30!> \note 16/01/2019 - Jakub Benda: Uniform coding style and expanded documentation.
31!>
32module dispatcher_module
33
34 use precisn, only: longint, wp
35 use const_gbl, only: stdout
36 use mpi_gbl, only: nprocs, mpi_xermsg
38 use options_module, only: options
39 use basematrix_module, only: basematrix
40 use writermatrix_module, only: writermatrix
41 use diagonalizer_module, only: basediagonalizer
42 use lapackdiagonalizer_module, only: lapackdiagonalizer
43 use davidsondiagonalizer_module, only: davidsondiagonalizer
44#ifdef arpack
45 use arpackdiagonalizer_module, only: arpackdiagonalizer
46#endif
47 use matrixelement_module, only: matrixelementvector
48 use timing_module, only: master_timer
49 use baseintegral_module, only: baseintegral
50 use sweden_module, only: swedenintegral
51 use alchemy_module, only: alchemyintegral
52 use ukrmol_module, only: ukrmolintegral
53 !use BaseCorePotential_module, only: BaseCorePotential
54 !use MOLPROCorePotential_module, only: MOLPROCorePotential
55#ifdef slepc
56 use slepcmatrix_module, only: slepcmatrix, initialize_slepc
57 use slepcdiagonalizer_module, only: slepcdiagonalizer
58 !use SLEPCDavidsonDiagonalizer_module, only: SLEPCDavidsonDiagonalizer
59#endif
60#ifdef scalapack
61 use scalapackmatrix_module, only: scalapackmatrix
62 use scalapackdiagonalizer_module, only: scalapackdiagonalizer
63#endif
64#ifdef useelpa
65 use elpamatrix_module, only: elpamatrix
66 use elpadiagonalizer_module, only: elpadiagonalizer, initialize_elpa
67#endif
68
69 implicit none
70
71 public dispatchmatrixanddiagonalizer, dispatchintegral, initialize_libraries
72
73 private
74
75contains
76
    !> \brief Initialize libraries used by MPI-SCATCI
    !> \authors J Benda
    !> \date 2019
    !>
    !> Performs the one-time global setup required by the optional external solver
    !> libraries: SLEPc (when compiled in) and ELPA (when compiled in on top of
    !> ScaLAPACK). Both are only relevant in MPI builds, hence the "usempi" guards.
    !> In serial builds, or builds without these libraries, this is a no-op.
    !>
    subroutine initialize_libraries
#if defined(usempi) && defined(slepc)
        ! SLEPc must be initialized globally before any SLEPc object is created.
        call initialize_slepc
#endif
#if defined(usempi) && defined(scalapack) && defined(useelpa)
        ! ELPA likewise needs one-time global initialization (requires ScaLAPACK/BLACS).
        call initialize_elpa
#endif
    end subroutine initialize_libraries
91
92
93 !> \brief This subroutine determines which matrix format/diagonalizer pair to be used by SCATCI in build diagonalization
94 !> \authors A Al-Refaie
95 !> \date 2017
96 !>
97 !> It determines what type of diagonalizer based on the matrix size and number of eigenpairs
98 !> and whether to use the Legacy SCATCI diagonalizers for nprocs = 1 / -Dnompi option or to use an MPI one-
99 !>
100 !> \param[in] diag_choice From Options, used to force a particular diagonalizer.
101 !> \param[in] force_serial Whether to use serial method even in distributed run.
102 !> \param[in] matrix_size Size of Matrix. Used with number_of_eigenpairs to select the correct matrix format.
103 !> \param[in] number_of_eigenpairs No. of eigensolutions requested.
104 !> \param[out] matrix The selected matrix format.
105 !> \param[out] diagonalizer The selected diagonalizer.
106 !> \param[in] integral A constructed and loaded integral class. This is required to write out the matrix header.
107 !> \param[in] matrix_io The IO unit for the matrix header.
108 !>
109 subroutine dispatchmatrixanddiagonalizer (diag_choice, force_serial, matrix_size, number_of_eigenpairs, matrix, diagonalizer, &
110 integral, matrix_io)
111
112 use parallelization_module, only: grid => process_grid
113
114 class(basematrix), pointer, intent(out) :: matrix
115 class(basediagonalizer), pointer, intent(out) :: diagonalizer
116 class(baseintegral), intent(in) :: integral
117
118 integer, intent(in) :: diag_choice, force_serial
119 integer, intent(in) :: matrix_size, number_of_eigenpairs
120 integer, intent(in) :: matrix_io
121 integer :: final_choice
122
123 !Now if we forced a diagonalizer then use that, other wise use the rules
124 if (diag_choice /= scatci_decide) then
125 final_choice = diag_choice
126 else
127 if (number_of_eigenpairs > real(matrix_size) * 0.2) then ! More the 20% of eigenvectors requires a dense diagonalizer
128 final_choice = dense_diag
129 else if (number_of_eigenpairs <= 3) then ! Less than three, we use davidson
130 final_choice = davidson_diag
131 else !Inbetween we use an iterative library like ARPACK or SLEPC
132 final_choice = iterative_diag
133 end if
134 end if
135
136#ifdef usempi
137 !If we are dealing with an MPI diagonalization then we select the appropriate version of the diagonalizer
138 if (nprocs > 1 .and. force_serial == 0) then
139 select case (final_choice)
140 case (dense_diag) ! The MPI DENSE diagonalization is SCALAPACK
141#ifdef scalapack
142#ifdef useelpa
143 write (stdout, "('ELPA chosen as diagonalizer of choice. Hohoho!')")
144 if (grid % gprows > matrix_size .or. grid % gpcols > matrix_size) then
145 write (stdout, "('Too many processes for ELPA (BLACS grid size > matrix size). Defaulting to SCALAPACK.')")
146 write (stdout, "(3(a,i0))") 'nprocs = ', grid%gprows, '×', grid%gpcols, ' and matrix_size = ', matrix_size
147 allocate(scalapackmatrix::matrix)
148 allocate(scalapackdiagonalizer::diagonalizer)
149 else
150 allocate(elpamatrix::matrix)
151 allocate(elpadiagonalizer::diagonalizer)
152 end if
153#else
154 write (stdout, "('ScaLAPACK chosen as diagonalizer of choice. Nice!')")
155 allocate(scalapackmatrix::matrix)
156 allocate(scalapackdiagonalizer::diagonalizer)
157#endif
158#else
159 call mpi_xermsg('Dispatcher_module', 'DispatchMatrixAndDiagonalizer', &
160 'ScaLAPACK needed for dense diagonalization!', 1, 1)
161#endif
162 case (davidson_diag) !Use the SCATCI Davidson diagonalizer
163 write (stdout, "('Davidson chosen as diagonalizer of choice. Sweet!')")
164#ifdef slepc
165 write (stdout, "('SLEPc chosen as diagonalizer of choice. Amazing!')")
166 allocate(slepcmatrix::matrix)
167 allocate(slepcdiagonalizer::diagonalizer)
168#elif defined(scalapack)
169#ifdef useelpa
170 write (stdout, "('Compiled without SLEPc, defaulting to ELPA')")
171 if (grid % gprows > matrix_size .or. grid % gpcols > matrix_size) then
172 write (stdout, "('Too many processes for ELPA (BLACS grid size > matrix size). Defaulting to SCALAPACK.')")
173 write (stdout, "(3(a,i0))") 'nprocs = ', grid%gprows, '×', grid%gpcols, ' and matrix_size = ', matrix_size
174 allocate(scalapackmatrix::matrix)
175 allocate(scalapackdiagonalizer::diagonalizer)
176 else
177 allocate(elpamatrix::matrix)
178 allocate(elpadiagonalizer::diagonalizer)
179 end if
180#else
181 write (stdout, "('Compiled without SLEPc, defaulting to ScaLAPACK')")
182 allocate(scalapackmatrix::matrix)
183 allocate(scalapackdiagonalizer::diagonalizer)
184#endif
185#else
186 call mpi_xermsg('Dispatcher_module', 'DispatchMatrixAndDiagonalizer', &
187 'SLEPc or ScaLAPACK needed for distributed diagonalization!', 1, 1)
188#endif
189 case (iterative_diag) ! ITERATIVE uses SLEPC
190#ifdef slepc
191 write (stdout, "('SLEPc chosen as diagonalizer of choice. Amazing!')")
192 allocate(slepcmatrix::matrix)
193 allocate(slepcdiagonalizer::diagonalizer)
194#elif defined(scalapack)
195#ifdef useelpa
196 write (stdout, "('Compiled without SLEPc, defaulting to ELPA')")
197 if (grid % gprows > matrix_size .or. grid % gpcols > matrix_size) then
198 write (stdout, "('Too many processes for ELPA (BLACS grid size > matrix size). Defaulting to SCALAPACK.')")
199 write (stdout, "(3(a,i0))") 'nprocs = ', grid%gprows, '×', grid%gpcols, ' and matrix_size = ', matrix_size
200 allocate(scalapackmatrix::matrix)
201 allocate(scalapackdiagonalizer::diagonalizer)
202 else
203 allocate(elpamatrix::matrix)
204 allocate(elpadiagonalizer::diagonalizer)
205 end if
206#else
207 write (stdout, "('Compiled without SLEPc, defaulting to ScaLAPACK')")
208 allocate(scalapackmatrix::matrix)
209 allocate(scalapackdiagonalizer::diagonalizer)
210#endif
211#else
212 call mpi_xermsg('Dispatcher_module', 'DispatchMatrixAndDiagonalizer', &
213 'SLEPc or ScaLAPACK needed for distributed diagonalization!', 1, 1)
214#endif
215 case default
216 write (stdout, "('That is not a diagonalizer I would have chosen tbh, lets not do it')")
217 call mpi_xermsg('Dispatcher_module', 'DispatchMatrixAndDiagonalizer', &
218 'Invalid diagonalizer flag chosen', 1, 1)
219 end select
220 return
221 end if
222#endif
223
224 select case (final_choice)
225 case (dense_diag) !Use our rolled LAPACK diagonalizer
226 write (stdout, "('LAPACK chosen as diagonalizer of choice. Nice!')")
227 !allocate(MatrixElementVector::matrix)
228 call integral % write_matrix_header(matrix_io, matrix_size)
229 allocate(writermatrix::matrix)
230 allocate(lapackdiagonalizer::diagonalizer)
231 case (davidson_diag) !Use the SCATCI Davidson diagonalizer
232 write (stdout, "('Davidson chosen as diagonalizer of choice. Sweet!')")
233 call integral % write_matrix_header(matrix_io, matrix_size)
234 allocate(writermatrix::matrix)
235 allocate(davidsondiagonalizer::diagonalizer)
236 case (iterative_diag) !Use the SCATCI ARPACK diagonalizer
237#ifdef arpack
238 write (stdout, "('ARPACK chosen as diagonalizer of choice. Amazing!')")
239 call integral % write_matrix_header(matrix_io, matrix_size)
240 allocate(writermatrix::matrix)
241 allocate(arpackdiagonalizer::diagonalizer)
242#else
243 write (stdout, "('ARPACK not available - using LAPACK. Sorry!')")
244 call integral % write_matrix_header(matrix_io, matrix_size)
245 allocate(writermatrix::matrix)
246 allocate(lapackdiagonalizer::diagonalizer)
247#endif
248 case default
249 write (stdout, "('That is not a diagonalizer I would have chosen tbh, lets not do it')")
250 stop "Invalid diagonalizer flag chosen"
251 end select
252
253 end subroutine dispatchmatrixanddiagonalizer
254
255
256 !> \brief This subroutine dispatches the correct integral class based on simple parameters
257 !> \authors A Al-Refaie
258 !> \date 2017
259 !>
260 !> \param[in] sym_group_flag Which symmetry group, this determines either ALCHEMY or SWEDEN/UKRMOL+.
261 !> \param[in] ukrmolplus_integrals For D_2h_, determines either SWEDEN or UKRMOL+.
262 !> \param[out] integral The resultant integral object.
263 !>
264 subroutine dispatchintegral (sym_group_flag, ukrmolplus_integrals, integral)
265 class(baseintegral), pointer, intent(out) :: integral
266 integer, intent(in) :: sym_group_flag
267 logical, intent(in) :: ukrmolplus_integrals
268
269 if (sym_group_flag /= symtype_d2h) then
270 allocate(alchemyintegral::integral)
271 else
272 if (ukrmolplus_integrals) then
273 allocate(ukrmolintegral::integral)
274 else
275 allocate(swedenintegral::integral)
276 end if
277 end if
278
279 end subroutine dispatchintegral
280
281
282 !subroutine DispatchECP(ecp_type,ecp_filename,vals_defined,num_targ_symm,num_targ_states_symm,spatial_symm,spin_symm,ecp)
283 ! integer,intent(in) :: ecp_type
284 ! character(len=line_len),intent(in) :: ecp_filename
285 ! logical,intent(in) :: vals_defined
286 ! integer,intent(in) :: num_targ_symm,num_targ_states_symm(num_targ_symm),spatial_symm(num_targ_symm),spin_symm(num_targ_symm)
287 ! class(BaseCorePotential),pointer,intent(out) :: ecp
288 !
289 ! if(ecp_type == ECP_TYPE_NULL) then
290 ! ecp => null()
291 ! return
292 ! else if(vals_defined == .true.) then
293 ! select case(ecp_type)
294 ! case(ECP_TYPE_MOLPRO)
295 ! allocate(MOLPROCorePotential::ecp)
296 ! case default
297 ! stop "Unrecognized ECP type"
298 ! end select
299 !
300 ! call ecp%construct(num_targ_symm,num_targ_states_symm,spatial_symm,spin_symm)
301 !
302 ! call ecp%parse_ecp(ecp_filename)
303 ! else
304 ! write(stdout,"('TARGET SPATIAL SYMMETRY AND MULTIPLICITY NOT DEFINED IN INPUT')")
305 ! stop "TARGET SPATIAL SYMMETRY AND MULTIPLICITY NOT DEFINED IN INPUT"
306 ! endif
307 !
308 !end subroutine DispatchECP
309
310end module dispatcher_module
MPI SCATCI Constants module.
integer, parameter :: scatci_decide
SCATCI chooses the diagonalizer.
integer, parameter :: iterative_diag
Force an iterative (like ARPACK) diagonalizer.
integer, parameter :: davidson_diag
Force a Davidson diagonalizer.
integer, parameter :: dense_diag
Force a dense diagonalizer (all eigenvalues).
integer, parameter :: symtype_d2h
This describes D_2h symmetries.