MPI-SCATCI  2.0
An MPI version of SCATCI
LapackDiagonalizer_module.f90
! Copyright 2019
!
! For a comprehensive list of the developers that contributed to these codes
! see the UK-AMOR website.
!
! This file is part of UKRmol-in (UKRmol+ suite).
!
! UKRmol-in is free software: you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation, either version 3 of the License, or
! (at your option) any later version.
!
! UKRmol-in is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License
! along with UKRmol-in (in source/COPYING). Alternatively, you can also visit
! <https://www.gnu.org/licenses/>.

!> Diagonalizer type using LAPACK backend.
module lapackdiagonalizer_module

    use blas_lapack_gbl, only: blasint
    use precisn, only: longint, wp
    use const_gbl, only: stdout
    use baseintegral_module, only: baseintegral
    use basematrix_module, only: basematrix
    use consts_mpi_ci, only: pass_to_cdenprop, save_continuum_coeffs, save_l2_coeffs
    use diagonalizer_module, only: basediagonalizer
    use diagonalizerresult_module, only: diagonalizerresult
    use options_module, only: options
    use mpi_gbl, only: master, myrank, nprocs, mpi_mod_bcast, mpi_mod_barrier
    use writermatrix_module, only: writermatrix

    implicit none

    type, extends(basediagonalizer) :: lapackdiagonalizer
    contains
        procedure :: diagonalize => choose_matelem
        procedure :: diagonalize_generic
        procedure, public :: diagonalize_writer
        !procedure, public :: choose_matelem
    end type

contains

    subroutine choose_matelem (this, matrix_elements, num_eigenpair, dresult, all_procs, option, integrals)
        class(lapackdiagonalizer) :: this
        class(diagonalizerresult) :: dresult
        class(basematrix), intent(in) :: matrix_elements
        class(baseintegral), intent(in) :: integrals
        type(options), intent(in) :: option
        integer, intent(in) :: num_eigenpair
        logical, intent(in) :: all_procs

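        !Dispatch on the dynamic type of the matrix storage: at present only the
        !disk-backed WriterMatrix path is handled by the LAPACK backend.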
        select type(matrix_elements)
            type is (writermatrix)
                call this % diagonalize_writer(matrix_elements, num_eigenpair, dresult, all_procs, option, integrals)
            !class is (BaseMatrix)
            !    call this % diagonalize_generic(matrix_elements, num_eigenpair, result, all_procs)
            !class is (BaseMatrix)
            !    call this % DoNonSLEPCMat(matrix_elements, num_eigenpair, eigen, vecs, maxit, maxtol)
        end select
    end subroutine choose_matelem


    subroutine diagonalize_writer (this, matrix_elements, num_eigenpair, dresult, all_procs, option, integrals)
        class(lapackdiagonalizer) :: this
        class(diagonalizerresult) :: dresult
        class(writermatrix), intent(in) :: matrix_elements
        class(baseintegral), intent(in) :: integrals
        type(options), intent(in) :: option
        integer, intent(in) :: num_eigenpair
        logical, intent(in) :: all_procs

        real(wp) :: EONE
        real(wp), dimension(1) :: eshift
        real(wp), allocatable, target :: hmt(:,:), eig(:)
        integer :: matrix_unit
        integer :: matrix_size, error, ido, jdo
        integer :: num_matrix_elements_per_record, num_elems

        character(len=4), dimension(30) :: NAMP
        integer, dimension(10) :: NHD
        integer, dimension(20) :: KEYCSF, NHE
        real(kind=wp), dimension(41) :: dtnuc

        eshift = 0

        write (stdout, "('Diagonalization done with LAPACK')")
        write (stdout, "('Parameters:')")
        write (stdout, "('N: ',i8)") matrix_elements % get_matrix_size()
        write (stdout, "('Requested # of eigenpairs',i8)") num_eigenpair

        allocate(eig(matrix_elements % get_matrix_size()))
        matrix_size = 0

        if (myrank == master) then
            !Get our matrix unit
            matrix_unit = matrix_elements % get_matrix_unit()

            !Follow SCATCI's example even though we could get
            !this information from the matrix class itself
            rewind matrix_unit
            read (matrix_unit) matrix_size, num_matrix_elements_per_record, nhd, namp, nhe, dtnuc

            num_elems = matrix_elements % get_size()

            allocate(hmt(matrix_size,matrix_size))

            call hmat(hmt, 1, 1, eshift, 0, matrix_size, stdout, matrix_unit, num_matrix_elements_per_record, eone)
            call qldiag(matrix_size, hmt, eig)

            if (iand(option % vector_storage_method, pass_to_cdenprop) /= 0) then
                call dresult % export_header(option, integrals)
            end if
            call dresult % write_header(option, integrals)
            call dresult % handle_eigenvalues(eig(1:num_eigenpair), matrix_elements % diagonal, num_eigenpair, matrix_size)

            do ido = 1, num_eigenpair
                call dresult % handle_eigenvector(hmt(:,ido), matrix_size)
            end do
        end if

        call mpi_mod_barrier(error)

        if (iand(dresult % vector_storage, save_continuum_coeffs) /= 0 .and. &
            iand(dresult % vector_storage, save_l2_coeffs) == 0) then
            write (stdout, '(/," Only the continuum part of the eigenvectors will be saved to disk.")')
            write (stdout, '(" Index of the last continuum configuration: ",i10)') dresult % continuum_dimen
        end if

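        !If requested, re-broadcast the master's eigenpairs so that every MPI rank
        !can post-process them locally (one eigenvector at a time).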
        if (all_procs) then
            if (myrank /= master) then
                matrix_size = matrix_elements % get_matrix_size()
                allocate(hmt(matrix_size,matrix_size))
            end if

            call mpi_mod_barrier(error)
            call mpi_mod_bcast(eig, master)

            if (myrank /= master) then
                call dresult % handle_eigenvalues(eig(1:num_eigenpair), matrix_elements % diagonal, num_eigenpair, matrix_size)
            end if

            do ido = 1, num_eigenpair
                call mpi_mod_bcast(hmt(1:matrix_size,ido), master)
                if (myrank /= master) call dresult % handle_eigenvector(hmt(1:matrix_size,ido), matrix_size)
            end do
        end if

        if (iand(dresult % vector_storage, pass_to_cdenprop) /= 0) then
            !Eigenvectors:
            if (allocated(dresult % ci_vec % CV)) deallocate(dresult % ci_vec % CV)
            call move_alloc(hmt, dresult % ci_vec % CV)
            dresult % ci_vec % CV_is_scalapack = .false.

            !Eigenvalues and phases:
            call dresult % export_eigenvalues(eig(1:num_eigenpair), matrix_elements % diagonal, &
                                              num_eigenpair, matrix_size, dresult % ci_vec % ei, dresult % ci_vec % iphz)

            write (stdout, '(/," Eigensolutions exported into the CIVect structure.",/)')
        end if

        deallocate(eig)

    end subroutine diagonalize_writer


    subroutine diagonalize_generic (this, matrix_elements, num_eigenpair, eigen, vecs, all_procs)
        class(lapackdiagonalizer) :: this
        class(basematrix), intent(in) :: matrix_elements
        integer, intent(in) :: num_eigenpair
        real(wp), intent(inout) :: eigen(:)
        real(wp), intent(inout) :: vecs(:,:)
        logical, intent(in) :: all_procs

        real(wp) :: coeff
        real(wp), pointer :: pointer_mat(:,:)
        real(wp), allocatable, target :: matrix(:)
        real(wp), allocatable :: t_eigen(:), work(:)
        integer(blasint), allocatable :: iwork(:)
        integer(blasint) :: matrix_size, nelems, lwork, liwork
        integer :: num_elements, ido, jdo, i, j, ierror, info

        !Since we are dealing with LAPACK we do not need to worry about the optional arguments

        matrix_size = matrix_elements % get_matrix_size()

        !Write out the parameters
        write (stdout, "('Diagonalization done with LAPACK')")
        write (stdout, "('Parameters:')")
        write (stdout, "('N: ',i8)") matrix_size
        write (stdout, "('Requested # of eigenpairs',i8)") num_eigenpair
        write (stdout, "('Number of matrix elements',i8)") matrix_elements % get_size()

        nelems = int(matrix_size, longint) * int(matrix_size, longint)

        if (nprocs > 1) then
            stop "Serial diagonalizer used in MPI, use SCALAPACK instead"
        end if

        !Allocate our matrix; this potentially uses a lot of memory
        allocate(matrix(nelems), stat = ierror)

        if (ierror /= 0) then
            stop "LapackDiagonalizer --matrix-- Out of memory"
        end if

        !View the rank-1 storage as a rank-2 matrix via pointer bounds remapping
        pointer_mat(1:matrix_size,1:matrix_size) => matrix

        allocate(t_eigen(matrix_size), stat = ierror)

        if (ierror /= 0) then
            stop "LapackDiagonalizer --eigen-- Out of memory"
        end if

        num_elements = matrix_elements % get_size()
        t_eigen(:) = 0.0
        matrix = 0.0
        eigen(:) = 0.0

        !Fill the matrix with the coefficients
        do ido = 1, num_elements
            call matrix_elements % get_matrix_element(ido, i, j, coeff)
            pointer_mat(i,j) = pointer_mat(i,j) + coeff
            !if (matrix_size == 475) write(91,"(2i8,D16.8)") i, j, val(ido)
        end do

        lwork = -1
        liwork = -1

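        !A negative lwork/liwork asks dsyevd for a workspace query only: the optimal
        !sizes are returned in work(1) and iwork(1) without performing the diagonalization.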
        !Our starting work arrays
        allocate(work(1), iwork(1))

        write (stdout, "('Diagonalizing......')", advance = 'no')

        call dsyevd('V', 'L', matrix_size, matrix, matrix_size, t_eigen, work, lwork, iwork, liwork, info)

        if (info /= 0) then
            write (stdout, "(' dsyevd returned ',i8)") info
            stop 'lapack_dsyevd - workspace query failed'
        end if

        lwork = int(work(1))
        liwork = int(iwork(1))

        deallocate(work, iwork)
        allocate(work(lwork), iwork(liwork))

        call dsyevd('V', 'L', matrix_size, matrix, matrix_size, t_eigen, work, lwork, iwork, liwork, info)

        if (info /= 0) then
            write (stdout, "(' dsyevd returned ',i8)") info
            stop 'lapack_dsyevd - dsyevd failed'
        end if

        write (stdout, "('done!')")

        eigen(1:num_eigenpair) = t_eigen(1:num_eigenpair)

        do ido = 1, num_eigenpair
            do jdo = 1, matrix_size
                vecs(jdo,ido) = pointer_mat(jdo,ido)
            end do
        end do

        !Clean up
        deallocate(matrix)
        deallocate(t_eigen)
        deallocate(work, iwork)

    end subroutine diagonalize_generic

end module lapackdiagonalizer_module
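
A minimal calling sketch (not part of this file), assuming the WriterMatrix, DiagonalizerResult, Options and BaseIntegral objects have already been constructed elsewhere in MPI-SCATCI; the variable names below are purely illustrative:

    ! Hypothetical driver fragment; MPI start-up and object construction are assumed
    ! to be done elsewhere in the suite.
    type(lapackdiagonalizer)         :: diag
    type(writermatrix)               :: ham    ! Hamiltonian already written to its scratch unit
    type(diagonalizerresult)         :: res    ! receives eigenvalues and eigenvectors
    type(options)                    :: opts
    class(baseintegral), allocatable :: ints
    integer                          :: nvec

    ! ... build ham, res, opts, ints and choose nvec here ...

    ! Dispatches through choose_matelem; only a WriterMatrix reaches diagonalize_writer.
    call diag % diagonalize(ham, nvec, res, .true., opts, ints)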