MPI-SCATCI 2.0
An MPI version of SCATCI
Loading...
Searching...
No Matches
SolutionHandler_module.F90
Go to the documentation of this file.
1! Copyright 2019
2!
3! For a comprehensive list of the developers that contributed to these codes
4! see the UK-AMOR website.
5!
6! This file is part of UKRmol-in (UKRmol+ suite).
7!
8! UKRmol-in is free software: you can redistribute it and/or modify
9! it under the terms of the GNU General Public License as published by
10! the Free Software Foundation, either version 3 of the License, or
11! (at your option) any later version.
12!
13! UKRmol-in is distributed in the hope that it will be useful,
14! but WITHOUT ANY WARRANTY; without even the implied warranty of
15! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16! GNU General Public License for more details.
17!
18! You should have received a copy of the GNU General Public License
19! along with UKRmol-in (in source/COPYING). Alternatively, you can also visit
20! <https://www.gnu.org/licenses/>.
21
22!> \brief Solution handler module
23!> \authors A Al-Refaie
24!> \date 2017
25!>
26!> Manages the eigenvalue and eigenvector output. These are passed to individual subroutines
27!> by the caller; solution handler itself contains just a little basic information.
28!>
29!> \note 16/01/2019 - Jakub Benda: Uniform coding style and expanded documentation.
30!>
31module solutionhandler_module
32
    use blas_lapack_gbl, only: blasint
    use cdenprop_defs, only: civect, maxnuc
    use const_gbl, only: stdout
    use consts_mpi_ci, only: symtype_d2h, max_neig, name_len_max, save_l2_coeffs, save_continuum_coeffs
    use precisn, only: wp
    use mpi_gbl, only: master, mpi_xermsg, mpi_mod_isend, mpi_mod_recv
    use params, only: ccidata, c8stars, cblank, cpoly
    use scatci_routines, only: movep, civio, movew
    use baseintegral_module, only: baseintegral
    use diagonalizerresult_module, only: diagonalizerresult
    use options_module, only: options
    use parallelization_module, only: grid => process_grid
    use sweden_module, only: swedenintegral
    use ukrmol_module, only: ukrmolintegral
47
48 implicit none
49
50 !> \brief Solution writer
51 !> \authors A Al-Refaie, J Benda
52 !> \date 2017 - 2019
53 !>
54 !> Provides a comfortable interface to eigenvector disk output.
55 !> By default writes the *fort.25* file.
56 !>
    type, extends(diagonalizerresult) :: solutionhandler
        integer :: io_unit = 25                    !< Fortran unit for eigenvector output (default: fort.25).
        integer :: symmetry_type                   !< Symmetry group flag (copied from Options % sym_group_flag).
        integer :: num_eigenpairs                  !< Number of requested eigenpairs.
        real(wp), allocatable :: energy_shifts(:)  !< Per-eigenvalue energy shifts (ESHIFT namelist input), zero-padded.
        real(wp) :: core_energy                    !< Core energy obtained from the integrals object.
        real(wp) :: energy_shift                   !< NOTE(review): not referenced anywhere in this file -- confirm before removing.
        integer, allocatable :: phase(:)           !< Phase index array written alongside the eigenvalues.
        integer :: size_phase                      !< Number of meaningful (copied) entries in "phase".
        integer :: vec_dimen                       !< Dimension of the contracted Hamiltonian / eigenvectors.
        integer :: nciset                          !< Output CI set number (negative = output suppressed).
        integer :: current_eigenvector = 0         !< Running count of eigenvectors written so far.
        integer :: print_all_eigs = 0              !< Non-zero: print all eigenvalues, not just the first max_neig.
    contains
        procedure, public :: construct
        procedure, public :: write_header => write_header_sol
        procedure, public :: export_header => export_header_sol
        procedure, public :: export_eigenvalues
        procedure, public :: shift_eigenvalues
        procedure, public :: handle_eigenvalues => write_eigenvalues
        procedure, public :: handle_eigenvector => write_eigenvector
        procedure, public :: finalize_solutions => finalize_solutions_sol
        procedure, public :: read_from_file
        procedure, public :: destroy
    end type solutionhandler
82
83contains
84
85 !> \brief Set up solution handler
86 !> \authors A Al-Refaie
87 !> \date 2017
88 !>
89 !> Initialize internal parameters, most notably the number of the output unit.
90 !>
91 subroutine construct (this, opts)
92
93 class(solutionhandler) :: this
94 class(options), intent(in) :: opts
95
96 integer :: n
97
98 this % symmetry_type = opts % sym_group_flag
99 this % io_unit = opts % ci_output_unit
100 this % continuum_dimen = opts % seq_num_last_continuum_csf
101 this % vec_dimen = opts % contracted_mat_size
102 this % num_eigenpairs = opts % num_eigenpairs
103 this % vector_storage = opts % vector_storage_method
104 this % print_all_eigs = opts % print_flags(2)
105
106 if (.not.(allocated(this % energy_shifts))) then
107 allocate(this % energy_shifts(this % num_eigenpairs))
108 n = min(this % num_eigenpairs, size(opts % energy_shifts))
109 this % energy_shifts = 0.0_wp
110 this % energy_shifts(1:n) = opts % energy_shifts(1:n)
111 end if
112
113 end subroutine construct
114
115
116 !> \brief Release resources
117 !> \authors J Benda
118 !> \date 2019
119 !>
120 !> Release all allocated memory (phases and the CDENPROP data vector).
121 !>
122 subroutine destroy (this)
123
124 class(solutionhandler), intent(inout) :: this
125
126 if (allocated(this % energy_shifts)) then
127 deallocate (this % energy_shifts)
128 end if
129
130 if (allocated(this % phase)) then
131 deallocate (this % phase)
132 end if
133
134 call this % ci_vec % final_CV
135
136 end subroutine destroy
137
138
139 !> \brief Start writing eigenvector record
140 !> \authors A Al-Refaie
141 !> \date 2017
142 !>
143 !> Opens or rewinds the output file, positions it to the beginning of the requested record
144 !> and writes basic information about the solution, excluding phase information, eigenvalues
145 !> and eigenvectors.
146 !>
    subroutine write_header_sol (this, option, integrals)

        class(solutionhandler) :: this
        class(options), intent(in) :: option
        class(baseintegral), intent(in) :: integrals

        ! NOTE(review): initializers on the locals below give them the implicit SAVE
        ! attribute. "Nset" is reassigned before use and "n"/"tag" are never modified,
        ! so this is currently harmless, but separate assignment would be safer.
        integer :: Nset = 1
        integer :: print_header
        integer :: nnuc, nrec, i, buf(1), n = 1, tag = 0
        integer :: ierror, nth, nhd(10)

        !first lets quickly grab the core_energy while we're here
        this % core_energy = integrals % get_core_energy()

        ! Only the master of the MPI group performs any file I/O; a negative set
        ! number disables eigenvector output entirely.
        if (grid % grank /= master) return
        this % nciset = option % output_ci_set_number
        if (option % output_ci_set_number < 0) return

        ! If output unit dataset for these solutions follows after another dataset in the same unit, we need to wait
        ! for a signal from master of the MPI group responsible for writing of that dataset first, so that we do not
        ! access the unit concurrently.
        if (option % preceding_writer >= 0) then
            call mpi_mod_recv(option % preceding_writer, tag, buf, n)
        end if

        ! Copy the user phase indices into an array of the full vector dimension,
        ! padding any remainder with zeros.
        this % size_phase = min(size(option % phase_index), this % vec_dimen)

        if (.not.(allocated(this % phase))) then
            allocate(this % phase(this % vec_dimen))
            this % phase = 0
            this % phase(1:this % size_phase) = option % phase_index(1:this % size_phase)
        end if

        nset = option % output_ci_set_number
        nth = nset

        print_header = option % print_dataset_heading

        if (this % symmetry_type >= symtype_d2h) then

            ! Move to the correct position in the file. The subroutine movep can, in theory, change the value of "nth". However,
            ! OptionsSet should have made sure that the values of output_ci_set_number are such that this will never happen.
            ! (I.e. that we are writing always the next unit in line.) Otherwise we would risk locking of the MPI groups and/or
            ! overwriting some MPI group results by other MPI groups.
            call movep(this % io_unit, nth, ierror, print_header, stdout)

            if (ierror /= 0) then
                call mpi_xermsg('SolutionHandler_module', 'write_header_sol', &
                                ' ERROR POSITIONING FOR OUTPUT OF CI COEFFICIENTS', 1, 1)
            end if

            ! One record per nucleus, one per eigenpair, plus the phase/eigenvalue record.
            nset = nth
            nnuc = integrals % get_num_nuclei()
            nrec = nnuc + this % num_eigenpairs + 1

            ! Marker record followed by the dataset header (quantum numbers, sizes, core energy).
            write (this % io_unit) c8stars, cblank, cblank, ccidata
            write (this % io_unit) nset, nrec, option % diag_name(1:120), nnuc, option % contracted_mat_size, &
                                   this % num_eigenpairs, option % lambda, option % spin, option % spin_z, &
                                   option % num_electrons, this % core_energy, option % num_target_sym, &
                                   (option % lambda_continuum_orbitals_target(i), &
                                    option % num_continuum_orbitals_target(i), i = 1, option % num_target_sym)

            call integrals % write_geometries(this % io_unit)

        else

            ! Move to the correct position in the file
            call movew(this % io_unit, nth, ierror, print_header, stdout)

            if (ierror /= 0) then
                call mpi_xermsg('SolutionHandler_module', 'write_header_sol', &
                                ' ERROR POSITIONING FOR OUTPUT OF CI COEFFICIENTS', 2, 1)
            end if

            ! ALCHEMY-style header block; nhd(10) flags a partial (truncated) eigensolution.
            nhd( :) = 0
            nhd( 2) = option % contracted_mat_size
            nhd( 3) = this % num_eigenpairs
            nhd( 4) = option % num_syms
            nhd( 8) = option % contracted_mat_size
            nhd( 9) = integrals % get_num_nuclei()
            nhd(10) = merge(1, 0, this % num_eigenpairs < option % contracted_mat_size)

            ! We are using alchemy integrals
            write (this % io_unit) nth, nhd, option % diag_name(1:120), integrals % NHE, integrals % DTNUC

        end if

        write (stdout, "(' CI data will be stored as set number',I3)") nset

    end subroutine write_header_sol
236
237
238 !> \brief Write basic information into a CI vector
239 !> \authors A Al-Refaie, J Benda
240 !> \date 2017 - 2019
241 !>
242 !> Store general data related to the eigenvectors (like global quantum numbers, nuclear information
243 !> and number of eigenvectors) to the provided CDENPROP data vector. This is used when passing
244 !> data from MPI-SCATCI directly to the CDENPROP library.
245 !>
    subroutine export_header_sol (this, option, integrals)

        class(solutionhandler) :: this
        class(options), intent(in) :: option
        class(baseintegral), intent(in) :: integrals

        integer :: err, i

        !first lets quickly grab the core_energy while we're here
        this % core_energy = integrals % get_core_energy()

        ! Copy the user phase indices into an array of the full vector dimension,
        ! padding any remainder with zeros.
        this % size_phase = min(size(option % phase_index), this % vec_dimen)

        if (.not.(allocated(this % phase))) then
            allocate(this % phase(this % vec_dimen), stat = err)
            if (err /= 0) then
                call mpi_xermsg('SolutionHandler_module', 'export_header_sol', &
                                'Error allocating internal phase array', 1, 1)
            end if
            this % phase = 0
            this % phase(1:this % size_phase) = option % phase_index(1:this % size_phase)
        end if

        if (this % symmetry_type >= symtype_d2h) then
            ! Fill the CDENPROP CI vector header: dataset/record bookkeeping,
            ! global quantum numbers and the core energy.
            this % ci_vec % Nset = option % output_ci_set_number
            this % ci_vec % nrec = integrals % get_num_nuclei() + this % num_eigenpairs + 1
            this % ci_vec % name = option % diag_name
            this % ci_vec % nnuc = integrals % get_num_nuclei()
            this % ci_vec % nocsf = option % contracted_mat_size
            this % ci_vec % nstat = this % num_eigenpairs
            this % ci_vec % mgvn = option % lambda
            this % ci_vec % s = option % spin
            this % ci_vec % sz = option % spin_z
            this % ci_vec % nelt = option % num_electrons
            this % ci_vec % e0 = this % core_energy

            ! The CDENPROP nuclear arrays have fixed size maxnuc.
            if (this % ci_vec % nnuc > maxnuc) then
                call mpi_xermsg('SolutionHandler_module', 'export_header_sol', &
                                'Increase the value of maxnuc in cdenprop_defs and recompile', 2, 1)
            end if

            ! Nuclear geometry data live in the concrete integral type, so a
            ! downcast is needed; both supported types expose the same components.
            select type (integrals)
                type is (ukrmolintegral)
                    this % ci_vec % cname (1:this % ci_vec % nnuc) = integrals % cname (1:this % ci_vec % nnuc)
                    this % ci_vec % charge(1:this % ci_vec % nnuc) = integrals % charge(1:this % ci_vec % nnuc)
                    this % ci_vec % xnuc  (1:this % ci_vec % nnuc) = integrals % xnuc  (1:this % ci_vec % nnuc)
                    this % ci_vec % ynuc  (1:this % ci_vec % nnuc) = integrals % ynuc  (1:this % ci_vec % nnuc)
                    this % ci_vec % znuc  (1:this % ci_vec % nnuc) = integrals % znuc  (1:this % ci_vec % nnuc)
                type is (swedenintegral)
                    this % ci_vec % cname (1:this % ci_vec % nnuc) = integrals % cname (1:this % ci_vec % nnuc)
                    this % ci_vec % charge(1:this % ci_vec % nnuc) = integrals % charge(1:this % ci_vec % nnuc)
                    this % ci_vec % xnuc  (1:this % ci_vec % nnuc) = integrals % xnuc  (1:this % ci_vec % nnuc)
                    this % ci_vec % ynuc  (1:this % ci_vec % nnuc) = integrals % ynuc  (1:this % ci_vec % nnuc)
                    this % ci_vec % znuc  (1:this % ci_vec % nnuc) = integrals % znuc  (1:this % ci_vec % nnuc)
                class default
                    call mpi_xermsg('SolutionHandler_module', 'export_header_sol', &
                                    'Geometry export implemented only for UKRMOLIntegral, SWEDENIntegral', 3, 1)
            end select

            ! Echo the header to stdout for the log.
            write (stdout, '(/,"CI VECTOR HEADER DATA:")')
            write (stdout, '("----------------------")')
            write (stdout, *) this % ci_vec % nset, &
                              this % ci_vec % nrec, &
                              this % ci_vec % NAME, &
                              this % ci_vec % nnuc, &
                              this % ci_vec % nocsf, &
                              this % ci_vec % nstat, &
                              this % ci_vec % mgvn, &
                              this % ci_vec % s, &
                              this % ci_vec % sz, &
                              this % ci_vec % nelt, &
                              this % ci_vec % e0
            do i = 1, this % ci_vec % nnuc
                write (stdout, *) this % ci_vec % cname(i), &
                                  this % ci_vec % xnuc(i), &
                                  this % ci_vec % ynuc(i), &
                                  this % ci_vec % znuc(i), &
                                  this % ci_vec % charge(i)
            end do
            write (stdout, '("----------------------")')
        else
            call mpi_xermsg('SolutionHandler_module', 'export_header_sol', &
                            'Output of ALCHEMY header not implemented yet', 4, 1)
        end if

        write (stdout, '(" CI data will be stored in memory for use in subsequent subroutines")')

    end subroutine export_header_sol
333
334
335 !> \brief Shift eigenvalues according to the NAMELIST input ESHIFT
336 !> \authors T Meltzer
337 !> \date 2020
338 !>
339 !> Shift eigenvalues according to the NAMELIST input ESHIFT. This subroutine is called only
340 !> when some of the energy_shifts is non-zero.
341 !>
342 subroutine shift_eigenvalues (this, output_eigenvalues, num_eigenpairs)
343
344 class(solutionhandler) :: this
345 integer, intent(in) :: num_eigenpairs
346 real(wp), intent(inout) :: output_eigenvalues(num_eigenpairs)
347 integer :: ido, jdo, neig
348
349 if (grid % grank /= master) return
350 if (this % nciset < 0) return
351
352 output_eigenvalues = output_eigenvalues + this % energy_shifts
353
354 neig = this % num_eigenpairs
355 if (this % print_all_eigs == 0) then
356 neig = min(this % num_eigenpairs, max_neig)
357 end if
358
359 write (stdout, '("ENERGY SHIFTS TO BE APPLIED")')
360 do ido = 1, neig, 5
361 write (stdout, "(5F20.10)") (this % energy_shifts(jdo), jdo = ido, min(ido + 4, neig))
362 end do
363
364 end subroutine shift_eigenvalues
365
366
367 !> \brief Save Hamiltonian eigen-energies to disk file
368 !> \authors A Al-Refaie
369 !> \date 2017
370 !>
371 !> Continues the output operation commenced in \ref write_header_sol by writing the phase information,
372 !> eigenvalues and diagonals(?).
373 !>
374 subroutine write_eigenvalues (this, eigenvalues, diagonals, num_eigenpairs, vec_dimen)
375
376 class(solutionhandler) :: this
377 integer, intent(in) :: num_eigenpairs, vec_dimen
378 real(wp), intent(in) :: eigenvalues(num_eigenpairs), diagonals(vec_dimen)
379 real(wp) :: output_eigenvalues(num_eigenpairs)
380 integer :: ido, jdo, neig
381
382 if (grid % grank /= master) return
383 if (this % nciset < 0) return
384
385 output_eigenvalues = eigenvalues
386
387 neig = this % num_eigenpairs
388 if (this % print_all_eigs == 0) then
389 neig = min(this % num_eigenpairs, max_neig)
390 end if
391
392 if (sum(abs(this % energy_shifts)) .gt. 0_wp) then
393 write (stdout, '("ORIGINAL EIGEN-ENERGIES")')
394 do ido = 1, neig, 5
395 write (stdout, "(16X,5F20.10)") (output_eigenvalues(jdo) + this % core_energy, jdo = ido, min(ido + 4, neig))
396 end do
397 call this % shift_eigenvalues(output_eigenvalues, num_eigenpairs)
398 end if
399
400 write (stdout, '("EIGEN-ENERGIES")')
401 do ido = 1, neig, 5
402 write (stdout, "(16X,5F20.10)") (output_eigenvalues(jdo) + this % core_energy, jdo = ido, min(ido + 4, neig))
403 end do
404 write (this % io_unit) this % phase, output_eigenvalues, diagonals
405
406 end subroutine write_eigenvalues
407
408
409 !> \brief Write eigenvalues and phases to supplied arrays
410 !> \authors A Al-Refaie
411 !> \date 2017
412 !>
413 !> Copies (a subset of) eigenvalues and phases stored in the calling object to supplied
414 !> arrays. This is used when preparing diagonalization data for processing in CDENPROP.
415 !>
416 subroutine export_eigenvalues (this, eigenvalues, diagonals, num_eigenpairs, vec_dimen, ei, iphz)
417
418 class(solutionhandler) :: this
419 integer, intent(in) :: num_eigenpairs, vec_dimen
420 real(wp), intent(in) :: eigenvalues(num_eigenpairs), diagonals(vec_dimen)
421 real(wp), allocatable :: ei(:)
422 real(wp) :: output_eigenvalues(num_eigenpairs)
423 integer, allocatable :: iphz(:)
424 integer :: ido, jdo, err
425
426 output_eigenvalues = eigenvalues
427
428 if (sum(abs(this % energy_shifts)) .gt. 0_wp) then
429 call this % shift_eigenvalues(output_eigenvalues, num_eigenpairs)
430 end if
431
432 if (allocated(ei)) deallocate(ei)
433 allocate(ei, source = output_eigenvalues, stat = err)
434 if (err /= 0) then
435 call mpi_xermsg('SolutionHandler_module', 'export_eigenvalues', 'Error allocating ei', size(output_eigenvalues), 1)
436 end if
437
438 if (allocated(iphz)) deallocate(iphz)
439 allocate(iphz, source = this % phase, stat = err)
440 if (err /= 0) then
441 call mpi_xermsg('SolutionHandler_module', 'export_eigenvalues', 'Error allocating iphz', size(this % phase), 1)
442 end if
443
444 end subroutine export_eigenvalues
445
446
447 !> \brief Save Hamiltonian eigen-vectors to disk file
448 !> \authors A Al-Refaie
449 !> \date 2017
450 !>
451 !> Completes the output operation commenced in \ref write_header_sol by writing the eigenvectors
452 !> (one per a call).
453 !>
454 subroutine write_eigenvector (this, eigenvector, vec_dimen)
455
456 class(solutionhandler) :: this
457 integer, intent(in) :: vec_dimen
458 real(wp), intent(in) :: eigenvector(vec_dimen)
459 integer :: start_dimen, end_dimen, ido, j
460
461 if (grid % grank /= master) return
462 if (this % nciset < 0) return
463
464 this % current_eigenvector = this % current_eigenvector + 1
465
466 start_dimen = 1
467 end_dimen = this % vec_dimen
468
469 if (iand(this % vector_storage, save_l2_coeffs) == 0) then
470 end_dimen = this % continuum_dimen
471 end if
472 if (iand(this % vector_storage, save_continuum_coeffs) == 0) then
473 start_dimen = this % continuum_dimen + 1
474 end if
475
476 write (this % io_unit) this % current_eigenvector, (eigenvector(j), j = start_dimen, end_dimen)
477
478 end subroutine write_eigenvector
479
480
481 !> \brief Import eigensolution from file
482 !> \authors J Benda
483 !> \date 2022 - 2025
484 !>
485 !> Instead of calculating the eigenpairs, simply read them from a file written earlier.
486 !> Store them in a distributed CIvect structure for processing in CDENPROP.
487 !>
    subroutine read_from_file (this, option)

        class(solutionhandler) :: this
        class(options), intent(in) :: option
        integer :: print_header, ierror, nset, nrec, nnuc, nth, nocsf, nstat, mgvn, nelt, i, j, m
        integer(blasint) :: one = 1
        real(wp) :: e0, s, sz
        ! NOTE(review): "phase" is declared real(wp) here, while write_eigenvalues
        ! writes the integer array this%phase into the same record; for unformatted
        ! I/O the storage sizes must match for this read to be valid -- confirm.
        real(wp), allocatable :: vec(:), ei(:), phase(:)
        character(len=NAME_LEN_MAX) :: diag_name

        nset = option % output_ci_set_number
        nth = nset
        print_header = option % print_dataset_heading

        ! The CDENPROP vector is set up as a ScaLAPACK-distributed structure over
        ! the group's BLACS grid.
        this % ci_vec % CV_is_scalapack = .true.
        this % ci_vec % blacs_context = grid % gcntxt
        this % ci_vec % myrow = grid % mygrow
        this % ci_vec % mycol = grid % mygcol
        this % ci_vec % nprow = grid % gprows
        this % ci_vec % npcol = grid % gpcols

        call this % ci_vec % init_CV(this % vec_dimen, this % num_eigenpairs)

        allocate (this % ci_vec % ei(this % num_eigenpairs), ei(this % num_eigenpairs))
        allocate (this % ci_vec % iphz(this % num_eigenpairs), phase(this % num_eigenpairs))
        allocate (vec(this % vec_dimen))

        ! Only the group master touches the file; other ranks receive the data
        ! via the BLACS sum-reduction below.
        if (grid % grank == master) then
            call movep(this % io_unit, nth, ierror, print_header, stdout)

            if (ierror /= 0) then
                call mpi_xermsg('SolutionHandler_module', 'read_from_file', 'Failed to read eigensolution', 1, 1)
            end if

            ! read header to get the number of nuclei
            read (this % io_unit)
            read (this % io_unit) nset, nrec, diag_name, nnuc, nocsf, nstat, mgvn, s, sz, nelt, e0

            ! skip nuclear information
            do i = 1, nnuc
                read (this % io_unit)
            end do

            ! read the phases and eigenenergies
            ! NOTE(review): the dataset record holds vec_dimen phases (see
            ! write_eigenvalues), while "phase" here has num_eigenpairs elements;
            ! this matches only when all eigenpairs are kept -- confirm.
            read (this % io_unit) phase, ei
        else
            phase = 0
            ei = 0
        end if

#if defined(usempi) && defined(scalapack)
        ! broadcast the data to other tasks
        ! (non-masters contribute zeros, so the all-sum equals the master's data)
        ! NOTE(review): the element count passed is vec_dimen, but "phase"/"ei"
        ! have num_eigenpairs elements -- confirm this cannot overrun.
        call dgsum2d(grid % gcntxt, 'all', ' ', this % vec_dimen, one, phase, this % vec_dimen, -one, -one)
        call dgsum2d(grid % gcntxt, 'all', ' ', this % vec_dimen, one, ei, this % vec_dimen, -one, -one)
#endif

        this % ci_vec % iphz = phase
        this % ci_vec % ei = ei

        ! read all requested eigenvectors
        do j = 1, this % num_eigenpairs
            vec = 0
            ! master reads the eigenvector record (sequence number + coefficients)
            if (grid % grank == master) then
                read (this % io_unit) m, vec
            end if
#if defined(usempi) && defined(scalapack)
            ! master broadcasts the eigenvector
            call dgsum2d(grid % gcntxt, 'all', ' ', this % vec_dimen, one, vec, this % vec_dimen, -one, -one)
#endif
            ! all processes independently include the elements into their storage
            do i = 1, this % vec_dimen
                call this % ci_vec % set_CV_element(vec(i), i, j)
            end do
        end do

    end subroutine read_from_file
565
566
567 !> \brief Finalize writing the eigenvector disk file
568 !> \authors J Benda
569 !> \date 2019
570 !>
571 !> To avoid concurrent, conflicting accesses to the disk file in case that multiple datasets are to be written to it,
572 !> by different processes, MPI-SCATCI uses a semaphore system. Once a process finishes writing its results into a dataset,
573 !> it notifies the next process in line that wants to write to the same file unit (but into the subsequent dataset).
574 !>
575 subroutine finalize_solutions_sol (this, option)
576
577 class(solutionhandler) :: this
578 class(options), intent(in) :: option
579
580 integer :: buf(1), tag = 0, n = 1
581
582 flush(this % io_unit)
583 close(this % io_unit)
584
585 if (option % following_writer >= 0) then
586 call mpi_mod_isend(option % following_writer, buf, tag, n)
587 end if
588
589 end subroutine finalize_solutions_sol
590
591end module solutionhandler_module
MPI SCATCI Constants module.
integer, parameter read_from_file
Do not diagonalize, just read from file.
integer, parameter max_neig
Default limit on the number of eigenvalues printed.
integer, parameter name_len_max
The maximum length of a name.
integer, parameter symtype_d2h
This describes D_2_h symmetries.
integer, parameter save_l2_coeffs
Keep only L2 section of the eigenvectors (omit continuum channels)
integer, parameter save_continuum_coeffs
Omit L2 section of the eigenvectors (only keep continuum channels)