!--------------------------------------------------------------------------------------------------!
! CP2K: A general program to perform molecular dynamics simulations !
! Copyright 2000-2024 CP2K developers group <https://cp2k.org> !
! !
! SPDX-License-Identifier: GPL-2.0-or-later !
!--------------------------------------------------------------------------------------------------!
! **************************************************************************************************
!> \brief Given the response wavefunctions obtained by the application
!> of the (rxp), p, and ((dk-dl)xp) operators,
!> the current density vector (jx, jy, jz) is computed here
!> for the 3 directions of the magnetic field (Bx, By, Bz)
!> \par History
!> created 02-2006 [MI]
!> \author MI
! **************************************************************************************************
MODULE qs_linres_current
USE ao_util, ONLY: exp_radius_very_extended
USE basis_set_types, ONLY: get_gto_basis_set,&
gto_basis_set_p_type,&
gto_basis_set_type
USE cell_types, ONLY: cell_type,&
pbc
USE cp_array_utils, ONLY: cp_2d_i_p_type,&
cp_2d_r_p_type
USE cp_control_types, ONLY: dft_control_type
USE cp_dbcsr_api, ONLY: &
dbcsr_add_block_node, dbcsr_convert_offsets_to_sizes, dbcsr_copy, dbcsr_create, &
dbcsr_deallocate_matrix, dbcsr_distribution_type, dbcsr_finalize, dbcsr_get_block_p, &
dbcsr_p_type, dbcsr_set, dbcsr_type, dbcsr_type_antisymmetric, dbcsr_type_no_symmetry
USE cp_dbcsr_cp2k_link, ONLY: cp_dbcsr_alloc_block_from_nbl
USE cp_dbcsr_operations, ONLY: cp_dbcsr_plus_fm_fm_t,&
cp_dbcsr_sm_fm_multiply,&
dbcsr_allocate_matrix_set,&
dbcsr_deallocate_matrix_set
USE cp_fm_basic_linalg, ONLY: cp_fm_scale_and_add,&
cp_fm_trace
USE cp_fm_struct, ONLY: cp_fm_struct_create,&
cp_fm_struct_release,&
cp_fm_struct_type
USE cp_fm_types, ONLY: cp_fm_create,&
cp_fm_release,&
cp_fm_set_all,&
cp_fm_to_fm,&
cp_fm_type
USE cp_log_handling, ONLY: cp_get_default_logger,&
cp_logger_get_default_io_unit,&
cp_logger_type,&
cp_to_string
USE cp_output_handling, ONLY: cp_p_file,&
cp_print_key_finished_output,&
cp_print_key_should_output,&
cp_print_key_unit_nr
USE cp_realspace_grid_cube, ONLY: cp_pw_to_cube
USE cube_utils, ONLY: compute_cube_center,&
cube_info_type,&
return_cube
USE gaussian_gridlevels, ONLY: gridlevel_info_type
USE grid_api, ONLY: &
GRID_FUNC_AB, GRID_FUNC_ADBmDAB_X, GRID_FUNC_ADBmDAB_Y, GRID_FUNC_ADBmDAB_Z, &
GRID_FUNC_ARDBmDARB_XX, GRID_FUNC_ARDBmDARB_XY, GRID_FUNC_ARDBmDARB_XZ, &
GRID_FUNC_ARDBmDARB_YX, GRID_FUNC_ARDBmDARB_YY, GRID_FUNC_ARDBmDARB_YZ, &
GRID_FUNC_ARDBmDARB_ZX, GRID_FUNC_ARDBmDARB_ZY, GRID_FUNC_ARDBmDARB_ZZ, &
collocate_pgf_product
USE input_constants, ONLY: current_gauge_atom
USE input_section_types, ONLY: section_get_ivals,&
section_get_lval,&
section_vals_get_subs_vals,&
section_vals_type
USE kinds, ONLY: default_path_length,&
default_string_length,&
dp
USE mathconstants, ONLY: twopi
USE memory_utilities, ONLY: reallocate
USE message_passing, ONLY: mp_para_env_type
USE orbital_pointers, ONLY: ncoset
USE particle_list_types, ONLY: particle_list_type
USE particle_methods, ONLY: get_particle_set
USE particle_types, ONLY: particle_type
USE pw_env_types, ONLY: pw_env_get,&
pw_env_type
USE pw_methods, ONLY: pw_axpy,&
pw_integrate_function,&
pw_scale,&
pw_zero
USE pw_pool_types, ONLY: pw_pool_type
USE pw_types, ONLY: pw_c1d_gs_type,&
pw_r3d_rs_type
USE qs_environment_types, ONLY: get_qs_env,&
qs_environment_type
USE qs_kind_types, ONLY: get_qs_kind,&
get_qs_kind_set,&
qs_kind_type
USE qs_linres_atom_current, ONLY: calculate_jrho_atom,&
calculate_jrho_atom_coeff,&
calculate_jrho_atom_rad
USE qs_linres_op, ONLY: fac_vecp,&
fm_scale_by_pbc_AC,&
ind_m2,&
set_vecp,&
set_vecp_rev
USE qs_linres_types, ONLY: current_env_type,&
get_current_env
USE qs_matrix_pools, ONLY: qs_matrix_pools_type
USE qs_mo_types, ONLY: get_mo_set,&
mo_set_type
USE qs_neighbor_list_types, ONLY: get_iterator_info,&
neighbor_list_iterate,&
neighbor_list_iterator_create,&
neighbor_list_iterator_p_type,&
neighbor_list_iterator_release,&
neighbor_list_set_p_type
USE qs_operators_ao, ONLY: build_lin_mom_matrix,&
rRc_xyz_der_ao
USE qs_rho_types, ONLY: qs_rho_get
USE qs_subsys_types, ONLY: qs_subsys_get,&
qs_subsys_type
USE realspace_grid_types, ONLY: realspace_grid_desc_p_type,&
realspace_grid_desc_type,&
realspace_grid_type,&
rs_grid_create,&
rs_grid_mult_and_add,&
rs_grid_release,&
rs_grid_zero
USE rs_pw_interface, ONLY: density_rs2pw
USE task_list_methods, ONLY: distribute_tasks,&
rs_distribute_matrix,&
task_list_inner_loop
USE task_list_types, ONLY: atom_pair_type,&
reallocate_tasks,&
task_type
#include "./base/base_uses.f90"
IMPLICIT NONE
PRIVATE
! *** Public subroutines ***
PUBLIC :: current_build_current, current_build_chi, calculate_jrho_resp
CHARACTER(len=*), PARAMETER, PRIVATE :: moduleN = 'qs_linres_current'
TYPE box_type
INTEGER :: n = -1
REAL(dp), POINTER, DIMENSION(:, :) :: r => NULL()
END TYPE box_type
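! Totally antisymmetric Levi-Civita tensor: Levi_Civita(i,j,k) = +1 for cyclic
! permutations of (1,2,3), -1 for anticyclic ones, 0 whenever two indices coincide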
REAL(dp), DIMENSION(3, 3, 3), PARAMETER :: Levi_Civita = RESHAPE((/ &
0.0_dp, 0.0_dp, 0.0_dp, 0.0_dp, 0.0_dp, -1.0_dp, 0.0_dp, 1.0_dp, 0.0_dp, &
0.0_dp, 0.0_dp, 1.0_dp, 0.0_dp, 0.0_dp, 0.0_dp, -1.0_dp, 0.0_dp, 0.0_dp, &
0.0_dp, -1.0_dp, 0.0_dp, 1.0_dp, 0.0_dp, 0.0_dp, 0.0_dp, 0.0_dp, 0.0_dp/), (/3, 3, 3/))
CONTAINS
! **************************************************************************************************
!> \brief First calculate the density matrices; for each component of the current
!> there are 3 of them because of the r-dependent terms.
!> Next, collocate them on the grid to obtain J(r).
!> In the GAPW case only the soft part needs to be collocated on the PW grid,
!> while the rest goes on Lebedev grids.
!> The contributions to the shift and to the susceptibility are
!> calculated separately and added only at the end.
!> The calculation of the shift tensor is performed at the positions of the atoms
!> and at other selected points in real space, summing up the contributions
!> from the PW-grid current density and the local densities.
!> Spline interpolation is used
!> \param current_env the linear-response current-density environment
!> \param qs_env the QS environment
!> \param iB direction (1=x, 2=y, 3=z) of the external magnetic field
!> \author MI
!> \note
!> The susceptibility is needed to compute the G=0 term of the shift
!> in reciprocal space. \chi_{ij} = \int (r x Jj)_i
!> (where Jj is the current density generated by the field in direction j)
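!> e.g. for i = x: (r x Jj)_x = y Jj_z - z Jj_y, hence \chi_{xj} = \int (y Jj_z - z Jj_y) dr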
!> To calculate the susceptibility on the PW grids it is necessary to apply
!> the position operator yet another time.
!> This cannot be done directly on the full J(r) because it is not localized
!> Therefore it is done state by state (see linres_nmr_shift)
! **************************************************************************************************
SUBROUTINE current_build_current(current_env, qs_env, iB)
!
TYPE(current_env_type) :: current_env
TYPE(qs_environment_type), POINTER :: qs_env
INTEGER, INTENT(IN) :: iB
CHARACTER(LEN=*), PARAMETER :: routineN = 'current_build_current'
CHARACTER(LEN=default_path_length) :: ext, filename, my_pos
INTEGER :: handle, idir, iiB, iiiB, ispin, istate, &
j, jstate, nao, natom, nmo, nspins, &
nstates(2), output_unit, unit_nr
INTEGER, ALLOCATABLE, DIMENSION(:) :: first_sgf, last_sgf
INTEGER, DIMENSION(:), POINTER :: row_blk_sizes
LOGICAL :: append_cube, gapw, mpi_io
REAL(dp) :: dk(3), jrho_tot_G(3, 3), &
jrho_tot_R(3, 3), maxocc, scale_fac
REAL(dp), ALLOCATABLE, DIMENSION(:, :) :: ddk
REAL(dp), EXTERNAL :: DDOT
TYPE(cell_type), POINTER :: cell
TYPE(cp_2d_i_p_type), DIMENSION(:), POINTER :: center_list
TYPE(cp_fm_type), ALLOCATABLE, DIMENSION(:) :: p_psi1, psi1
TYPE(cp_fm_type), DIMENSION(:), POINTER :: psi0_order
TYPE(cp_fm_type), DIMENSION(:, :), POINTER :: psi1_D, psi1_p, psi1_rxp
TYPE(cp_fm_type), POINTER :: mo_coeff
TYPE(cp_logger_type), POINTER :: logger
TYPE(dbcsr_distribution_type), POINTER :: dbcsr_dist
TYPE(dbcsr_p_type), DIMENSION(:), POINTER :: density_matrix0, density_matrix_a, &
density_matrix_ii, density_matrix_iii
TYPE(dft_control_type), POINTER :: dft_control
TYPE(mo_set_type), DIMENSION(:), POINTER :: mos
TYPE(mp_para_env_type), POINTER :: para_env
TYPE(neighbor_list_set_p_type), DIMENSION(:), &
POINTER :: sab_all
TYPE(particle_list_type), POINTER :: particles
TYPE(particle_type), DIMENSION(:), POINTER :: particle_set
TYPE(pw_c1d_gs_type), DIMENSION(:), POINTER :: jrho1_g
TYPE(pw_env_type), POINTER :: pw_env
TYPE(pw_pool_type), POINTER :: auxbas_pw_pool
TYPE(pw_r3d_rs_type) :: wf_r
TYPE(pw_r3d_rs_type), DIMENSION(:), POINTER :: jrho1_r
TYPE(qs_kind_type), DIMENSION(:), POINTER :: qs_kind_set
TYPE(qs_matrix_pools_type), POINTER :: mpools
TYPE(qs_subsys_type), POINTER :: subsys
TYPE(realspace_grid_desc_type), POINTER :: auxbas_rs_desc
TYPE(section_vals_type), POINTER :: current_section
CALL timeset(routineN, handle)
!
NULLIFY (logger, current_section, density_matrix0, density_matrix_a, &
density_matrix_ii, density_matrix_iii, cell, dft_control, mos, &
particle_set, pw_env, auxbas_rs_desc, auxbas_pw_pool, &
para_env, center_list, mo_coeff, jrho1_r, jrho1_g, &
psi1_p, psi1_D, psi1_rxp, sab_all, qs_kind_set)
logger => cp_get_default_logger()
output_unit = cp_logger_get_default_io_unit(logger)
!
!
CALL get_current_env(current_env=current_env, &
center_list=center_list, &
psi1_rxp=psi1_rxp, &
psi1_D=psi1_D, &
psi1_p=psi1_p, &
psi0_order=psi0_order, &
nstates=nstates, &
nao=nao)
!
!
CALL get_qs_env(qs_env=qs_env, &
cell=cell, &
dft_control=dft_control, &
mos=mos, &
mpools=mpools, &
pw_env=pw_env, &
para_env=para_env, &
subsys=subsys, &
sab_all=sab_all, &
particle_set=particle_set, &
qs_kind_set=qs_kind_set, &
dbcsr_dist=dbcsr_dist)
CALL qs_subsys_get(subsys, particles=particles)
gapw = dft_control%qs_control%gapw
nspins = dft_control%nspins
natom = SIZE(particle_set, 1)
!
! allocate temporary arrays
ALLOCATE (psi1(nspins), p_psi1(nspins))
DO ispin = 1, nspins
CALL cp_fm_create(psi1(ispin), psi0_order(ispin)%matrix_struct)
CALL cp_fm_create(p_psi1(ispin), psi0_order(ispin)%matrix_struct)
CALL cp_fm_set_all(psi1(ispin), 0.0_dp)
CALL cp_fm_set_all(p_psi1(ispin), 0.0_dp)
END DO
!
!
CALL dbcsr_allocate_matrix_set(density_matrix0, nspins)
CALL dbcsr_allocate_matrix_set(density_matrix_a, nspins)
CALL dbcsr_allocate_matrix_set(density_matrix_ii, nspins)
CALL dbcsr_allocate_matrix_set(density_matrix_iii, nspins)
!
! prepare for allocation
ALLOCATE (first_sgf(natom))
ALLOCATE (last_sgf(natom))
CALL get_particle_set(particle_set, qs_kind_set, &
first_sgf=first_sgf, &
last_sgf=last_sgf)
ALLOCATE (row_blk_sizes(natom))
CALL dbcsr_convert_offsets_to_sizes(first_sgf, row_blk_sizes, last_sgf)
DEALLOCATE (first_sgf)
DEALLOCATE (last_sgf)
!
!
DO ispin = 1, nspins
!
!density_matrix0
ALLOCATE (density_matrix0(ispin)%matrix)
CALL dbcsr_create(matrix=density_matrix0(ispin)%matrix, &
name="density_matrix0", &
dist=dbcsr_dist, matrix_type=dbcsr_type_no_symmetry, &
row_blk_size=row_blk_sizes, col_blk_size=row_blk_sizes, &
nze=0, mutable_work=.TRUE.)
CALL cp_dbcsr_alloc_block_from_nbl(density_matrix0(ispin)%matrix, sab_all)
!
!density_matrix_a
ALLOCATE (density_matrix_a(ispin)%matrix)
CALL dbcsr_copy(density_matrix_a(ispin)%matrix, density_matrix0(ispin)%matrix, &
name="density_matrix_a")
!
!density_matrix_ii
ALLOCATE (density_matrix_ii(ispin)%matrix)
CALL dbcsr_copy(density_matrix_ii(ispin)%matrix, density_matrix0(ispin)%matrix, &
name="density_matrix_ii")
!
!density_matrix_iii
ALLOCATE (density_matrix_iii(ispin)%matrix)
CALL dbcsr_copy(density_matrix_iii(ispin)%matrix, density_matrix0(ispin)%matrix, &
name="density_matrix_iii")
END DO
!
DEALLOCATE (row_blk_sizes)
!
!
current_section => section_vals_get_subs_vals(qs_env%input, "PROPERTIES%LINRES%CURRENT")
!
!
jrho_tot_G = 0.0_dp
jrho_tot_R = 0.0_dp
!
! Let's go!
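! set_vecp returns the two directions completing the cyclic triple with iB
! (iB=1 -> iiB=2, iiiB=3; iB=2 -> iiB=3, iiiB=1; iB=3 -> iiB=1, iiiB=2)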
CALL set_vecp(iB, iiB, iiiB)
DO ispin = 1, nspins
nmo = nstates(ispin)
mo_coeff => psi0_order(ispin)
!maxocc = max_occ(ispin)
!
CALL get_mo_set(mo_set=mos(ispin), maxocc=maxocc)
!
!
! Build the first density matrix
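! P0_{mu,nu} = maxocc * sum_k C_{mu,k} C_{nu,k}
! (cp_dbcsr_plus_fm_fm_t accumulates alpha * matrix_v * matrix_g^T over ncol columns)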
CALL dbcsr_set(density_matrix0(ispin)%matrix, 0.0_dp)
CALL cp_dbcsr_plus_fm_fm_t(sparse_matrix=density_matrix0(ispin)%matrix, &
matrix_v=mo_coeff, matrix_g=mo_coeff, &
ncol=nmo, alpha=maxocc)
!
! Allocate buffer vectors
ALLOCATE (ddk(3, nmo))
!
! Construct the 3 density matrices for the field in direction iB
!
! First the full matrix psi_a_iB
ASSOCIATE (psi_a_iB => psi1(ispin), psi_buf => p_psi1(ispin))
CALL cp_fm_set_all(psi_a_iB, 0.0_dp)
CALL cp_fm_set_all(psi_buf, 0.0_dp)
! psi_a_iB = - (R_\nu-dk)_ii psi1_piiiB + (R_\nu-dk)_iii psi1_piiB
!
! contributions from the response psi1_p_ii and psi1_p_iii
DO istate = 1, current_env%nbr_center(ispin)
dk(1:3) = current_env%centers_set(ispin)%array(1:3, istate)
!
! Copy the vector in the full matrix psi1
!nstate_loc = center_list(ispin)%array(1,icenter+1)-center_list(ispin)%array(1,icenter)
DO j = center_list(ispin)%array(1, istate), center_list(ispin)%array(1, istate + 1) - 1
jstate = center_list(ispin)%array(2, j)
CALL cp_fm_to_fm(psi1_p(ispin, iiB), psi_a_iB, 1, jstate, jstate)
CALL cp_fm_to_fm(psi1_p(ispin, iiiB), psi_buf, 1, jstate, jstate)
ddk(:, jstate) = dk(1:3)
END DO
END DO ! istate
CALL fm_scale_by_pbc_AC(psi_a_iB, current_env%basisfun_center, ddk, cell, iiiB)
CALL fm_scale_by_pbc_AC(psi_buf, current_env%basisfun_center, ddk, cell, iiB)
CALL cp_fm_scale_and_add(-1.0_dp, psi_a_iB, 1.0_dp, psi_buf)
!
!psi_a_iB = psi_a_iB + psi1_rxp
!
! contribution from the response psi1_rxp
CALL cp_fm_scale_and_add(-1.0_dp, psi_a_iB, 1.0_dp, psi1_rxp(ispin, iB))
!
!psi_a_iB = psi_a_iB - psi1_D
IF (current_env%full) THEN
!
! contribution from the response psi1_D
CALL cp_fm_scale_and_add(1.0_dp, psi_a_iB, -1.0_dp, psi1_D(ispin, iB))
END IF
!
! Multiply by the occupation number for the density matrix
!
! Build the first response density matrix (density_matrix_a)
CALL dbcsr_set(density_matrix_a(ispin)%matrix, 0.0_dp)
CALL cp_dbcsr_plus_fm_fm_t(sparse_matrix=density_matrix_a(ispin)%matrix, &
matrix_v=mo_coeff, matrix_g=psi_a_iB, &
ncol=nmo, alpha=maxocc)
END ASSOCIATE
!
! Build the second density matrix
CALL dbcsr_set(density_matrix_iii(ispin)%matrix, 0.0_dp)
CALL cp_dbcsr_plus_fm_fm_t(sparse_matrix=density_matrix_iii(ispin)%matrix, &
matrix_v=mo_coeff, matrix_g=psi1_p(ispin, iiiB), &
ncol=nmo, alpha=maxocc)
!
! Build the third density matrix
CALL dbcsr_set(density_matrix_ii(ispin)%matrix, 0.0_dp)
CALL cp_dbcsr_plus_fm_fm_t(sparse_matrix=density_matrix_ii(ispin)%matrix, &
matrix_v=mo_coeff, matrix_g=psi1_p(ispin, iiB), &
ncol=nmo, alpha=maxocc)
DO idir = 1, 3
!
! Calculate the current density on the pw grid (only soft if GAPW)
! idir is the cartesian component of the response current density
! generated by the magnetic field pointing in cartesian direction iB
! Use the qs_rho_type already used for rho during the scf
CALL qs_rho_get(current_env%jrho1_set(idir)%rho, rho_r=jrho1_r)
CALL qs_rho_get(current_env%jrho1_set(idir)%rho, rho_g=jrho1_g)
ASSOCIATE (jrho_rspace => jrho1_r(ispin), jrho_gspace => jrho1_g(ispin))
CALL pw_zero(jrho_rspace)
CALL pw_zero(jrho_gspace)
CALL calculate_jrho_resp(density_matrix0(ispin)%matrix, &
density_matrix_a(ispin)%matrix, &
density_matrix_ii(ispin)%matrix, &
density_matrix_iii(ispin)%matrix, &
iB, idir, jrho_rspace, jrho_gspace, qs_env, &
current_env, gapw)
scale_fac = cell%deth/twopi
CALL pw_scale(jrho_rspace, scale_fac)
CALL pw_scale(jrho_gspace, scale_fac)
jrho_tot_G(idir, iB) = pw_integrate_function(jrho_gspace, isign=-1)
jrho_tot_R(idir, iB) = pw_integrate_function(jrho_rspace, isign=-1)
END ASSOCIATE
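! ACHAR(idir + 119) maps idir = 1, 2, 3 to the letters 'x', 'y', 'z' (ACHAR(120) = 'x')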
IF (output_unit > 0) THEN
WRITE (output_unit, '(T2,2(A,E24.16))') 'Integrated j_'&
&//ACHAR(idir + 119)//ACHAR(iB + 119)//'(r): G-space=', &
jrho_tot_G(idir, iB), ' R-space=', jrho_tot_R(idir, iB)
END IF
!
END DO ! idir
!
! Deallocate buffer vectors
DEALLOCATE (ddk)
!
END DO ! ispin
IF (gapw) THEN
DO idir = 1, 3
!
! compute the atomic response current densities on the spherical grids
! First the sparse matrices are multiplied by the expansion coefficients
! this is the equivalent of the CPC for the charge density
CALL calculate_jrho_atom_coeff(qs_env, current_env, &
density_matrix0, &
density_matrix_a, &
density_matrix_ii, &
density_matrix_iii, &
iB, idir)
!
! Then the radial parts are computed on the local radial grid, atom by atom
! 8 functions are computed for each atom, per grid point
! and per LM angular momentum. The multiplication by the Clebsch-Gordan
! coefficients, or their counterparts for the derivatives, is also done here
CALL calculate_jrho_atom_rad(qs_env, current_env, idir)
!
! The current on the atomic grids
CALL calculate_jrho_atom(current_env, qs_env, iB, idir)
END DO ! idir
END IF
!
! Cube files
IF (BTEST(cp_print_key_should_output(logger%iter_info, current_section,&
& "PRINT%CURRENT_CUBES"), cp_p_file)) THEN
append_cube = section_get_lval(current_section, "PRINT%CURRENT_CUBES%APPEND")
my_pos = "REWIND"
IF (append_cube) THEN
my_pos = "APPEND"
END IF
!
CALL pw_env_get(pw_env, auxbas_rs_desc=auxbas_rs_desc, &
auxbas_pw_pool=auxbas_pw_pool)
!
CALL auxbas_pw_pool%create_pw(wf_r)
!
DO idir = 1, 3
CALL pw_zero(wf_r)
CALL qs_rho_get(current_env%jrho1_set(idir)%rho, rho_r=jrho1_r)
DO ispin = 1, nspins
CALL pw_axpy(jrho1_r(ispin), wf_r, 1.0_dp)
END DO
!
IF (gapw) THEN
! Add the local hard and soft contributions
! This can be done atom by atom by a spline extrapolation of the values
! on the spherical grid to the grid points.
CPABORT("GAPW needs to be finalized")
END IF
filename = "jresp"
mpi_io = .TRUE.
WRITE (ext, '(a2,a1,a2,a1,a5)') "iB", ACHAR(iB + 119), "_d", ACHAR(idir + 119), ".cube"
unit_nr = cp_print_key_unit_nr(logger, current_section, "PRINT%CURRENT_CUBES", &
extension=TRIM(ext), middle_name=TRIM(filename), &
log_filename=.FALSE., file_position=my_pos, &
mpi_io=mpi_io)
CALL cp_pw_to_cube(wf_r, unit_nr, "RESPONSE CURRENT DENSITY ", &
particles=particles, &
stride=section_get_ivals(current_section, "PRINT%CURRENT_CUBES%STRIDE"), &
mpi_io=mpi_io)
CALL cp_print_key_finished_output(unit_nr, logger, current_section,&
& "PRINT%CURRENT_CUBES", mpi_io=mpi_io)
END DO
!
CALL auxbas_pw_pool%give_back_pw(wf_r)
END IF ! current cube
!
! Integrated current response checksum
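! (Frobenius norm of the 3x3 matrix of R-space integrated current components)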
IF (output_unit > 0) THEN
WRITE (output_unit, '(T2,A,E24.16)') 'CheckSum R-integrated j=', &
SQRT(DDOT(9, jrho_tot_R(1, 1), 1, jrho_tot_R(1, 1), 1))
END IF
!
!
! Deallocate the matrices used for the calculation of jrho and the shift
CALL cp_fm_release(psi1)
CALL cp_fm_release(p_psi1)
CALL dbcsr_deallocate_matrix_set(density_matrix0)
CALL dbcsr_deallocate_matrix_set(density_matrix_a)
CALL dbcsr_deallocate_matrix_set(density_matrix_ii)
CALL dbcsr_deallocate_matrix_set(density_matrix_iii)
!
! Finalize
CALL timestop(handle)
!
END SUBROUTINE current_build_current
! **************************************************************************************************
!> \brief Calculation of the idir component of the response current density
!> in the presence of a constant magnetic field in direction iB
!> the current density is collocated on the pw grid in real space
!> \param mat_d0 ground-state density matrix (needed only for the gauge term)
!> \param mat_jp response density matrix built from psi1
!> \param mat_jp_rii response density matrix, to be combined with (r - R_nu)_iiiB
!> \param mat_jp_riii response density matrix, to be combined with (r - R_nu)_iiB
!> \param iB direction (1=x, 2=y, 3=z) of the magnetic field
!> \param idir Cartesian component of the response current density
!> \param current_rs collocated current density in real space
!> \param current_gs current density in reciprocal space
!> \param qs_env the QS environment
!> \param current_env the linear-response current-density environment
!> \param soft_valid if .TRUE., collocate only the soft part (GAPW)
!> \param retain_rsgrid if .TRUE., do not release and recreate the distributed rs grids
!> \note
!> The collocation is done in three parts, one for each density matrix.
!> In all cases the density matrices, and therefore the collocation,
!> are not symmetric, which means that all the pairs (ab and ba) have
!> to be considered separately
!>
!> mat_jp_{\mu\nu} is multiplied by
!> f_{\mu\nu} = \phi_{\mu} (d\phi_{\nu}/dr)_{idir} -
!> (d\phi_{\mu}/dr)_{idir} \phi_{\nu}
!>
!> mat_jp_rii_{\mu\nu} is multiplied by
!> f_{\mu\nu} = \phi_{\mu} (r - R_{\nu})_{iiiB} (d\phi_{\nu}/dr)_{idir} -
!> (d\phi_{\mu}/dr)_{idir} (r - R_{\nu})_{iiiB} \phi_{\nu} +
!> \phi_{\mu} \phi_{\nu} (last term only if iiiB=idir)
!>
!> mat_jp_riii_{\mu\nu} is multiplied by
!> (be careful: change in sign with respect to previous)
!> f_{\mu\nu} = -\phi_{\mu} (r - R_{\nu})_{iiB} (d\phi_{\nu}/dr)_{idir} +
!> (d\phi_{\mu}/dr)_{idir} (r - R_{\nu})_{iiB} \phi_{\nu} -
!> \phi_{\mu} \phi_{\nu} (last term only if iiB=idir)
!>
!> All the terms sum up to the same grid
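!> In the collocation loop below these products are selected via the
!> GRID_FUNC_ADBmDAB_{X,Y,Z} and GRID_FUNC_ARDBmDARB_* function ids of grid_api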
! **************************************************************************************************
SUBROUTINE calculate_jrho_resp(mat_d0, mat_jp, mat_jp_rii, mat_jp_riii, iB, idir, &
current_rs, current_gs, qs_env, current_env, soft_valid, retain_rsgrid)
TYPE(dbcsr_type), POINTER :: mat_d0, mat_jp, mat_jp_rii, mat_jp_riii
INTEGER, INTENT(IN) :: iB, idir
TYPE(pw_r3d_rs_type), INTENT(INOUT) :: current_rs
TYPE(pw_c1d_gs_type), INTENT(INOUT) :: current_gs
TYPE(qs_environment_type), POINTER :: qs_env
TYPE(current_env_type) :: current_env
LOGICAL, INTENT(IN), OPTIONAL :: soft_valid, retain_rsgrid
CHARACTER(LEN=*), PARAMETER :: routineN = 'calculate_jrho_resp'
INTEGER, PARAMETER :: max_tasks = 2000
CHARACTER(LEN=default_string_length) :: basis_type
INTEGER :: adbmdab_func, bcol, brow, cindex, curr_tasks, handle, i, iatom, iatom_old, idir2, &
igrid_level, iiB, iiiB, ikind, ikind_old, ipgf, iset, iset_old, itask, ithread, jatom, &
jatom_old, jkind, jkind_old, jpgf, jset, jset_old, maxco, maxpgf, maxset, maxsgf, &
maxsgf_set, na1, na2, natom, nb1, nb2, ncoa, ncob, nimages, nkind, nseta, nsetb, ntasks, &
nthread, sgfa, sgfb
INTEGER, DIMENSION(:), POINTER :: la_max, la_min, lb_max, lb_min, npgfa, &
npgfb, nsgfa, nsgfb
INTEGER, DIMENSION(:, :), POINTER :: first_sgfa, first_sgfb
LOGICAL :: atom_pair_changed, den_found, &
den_found_a, distributed_rs_grids, &
do_igaim, my_retain_rsgrid, my_soft
REAL(dp), DIMENSION(:, :, :), POINTER :: my_current, my_gauge, my_rho
REAL(KIND=dp) :: eps_rho_rspace, f, kind_radius_a, &
kind_radius_b, Lxo2, Lyo2, Lzo2, &
prefactor, radius, scale, scale2, zetp
REAL(KIND=dp), DIMENSION(3) :: ra, rab, rb, rp
REAL(KIND=dp), DIMENSION(:), POINTER :: set_radius_a, set_radius_b
REAL(KIND=dp), DIMENSION(:, :), POINTER :: jp_block_a, jp_block_b, jp_block_c, jp_block_d, &
jpab_a, jpab_b, jpab_c, jpab_d, jpblock_a, jpblock_b, jpblock_c, jpblock_d, rpgfa, rpgfb, &
sphi_a, sphi_b, work, zeta, zetb
REAL(KIND=dp), DIMENSION(:, :, :), POINTER :: jpabt_a, jpabt_b, jpabt_c, jpabt_d, workt
TYPE(atom_pair_type), DIMENSION(:), POINTER :: atom_pair_recv, atom_pair_send
TYPE(cell_type), POINTER :: cell
TYPE(cube_info_type), DIMENSION(:), POINTER :: cube_info
TYPE(dbcsr_p_type), DIMENSION(:), POINTER :: deltajp_a, deltajp_b, deltajp_c, &
deltajp_d
TYPE(dbcsr_type), POINTER :: mat_a, mat_b, mat_c, mat_d
TYPE(dft_control_type), POINTER :: dft_control
TYPE(gridlevel_info_type), POINTER :: gridlevel_info
TYPE(gto_basis_set_p_type), DIMENSION(:), POINTER :: basis_set_list
TYPE(gto_basis_set_type), POINTER :: basis_set_a, basis_set_b, orb_basis_set
TYPE(mp_para_env_type), POINTER :: para_env
TYPE(neighbor_list_iterator_p_type), &
DIMENSION(:), POINTER :: nl_iterator
TYPE(neighbor_list_set_p_type), DIMENSION(:), &
POINTER :: sab_orb
TYPE(particle_type), DIMENSION(:), POINTER :: particle_set
TYPE(pw_env_type), POINTER :: pw_env
TYPE(qs_kind_type), DIMENSION(:), POINTER :: qs_kind_set
TYPE(qs_kind_type), POINTER :: qs_kind
TYPE(realspace_grid_desc_p_type), DIMENSION(:), &
POINTER :: rs_descs
TYPE(realspace_grid_type), DIMENSION(:), POINTER :: rs_current, rs_rho
TYPE(realspace_grid_type), DIMENSION(:, :), &
POINTER :: rs_gauge
TYPE(task_type), DIMENSION(:), POINTER :: tasks
NULLIFY (qs_kind, cell, dft_control, orb_basis_set, rs_rho, &
qs_kind_set, sab_orb, particle_set, rs_current, pw_env, &
rs_descs, para_env, set_radius_a, set_radius_b, la_max, la_min, &
lb_max, lb_min, npgfa, npgfb, nsgfa, nsgfb, rpgfa, rpgfb, &
sphi_a, sphi_b, zeta, zetb, first_sgfa, first_sgfb, tasks, &
workt, mat_a, mat_b, mat_c, mat_d, rs_gauge)
NULLIFY (deltajp_a, deltajp_b, deltajp_c, deltajp_d)
NULLIFY (jp_block_a, jp_block_b, jp_block_c, jp_block_d)
NULLIFY (jpblock_a, jpblock_b, jpblock_c, jpblock_d)
NULLIFY (jpabt_a, jpabt_b, jpabt_c, jpabt_d)
NULLIFY (atom_pair_send, atom_pair_recv)
CALL timeset(routineN, handle)
!
! Set pointers for the different gauges.
! If do_igaim is .FALSE., the gauge grids stored in current_env are never needed
do_igaim = current_env%gauge .EQ. current_gauge_atom
mat_a => mat_jp
mat_b => mat_jp_rii
mat_c => mat_jp_riii
IF (do_igaim) mat_d => mat_d0
my_retain_rsgrid = .FALSE.
IF (PRESENT(retain_rsgrid)) my_retain_rsgrid = retain_rsgrid
CALL get_qs_env(qs_env=qs_env, &
qs_kind_set=qs_kind_set, &
cell=cell, &
dft_control=dft_control, &
particle_set=particle_set, &
sab_all=sab_orb, &
para_env=para_env, &
pw_env=pw_env)
IF (do_igaim) CALL get_current_env(current_env=current_env, rs_gauge=rs_gauge)
! Components appearing in the vector product r x p: iiB and iiiB
CALL set_vecp(iB, iiB, iiiB)
!
!
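! idir2 completes the triple (idir, iB, idir2); scale2 = fac_vecp(idir, iB, idir2)
! is the corresponding Levi-Civita sign and vanishes when idir == iB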
scale2 = 0.0_dp
idir2 = 1
IF (idir .NE. iB) THEN
CALL set_vecp_rev(idir, iB, idir2)
scale2 = fac_vecp(idir, iB, idir2)
END IF
!
! *** assign from pw_env
gridlevel_info => pw_env%gridlevel_info
cube_info => pw_env%cube_info
! Check that the neighbor list with all the pairs is associated
CPASSERT(ASSOCIATED(sab_orb))
! *** set up the pw multi-grids
CPASSERT(ASSOCIATED(pw_env))
CALL pw_env_get(pw_env, rs_descs=rs_descs, rs_grids=rs_rho)
distributed_rs_grids = .FALSE.
DO igrid_level = 1, gridlevel_info%ngrid_levels
IF (.NOT. ALL(rs_descs(igrid_level)%rs_desc%perd == 1)) THEN
distributed_rs_grids = .TRUE.
END IF
END DO
eps_rho_rspace = dft_control%qs_control%eps_rho_rspace
nthread = 1
! *** Allocate work storage ***
CALL get_qs_kind_set(qs_kind_set=qs_kind_set, &
maxco=maxco, &
maxsgf=maxsgf, &
maxsgf_set=maxsgf_set)
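! half the lengths of the three cell vectors, used below for minimum-image screening of atom pairs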
Lxo2 = SQRT(SUM(cell%hmat(:, 1)**2))/2.0_dp
Lyo2 = SQRT(SUM(cell%hmat(:, 2)**2))/2.0_dp
Lzo2 = SQRT(SUM(cell%hmat(:, 3)**2))/2.0_dp
my_soft = .FALSE.
IF (PRESENT(soft_valid)) my_soft = soft_valid
IF (my_soft) THEN
basis_type = "ORB_SOFT"
ELSE
basis_type = "ORB"
END IF
nkind = SIZE(qs_kind_set)
CALL reallocate(jpabt_a, 1, maxco, 1, maxco, 0, nthread - 1)
CALL reallocate(jpabt_b, 1, maxco, 1, maxco, 0, nthread - 1)
CALL reallocate(jpabt_c, 1, maxco, 1, maxco, 0, nthread - 1)
CALL reallocate(jpabt_d, 1, maxco, 1, maxco, 0, nthread - 1)
CALL reallocate(workt, 1, maxco, 1, maxsgf_set, 0, nthread - 1)
CALL reallocate_tasks(tasks, max_tasks)
ntasks = 0
curr_tasks = SIZE(tasks)
! get maximum numbers
natom = SIZE(particle_set)
maxset = 0
maxpgf = 0
! hard-coded matrix index (no k-points)
nimages = dft_control%nimages
CPASSERT(nimages == 1)
cindex = 1
DO ikind = 1, nkind
qs_kind => qs_kind_set(ikind)
CALL get_qs_kind(qs_kind=qs_kind, basis_set=orb_basis_set)
IF (.NOT. ASSOCIATED(orb_basis_set)) CYCLE
CALL get_gto_basis_set(gto_basis_set=orb_basis_set, npgf=npgfa, nset=nseta)
maxset = MAX(nseta, maxset)
maxpgf = MAX(MAXVAL(npgfa), maxpgf)
END DO
! *** Initialize working density matrix ***
! distributed rs grids require a matrix that will be changed (distribute_tasks)
! whereas this is not the case for replicated grids
ALLOCATE (deltajp_a(1), deltajp_b(1), deltajp_c(1), deltajp_d(1))
IF (distributed_rs_grids) THEN
ALLOCATE (deltajp_a(1)%matrix, deltajp_b(1)%matrix, deltajp_c(1)%matrix)
IF (do_igaim) THEN
ALLOCATE (deltajp_d(1)%matrix)
END IF
CALL dbcsr_create(deltajp_a(1)%matrix, template=mat_a, name='deltajp_a')
CALL dbcsr_create(deltajp_b(1)%matrix, template=mat_a, name='deltajp_b')
CALL dbcsr_create(deltajp_c(1)%matrix, template=mat_a, name='deltajp_c')
IF (do_igaim) CALL dbcsr_create(deltajp_d(1)%matrix, template=mat_a, name='deltajp_d')
ELSE
deltajp_a(1)%matrix => mat_a !mat_jp
deltajp_b(1)%matrix => mat_b !mat_jp_rii
deltajp_c(1)%matrix => mat_c !mat_jp_riii
IF (do_igaim) deltajp_d(1)%matrix => mat_d !mat_d0
END IF
ALLOCATE (basis_set_list(nkind))
DO ikind = 1, nkind
qs_kind => qs_kind_set(ikind)
CALL get_qs_kind(qs_kind=qs_kind, basis_set=basis_set_a, basis_type=basis_type)
IF (ASSOCIATED(basis_set_a)) THEN
basis_set_list(ikind)%gto_basis_set => basis_set_a
ELSE
NULLIFY (basis_set_list(ikind)%gto_basis_set)
END IF
END DO
CALL neighbor_list_iterator_create(nl_iterator, sab_orb)
DO WHILE (neighbor_list_iterate(nl_iterator) == 0)
CALL get_iterator_info(nl_iterator, ikind=ikind, jkind=jkind, iatom=iatom, jatom=jatom, r=rab)
basis_set_a => basis_set_list(ikind)%gto_basis_set
IF (.NOT. ASSOCIATED(basis_set_a)) CYCLE
basis_set_b => basis_set_list(jkind)%gto_basis_set
IF (.NOT. ASSOCIATED(basis_set_b)) CYCLE
ra(:) = pbc(particle_set(iatom)%r, cell)
! basis ikind
first_sgfa => basis_set_a%first_sgf
la_max => basis_set_a%lmax
la_min => basis_set_a%lmin
npgfa => basis_set_a%npgf
nseta = basis_set_a%nset
nsgfa => basis_set_a%nsgf_set
rpgfa => basis_set_a%pgf_radius
set_radius_a => basis_set_a%set_radius
kind_radius_a = basis_set_a%kind_radius
sphi_a => basis_set_a%sphi
zeta => basis_set_a%zet
! basis jkind
first_sgfb => basis_set_b%first_sgf
lb_max => basis_set_b%lmax
lb_min => basis_set_b%lmin
npgfb => basis_set_b%npgf
nsetb = basis_set_b%nset
nsgfb => basis_set_b%nsgf_set
rpgfb => basis_set_b%pgf_radius
set_radius_b => basis_set_b%set_radius
kind_radius_b = basis_set_b%kind_radius
sphi_b => basis_set_b%sphi
zetb => basis_set_b%zet
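! minimum-image screening: skip pairs separated by more than half a cell vector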
IF (ABS(rab(1)) > Lxo2 .OR. ABS(rab(2)) > Lyo2 .OR. ABS(rab(3)) > Lzo2) THEN
CYCLE
END IF
brow = iatom
bcol = jatom
CALL dbcsr_get_block_p(matrix=mat_a, row=brow, col=bcol, &
block=jp_block_a, found=den_found_a)
CALL dbcsr_get_block_p(matrix=mat_b, row=brow, col=bcol, &
block=jp_block_b, found=den_found)
CALL dbcsr_get_block_p(matrix=mat_c, row=brow, col=bcol, &
block=jp_block_c, found=den_found)
IF (do_igaim) CALL dbcsr_get_block_p(matrix=mat_d, row=brow, col=bcol, &
block=jp_block_d, found=den_found)
IF (.NOT. ASSOCIATED(jp_block_a)) CYCLE
IF (distributed_rs_grids) THEN
NULLIFY (jpblock_a, jpblock_b, jpblock_c, jpblock_d)
CALL dbcsr_add_block_node(deltajp_a(1)%matrix, brow, bcol, jpblock_a)
jpblock_a = jp_block_a
CALL dbcsr_add_block_node(deltajp_b(1)%matrix, brow, bcol, jpblock_b)
jpblock_b = jp_block_b
CALL dbcsr_add_block_node(deltajp_c(1)%matrix, brow, bcol, jpblock_c)
jpblock_c = jp_block_c
IF (do_igaim) THEN
CALL dbcsr_add_block_node(deltajp_d(1)%matrix, brow, bcol, jpblock_d)
jpblock_d = jp_block_d
END IF
ELSE
jpblock_a => jp_block_a
jpblock_b => jp_block_b
jpblock_c => jp_block_c
IF (do_igaim) jpblock_d => jp_block_d
END IF
CALL task_list_inner_loop(tasks, ntasks, curr_tasks, rs_descs, &
dft_control, cube_info, gridlevel_info, cindex, &
iatom, jatom, rpgfa, rpgfb, zeta, zetb, kind_radius_b, &
set_radius_a, set_radius_b, ra, rab, &
la_max, la_min, lb_max, lb_min, npgfa, npgfb, nseta, nsetb)
END DO
CALL neighbor_list_iterator_release(nl_iterator)
DEALLOCATE (basis_set_list)
IF (distributed_rs_grids) THEN
CALL dbcsr_finalize(deltajp_a(1)%matrix)
CALL dbcsr_finalize(deltajp_b(1)%matrix)
CALL dbcsr_finalize(deltajp_c(1)%matrix)
IF (do_igaim) CALL dbcsr_finalize(deltajp_d(1)%matrix)
END IF
! sorts / redistributes the task list
CALL distribute_tasks(rs_descs=rs_descs, ntasks=ntasks, natoms=natom, tasks=tasks, &
atom_pair_send=atom_pair_send, atom_pair_recv=atom_pair_recv, &
symmetric=.FALSE., reorder_rs_grid_ranks=.TRUE., &
skip_load_balance_distributed=.FALSE.)
ALLOCATE (rs_current(gridlevel_info%ngrid_levels))
DO igrid_level = 1, gridlevel_info%ngrid_levels
! Here we need to reallocate the distributed rs_grids, which may have been reordered
! by distribute_tasks
IF (rs_descs(igrid_level)%rs_desc%distributed .AND. .NOT. my_retain_rsgrid) THEN
CALL rs_grid_release(rs_rho(igrid_level))
CALL rs_grid_create(rs_rho(igrid_level), rs_descs(igrid_level)%rs_desc)
END IF
CALL rs_grid_zero(rs_rho(igrid_level))
CALL rs_grid_create(rs_current(igrid_level), rs_descs(igrid_level)%rs_desc)
CALL rs_grid_zero(rs_current(igrid_level))
END DO
!
! we need to build the gauge here
IF (.NOT. current_env%gauge_init .AND. do_igaim) THEN
CALL current_set_gauge(current_env, qs_env)
current_env%gauge_init = .TRUE.
END IF
!
! for any case double check the bounds !
IF (do_igaim) THEN
DO igrid_level = 1, gridlevel_info%ngrid_levels
my_rho => rs_rho(igrid_level)%r
my_current => rs_current(igrid_level)%r
IF (LBOUND(my_rho, 3) .NE. LBOUND(my_current, 3) .OR. &
LBOUND(my_rho, 2) .NE. LBOUND(my_current, 2) .OR. &
LBOUND(my_rho, 1) .NE. LBOUND(my_current, 1) .OR. &
UBOUND(my_rho, 3) .NE. UBOUND(my_current, 3) .OR. &
UBOUND(my_rho, 2) .NE. UBOUND(my_current, 2) .OR. &
UBOUND(my_rho, 1) .NE. UBOUND(my_current, 1)) THEN
WRITE (*, *) 'LBOUND(my_rho,3),LBOUND(my_current,3)', LBOUND(my_rho, 3), LBOUND(my_current, 3)
WRITE (*, *) 'LBOUND(my_rho,2),LBOUND(my_current,2)', LBOUND(my_rho, 2), LBOUND(my_current, 2)
WRITE (*, *) 'LBOUND(my_rho,1),LBOUND(my_current,1)', LBOUND(my_rho, 1), LBOUND(my_current, 1)
WRITE (*, *) 'UBOUND(my_rho,3),UBOUND(my_current,3)', UBOUND(my_rho, 3), UBOUND(my_current, 3)
WRITE (*, *) 'UBOUND(my_rho,2),UBOUND(my_current,2)', UBOUND(my_rho, 2), UBOUND(my_current, 2)
WRITE (*, *) 'UBOUND(my_rho,1),UBOUND(my_current,1)', UBOUND(my_rho, 1), UBOUND(my_current, 1)
CPABORT("Bug")
END IF
my_gauge => rs_gauge(1, igrid_level)%r
IF (LBOUND(my_rho, 3) .NE. LBOUND(my_gauge, 3) .OR. &
LBOUND(my_rho, 2) .NE. LBOUND(my_gauge, 2) .OR. &
LBOUND(my_rho, 1) .NE. LBOUND(my_gauge, 1) .OR. &
UBOUND(my_rho, 3) .NE. UBOUND(my_gauge, 3) .OR. &
UBOUND(my_rho, 2) .NE. UBOUND(my_gauge, 2) .OR. &
UBOUND(my_rho, 1) .NE. UBOUND(my_gauge, 1)) THEN
WRITE (*, *) 'LBOUND(my_rho,3),LBOUND(my_gauge,3)', LBOUND(my_rho, 3), LBOUND(my_gauge, 3)
WRITE (*, *) 'LBOUND(my_rho,2),LBOUND(my_gauge,2)', LBOUND(my_rho, 2), LBOUND(my_gauge, 2)
WRITE (*, *) 'LBOUND(my_rho,1),LBOUND(my_gauge,1)', LBOUND(my_rho, 1), LBOUND(my_gauge, 1)
WRITE (*, *) 'UBOUND(my_rho,3),UBOUND(my_gauge,3)', UBOUND(my_rho, 3), UBOUND(my_gauge, 3)
WRITE (*, *) 'UBOUND(my_rho,2),UBOUND(my_gauge,2)', UBOUND(my_rho, 2), UBOUND(my_gauge, 2)
WRITE (*, *) 'UBOUND(my_rho,1),UBOUND(my_gauge,1)', UBOUND(my_rho, 1), UBOUND(my_gauge, 1)
CPABORT("Bug")
END IF
END DO
END IF
!
!-------------------------------------------------------------
IF (distributed_rs_grids) THEN
CALL rs_distribute_matrix(rs_descs=rs_descs, pmats=deltajp_a, &
atom_pair_send=atom_pair_send, atom_pair_recv=atom_pair_recv, &
nimages=nimages, scatter=.TRUE.)
CALL rs_distribute_matrix(rs_descs=rs_descs, pmats=deltajp_b, &
atom_pair_send=atom_pair_send, atom_pair_recv=atom_pair_recv, &
nimages=nimages, scatter=.TRUE.)
CALL rs_distribute_matrix(rs_descs=rs_descs, pmats=deltajp_c, &
atom_pair_send=atom_pair_send, atom_pair_recv=atom_pair_recv, &
nimages=nimages, scatter=.TRUE.)
IF (do_igaim) CALL rs_distribute_matrix(rs_descs=rs_descs, pmats=deltajp_d, &
atom_pair_send=atom_pair_send, atom_pair_recv=atom_pair_recv, &
nimages=nimages, scatter=.TRUE.)
END IF
ithread = 0
jpab_a => jpabt_a(:, :, ithread)
jpab_b => jpabt_b(:, :, ithread)
jpab_c => jpabt_c(:, :, ithread)
IF (do_igaim) jpab_d => jpabt_d(:, :, ithread)
work => workt(:, :, ithread)
iatom_old = -1; jatom_old = -1; iset_old = -1; jset_old = -1
ikind_old = -1; jkind_old = -1
loop_tasks: DO itask = 1, ntasks
igrid_level = tasks(itask)%grid_level
cindex = tasks(itask)%image
iatom = tasks(itask)%iatom
jatom = tasks(itask)%jatom
iset = tasks(itask)%iset
jset = tasks(itask)%jset
ipgf = tasks(itask)%ipgf
jpgf = tasks(itask)%jpgf
! apparently generalised collocation not implemented correctly yet
CPASSERT(tasks(itask)%dist_type .NE. 2)
IF (iatom .NE. iatom_old .OR. jatom .NE. jatom_old) THEN
ikind = particle_set(iatom)%atomic_kind%kind_number
jkind = particle_set(jatom)%atomic_kind%kind_number
IF (iatom .NE. iatom_old) ra(:) = pbc(particle_set(iatom)%r, cell)
brow = iatom
bcol = jatom
IF (ikind .NE. ikind_old) THEN
CALL get_qs_kind(qs_kind_set(ikind), basis_set=orb_basis_set, &
basis_type=basis_type)
CALL get_gto_basis_set(gto_basis_set=orb_basis_set, &
first_sgf=first_sgfa, &
lmax=la_max, &
lmin=la_min, &