Workarounds for the ICEs of the Intel ifx compiler (cp2k#3630)
mkrack authored Aug 15, 2024
1 parent 5f2f414 commit 8d4226d
Showing 5 changed files with 132 additions and 96 deletions.
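The recurring workaround in this commit: OMP PARALLEL WORKSHARE constructs are hidden behind a preprocessor guard (the unthreaded branch is compiled when __MKL is defined), combined OMP directive lines are split so that each clause gets its own continuation line, and optional commas between clauses are dropped. A minimal sketch of the guard pattern with placeholder module and array names (not CP2K code), mirroring the pw_methods.F hunk further down:

MODULE zero_demo
   IMPLICIT NONE
   INTEGER, PARAMETER :: dp = SELECTED_REAL_KIND(14, 200)
CONTAINS
   SUBROUTINE zero_array(a)
      REAL(KIND=dp), DIMENSION(:, :, :), INTENT(OUT) :: a

      ! The WORKSHARE construct is the one reported to ICE ifx (<=2024.2.1),
      ! so it is only compiled when __MKL is not defined.
#if defined(__MKL)
      a = 0.0_dp
#else
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(a)
      a = 0.0_dp
!$OMP END PARALLEL WORKSHARE
#endif
   END SUBROUTINE zero_array
END MODULE zero_demo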
10 changes: 5 additions & 5 deletions src/deepmd_wrapper.F
@@ -134,11 +134,11 @@ SUBROUTINE DeepPotCompute(model, natom, coord, atype, cell, energy, force, viria
MARK_USED(coord)
MARK_USED(atype)
MARK_USED(cell)
-      MARK_USED(energy)
-      MARK_USED(force)
-      MARK_USED(virial)
-      MARK_USED(atomic_energy)
-      MARK_USED(atomic_virial)
+      energy = 0.0_dp
+      force = 0.0_dp
+      virial = 0.0_dp
+      atomic_energy = 0.0_dp
+      atomic_virial = 0.0_dp
#endif

CALL timestop(handle)
2 changes: 1 addition & 1 deletion src/fm/cp_fm_cusolver_api.F
@@ -87,7 +87,7 @@ END SUBROUTINE cp_fm_diag_cusolver_c
#else
MARK_USED(matrix)
MARK_USED(eigenvectors)
-      MARK_USED(eigenvalues)
+      eigenvalues = 0.0_dp
MARK_USED(n)
MARK_USED(nmo)
MARK_USED(eigenvalues_buffer)
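Both stub branches above follow the same pattern: INTENT(OUT) dummies that were previously only passed to MARK_USED are now zero-initialized, so the fallback branch assigns defined values instead of a no-op. A minimal sketch with hypothetical routine, argument, and guard-macro names (the real routines take more arguments):

MODULE stub_demo
   IMPLICIT NONE
   INTEGER, PARAMETER :: dp = SELECTED_REAL_KIND(14, 200)
CONTAINS
   SUBROUTINE compute_stub(energy, force)
      REAL(KIND=dp), INTENT(OUT)               :: energy
      REAL(KIND=dp), DIMENSION(:), INTENT(OUT) :: force

#if defined(__SOME_LIB)
      ! ... call into the real library here ...
#else
      ! Assign the INTENT(OUT) results instead of MARK_USED(energy) etc.
      energy = 0.0_dp
      force = 0.0_dp
#endif
   END SUBROUTINE compute_stub
END MODULE stub_demo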
38 changes: 24 additions & 14 deletions src/mp2_ri_gpw.F
@@ -1941,9 +1941,10 @@ SUBROUTINE mp2_redistribute_gamma(Gamma_P_ia, ij_index, my_B_size, &
Lstart_pos = ranges_info_array(1, irep, comm_exchange%mepos)
start_point = ranges_info_array(3, irep, comm_exchange%mepos)
end_point = ranges_info_array(4, irep, comm_exchange%mepos)
-!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(kkk,lll,iiB) &
-!$OMP SHARED(start_point,end_point,Lstart_pos,my_block_size,&
-!$OMP Gamma_P_ia,my_i,my_B_size,Y_i_aP)
+!$OMP PARALLEL DO DEFAULT(NONE) &
+!$OMP PRIVATE(kkk,lll,iiB) &
+!$OMP SHARED(start_point,end_point,Lstart_pos,my_block_size,&
+!$OMP Gamma_P_ia,my_i,my_B_size,Y_i_aP)
DO kkk = start_point, end_point
lll = kkk - start_point + Lstart_pos
DO iiB = 1, my_block_size
@@ -1952,7 +1953,7 @@ SUBROUTINE mp2_redistribute_gamma(Gamma_P_ia, ij_index, my_B_size, &
Y_i_aP(1:my_B_size, lll, iiB)
END DO
END DO
-            !$OMP END PARALLEL DO
+            !$OMP END PARALLEL DO
END DO
CALL timestop(handle2)

@@ -1974,16 +1975,17 @@ SUBROUTINE mp2_redistribute_gamma(Gamma_P_ia, ij_index, my_B_size, &
Lstart_pos = ranges_info_array(1, irep, proc_send)
start_point = ranges_info_array(3, irep, proc_send)
end_point = ranges_info_array(4, irep, proc_send)
-!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(kkk,lll,iiB) &
-!$OMP SHARED(start_point,end_point,Lstart_pos,my_block_size,&
-!$OMP BI_C_send,my_B_size,Y_i_aP)
+!$OMP PARALLEL DO DEFAULT(NONE) &
+!$OMP PRIVATE(kkk,lll,iiB) &
+!$OMP SHARED(start_point,end_point,Lstart_pos,my_block_size,&
+!$OMP BI_C_send,my_B_size,Y_i_aP)
DO kkk = start_point, end_point
lll = kkk - start_point + Lstart_pos
DO iiB = 1, my_block_size
BI_C_send(1:my_B_size, iiB, kkk) = Y_i_aP(1:my_B_size, lll, iiB)
END DO
END DO
-            !$OMP END PARALLEL DO
+            !$OMP END PARALLEL DO
END DO
CALL timestop(handle2)

@@ -2007,12 +2009,13 @@ SUBROUTINE mp2_redistribute_gamma(Gamma_P_ia, ij_index, my_B_size, &
DO irep = 0, num_integ_group - 1
start_point = ranges_info_array(3, irep, comm_exchange%mepos)
end_point = ranges_info_array(4, irep, comm_exchange%mepos)
-!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(start_point,end_point,my_block_size,&
-!$OMP Gamma_P_ia,rec_i,iiB,my_B_size,BI_C_rec)
+!$OMP PARALLEL WORKSHARE DEFAULT(NONE) &
+!$OMP SHARED(start_point,end_point,my_block_size,&
+!$OMP Gamma_P_ia,rec_i,iiB,my_B_size,BI_C_rec)
Gamma_P_ia(:, rec_i:rec_i + my_block_size - 1, start_point:end_point) = &
Gamma_P_ia(:, rec_i:rec_i + my_block_size - 1, start_point:end_point) + &
BI_C_rec(1:my_B_size, :, start_point:end_point)
-            !$OMP END PARALLEL WORKSHARE
+            !$OMP END PARALLEL WORKSHARE
END DO
CALL timestop(handle2)
@@ -2049,12 +2052,19 @@ SUBROUTINE mp2_redistribute_gamma(Gamma_P_ia, ij_index, my_B_size, &
DO irep = 0, num_integ_group - 1
start_point = ranges_info_array(3, irep, comm_exchange%mepos)
end_point = ranges_info_array(4, irep, comm_exchange%mepos)
-!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(start_point,end_point,my_block_size,&
-!$OMP Gamma_P_ia,rec_i,iiB,my_B_size,BI_C_rec)
+#if defined(__MKL)
Gamma_P_ia(:, rec_i:rec_i + my_block_size - 1, start_point:end_point) = &
Gamma_P_ia(:, rec_i:rec_i + my_block_size - 1, start_point:end_point) + &
BI_C_rec(1:my_B_size, :, start_point:end_point)
-            !$OMP END PARALLEL WORKSHARE
+#else
+!$OMP PARALLEL WORKSHARE DEFAULT(NONE) &
+!$OMP SHARED(start_point,end_point,my_block_size,&
+!$OMP Gamma_P_ia,rec_i,my_B_size,BI_C_rec)
+            Gamma_P_ia(:, rec_i:rec_i + my_block_size - 1, start_point:end_point) = &
+               Gamma_P_ia(:, rec_i:rec_i + my_block_size - 1, start_point:end_point) + &
+               BI_C_rec(1:my_B_size, :, start_point:end_point)
+!$OMP END PARALLEL WORKSHARE
+#endif
END DO
CALL timestop(handle2)

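Most of the mp2_ri_gpw.F hunks only reshuffle the directive layout: DEFAULT(NONE), PRIVATE, and SHARED each move onto their own !$OMP continuation line, and the last accumulation additionally gets the same __MKL guard as above. A sketch of the split layout on a toy loop, with placeholder names rather than the CP2K variables:

MODULE split_demo
   IMPLICIT NONE
   INTEGER, PARAMETER :: dp = SELECTED_REAL_KIND(14, 200)
CONTAINS
   SUBROUTINE scale_block(a, n)
      INTEGER, INTENT(IN)                        :: n
      REAL(KIND=dp), DIMENSION(:), INTENT(INOUT) :: a
      INTEGER                                    :: k

      ! One clause per continuation line; the previous form packed
      ! DEFAULT(NONE) and PRIVATE(k) onto the same line.
!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP PRIVATE(k) &
!$OMP SHARED(a, n)
      DO k = 1, n
         a(k) = 2.0_dp*a(k)
      END DO
!$OMP END PARALLEL DO
   END SUBROUTINE scale_block
END MODULE split_demo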
35 changes: 22 additions & 13 deletions src/pw/pw_methods.F
@@ -246,9 +246,14 @@ SUBROUTINE pw_zero_${kind}$_${space}$ (pw)

CALL timeset(routineN, handle)

+      ! This OMP clause causes an internal compiler error (ICE) with ifx (<=2024.2.1)
+#if defined(__MKL)
+      pw%array = ${type2type("0.0_dp", "r3d", kind)}$
+#else
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw)
pw%array = ${type2type("0.0_dp", "r3d", kind)}$
!$OMP END PARALLEL WORKSHARE
+#endif

CALL timestop(handle)

@@ -424,8 +429,8 @@ SUBROUTINE pw_gather_p_${kind}$ (pw, c, scale)
ngpts => SIZE(pw%pw_grid%gsq), ghat => pw%pw_grid%g_hat, yzq => pw%pw_grid%para%yzq)

IF (PRESENT(scale)) THEN
-!$OMP PARALLEL DO DEFAULT(NONE), &
-!$OMP PRIVATE(l, m, mn, n), &
+!$OMP PARALLEL DO DEFAULT(NONE) &
+!$OMP PRIVATE(l, m, mn, n) &
!$OMP SHARED(c, pw, scale)
DO gpt = 1, ngpts
l = mapl(ghat(1, gpt)) + 1
@@ -436,8 +441,8 @@ SUBROUTINE pw_gather_p_${kind}$ (pw, c, scale)
END DO
!$OMP END PARALLEL DO
ELSE
-!$OMP PARALLEL DO DEFAULT(NONE), &
-!$OMP PRIVATE(l, m, mn, n), &
+!$OMP PARALLEL DO DEFAULT(NONE) &
+!$OMP PRIVATE(l, m, mn, n) &
!$OMP SHARED(c, pw)
DO gpt = 1, ngpts
l = mapl(ghat(1, gpt)) + 1
@@ -482,8 +487,8 @@ SUBROUTINE pw_scatter_p_${kind}$ (pw, c, scale)
IF (.NOT. PRESENT(scale)) c = z_zero

IF (PRESENT(scale)) THEN
-!$OMP PARALLEL DO DEFAULT(NONE), &
-!$OMP PRIVATE(l, m, mn, n), &
+!$OMP PARALLEL DO DEFAULT(NONE) &
+!$OMP PRIVATE(l, m, mn, n) &
!$OMP SHARED(c, pw, scale)
DO gpt = 1, ngpts
l = mapl(ghat(1, gpt)) + 1
@@ -494,8 +499,8 @@ SUBROUTINE pw_scatter_p_${kind}$ (pw, c, scale)
END DO
!$OMP END PARALLEL DO
ELSE
-!$OMP PARALLEL DO DEFAULT(NONE), &
-!$OMP PRIVATE(l, m, mn, n), &
+!$OMP PARALLEL DO DEFAULT(NONE) &
+!$OMP PRIVATE(l, m, mn, n) &
!$OMP SHARED(c, pw)
DO gpt = 1, ngpts
l = mapl(ghat(1, gpt)) + 1
@@ -515,8 +520,8 @@ SUBROUTINE pw_scatter_p_${kind}$ (pw, c, scale)
ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq), yzq => pw%pw_grid%para%yzq)

IF (PRESENT(scale)) THEN
-!$OMP PARALLEL DO DEFAULT(NONE), &
-!$OMP PRIVATE(l, m, mn, n), &
+!$OMP PARALLEL DO DEFAULT(NONE) &
+!$OMP PRIVATE(l, m, mn, n) &
!$OMP SHARED(c, pw, scale)
DO gpt = 1, ngpts
l = mapl(ghat(1, gpt)) + 1
@@ -527,7 +532,7 @@ SUBROUTINE pw_scatter_p_${kind}$ (pw, c, scale)
END DO
!$OMP END PARALLEL DO
ELSE
-!$OMP PARALLEL DO DEFAULT(NONE), &
+!$OMP PARALLEL DO DEFAULT(NONE) &
!$OMP PRIVATE(l, m, mn, n) &
!$OMP SHARED(c, pw)
DO gpt = 1, ngpts
@@ -609,15 +614,19 @@ SUBROUTINE pw_copy_${kind}$_${kind2}$_${space}$ (pw1, pw2)

IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
IF (ng1 >= ng2) THEN
-!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
+!$OMP PARALLEL DO DEFAULT(NONE) &
+!$OMP PRIVATE(i,j) &
+!$OMP SHARED(ng2, pw1, pw2)
DO i = 1, ng2
j = pw2%pw_grid%gidx(i)
pw2%array(i) = ${type2type("pw1%array(j)", kind, kind2)}$
END DO
!$OMP END PARALLEL DO
ELSE
CALL pw_zero(pw2)
-!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
+!$OMP PARALLEL DO DEFAULT(NONE) &
+!$OMP PRIVATE(i,j) &
+!$OMP SHARED(ng1, pw1, pw2)
DO i = 1, ng1
j = pw2%pw_grid%gidx(i)
pw2%array(j) = ${type2type("pw1%array(i)", kind, kind2)}$
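The pw_gather, pw_scatter, and pw_copy hunks above carry no semantic change: the clause lists are identical, only the optional commas after DEFAULT(NONE) and PRIVATE(...) are dropped and single-line directives are split, both of which are legal OpenMP Fortran spellings. For anyone who wants to probe whether a given ifx release accepts the original spellings, a hypothetical stand-alone reproducer (not part of this commit) could look like the following; compile with something along the lines of ifx -fpp -qopenmp -c repro.F, adjusting the flags to your setup:

MODULE repro
   IMPLICIT NONE
   INTEGER, PARAMETER :: dp = SELECTED_REAL_KIND(14, 200)
CONTAINS
   SUBROUTINE touch(a, c, n)
      INTEGER, INTENT(IN)                              :: n
      REAL(KIND=dp), DIMENSION(:, :, :), INTENT(INOUT) :: a
      REAL(KIND=dp), DIMENSION(:), INTENT(INOUT)       :: c
      INTEGER                                          :: i

      ! The constructs this commit guards (PARALLEL WORKSHARE) or respells
      ! (comma-separated clause lists), in their original form.
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(a)
      a = 0.0_dp
!$OMP END PARALLEL WORKSHARE

!$OMP PARALLEL DO DEFAULT(NONE), &
!$OMP PRIVATE(i), &
!$OMP SHARED(c, n)
      DO i = 1, n
         c(i) = c(i) + 1.0_dp
      END DO
!$OMP END PARALLEL DO
   END SUBROUTINE touch
END MODULE repro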
