Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Gpu screening #38

Merged
merged 24 commits into from
Sep 21, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
66 changes: 63 additions & 3 deletions tdgl/finite_volume/operators.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@

import numpy as np
import scipy.sparse as sp
from scipy.sparse._sparsetools import csr_sample_offsets

try:
import cupy # type: ignore
Expand All @@ -15,6 +16,63 @@
from .mesh import Mesh


def _get_spmatrix_offsets(
spmatrix: sp.spmatrix, i: np.ndarray, j: np.ndarray, n_samples: int
) -> np.ndarray:
"""Calculates the sparse matrix offsets for a set of rows ``i`` and columns ``j``."""
# See _set_many() at
# https://github.com/scipy/scipy/blob/3f9a8c80e281e746225092621935b88c1ce68040/scipy/sparse/_compressed.py#L901
i, j, M, N = spmatrix._prepare_indices(i, j)
offsets = np.empty(n_samples, dtype=spmatrix.indices.dtype)
ret = csr_sample_offsets(
M, N, spmatrix.indptr, spmatrix.indices, n_samples, i, j, offsets
)
if ret == 1:
spmatrix.sum_duplicates()
csr_sample_offsets(
M, N, spmatrix.indptr, spmatrix.indices, n_samples, i, j, offsets
)
return offsets, (i, j, M, N)


def _get_spmatrix_offsets_cupy(spmatrix, i, j):
    """Calculates the sparse matrix offsets for a set of rows ``i`` and columns ``j``."""
    # See _set_many() at
    # https://github.com/cupy/cupy/blob/5c32e40af32f6f9627e09d47ecfeb7e9281ccab2/cupyx/scipy/sparse/_compressed.py#L525
    i, j, M, N = spmatrix._prepare_indices(i, j)
    # Build a lookup matrix sharing spmatrix's sparsity pattern whose stored
    # values are the positions 0..nnz-1 of each entry in ``spmatrix.data``.
    positions = cupy.arange(spmatrix.nnz, dtype=cupy.float32)
    lookup = csr_matrix(
        (positions, spmatrix.indices, spmatrix.indptr),
        shape=(M, N),
    )
    # Reading the lookup matrix at (i, j) yields each entry's data offset;
    # entries absent from the sparsity pattern come back as -1.
    offsets = lookup._get_arrayXarray(i, j, not_found_val=-1)
    offsets = offsets.astype(cupy.int32).ravel()
    return offsets, (i, j, M, N)


def _spmatrix_set_many(spmatrix, i, j, x):
    """spmatrix.__setitem__()"""
    # Find the offsets into ``spmatrix.data`` for each (i, j) pair.
    if sp.issparse(spmatrix):
        # scipy sparse: _swap() takes a single tuple argument.
        i, j = spmatrix._swap((i, j))
        offsets, (i, j, M, N) = _get_spmatrix_offsets(spmatrix, i, j, len(x))
    else:
        # cupyx sparse: _swap() takes two positional arguments.
        i, j = spmatrix._swap(i, j)
        offsets, (i, j, M, N) = _get_spmatrix_offsets_cupy(spmatrix, i, j)

    # Overwrite values for entries already present in the sparsity pattern.
    found = offsets > -1
    spmatrix.data[offsets[found]] = x[found]

    if found.all():
        return
    # only insertions remain
    missing = ~found
    new_rows = i[missing]
    new_rows[new_rows < 0] += M
    new_cols = j[missing]
    new_cols[new_cols < 0] += N
    spmatrix._insert_many(new_rows, new_cols, x[missing])


def build_divergence(mesh: Mesh) -> sp.csr_array:
"""Build the divergence matrix that takes the divergence of a function living
on the edges onto the sites.
Expand Down Expand Up @@ -293,7 +351,7 @@ def set_link_exponents(self, link_exponents: np.ndarray) -> None:
)
if self.sparse_solver is SparseSolver.CUPY:
self.psi_gradient = csr_matrix(self.psi_gradient)
self.psi_laplacian = csc_matrix(self.psi_laplacian)
self.psi_laplacian = csr_matrix(self.psi_laplacian)
self.gradient_weights = cupy.asarray(self.gradient_weights)
self.laplacian_weights = cupy.asarray(self.laplacian_weights)
return
Expand All @@ -313,7 +371,8 @@ def set_link_exponents(self, link_exponents: np.ndarray) -> None:
values = self.gradient_weights * link_variables
rows = self.gradient_link_rows
cols = self.gradient_link_cols
self.psi_gradient[rows, cols] = values
# self.psi_gradient[rows, cols] = values
_spmatrix_set_many(self.psi_gradient, rows, cols, values)
# Update Laplacian for psi
areas = self.areas
weights = self.laplacian_weights
Expand All @@ -332,7 +391,8 @@ def set_link_exponents(self, link_exponents: np.ndarray) -> None:
else:
rows = self.laplacian_link_rows
cols = self.laplacian_link_cols
self.psi_laplacian[rows, cols] = values
# self.psi_laplacian[rows, cols] = values
_spmatrix_set_many(self.psi_laplacian, rows, cols, values)

def get_supercurrent(self, psi: np.ndarray):
"""Compute the supercurrent on the edges.
Expand Down
38 changes: 36 additions & 2 deletions tdgl/parameter.py
Original file line number Diff line number Diff line change
Expand Up @@ -128,8 +128,24 @@ def __init__(self, func: Callable, time_dependent: bool = False, **kwargs):
)

def _hash_args(self, x, y, z, t) -> str:
def _coerce_to_tuple(a):
try:
return tuple(_coerce_to_tuple(i) for i in a)
except TypeError:
return a

def _to_tuple(items):
results = []
for key, value in items:
if isinstance(value, dict):
value = _to_tuple(value.items())
elif isinstance(value, (list, np.ndarray)):
value = _coerce_to_tuple(value)
results.append((key, value))
return tuple(results)

return (
hex(hash(tuple(self.kwargs.items())))
hex(hash(_to_tuple(self.kwargs.items())))
+ hashlib.sha1(np.ascontiguousarray(x)).hexdigest()
+ hashlib.sha1(np.ascontiguousarray(y)).hexdigest()
+ hashlib.sha1(np.ascontiguousarray(z)).hexdigest()
Expand Down Expand Up @@ -225,7 +241,25 @@ def __eq__(self, other) -> bool:
if self.func.__code__ != other.func.__code__:
return False

return self.kwargs == other.kwargs
if set(self.kwargs) != set(other.kwargs):
return False

def array_safe_equals(a, b) -> bool:
    """Check if a and b are equal, even if they are numpy arrays.

    Always returns a plain bool so that the caller's ``not ...`` test
    is well-defined. Values that cannot be compared are treated as
    unequal (previously this returned ``NotImplemented``, which is
    truthy and so made incomparable kwargs compare as *equal*).
    """
    if a is b:
        return True
    if isinstance(a, np.ndarray) and isinstance(b, np.ndarray):
        return a.shape == b.shape and np.allclose(a, b)
    try:
        # bool() guards against __eq__ returning a non-bool
        # (e.g. an ndarray from a mixed list/array comparison).
        return bool(a == b)
    except (TypeError, ValueError):
        # Incomparable (or ambiguous array truth value): not equal.
        return False

for key in self.kwargs:
if not array_safe_equals(self.kwargs[key], other.kwargs[key]):
return False

return True


class CompositeParameter(Parameter):
Expand Down
5 changes: 1 addition & 4 deletions tdgl/solution/solution.py
Original file line number Diff line number Diff line change
Expand Up @@ -204,10 +204,7 @@ def _compute_vorticity(self) -> None:
j_nm_site = mesh.get_quantity_on_site(self.tdgl_data.normal_current)
j_site = j_sc_site + j_nm_site
gradient = build_gradient(mesh)
normalized_directions = (
mesh.edge_mesh.directions
/ np.linalg.norm(mesh.edge_mesh.directions, axis=1)[:, np.newaxis]
)
normalized_directions = mesh.edge_mesh.normalized_directions
grad_jx = gradient @ j_site[:, 0]
grad_jy = gradient @ j_site[:, 1]
djy_dx = grad_jy * normalized_directions[:, 0]
Expand Down
46 changes: 18 additions & 28 deletions tdgl/solver/solve.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
# flake8: noqa

import itertools
import logging
import math
Expand Down Expand Up @@ -102,19 +100,21 @@ def solve(
sites = device.points
edges = mesh.edge_mesh.edges
num_edges = len(edges)
edge_directions = mesh.edge_mesh.directions
normalized_directions = mesh.edge_mesh.normalized_directions
length_units = ureg(device.length_units)
xi = device.coherence_length.magnitude
probe_points = device.probe_point_indices
u = device.layer.u
gamma = device.layer.gamma
K0 = device.K0
A0 = device.A0
probe_points = device.probe_point_indices

# The vector potential is evaluated on the mesh edges,
# where the edge coordinates are in dimensionful units.
x, y = xi * mesh.edge_mesh.centers.T
edge_centers = xi * mesh.edge_mesh.centers
edge_xs, edge_ys = edge_centers.T
Bc2 = device.Bc2
z = device.layer.z0 * xi * np.ones_like(x)
z0 = device.layer.z0 * np.ones_like(edge_xs)
J_scale = 4 * ((ureg(current_units) / length_units) / K0).to_base_units()
assert "dimensionless" in str(J_scale.units), str(J_scale.units)
J_scale = J_scale.magnitude
Expand All @@ -136,9 +136,9 @@ def solve(
.magnitude
)
A_kwargs = dict(t=0) if time_dependent_vector_potential else dict()
vector_potential = applied_vector_potential_(x, y, z, **A_kwargs)
vector_potential = applied_vector_potential_(edge_xs, edge_ys, z0, **A_kwargs)
vector_potential = np.asarray(vector_potential)[:, :2]
if vector_potential.shape != x.shape + (2,):
if vector_potential.shape != edge_xs.shape + (2,):
raise ValueError(
f"Unexpected shape for vector_potential: {vector_potential.shape}."
)
Expand Down Expand Up @@ -216,14 +216,13 @@ def current_func(t):
if use_cupy:
epsilon = cupy.asarray(epsilon)
mu_boundary = cupy.asarray(mu_boundary)
edge_directions = cupy.asarray(edge_directions)
normalized_directions = cupy.asarray(normalized_directions)
vector_potential = cupy.asarray(vector_potential)

new_A_induced = None
if options.include_screening:
A_scale = (ureg("mu_0") / (4 * np.pi) * K0 / Bc2).to_base_units().magnitude
areas = A_scale * mesh.areas
edge_centers = mesh.edge_mesh.centers
A_scale = (ureg("mu_0") / (4 * np.pi) * K0 / A0).to(1 / length_units)
areas = A_scale.magnitude * mesh.areas * xi**2
if use_cupy:
areas = cupy.asarray(areas)
edge_centers = cupy.asarray(edge_centers)
Expand Down Expand Up @@ -281,14 +280,14 @@ def update(
if time_dependent_vector_potential:
vector_potential = (
vector_potential_scale
* applied_vector_potential_(x, y, z, t=time)[:, :2]
* applied_vector_potential_(edge_xs, edge_ys, z0, t=time)[:, :2]
)
if use_cupy:
vector_potential = cupy.asarray(vector_potential)
dA_dt = xp.einsum(
"ij, ij -> i",
(vector_potential - A_applied) / dt,
edge_directions,
normalized_directions,
)
if not options.include_screening:
if xp.any(xp.absolute(dA_dt) > 0):
Expand Down Expand Up @@ -318,7 +317,7 @@ def update(
dA_dt = xp.einsum(
"ij, ij -> i",
(vector_potential + A_induced - A_applied) / dt,
edge_directions,
normalized_directions,
)

# Adjust the time step and calculate the new the order parameter
Expand All @@ -339,23 +338,14 @@ def update(
)
# Compute the supercurrent, scalar potential, and normal current
supercurrent = operators.get_supercurrent(psi)
if time_dependent_vector_potential:
rhs = divergence @ (supercurrent - dA_dt) - (
mu_boundary_laplacian @ mu_boundary
)
else:
rhs = (divergence @ supercurrent) - (
mu_boundary_laplacian @ mu_boundary
)
rhs = (divergence @ (supercurrent - dA_dt)) - (
mu_boundary_laplacian @ mu_boundary
)
if use_pardiso:
mu = pypardiso.spsolve(mu_laplacian, rhs)
elif use_cupy:
mu = mu_laplacian_lu(cupy.asarray(rhs))
else:
mu = mu_laplacian_lu(rhs)
normal_current = -(mu_gradient @ mu)
if time_dependent_vector_potential:
normal_current -= dA_dt
normal_current = -(mu_gradient @ mu) - dA_dt

if not options.include_screening:
break
Expand Down
2 changes: 1 addition & 1 deletion tdgl/sources/loop.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ def CurrentLoop(
loop_vector_potential,
current=current,
radius=radius,
center=center,
center=tuple(center),
current_units=current_units,
field_units=field_units,
length_units=length_units,
Expand Down
5 changes: 1 addition & 4 deletions tdgl/visualization/io.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,10 +75,7 @@ def get_plot_data(
j_nm_site = mesh.get_quantity_on_site(normal_current)
j_site = j_sc_site + j_nm_site
gradient = build_gradient(mesh)
normalized_directions = (
mesh.edge_mesh.directions
/ np.linalg.norm(mesh.edge_mesh.directions, axis=1)[:, np.newaxis]
)
normalized_directions = mesh.edge_mesh.normalized_directions
grad_jx = gradient @ j_site[:, 0]
grad_jy = gradient @ j_site[:, 1]
djy_dx = grad_jy * normalized_directions[:, 0]
Expand Down
Loading