Commit bc359fc — "Cleanup"

Author: loganbvh
Committed: Sep 18, 2023
1 parent: a88aadf

Showing 2 changed files with 16 additions and 25 deletions.
33 changes: 13 additions & 20 deletions tdgl/finite_volume/operators.py
@@ -16,9 +16,12 @@
 from .mesh import Mesh


-def get_offsets(
+def _get_spmatrix_offsets(
     spmatrix: sp.spmatrix, i: np.ndarray, j: np.ndarray, n_samples: int
 ) -> np.ndarray:
+    """Calculates the sparse matrix offsets for a set of rows ``i`` and columns ``j``."""
+    # See _set_many() at
+    # https://github.com/scipy/scipy/blob/3f9a8c80e281e746225092621935b88c1ce68040/scipy/sparse/_compressed.py#L901
     spmatrix = spmatrix.asformat("csr", copy=False)
     i, j, M, N = spmatrix._prepare_indices(i, j)
     offsets = np.empty(n_samples, dtype=spmatrix.indices.dtype)
@@ -30,8 +33,7 @@ def get_offsets(
     csr_sample_offsets(
         M, N, spmatrix.indptr, spmatrix.indices, n_samples, i, j, offsets
     )
-
-    assert -1 not in offsets
+    assert -1 not in offsets, "This function cannot change the sparsity of the matrix"
     return offsets


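The offsets returned by ``_get_spmatrix_offsets`` index directly into a CSR matrix's ``data`` array, so stored entries can be overwritten without going through ``__setitem__``. A minimal sketch of that usage (not part of this commit; the 2x2 matrix and its values are hypothetical):

    import numpy as np
    import scipy.sparse as sp

    # Hypothetical matrix with stored entries at (0, 0), (1, 0), and (1, 1).
    m = sp.csr_matrix(np.array([[1.0, 0.0], [2.0, 3.0]]))
    rows = np.array([0, 1])
    cols = np.array([0, 1])
    # Offsets into m.data for the entries (0, 0) and (1, 1).
    offsets = _get_spmatrix_offsets(m, rows, cols, n_samples=2)
    # Same result as m[rows, cols] = [10.0, 30.0], but skips __setitem__.
    # Valid only because both entries are already stored (sparsity unchanged).
    m.data[offsets] = [10.0, 30.0]
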
@@ -312,7 +314,7 @@ def set_link_exponents(self, link_exponents: np.ndarray) -> None:
             weights=self.laplacian_weights,
         )
         n_samples = len(self.link_exponents)
-        self.psi_gradient_offsets = get_offsets(
+        self.psi_gradient_offsets = _get_spmatrix_offsets(
             self.psi_gradient,
             self.gradient_link_rows,
             self.gradient_link_cols,
@@ -326,7 +328,7 @@ def set_link_exponents(self, link_exponents: np.ndarray) -> None:
         else:
             rows = self.laplacian_link_rows
             cols = self.laplacian_link_cols
-        self.psi_laplacian_offsets = get_offsets(
+        self.psi_laplacian_offsets = _get_spmatrix_offsets(
             self.psi_laplacian, rows, cols, len(self.laplacian_link_rows)
         )
         if self.sparse_solver is SparseSolver.CUPY:
@@ -347,32 +349,23 @@ def set_link_exponents(self, link_exponents: np.ndarray) -> None:
                 -1j * xp.einsum("ij, ij -> i", self.link_exponents, directions)
             )
         with warnings.catch_warnings():
-            # This is slightly faster than re-creating the sparse matrices
-            # from scratch.
+            # This is faster than re-creating the sparse matrices from scratch.
             warnings.filterwarnings("ignore", category=sp.SparseEfficiencyWarning)
+            # To speed things up, we directly update the underlying data rather than
+            # using __setitem__ (square brackets). This is a bit of a hack.
             # Update gradient for psi
-            # rows, cols = self.gradient_link_rows, self.gradient_link_cols
-            # self.psi_gradient[rows, cols] = self.gradient_weights * link_variables
-            self.psi_gradient.data[self.psi_gradient_offsets] = (
-                self.gradient_weights * link_variables
-            )
+            values = self.gradient_weights * link_variables
+            self.psi_gradient.data[self.psi_gradient_offsets] = values
             # Update Laplacian for psi
             areas0 = self.areas[edges[:, 0]]
             areas1 = self.areas[edges[:, 1]]
-            # # Only update rows that are not fixed by boundary conditions
-            # if self.fix_psi:
-            #     free_rows = self.laplacian_free_rows[: len(self.laplacian_link_rows)]
-            #     rows = self.laplacian_link_rows[free_rows]
-            #     cols = self.laplacian_link_cols[free_rows]
-            # else:
-            #     rows = self.laplacian_link_rows
-            #     cols = self.laplacian_link_cols
             values = xp.concatenate(
                 [
                     self.laplacian_weights * link_variables / areas0,
                     self.laplacian_weights * link_variables.conjugate() / areas1,
                 ]
             )
+            # Only update rows that are not fixed by boundary conditions
             if self.fix_psi:
                 free_rows = self.laplacian_free_rows[: len(self.laplacian_link_rows)]
                 values = values[free_rows]
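For orientation, the quantities being written here are per-edge link variables: the ``einsum`` takes the row-wise dot product of the link exponents with the edge direction vectors, and the result scales the precomputed gradient and Laplacian weights. A standalone sketch with hypothetical arrays (not part of the commit):

    import numpy as np

    # Hypothetical per-edge data: link exponents (vector potential sampled
    # on edges) and unit edge direction vectors, one row per edge.
    link_exponents = np.array([[0.1, 0.0], [0.0, 0.2]])
    directions = np.array([[1.0, 0.0], [0.0, 1.0]])
    # Row-wise dot product A . d, then the complex link variable per edge.
    phases = np.einsum("ij, ij -> i", link_exponents, directions)
    link_variables = np.exp(-1j * phases)
    # With precomputed offsets, the sparse matrix is then refreshed in place:
    #     psi_gradient.data[psi_gradient_offsets] = gradient_weights * link_variables
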
8 changes: 3 additions & 5 deletions tdgl/solver/solve.py
@@ -13,7 +13,6 @@
 except ModuleNotFoundError:
     cupy = None

-from .. import distance
 from ..device.device import Device, TerminalInfo
 from ..finite_volume.operators import MeshOperators
 from ..parameter import Parameter
@@ -102,6 +101,7 @@ def solve(
     mesh = device.mesh
     sites = device.points
     edges = mesh.edge_mesh.edges
+    num_edges = len(edges)
     edge_directions = mesh.edge_mesh.directions
     length_units = ureg(device.length_units)
     xi = device.coherence_length.magnitude
@@ -228,7 +228,7 @@ def current_func(t):
         areas = cupy.asarray(areas)
         edge_centers = cupy.asarray(edge_centers)
         sites = cupy.asarray(sites)
-        new_A_induced = cupy.empty((len(edges), 2), dtype=float)
+        new_A_induced = cupy.empty((num_edges, 2), dtype=float)

     # Running list of the max abs change in |psi|^2 between subsequent solve steps.
     # This list is used to calculate the adaptive time step.
@@ -258,7 +258,6 @@ def update(
         else:
             assert cupy is not None
             assert isinstance(psi, cupy.ndarray)
-
             xp = cupy
         A_induced = induced_vector_potential
         A_applied = applied_vector_potential
@@ -368,7 +367,7 @@ def update(
         )
         if use_cupy:
             threads_per_block = 512
-            num_blocks = math.ceil(len(edges) / threads_per_block)
+            num_blocks = math.ceil(num_edges / threads_per_block)
             get_A_induced_cupy[num_blocks, (threads_per_block, 2)](
                 J_site, areas, sites, edge_centers, new_A_induced
             )
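The launch configuration follows the usual ceiling-division pattern: enough blocks that every edge gets an x-thread, with a second block dimension of 2 for the vector components. A sketch with hypothetical sizes:

    import math

    num_edges = 1000  # hypothetical mesh size
    threads_per_block = 512
    # Round up so num_blocks * threads_per_block >= num_edges; each thread
    # handles one edge, and the block's y dimension (2) covers the x and y
    # components of the induced vector potential.
    num_blocks = math.ceil(num_edges / threads_per_block)  # -> 2
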
@@ -418,7 +417,6 @@ def update(

     # Set the initial conditions.
     if seed_solution is None:
-        num_edges = len(edges)
         parameters = {
             "psi": psi_init,
             "mu": mu_init,