[WIP] [BlockSparseArrays] (Truncated) SVD for Abstract(Block)(Sparse)Array #1572

Draft · wants to merge 14 commits into main
@@ -589,3 +589,28 @@ macro view!(expr)
@capture(expr, array_[indices__])
return :(view!($(esc(array)), $(esc.(indices)...)))
end

# SVD additions
# -------------
using LinearAlgebra: Algorithm, default_svd_alg
using BlockArrays: BlockedArray, BlockedMatrix, BlockedVector, blockedrange

# `svd` first calls `eigencopy_oftype` to create something that can be SVD'd in-place.
# Here, we hijack this mechanism to detect any structure we can exploit.
# Default: the SVD is most efficient on a `BlockedArray` (dense data with blocked axes).
function eigencopy_oftype(A::AbstractBlockArray, S)
return BlockedMatrix{S}(A)
end

function svd!(A::BlockedMatrix; full::Bool=false, alg::Algorithm=default_svd_alg(A))
F = svd!(parent(A); full, alg)

# restore block pattern
m = length(F.S)
bax1, bax2, bax3 = axes(A, 1), blockedrange([m]), axes(A, 2)

u = BlockedArray(F.U, (bax1, bax2))
s = BlockedVector(F.S, (bax2,))
vt = BlockedArray(F.Vt, (bax2, bax3))
return SVD(u, s, vt)
end
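
# Usage sketch (illustrative, not part of the changeset): assuming the `svd` defined in
# this PR is in scope, the SVD of a `BlockedMatrix` keeps the row blocking of `A` in `U`
# and the column blocking in `Vt`, with `S` carrying a single block.
using BlockArrays: BlockedArray
using LinearAlgebra: Diagonal
A = BlockedArray(randn(4, 6), [2, 2], [3, 3])  # 2 + 2 rows, 3 + 3 columns
F = svd(A)                                      # routes through eigencopy_oftype and svd!
F.U * Diagonal(F.S) * F.Vt ≈ A                  # reconstruction as usual
axes(F.U, 1) == axes(A, 1)                      # row block structure is restored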
14 changes: 14 additions & 0 deletions NDTensors/src/lib/BlockSparseArrays/src/BlockSparseArrays.jl
@@ -1,12 +1,22 @@
module BlockSparseArrays

# factorizations
include("factorizations/svd.jl")
include("factorizations/tsvd.jl")

# possible upstream contributions
include("BlockArraysExtensions/BlockArraysExtensions.jl")

# interface functions that don't have to specialize
include("blocksparsearrayinterface/blocksparsearrayinterface.jl")
include("blocksparsearrayinterface/linearalgebra.jl")
include("blocksparsearrayinterface/blockzero.jl")
include("blocksparsearrayinterface/broadcast.jl")
include("blocksparsearrayinterface/map.jl")
include("blocksparsearrayinterface/arraylayouts.jl")
include("blocksparsearrayinterface/views.jl")

# functions defined for any abstractblocksparsearray
include("abstractblocksparsearray/abstractblocksparsearray.jl")
include("abstractblocksparsearray/wrappedabstractblocksparsearray.jl")
include("abstractblocksparsearray/abstractblocksparsematrix.jl")
@@ -17,8 +27,12 @@ include("abstractblocksparsearray/sparsearrayinterface.jl")
include("abstractblocksparsearray/broadcast.jl")
include("abstractblocksparsearray/map.jl")
include("abstractblocksparsearray/linearalgebra.jl")

# functions specifically for BlockSparseArray
include("blocksparsearray/defaults.jl")
include("blocksparsearray/blocksparsearray.jl")
include("blocksparsearray/blockdiagonalarray.jl")

include("BlockArraysSparseArrayInterfaceExt/BlockArraysSparseArrayInterfaceExt.jl")
include("../ext/BlockSparseArraysTensorAlgebraExt/src/BlockSparseArraysTensorAlgebraExt.jl")
include("../ext/BlockSparseArraysGradedAxesExt/src/BlockSparseArraysGradedAxesExt.jl")
@@ -1 +1,22 @@
const AbstractBlockSparseMatrix{T} = AbstractBlockSparseArray{T,2}

# SVD is implemented by:
# 1. attempting to find a block-diagonal structure by permuting blocks,
# 2. otherwise falling back to the AbstractBlockArray implementation via BlockedArray.
function svd(
A::AbstractBlockSparseMatrix; full::Bool=false, alg::Algorithm=default_svd_alg(A)
)
T = LinearAlgebra.eigtype(eltype(A))
A′ = try_to_blockdiagonal(A)

if isnothing(A′)
# not block-diagonal, fall back to dense case
Adense = eigencopy_oftype(A, T)
return svd!(Adense; full, alg)
end

# compute block-by-block and permute back
A″, (I, J) = A′
F = svd!(eigencopy_oftype(A″, T); full, alg)
return SVD(F.U[Block.(I), Block.(J)], F.S, F.Vt)
end
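
# Intended usage sketch (illustrative, not part of the changeset; the fast path is still
# WIP): assuming the existing `BlockSparseArray` constructor from block lengths and
# `Block`-indexed assignment, stored blocks forming a permuted block-diagonal pattern
# take the block-by-block path, anything else falls back to the dense SVD above.
using BlockArrays: Block
using LinearAlgebra: Diagonal
A = BlockSparseArray{Float64}([2, 3], [2, 3])  # blocked 5×5, no stored blocks yet
A[Block(1, 2)] = randn(2, 3)                   # one stored block per block-row
A[Block(2, 1)] = randn(3, 2)                   # and per block-column
F = svd(A)                                     # intended: block-wise SVD, permuted back
F.U * Diagonal(F.S) * F.Vt ≈ A                 # intended reconstruction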
@@ -0,0 +1,59 @@
# type alias for block-diagonal
using LinearAlgebra: Diagonal

const BlockDiagonal{T,A,Axes,V<:AbstractVector{A}} = BlockSparseMatrix{
T,A,Diagonal{A,V},Axes
}

function BlockDiagonal(blocks::AbstractVector{<:AbstractMatrix})
return BlockSparseArray(
Diagonal(blocks), (blockedrange(size.(blocks, 1)), blockedrange(size.(blocks, 2)))
)
end
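
# Construction sketch (illustrative, not part of the changeset): the entries of the
# `Diagonal` storage become the diagonal blocks, and the blocked axes follow their sizes.
# `blocksize` and zero reads of unstored blocks are assumed from the existing interfaces.
using BlockArrays: Block, blocksize
D = BlockDiagonal([randn(2, 2), randn(3, 4)])
size(D)          # (5, 6)
blocksize(D)     # (2, 2) blocks per dimension
D[Block(2, 1)]   # off-diagonal block, expected to read as zeros (not stored)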

# Cast to block-diagonal implementation if permuted-blockdiagonal
function try_to_blockdiagonal_perm(A)
inds = map(x -> Int.(Tuple(x)), vec(collect(block_stored_indices(A))))
I = first.(inds)
allunique(I) || return nothing
J = last.(inds)
p = sortperm(J)
Jsorted = J[p]
allunique(Jsorted) || return nothing
return Block.(I[p], Jsorted)
end

"""
try_to_blockdiagonal(A)

Attempt to find a permutation of blocks that makes `A` block-diagonal. If unsuccessful,
return `nothing`; otherwise return the block-diagonal matrix together with the block permutation.
"""
function try_to_blockdiagonal(A::AbstractBlockSparseMatrix)
perm = try_to_blockdiagonal_perm(A)
isnothing(perm) && return perm
I = first.(Tuple.(perm))
J = last.(Tuple.(perm))
diagblocks = map(invperm(I), J) do i, j
return A[Block(i, j)]
end
return BlockDiagonal(diagblocks), perm
end
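
# Detection sketch (illustrative, not part of the changeset): one stored block per
# block-row and per block-column is accepted (up to permutation); a second stored block
# in the same block-row causes the pattern to be rejected with `nothing`.
using BlockArrays: Block
A = BlockSparseArray{Float64}([2, 3], [2, 3])
A[Block(1, 2)] = randn(2, 3)
A[Block(2, 1)] = randn(3, 2)
try_to_blockdiagonal(A)       # -> (BlockDiagonal(...), permutation of blocks)
A[Block(1, 1)] = randn(2, 2)
try_to_blockdiagonal(A)       # -> nothing: block-row 1 now holds two stored blocks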

# SVD implementation
function eigencopy_oftype(A::BlockDiagonal, S)
diag = map(Base.Fix2(eigencopy_oftype, S), A.blocks.diag)
return BlockDiagonal(diag)
end

function svd(A::BlockDiagonal; kwargs...)
return svd!(eigencopy_oftype(A, LinearAlgebra.eigtype(eltype(A))); kwargs...)
end
function svd!(A::BlockDiagonal; full::Bool=false, alg::Algorithm=default_svd_alg(A))
# TODO: handle full
F = map(a -> svd!(a; full, alg), blocks(A).diag)
Us = map(Base.Fix2(getproperty, :U), F)
Ss = map(Base.Fix2(getproperty, :S), F)
Vts = map(Base.Fix2(getproperty, :Vt), F)
return SVD(BlockDiagonal(Us), mortar(Ss), BlockDiagonal(Vts))
end
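
# Sketch (illustrative, not part of the changeset): the SVD factors of a BlockDiagonal
# are again BlockDiagonal and `S` is a blocked vector of per-block singular values,
# sorted within each block but not globally across blocks.
using BlockArrays: Block
D = BlockDiagonal([randn(2, 2), randn(3, 3)])
F = svd(D)
F.U isa BlockDiagonal && F.Vt isa BlockDiagonal
F.U[Block(1, 1)] * Diagonal(F.S[Block(1)]) * F.Vt[Block(1, 1)] ≈ D[Block(1, 1)]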
206 changes: 206 additions & 0 deletions NDTensors/src/lib/BlockSparseArrays/src/factorizations/svd.jl
@@ -0,0 +1,206 @@
using LinearAlgebra:
LinearAlgebra, Factorization, Algorithm, default_svd_alg, Adjoint, Transpose
using BlockArrays: AbstractBlockMatrix, BlockedArray, BlockedMatrix, BlockedVector
using BlockArrays: BlockLayout

# Singular Value Decomposition:
# A new type is needed to deal with `U` and `V` possibly having different types.
# This is essentially a carbon copy of the LinearAlgebra implementation.
# Additionally, by default we fall back to the LinearAlgebra implementation, in the hope
# of supporting as many foreign types that choose to extend those methods as possible.

# TODO: add this to MatrixFactorizations
# TODO: decide where this goes
# TODO: decide whether or not to restrict types to be blocked.
"""
SVD <: Factorization

Matrix factorization type of the singular value decomposition (SVD) of a matrix `A`.
This is the return type of [`svd(_)`](@ref), the corresponding matrix factorization function.

If `F::SVD` is the factorization object, `U`, `S`, `V` and `Vt` can be obtained
via `F.U`, `F.S`, `F.V` and `F.Vt`, such that `A = U * Diagonal(S) * Vt`.
The singular values in `S` are sorted in descending order.

Iterating the decomposition produces the components `U`, `S`, and `V`.

# Examples
```jldoctest
julia> A = [1. 0. 0. 0. 2.; 0. 0. 3. 0. 0.; 0. 0. 0. 0. 0.; 0. 2. 0. 0. 0.]
4×5 Matrix{Float64}:
1.0 0.0 0.0 0.0 2.0
0.0 0.0 3.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 2.0 0.0 0.0 0.0

julia> F = svd(A)
SVD{Float64, Float64, Matrix{Float64}, Vector{Float64}}
U factor:
4×4 Matrix{Float64}:
0.0 1.0 0.0 0.0
1.0 0.0 0.0 0.0
0.0 0.0 0.0 1.0
0.0 0.0 -1.0 0.0
singular values:
4-element Vector{Float64}:
3.0
2.23606797749979
2.0
0.0
Vt factor:
4×5 Matrix{Float64}:
-0.0 0.0 1.0 -0.0 0.0
0.447214 0.0 0.0 0.0 0.894427
0.0 -1.0 0.0 0.0 0.0
0.0 0.0 0.0 1.0 0.0

julia> F.U * Diagonal(F.S) * F.Vt
4×5 Matrix{Float64}:
1.0 0.0 0.0 0.0 2.0
0.0 0.0 3.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 2.0 0.0 0.0 0.0

julia> u, s, v = F; # destructuring via iteration

julia> u == F.U && s == F.S && v == F.V
true
```
"""
struct SVD{T,Tr,M<:AbstractArray{T},C<:AbstractVector{Tr},N<:AbstractArray{T}} <:
Factorization{T}
U::M
S::C
Vt::N
function SVD{T,Tr,M,C,N}(
U, S, Vt
) where {T,Tr,M<:AbstractArray{T},C<:AbstractVector{Tr},N<:AbstractArray{T}}
Base.require_one_based_indexing(U, S, Vt)
return new{T,Tr,M,C,N}(U, S, Vt)
end
end
function SVD(U::AbstractArray{T}, S::AbstractVector{Tr}, Vt::AbstractArray{T}) where {T,Tr}
return SVD{T,Tr,typeof(U),typeof(S),typeof(Vt)}(U, S, Vt)
end
function SVD{T}(U::AbstractArray, S::AbstractVector{Tr}, Vt::AbstractArray) where {T,Tr}
return SVD(
convert(AbstractArray{T}, U),
convert(AbstractVector{Tr}, S),
convert(AbstractArray{T}, Vt),
)
end

function SVD{T}(F::SVD) where {T}
return SVD(
convert(AbstractMatrix{T}, F.U),
convert(AbstractVector{real(T)}, F.S),
convert(AbstractMatrix{T}, F.Vt),
)
end
LinearAlgebra.Factorization{T}(F::SVD) where {T} = SVD{T}(F)

# iteration for destructuring into components
Base.iterate(S::SVD) = (S.U, Val(:S))
Base.iterate(S::SVD, ::Val{:S}) = (S.S, Val(:V))
Base.iterate(S::SVD, ::Val{:V}) = (S.V, Val(:done))
Base.iterate(::SVD, ::Val{:done}) = nothing

function Base.getproperty(F::SVD, d::Symbol)
if d === :V
return getfield(F, :Vt)'
else
return getfield(F, d)
end
end

function Base.propertynames(F::SVD, private::Bool=false)
return private ? (:V, fieldnames(typeof(F))...) : (:U, :S, :V, :Vt)
end

Base.size(A::SVD, dim::Integer) = dim == 1 ? size(A.U, dim) : size(A.Vt, dim)
Base.size(A::SVD) = (size(A, 1), size(A, 2))

function Base.show(io::IO, mime::MIME{Symbol("text/plain")}, F::SVD)
summary(io, F)
println(io)
println(io, "U factor:")
show(io, mime, F.U)
println(io, "\nsingular values:")
show(io, mime, F.S)
println(io, "\nVt factor:")
return show(io, mime, F.Vt)
end

Base.adjoint(usv::SVD) = SVD(adjoint(usv.Vt), usv.S, adjoint(usv.U))
Base.transpose(usv::SVD) = SVD(transpose(usv.Vt), usv.S, transpose(usv.U))

# Conversion
Base.AbstractMatrix(F::SVD) = (F.U * Diagonal(F.S)) * F.Vt
Base.AbstractArray(F::SVD) = AbstractMatrix(F)
Base.Matrix(F::SVD) = Array(AbstractArray(F))
Base.Array(F::SVD) = Matrix(F)
SVD(usv::SVD) = usv
SVD(usv::LinearAlgebra.SVD) = SVD(usv.U, usv.S, usv.Vt)

# functions default to LinearAlgebra
# ----------------------------------
"""
svd!(A; full::Bool = false, alg::Algorithm = default_svd_alg(A)) -> SVD

`svd!` is the same as [`svd`](@ref), but saves space by
overwriting the input `A`, instead of creating a copy. See documentation of [`svd`](@ref) for details.
"""
svd!(A; kwargs...) = SVD(LinearAlgebra.svd!(A; kwargs...))

"""
svd(A; full::Bool = false, alg::Algorithm = default_svd_alg(A)) -> SVD

Compute the singular value decomposition (SVD) of `A` and return an `SVD` object.

`U`, `S`, `V` and `Vt` can be obtained from the factorization `F` with `F.U`,
`F.S`, `F.V` and `F.Vt`, such that `A = U * Diagonal(S) * Vt`.
The algorithm produces `Vt` and hence `Vt` is more efficient to extract than `V`.
The singular values in `S` are sorted in descending order.

Iterating the decomposition produces the components `U`, `S`, and `V`.

If `full = false` (default), a "thin" SVD is returned. For an ``M
\\times N`` matrix `A`, in the full factorization `U` is ``M \\times M``
and `V` is ``N \\times N``, while in the thin factorization `U` is ``M
\\times K`` and `V` is ``N \\times K``, where ``K = \\min(M,N)`` is the
number of singular values.

`alg` specifies which algorithm and LAPACK method to use for SVD:
- `alg = DivideAndConquer()` (default): Calls `LAPACK.gesdd!`.
- `alg = QRIteration()`: Calls `LAPACK.gesvd!` (typically slower but more accurate).

!!! compat "Julia 1.3"
The `alg` keyword argument requires Julia 1.3 or later.

# Examples
```jldoctest
julia> A = rand(4,3);

julia> F = svd(A); # Store the Factorization Object

julia> A ≈ F.U * Diagonal(F.S) * F.Vt
true

julia> U, S, V = F; # destructuring via iteration

julia> A ≈ U * Diagonal(S) * V'
true

julia> Uonly, = svd(A); # Store U only

julia> Uonly == U
true
```
"""
svd(A; kwargs...) =
SVD(svd!(eigencopy_oftype(A, LinearAlgebra.eigtype(eltype(A))); kwargs...))

# `S` may be a blocked vector here, so only assert `AbstractVector`.
LinearAlgebra.svdvals(usv::SVD{<:Any,T}) where {T} = (usv.S)::AbstractVector{T}

# Added here to avoid type-piracy
eigencopy_oftype(A, S) = LinearAlgebra.eigencopy_oftype(A, S)
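
# Sketch (illustrative, not part of the changeset): for plain dense matrices the
# fallback simply delegates to LinearAlgebra and rewraps the result in the `SVD`
# type defined above, so both give the same factors.
using LinearAlgebra: Diagonal
A = randn(4, 3)
F = svd(A)                       # this module's `svd`, not `LinearAlgebra.svd`
F isa SVD                        # the wrapper defined in this file
F.U * Diagonal(F.S) * F.Vt ≈ A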