rewrite spdiagm with Pair API (#23757)
instead of using one tuple with diagonals and one tuple with vectors
fredrikekre authored and KristofferC committed Oct 11, 2017
1 parent 8afbe45 commit 5295feb
Showing 9 changed files with 93 additions and 76 deletions.
7 changes: 7 additions & 0 deletions NEWS.md
@@ -503,6 +503,13 @@ Deprecated or removed

* `contains(eq, itr, item)` is deprecated in favor of `any` with a predicate ([#23716]).

* `spdiagm(x::AbstractVector)` has been deprecated in favor of `sparse(Diagonal(x))`,
  or alternatively `spdiagm(0 => x)` ([#23757]).

* `spdiagm(x::AbstractVector, d::Integer)` and `spdiagm(x::Tuple{<:AbstractVector}, d::Tuple{<:Integer})`
  have been deprecated in favor of `spdiagm(d => x)` and `spdiagm(d[1] => x[1], d[2] => x[2], ...)`,
  respectively. The new `spdiagm` implementation now always returns a square matrix ([#23757]).

* Constructors for `LibGit2.UserPasswordCredentials` and `LibGit2.SSHCredentials` which take a
`prompt_if_incorrect` argument are deprecated. Instead, prompting behavior is controlled using
the `allow_prompt` keyword in the `LibGit2.CredentialPayload` constructor ([#23690]).
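For quick reference, here is a minimal sketch of how the deprecated calls from the NEWS entries above map to the new Pair API, assuming a Julia build from around this commit (the values are purely illustrative):

```julia
# Single diagonal: spdiagm(x) becomes sparse(Diagonal(x)) or spdiagm(0 => x)
x = [1, 2, 3, 4]
sparse(Diagonal(x)) == spdiagm(0 => x)                    # expected: true

# Multiple diagonals: vectors and offsets are now paired instead of being
# passed as two parallel tuples
A_old = spdiagm(([1, 2, 3, 4], [4, 3, 2, 1]), (-1, 1))    # deprecated tuple form
A_new = spdiagm(-1 => [1, 2, 3, 4], 1 => [4, 3, 2, 1])    # Pair form, always square
A_old == A_new                                            # expected: true (both 5×5)
```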
29 changes: 28 additions & 1 deletion base/deprecated.jl
@@ -1638,7 +1638,7 @@ export hex2num

# PR 23341
import .LinAlg: diagm
@deprecate diagm(A::SparseMatrixCSC) spdiagm(sparsevec(A))
@deprecate diagm(A::SparseMatrixCSC) sparse(Diagonal(sparsevec(A)))

# PR #23373
@deprecate diagm(A::BitMatrix) diagm(vec(A))
@@ -1757,6 +1757,33 @@ end

@deprecate contains(eq::Function, itr, x) any(y->eq(y,x), itr)

# PR #23757
import .SparseArrays.spdiagm
@deprecate spdiagm(x::AbstractVector) sparse(Diagonal(x))
function spdiagm(x::AbstractVector, d::Number)
depwarn(string("spdiagm(x::AbstractVector, d::Number) is deprecated, use ",
"spdiagm(d => x) instead, which now returns a square matrix. To preserve the old ",
"behaviour, use sparse(SparseArrays.spdiagm_internal(d => x)...)"), :spdiagm)
I, J, V = SparseArrays.spdiagm_internal(d => x)
return sparse(I, J, V)
end
function spdiagm(x, d)
depwarn(string("spdiagm((x1, x2, ...), (d1, d2, ...)) is deprecated, use ",
"spdiagm(d1 => x1, d2 => x2, ...) instead, which now returns a square matrix. ",
"To preserve the old behaviour, use ",
"sparse(SparseArrays.spdiagm_internal(d1 => x1, d2 => x2, ...)...)"), :spdiagm)
I, J, V = SparseArrays.spdiagm_internal((d[i] => x[i] for i in 1:length(x))...)
return sparse(I, J, V)
end
function spdiagm(x, d, m::Integer, n::Integer)
depwarn(string("spdiagm((x1, x2, ...), (d1, d2, ...), m, n) is deprecated, use ",
"spdiagm(d1 => x1, d2 => x2, ...) instead, which now returns a square matrix. ",
"To specify a non-square matrix and preserve the old behaviour, use ",
"I, J, V = SparseArrays.spdiagm_internal(d1 => x1, d2 => x2, ...); sparse(I, J, V, m, n)"), :spdiagm)
I, J, V = SparseArrays.spdiagm_internal((d[i] => x[i] for i in 1:length(x))...)
return sparse(I, J, V, m, n)
end

# PR #23690
# `SSHCredentials` and `UserPasswordCredentials` constructors using `prompt_if_incorrect`
# are deprecated in base/libgit2/types.jl.
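The deprecation messages above name `spdiagm_internal` as the escape hatch for callers that relied on the explicit `m`, `n` arguments. A minimal sketch of that migration, with the fully qualified path spelled as `Base.SparseArrays` (the 3×5 shape is only an example):

```julia
# Deprecated: spdiagm(ones(2), 1, 3, 5) used to produce a 3×5 matrix.
# The Pair form now always returns a square matrix:
spdiagm(1 => ones(2))                          # 3×3

# To keep the old rectangular shape, build the triplets and pass m, n yourself:
I, J, V = Base.SparseArrays.spdiagm_internal(1 => ones(2))
B = sparse(I, J, V, 3, 5)                      # 3×5, matching the old behaviour
```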
6 changes: 3 additions & 3 deletions base/linalg/arnoldi.jl
@@ -52,7 +52,7 @@ final residual vector `resid`.
# Examples
```jldoctest
julia> A = spdiagm(1:4);
julia> A = Diagonal(1:4);
julia> λ, ϕ = eigs(A, nev = 2);
@@ -145,7 +145,7 @@ final residual vector `resid`.
# Examples
```jldoctest
julia> A = speye(4, 4); B = spdiagm(1:4);
julia> A = speye(4, 4); B = Diagonal(1:4);
julia> λ, ϕ = eigs(A, B, nev = 2);
@@ -379,7 +379,7 @@ iterations derived from [`eigs`](@ref).
# Examples
```jldoctest
julia> A = spdiagm(1:4);
julia> A = Diagonal(1:4);
julia> s = svds(A, nsv = 2)[1];
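The `eigs`/`svds` doctests above now set up the operator with `Diagonal` instead of a sparse diagonal matrix; both are accepted here. A minimal sketch of the updated setup (eigenvalues hedged as approximate):

```julia
A = Diagonal(1:4)
λ, ϕ = eigs(A, nev = 2)        # as in the updated doctest
λ                              # ≈ [4.0, 3.0], the two largest-magnitude eigenvalues
```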
73 changes: 28 additions & 45 deletions base/sparse/sparsematrix.jl
@@ -1087,7 +1087,7 @@ For expert drivers and additional information, see [`permute!`](@ref).
# Examples
```jldoctest
julia> A = spdiagm([1, 2, 3, 4], 0, 4, 4) + spdiagm([5, 6, 7], 1, 4, 4)
julia> A = spdiagm(0 => [1, 2, 3, 4], 1 => [5, 6, 7])
4×4 SparseMatrixCSC{Int64,Int64} with 7 stored entries:
[1, 1] = 1
[1, 2] = 5
@@ -1144,7 +1144,7 @@ and no space beyond that passed in. If `trim` is `true`, this method trims `A.ro
# Examples
```jldoctest
julia> A = spdiagm([1, 2, 3, 4])
julia> A = sparse(Diagonal([1, 2, 3, 4]))
4×4 SparseMatrixCSC{Int64,Int64} with 4 stored entries:
[1, 1] = 1
[2, 2] = 2
@@ -2935,7 +2935,7 @@ stored and otherwise do nothing. Derivative forms:
# Examples
```jldoctest
julia> A = spdiagm([1, 2, 3, 4])
julia> A = sparse(Diagonal([1, 2, 3, 4]))
4×4 SparseMatrixCSC{Int64,Int64} with 4 stored entries:
[1, 1] = 1
[2, 2] = 2
@@ -3280,57 +3280,48 @@ function istril(A::SparseMatrixCSC)
return true
end

# Create a sparse diagonal matrix by specifying multiple diagonals
# packed into a tuple, alongside their diagonal offsets and matrix shape

function spdiagm_internal(B, d)
ndiags = length(d)
if length(B) != ndiags; throw(ArgumentError("first argument should be a tuple of length(d)=$ndiags arrays of diagonals")); end
function spdiagm_internal(kv::Pair{<:Integer,<:AbstractVector}...)
ncoeffs = 0
for vec in B
ncoeffs += length(vec)
for p in kv
ncoeffs += length(p.second)
end
I = Vector{Int}(ncoeffs)
J = Vector{Int}(ncoeffs)
V = Vector{promote_type(map(eltype, B)...)}(ncoeffs)
id = 0
V = Vector{promote_type(map(x -> eltype(x.second), kv)...)}(ncoeffs)
i = 0
for vec in B
id += 1
diag = d[id]
numel = length(vec)
if diag < 0
row = -diag
for p in kv
dia = p.first
vect = p.second
numel = length(vect)
if dia < 0
row = -dia
col = 0
elseif diag > 0
elseif dia > 0
row = 0
col = diag
col = dia
else
row = 0
col = 0
end
range = 1+i:numel+i
I[range] = row+1:row+numel
J[range] = col+1:col+numel
copy!(view(V, range), vec)
r = 1+i:numel+i
I[r] = row+1:row+numel
J[r] = col+1:col+numel
copy!(view(V, r), vect)
i += numel
end

return (I,J,V)
return I, J, V
end

"""
spdiagm(B, d[, m, n])
spdiagm(kv::Pair{<:Integer,<:AbstractVector}...)
Construct a sparse diagonal matrix. `B` is a tuple of vectors containing the diagonals and
`d` is a tuple containing the positions of the diagonals. In the case the input contains only
one diagonal, `B` can be a vector (instead of a tuple) and `d` can be the diagonal position
(instead of a tuple), defaulting to 0 (diagonal). Optionally, `m` and `n` specify the size
of the resulting sparse matrix.
Construct a square sparse diagonal matrix from `Pair`s of vectors and diagonals.
Vector `kv.second` will be placed on the `kv.first` diagonal.
# Examples
```jldoctest
julia> spdiagm(([1,2,3,4],[4,3,2,1]),(-1,1))
julia> spdiagm(-1 => [1,2,3,4], 1 => [4,3,2,1])
5×5 SparseMatrixCSC{Int64,Int64} with 8 stored entries:
[2, 1] = 1
[1, 2] = 4
@@ -3342,20 +3333,12 @@ julia> spdiagm(([1,2,3,4],[4,3,2,1]),(-1,1))
[4, 5] = 1
```
"""
function spdiagm(B, d, m::Integer, n::Integer)
(I,J,V) = spdiagm_internal(B, d)
return sparse(I,J,V,m,n)
end

function spdiagm(B, d)
(I,J,V) = spdiagm_internal(B, d)
return sparse(I,J,V)
function spdiagm(kv::Pair{<:Integer,<:AbstractVector}...)
I, J, V = spdiagm_internal(kv...)
n = max(dimlub(I), dimlub(J))
return sparse(I, J, V, n, n)
end

spdiagm(B::AbstractVector, d::Number, m::Integer, n::Integer) = spdiagm((B,), (d,), m, n)

spdiagm(B::AbstractVector, d::Number=0) = spdiagm((B,), (d,))

## expand a colptr or rowptr into a dense index vector
function expandptr(V::Vector{<:Integer})
if V[1] != 1 throw(ArgumentError("first index must be one")) end
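To see what the rewritten `spdiagm_internal`/`spdiagm` pair computes, here is the doctest call expanded by hand; `dimlub` is treated as equivalent to `maximum` on a nonempty index vector (an assumption based on its use above):

```julia
# spdiagm(-1 => [1,2,3,4], 1 => [4,3,2,1]):
# offset -1 places its vector at rows 2:5, cols 1:4; offset +1 at rows 1:4, cols 2:5
I = [2, 3, 4, 5,  1, 2, 3, 4]
J = [1, 2, 3, 4,  2, 3, 4, 5]
V = [1, 2, 3, 4,  4, 3, 2, 1]

# The result is forced square: n is the largest row or column index touched
n = max(maximum(I), maximum(J))                           # 5
S = sparse(I, J, V, n, n)
S == spdiagm(-1 => [1, 2, 3, 4], 1 => [4, 3, 2, 1])       # expected: true
```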
2 changes: 1 addition & 1 deletion test/linalg/special.jl
@@ -141,7 +141,7 @@ end
# dense matrices, or dense vectors
densevec = ones(N)
densemat = diagm(ones(N))
spmat = spdiagm(ones(N))
spmat = sparse(Diagonal(ones(N)))
for specialmat in specialmats
# --> Tests applicable only to pairs of matrices
for othermat in (spmat, densemat)
4 changes: 2 additions & 2 deletions test/linalg/uniformscaling.jl
@@ -16,8 +16,8 @@ srand(123)
@test one(UniformScaling(rand(Complex128))) == one(UniformScaling{Complex128})
@test eltype(one(UniformScaling(rand(Complex128)))) == Complex128
@test -one(UniformScaling(2)) == UniformScaling(-1)
@test sparse(3I,4,5) == spdiagm(fill(3,4),0,4,5)
@test sparse(3I,5,4) == spdiagm(fill(3,4),0,5,4)
@test sparse(3I,4,5) == sparse(1:4, 1:4, 3, 4, 5)
@test sparse(3I,5,4) == sparse(1:4, 1:4, 3, 5, 4)
@test norm(UniformScaling(1+im)) ≈ sqrt(2)
end

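Because the Pair-based `spdiagm` always returns a square matrix, the rectangular `3I` comparisons above switch to the triplet `sparse` constructor. A sketch of what that builds (sizes taken from the test):

```julia
# A 4×5 sparse matrix with 3 on the leading diagonal, equivalent to sparse(3I, 4, 5)
S = sparse(1:4, 1:4, 3, 4, 5)
S == sparse(3I, 4, 5)          # expected: true
size(S)                        # (4, 5), a shape the new spdiagm no longer produces
```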
2 changes: 1 addition & 1 deletion test/perf/sparse/fem.jl
@@ -5,7 +5,7 @@
# assemble the finite-difference laplacian
function fdlaplacian(N)
# create a 1D laplacian and a sparse identity
fdl1 = spdiagm((ones(N-1),-2*ones(N),ones(N-1)), [-1,0,1])
fdl1 = spdiagm(-1 => ones(N-1), 0 => -2*ones(N), 1 => ones(N-1))
# laplace operator on the full grid
return kron(speye(N), fdl1) + kron(fdl1, speye(N))
end
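For a concrete picture of the stencil `fdlaplacian` assembles with the new syntax, here is the 1-D operator for a small grid (N = 4 chosen purely for illustration):

```julia
N = 4
fdl1 = spdiagm(-1 => ones(N-1), 0 => -2*ones(N), 1 => ones(N-1))
Array(fdl1)
# 4×4 dense view of the second-difference stencil:
#  -2.0   1.0   0.0   0.0
#   1.0  -2.0   1.0   0.0
#   0.0   1.0  -2.0   1.0
#   0.0   0.0   1.0  -2.0
```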
2 changes: 1 addition & 1 deletion test/sparse/cholmod.jl
@@ -564,7 +564,7 @@ Asp = As[p,p]
LDp = sparse(ldltfact(Asp, perm=[1,2,3])[:LD])
# LDp = sparse(Fs[:LD])
Lp, dp = Base.SparseArrays.CHOLMOD.getLd!(copy(LDp))
Dp = spdiagm(dp)
Dp = sparse(Diagonal(dp))
@test Fs\b ≈ Af\b
@test Fs[:UP]\(Fs[:PtLD]\b) ≈ Af\b
@test Fs[:DUP]\(Fs[:PtL]\b) ≈ Af\b
44 changes: 22 additions & 22 deletions test/sparse/sparse.jl
@@ -236,7 +236,7 @@ end
b = sprandn(5, 5, 0.2)
@test (maximum(abs.(a\b - Array(a)\Array(b))) < 1000*eps())

a = spdiagm(randn(5)) + im*spdiagm(randn(5))
a = sparse(Diagonal(randn(5) + im*randn(5)))
b = randn(5,3)
@test (maximum(abs.(a*b - Array(a)*b)) < 100*eps())
@test (maximum(abs.(a'b - Array(a)'b)) < 100*eps())
@@ -483,12 +483,6 @@ end
end
end

@testset "construction of diagonal SparseMatrixCSCs" begin
@test Array(spdiagm((ones(2), ones(2)), (0, -1), 3, 3)) ==
[1.0 0.0 0.0; 1.0 1.0 0.0; 0.0 1.0 0.0]
@test Array(spdiagm(ones(2), -1, 3, 3)) == diagm(ones(2), -1)
end

@testset "issue #5190" begin
@test_throws ArgumentError sparsevec([3,5,7],[0.1,0.0,3.2],4)
end
@@ -1302,10 +1296,6 @@ end
@test norm(Array(D) - Array(S)) == 0.0
end

@testset "spdiagm promotion" begin
@test spdiagm(([1,2],[3.5],[4+5im]), (0,1,-1), 2,2) == [1 3.5; 4+5im 2]
end

@testset "error conditions for reshape, and squeeze" begin
local A = sprand(Bool, 5, 5, 0.2)
@test_throws DimensionMismatch reshape(A,(20, 2))
@@ -1419,10 +1409,18 @@ end
end

@testset "spdiagm" begin
v = sprand(10, 0.4)
@test spdiagm(v)::SparseMatrixCSC == diagm(Vector(v))
@test spdiagm(sparse(ones(5)))::SparseMatrixCSC == speye(5)
@test spdiagm(sparse(zeros(5)))::SparseMatrixCSC == spzeros(5,5)
@test spdiagm(0 => ones(2), -1 => ones(2)) == [1.0 0.0 0.0; 1.0 1.0 0.0; 0.0 1.0 0.0]
@test spdiagm(0 => ones(2), 1 => ones(2)) == [1.0 1.0 0.0; 0.0 1.0 1.0; 0.0 0.0 0.0]

for (x, y) in ((rand(5), rand(4)),(sparse(rand(5)), sparse(rand(4))))
@test spdiagm(-1 => x)::SparseMatrixCSC == diagm(x, -1)
@test spdiagm( 0 => x)::SparseMatrixCSC == diagm(x, 0) == sparse(Diagonal(x))
@test spdiagm( 1 => x)::SparseMatrixCSC == diagm(x, 1)
@test spdiagm(0 => x, -1 => y)::SparseMatrixCSC == diagm(x) + diagm(y, -1)
@test spdiagm(0 => x, 1 => y)::SparseMatrixCSC == diagm(x) + diagm(y, 1)
end
# promotion
@test spdiagm(0 => [1,2], 1 => [3.5], -1 => [4+5im]) == [1 3.5; 4+5im 2]
end

@testset "diag" begin
@@ -1699,16 +1697,16 @@ end

@testset "factorization" begin
local A
A = spdiagm(rand(5)) + sprandn(5, 5, 0.2) + im*sprandn(5, 5, 0.2)
A = sparse(Diagonal(rand(5))) + sprandn(5, 5, 0.2) + im*sprandn(5, 5, 0.2)
A = A + A'
@test !Base.USE_GPL_LIBS || abs(det(factorize(Hermitian(A)))) ≈ abs(det(factorize(Array(A))))
A = spdiagm(rand(5)) + sprandn(5, 5, 0.2) + im*sprandn(5, 5, 0.2)
A = sparse(Diagonal(rand(5))) + sprandn(5, 5, 0.2) + im*sprandn(5, 5, 0.2)
A = A*A'
@test !Base.USE_GPL_LIBS || abs(det(factorize(Hermitian(A)))) ≈ abs(det(factorize(Array(A))))
A = spdiagm(rand(5)) + sprandn(5, 5, 0.2)
A = sparse(Diagonal(rand(5))) + sprandn(5, 5, 0.2)
A = A + A.'
@test !Base.USE_GPL_LIBS || abs(det(factorize(Symmetric(A)))) ≈ abs(det(factorize(Array(A))))
A = spdiagm(rand(5)) + sprandn(5, 5, 0.2)
A = sparse(Diagonal(rand(5))) + sprandn(5, 5, 0.2)
A = A*A.'
@test !Base.USE_GPL_LIBS || abs(det(factorize(Symmetric(A)))) ≈ abs(det(factorize(Array(A))))
@test factorize(triu(A)) == triu(A)
@@ -1778,7 +1776,7 @@ end
N = 4
densevec = ones(N)
densemat = diagm(ones(N))
spmat = spdiagm(ones(N))
spmat = sparse(Diagonal(ones(N)))
# Test that concatenations of pairs of sparse matrices yield sparse arrays
@test issparse(vcat(spmat, spmat))
@test issparse(hcat(spmat, spmat))
@@ -1819,7 +1817,8 @@ end
# are called. (Issue #18705.) EDIT: #19239 unified broadcast over a single sparse matrix,
# eliminating the former operation classes.
@testset "issue #18705" begin
@test isa(sin.(spdiagm(1.0:5.0)), SparseMatrixCSC)
S = sparse(Diagonal(collect(1.0:5.0)))
@test isa(sin.(S), SparseMatrixCSC)
end

@testset "issue #19225" begin
@@ -1857,7 +1856,8 @@ end
# Check that `broadcast` methods specialized for unary operations over
# `SparseMatrixCSC`s determine a reasonable return type.
@testset "issue #18974" begin
@test eltype(sin.(spdiagm(Int64(1):Int64(4)))) == Float64
S = sparse(Diagonal(collect(Int64(1):Int64(4))))
@test eltype(sin.(S)) == Float64
end

# Check calling of unary minus method specialized for SparseMatrixCSCs
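One behaviour worth noting from the updated tests: the stored element type of the result is the promotion of all the diagonal vectors' element types, since `spdiagm_internal` allocates a single `V` with `promote_type`. A small sketch mirroring the promotion test above:

```julia
S = spdiagm(0 => [1, 2], 1 => [3.5], -1 => [4 + 5im])
eltype(S)                      # Complex{Float64}: promote_type(Int, Float64, Complex{Int})
S == [1 3.5; 4+5im 2]          # expected: true (a 2×2 square result)
```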
