From 5295feb473e0e6ccf695c91bb45e05908ce381c8 Mon Sep 17 00:00:00 2001 From: Fredrik Ekre Date: Wed, 11 Oct 2017 09:35:15 +0200 Subject: [PATCH] rewrite spdiagm with Pair API (#23757) instead of using one tuple with diagonals and one tuple with vectors --- NEWS.md | 7 ++++ base/deprecated.jl | 29 +++++++++++++- base/linalg/arnoldi.jl | 6 +-- base/sparse/sparsematrix.jl | 73 ++++++++++++++--------------------- test/linalg/special.jl | 2 +- test/linalg/uniformscaling.jl | 4 +- test/perf/sparse/fem.jl | 2 +- test/sparse/cholmod.jl | 2 +- test/sparse/sparse.jl | 44 ++++++++++----------- 9 files changed, 93 insertions(+), 76 deletions(-) diff --git a/NEWS.md b/NEWS.md index dc7c581d46cae..a29165e0fee9b 100644 --- a/NEWS.md +++ b/NEWS.md @@ -503,6 +503,13 @@ Deprecated or removed * `contains(eq, itr, item)` is deprecated in favor of `any` with a predicate ([#23716]). + * `spdiagm(x::AbstractVector)` has been deprecated in favor of `sparse(Diagonal(x))` + alternatively `spdiagm(0 => x)` ([#23757]). + + * `spdiagm(x::AbstractVector, d::Integer)` and `spdiagm(x::Tuple{<:AbstractVector}, d::Tuple{<:Integer})` + have been deprecated in favor of `spdiagm(d => x)` and `spdiagm(d[1] => x[1], d[2] => x[2], ...)` + respectively. The new `spdiagm` implementation now always returns a square matrix ([#23757]). + * Constructors for `LibGit2.UserPasswordCredentials` and `LibGit2.SSHCredentials` which take a `prompt_if_incorrect` argument are deprecated. Instead, prompting behavior is controlled using the `allow_prompt` keyword in the `LibGit2.CredentialPayload` constructor ([#23690]). diff --git a/base/deprecated.jl b/base/deprecated.jl index 6792411802d48..3d9016d14b801 100644 --- a/base/deprecated.jl +++ b/base/deprecated.jl @@ -1638,7 +1638,7 @@ export hex2num # PR 23341 import .LinAlg: diagm -@deprecate diagm(A::SparseMatrixCSC) spdiagm(sparsevec(A)) +@deprecate diagm(A::SparseMatrixCSC) sparse(Diagonal(sparsevec(A))) # PR #23373 @deprecate diagm(A::BitMatrix) diagm(vec(A)) @@ -1757,6 +1757,33 @@ end @deprecate contains(eq::Function, itr, x) any(y->eq(y,x), itr) +# PR #23757 +import .SparseArrays.spdiagm +@deprecate spdiagm(x::AbstractVector) sparse(Diagonal(x)) +function spdiagm(x::AbstractVector, d::Number) + depwarn(string("spdiagm(x::AbstractVector, d::Number) is deprecated, use ", + "spdiagm(d => x) instead, which now returns a square matrix. To preserve the old ", + "behaviour, use sparse(SparseArrays.spdiagm_internal(d => x)...)"), :spdiagm) + I, J, V = SparseArrays.spdiagm_internal(d => x) + return sparse(I, J, V) +end +function spdiagm(x, d) + depwarn(string("spdiagm((x1, x2, ...), (d1, d2, ...)) is deprecated, use ", + "spdiagm(d1 => x1, d2 => x2, ...) instead, which now returns a square matrix. ", + "To preserve the old behaviour, use ", + "sparse(SparseArrays.spdiagm_internal(d1 => x1, d2 => x2, ...)...)"), :spdiagm) + I, J, V = SparseArrays.spdiagm_internal((d[i] => x[i] for i in 1:length(x))...) + return sparse(I, J, V) +end +function spdiagm(x, d, m::Integer, n::Integer) + depwarn(string("spdiagm((x1, x2, ...), (d1, d2, ...), m, n) is deprecated, use ", + "spdiagm(d1 => x1, d2 => x2, ...) instead, which now returns a square matrix. ", + "To specify a non-square matrix and preserve the old behaviour, use ", + "I, J, V = SparseArrays.spdiagm_internal(d1 => x1, d2 => x2, ...); sparse(I, J, V, m, n)"), :spdiagm) + I, J, V = SparseArrays.spdiagm_internal((d[i] => x[i] for i in 1:length(x))...) 
+ return sparse(I, J, V, m, n) +end + # PR #23690 # `SSHCredentials` and `UserPasswordCredentials` constructors using `prompt_if_incorrect` # are deprecated in base/libgit2/types.jl. diff --git a/base/linalg/arnoldi.jl b/base/linalg/arnoldi.jl index ec0b8e0338190..1918cf626bcca 100644 --- a/base/linalg/arnoldi.jl +++ b/base/linalg/arnoldi.jl @@ -52,7 +52,7 @@ final residual vector `resid`. # Examples ```jldoctest -julia> A = spdiagm(1:4); +julia> A = Diagonal(1:4); julia> λ, ϕ = eigs(A, nev = 2); @@ -145,7 +145,7 @@ final residual vector `resid`. # Examples ```jldoctest -julia> A = speye(4, 4); B = spdiagm(1:4); +julia> A = speye(4, 4); B = Diagonal(1:4); julia> λ, ϕ = eigs(A, B, nev = 2); @@ -379,7 +379,7 @@ iterations derived from [`eigs`](@ref). # Examples ```jldoctest -julia> A = spdiagm(1:4); +julia> A = Diagonal(1:4); julia> s = svds(A, nsv = 2)[1]; diff --git a/base/sparse/sparsematrix.jl b/base/sparse/sparsematrix.jl index 589047a9fe893..613578ccadcef 100644 --- a/base/sparse/sparsematrix.jl +++ b/base/sparse/sparsematrix.jl @@ -1087,7 +1087,7 @@ For expert drivers and additional information, see [`permute!`](@ref). # Examples ```jldoctest -julia> A = spdiagm([1, 2, 3, 4], 0, 4, 4) + spdiagm([5, 6, 7], 1, 4, 4) +julia> A = spdiagm(0 => [1, 2, 3, 4], 1 => [5, 6, 7]) 4×4 SparseMatrixCSC{Int64,Int64} with 7 stored entries: [1, 1] = 1 [1, 2] = 5 @@ -1144,7 +1144,7 @@ and no space beyond that passed in. If `trim` is `true`, this method trims `A.ro # Examples ```jldoctest -julia> A = spdiagm([1, 2, 3, 4]) +julia> A = sparse(Diagonal([1, 2, 3, 4])) 4×4 SparseMatrixCSC{Int64,Int64} with 4 stored entries: [1, 1] = 1 [2, 2] = 2 @@ -2935,7 +2935,7 @@ stored and otherwise do nothing. Derivative forms: # Examples ```jldoctest -julia> A = spdiagm([1, 2, 3, 4]) +julia> A = sparse(Diagonal([1, 2, 3, 4])) 4×4 SparseMatrixCSC{Int64,Int64} with 4 stored entries: [1, 1] = 1 [2, 2] = 2 @@ -3280,57 +3280,48 @@ function istril(A::SparseMatrixCSC) return true end -# Create a sparse diagonal matrix by specifying multiple diagonals -# packed into a tuple, alongside their diagonal offsets and matrix shape -function spdiagm_internal(B, d) - ndiags = length(d) - if length(B) != ndiags; throw(ArgumentError("first argument should be a tuple of length(d)=$ndiags arrays of diagonals")); end +function spdiagm_internal(kv::Pair{<:Integer,<:AbstractVector}...) ncoeffs = 0 - for vec in B - ncoeffs += length(vec) + for p in kv + ncoeffs += length(p.second) end I = Vector{Int}(ncoeffs) J = Vector{Int}(ncoeffs) - V = Vector{promote_type(map(eltype, B)...)}(ncoeffs) - id = 0 + V = Vector{promote_type(map(x -> eltype(x.second), kv)...)}(ncoeffs) i = 0 - for vec in B - id += 1 - diag = d[id] - numel = length(vec) - if diag < 0 - row = -diag + for p in kv + dia = p.first + vect = p.second + numel = length(vect) + if dia < 0 + row = -dia col = 0 - elseif diag > 0 + elseif dia > 0 row = 0 - col = diag + col = dia else row = 0 col = 0 end - range = 1+i:numel+i - I[range] = row+1:row+numel - J[range] = col+1:col+numel - copy!(view(V, range), vec) + r = 1+i:numel+i + I[r] = row+1:row+numel + J[r] = col+1:col+numel + copy!(view(V, r), vect) i += numel end - - return (I,J,V) + return I, J, V end """ - spdiagm(B, d[, m, n]) + spdiagm(kv::Pair{<:Integer,<:AbstractVector}...) -Construct a sparse diagonal matrix. `B` is a tuple of vectors containing the diagonals and -`d` is a tuple containing the positions of the diagonals. 
In the case the input contains only -one diagonal, `B` can be a vector (instead of a tuple) and `d` can be the diagonal position -(instead of a tuple), defaulting to 0 (diagonal). Optionally, `m` and `n` specify the size -of the resulting sparse matrix. +Construct a square sparse diagonal matrix from `Pair`s of vectors and diagonals. +Vector `kv.second` will be placed on the `kv.first` diagonal. # Examples ```jldoctest -julia> spdiagm(([1,2,3,4],[4,3,2,1]),(-1,1)) +julia> spdiagm(-1 => [1,2,3,4], 1 => [4,3,2,1]) 5×5 SparseMatrixCSC{Int64,Int64} with 8 stored entries: [2, 1] = 1 [1, 2] = 4 @@ -3342,20 +3333,12 @@ julia> spdiagm(([1,2,3,4],[4,3,2,1]),(-1,1)) [4, 5] = 1 ``` """ -function spdiagm(B, d, m::Integer, n::Integer) - (I,J,V) = spdiagm_internal(B, d) - return sparse(I,J,V,m,n) -end - -function spdiagm(B, d) - (I,J,V) = spdiagm_internal(B, d) - return sparse(I,J,V) +function spdiagm(kv::Pair{<:Integer,<:AbstractVector}...) + I, J, V = spdiagm_internal(kv...) + n = max(dimlub(I), dimlub(J)) + return sparse(I, J, V, n, n) end -spdiagm(B::AbstractVector, d::Number, m::Integer, n::Integer) = spdiagm((B,), (d,), m, n) - -spdiagm(B::AbstractVector, d::Number=0) = spdiagm((B,), (d,)) - ## expand a colptr or rowptr into a dense index vector function expandptr(V::Vector{<:Integer}) if V[1] != 1 throw(ArgumentError("first index must be one")) end diff --git a/test/linalg/special.jl b/test/linalg/special.jl index 991cea0604b1e..9e77c72537deb 100644 --- a/test/linalg/special.jl +++ b/test/linalg/special.jl @@ -141,7 +141,7 @@ end # dense matrices, or dense vectors densevec = ones(N) densemat = diagm(ones(N)) - spmat = spdiagm(ones(N)) + spmat = sparse(Diagonal(ones(N))) for specialmat in specialmats # --> Tests applicable only to pairs of matrices for othermat in (spmat, densemat) diff --git a/test/linalg/uniformscaling.jl b/test/linalg/uniformscaling.jl index c739d15336510..d3ce6677b4be9 100644 --- a/test/linalg/uniformscaling.jl +++ b/test/linalg/uniformscaling.jl @@ -16,8 +16,8 @@ srand(123) @test one(UniformScaling(rand(Complex128))) == one(UniformScaling{Complex128}) @test eltype(one(UniformScaling(rand(Complex128)))) == Complex128 @test -one(UniformScaling(2)) == UniformScaling(-1) - @test sparse(3I,4,5) == spdiagm(fill(3,4),0,4,5) - @test sparse(3I,5,4) == spdiagm(fill(3,4),0,5,4) + @test sparse(3I,4,5) == sparse(1:4, 1:4, 3, 4, 5) + @test sparse(3I,5,4) == sparse(1:4, 1:4, 3, 5, 4) @test norm(UniformScaling(1+im)) ≈ sqrt(2) end diff --git a/test/perf/sparse/fem.jl b/test/perf/sparse/fem.jl index 77fb47e7c6782..80559921c9eba 100644 --- a/test/perf/sparse/fem.jl +++ b/test/perf/sparse/fem.jl @@ -5,7 +5,7 @@ # assemble the finite-difference laplacian function fdlaplacian(N) # create a 1D laplacian and a sparse identity - fdl1 = spdiagm((ones(N-1),-2*ones(N),ones(N-1)), [-1,0,1]) + fdl1 = spdiagm(-1 => ones(N-1), 0 => -2*ones(N), 1 => ones(N-1)) # laplace operator on the full grid return kron(speye(N), fdl1) + kron(fdl1, speye(N)) end diff --git a/test/sparse/cholmod.jl b/test/sparse/cholmod.jl index cb49c6ca627c7..9e00297a13d78 100644 --- a/test/sparse/cholmod.jl +++ b/test/sparse/cholmod.jl @@ -564,7 +564,7 @@ Asp = As[p,p] LDp = sparse(ldltfact(Asp, perm=[1,2,3])[:LD]) # LDp = sparse(Fs[:LD]) Lp, dp = Base.SparseArrays.CHOLMOD.getLd!(copy(LDp)) -Dp = spdiagm(dp) +Dp = sparse(Diagonal(dp)) @test Fs\b ≈ Af\b @test Fs[:UP]\(Fs[:PtLD]\b) ≈ Af\b @test Fs[:DUP]\(Fs[:PtL]\b) ≈ Af\b diff --git a/test/sparse/sparse.jl b/test/sparse/sparse.jl index ba1800964334e..bb72591375b7d 100644 --- 
a/test/sparse/sparse.jl +++ b/test/sparse/sparse.jl @@ -236,7 +236,7 @@ end b = sprandn(5, 5, 0.2) @test (maximum(abs.(a\b - Array(a)\Array(b))) < 1000*eps()) - a = spdiagm(randn(5)) + im*spdiagm(randn(5)) + a = sparse(Diagonal(randn(5) + im*randn(5))) b = randn(5,3) @test (maximum(abs.(a*b - Array(a)*b)) < 100*eps()) @test (maximum(abs.(a'b - Array(a)'b)) < 100*eps()) @@ -483,12 +483,6 @@ end end end -@testset "construction of diagonal SparseMatrixCSCs" begin - @test Array(spdiagm((ones(2), ones(2)), (0, -1), 3, 3)) == - [1.0 0.0 0.0; 1.0 1.0 0.0; 0.0 1.0 0.0] - @test Array(spdiagm(ones(2), -1, 3, 3)) == diagm(ones(2), -1) -end - @testset "issue #5190" begin @test_throws ArgumentError sparsevec([3,5,7],[0.1,0.0,3.2],4) end @@ -1302,10 +1296,6 @@ end @test norm(Array(D) - Array(S)) == 0.0 end -@testset "spdiagm promotion" begin - @test spdiagm(([1,2],[3.5],[4+5im]), (0,1,-1), 2,2) == [1 3.5; 4+5im 2] -end - @testset "error conditions for reshape, and squeeze" begin local A = sprand(Bool, 5, 5, 0.2) @test_throws DimensionMismatch reshape(A,(20, 2)) @@ -1419,10 +1409,18 @@ end end @testset "spdiagm" begin - v = sprand(10, 0.4) - @test spdiagm(v)::SparseMatrixCSC == diagm(Vector(v)) - @test spdiagm(sparse(ones(5)))::SparseMatrixCSC == speye(5) - @test spdiagm(sparse(zeros(5)))::SparseMatrixCSC == spzeros(5,5) + @test spdiagm(0 => ones(2), -1 => ones(2)) == [1.0 0.0 0.0; 1.0 1.0 0.0; 0.0 1.0 0.0] + @test spdiagm(0 => ones(2), 1 => ones(2)) == [1.0 1.0 0.0; 0.0 1.0 1.0; 0.0 0.0 0.0] + + for (x, y) in ((rand(5), rand(4)),(sparse(rand(5)), sparse(rand(4)))) + @test spdiagm(-1 => x)::SparseMatrixCSC == diagm(x, -1) + @test spdiagm( 0 => x)::SparseMatrixCSC == diagm(x, 0) == sparse(Diagonal(x)) + @test spdiagm(-1 => x)::SparseMatrixCSC == diagm(x, -1) + @test spdiagm(0 => x, -1 => y)::SparseMatrixCSC == diagm(x) + diagm(y, -1) + @test spdiagm(0 => x, 1 => y)::SparseMatrixCSC == diagm(x) + diagm(y, 1) + end + # promotion + @test spdiagm(0 => [1,2], 1 => [3.5], -1 => [4+5im]) == [1 3.5; 4+5im 2] end @testset "diag" begin @@ -1699,16 +1697,16 @@ end @testset "factorization" begin local A - A = spdiagm(rand(5)) + sprandn(5, 5, 0.2) + im*sprandn(5, 5, 0.2) + A = sparse(Diagonal(rand(5))) + sprandn(5, 5, 0.2) + im*sprandn(5, 5, 0.2) A = A + A' @test !Base.USE_GPL_LIBS || abs(det(factorize(Hermitian(A)))) ≈ abs(det(factorize(Array(A)))) - A = spdiagm(rand(5)) + sprandn(5, 5, 0.2) + im*sprandn(5, 5, 0.2) + A = sparse(Diagonal(rand(5))) + sprandn(5, 5, 0.2) + im*sprandn(5, 5, 0.2) A = A*A' @test !Base.USE_GPL_LIBS || abs(det(factorize(Hermitian(A)))) ≈ abs(det(factorize(Array(A)))) - A = spdiagm(rand(5)) + sprandn(5, 5, 0.2) + A = sparse(Diagonal(rand(5))) + sprandn(5, 5, 0.2) A = A + A.' @test !Base.USE_GPL_LIBS || abs(det(factorize(Symmetric(A)))) ≈ abs(det(factorize(Array(A)))) - A = spdiagm(rand(5)) + sprandn(5, 5, 0.2) + A = sparse(Diagonal(rand(5))) + sprandn(5, 5, 0.2) A = A*A.' @test !Base.USE_GPL_LIBS || abs(det(factorize(Symmetric(A)))) ≈ abs(det(factorize(Array(A)))) @test factorize(triu(A)) == triu(A) @@ -1778,7 +1776,7 @@ end N = 4 densevec = ones(N) densemat = diagm(ones(N)) - spmat = spdiagm(ones(N)) + spmat = sparse(Diagonal(ones(N))) # Test that concatenations of pairs of sparse matrices yield sparse arrays @test issparse(vcat(spmat, spmat)) @test issparse(hcat(spmat, spmat)) @@ -1819,7 +1817,8 @@ end # are called. (Issue #18705.) EDIT: #19239 unified broadcast over a single sparse matrix, # eliminating the former operation classes. 
@testset "issue #18705" begin - @test isa(sin.(spdiagm(1.0:5.0)), SparseMatrixCSC) + S = sparse(Diagonal(collect(1.0:5.0))) + @test isa(sin.(S), SparseMatrixCSC) end @testset "issue #19225" begin @@ -1857,7 +1856,8 @@ end # Check that `broadcast` methods specialized for unary operations over # `SparseMatrixCSC`s determine a reasonable return type. @testset "issue #18974" begin - @test eltype(sin.(spdiagm(Int64(1):Int64(4)))) == Float64 + S = sparse(Diagonal(collect(Int64(1):Int64(4)))) + @test eltype(sin.(S)) == Float64 end # Check calling of unary minus method specialized for SparseMatrixCSCs