diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl
index 446a91289388cc..370074dd30e360 100644
--- a/stdlib/LinearAlgebra/src/deprecated.jl
+++ b/stdlib/LinearAlgebra/src/deprecated.jl
@@ -1260,3 +1260,24 @@ end
 @deprecate scale!(C::AbstractMatrix, a::AbstractVector, B::AbstractMatrix) mul!(C, Diagonal(a), B)
 
 Base.@deprecate_binding trace tr
+
+# deprecate lu(...) in favor of lufact and factorization destructuring
+function lu(A::AbstractMatrix, pivot::Union{Val{false}, Val{true}} = Val(true))
+    depwarn(string("`lu(A[, pivot])` has been deprecated in favor of ",
+        "`lufact(A[, pivot])`. Whereas `lu(A[, pivot])` returns a tuple of arrays, ",
+        "`lufact(A[, pivot])` returns an `LU` object. So for a direct replacement, ",
+        "use `(lufact(A[, pivot])...,)`. But going forward, consider using the direct ",
+        "result of `lufact(A[, pivot])` instead, either destructured into its components ",
+        "(`l, u, p = lufact(A[, pivot])`) or as an `LU` object (`lup = lufact(A)`)."), :lu)
+    return (lufact(A, pivot)...,)
+end
+function lu(x::Number)
+    depwarn(string("`lu(x::Number)` has been deprecated in favor of `lufact(x::Number)`. ",
+        "Whereas `lu(x::Number)` returns a tuple of numbers, `lufact(x::Number)` ",
+        "returns a tuple of arrays for consistency with other `lufact` methods. ",
+        "So for a direct replacement, use `first.((lufact(x)...,))`. But going ",
+        "forward, consider using the direct result of `lufact(x)` instead, either ",
+        "destructured into its components (`l, u, p = lufact(x)`) or as an ",
+        "`LU` object (`lup = lufact(x)`)."), :lu)
+    return first.((lufact(x)...,))
+end
diff --git a/stdlib/LinearAlgebra/src/lu.jl b/stdlib/LinearAlgebra/src/lu.jl
index 34b6efe660940b..13fef48849b2df 100644
--- a/stdlib/LinearAlgebra/src/lu.jl
+++ b/stdlib/LinearAlgebra/src/lu.jl
@@ -11,6 +11,14 @@ struct LU{T,S<:AbstractMatrix} <: Factorization{T}
 end
 LU(factors::AbstractMatrix{T}, ipiv::Vector{BlasInt}, info::BlasInt) where {T} =
     LU{T,typeof(factors)}(factors, ipiv, info)
+# iteration for destructuring into factors
+Base.start(::LU) = Val(:L)
+Base.next(F::LU, ::Val{:L}) = (F.L, Val(:U))
+Base.next(F::LU, ::Val{:U}) = (F.U, Val(:p))
+Base.next(F::LU, ::Val{:p}) = (F.p, Val(:done))
+Base.done(F::LU, ::Val{:done}) = true
+Base.done(F::LU, ::Any) = false
+
 adjoint(F::LU) = Adjoint(F)
 transpose(F::LU) = Transpose(F)
 
@@ -174,6 +182,26 @@ U factor:
 
 julia> F.L * F.U == A[F.p, :]
 true
+
+julia> L, U, p = lufact(A); # destructuring via iteration
+
+julia> L
+2×2 Array{Float64,2}:
+ 1.0  0.0
+ 1.5  1.0
+
+julia> U
+2×2 Array{Float64,2}:
+ 4.0   3.0
+ 0.0  -1.5
+
+julia> p
+2-element Array{Int64,1}:
+ 1
+ 2
+
+julia> A[p, :] == L * U
+true
 ```
 """
 function lufact(A::AbstractMatrix{T}, pivot::Union{Val{false}, Val{true}}) where T
@@ -200,36 +228,6 @@ end
 lufact(x::Number) = LU(fill(x, 1, 1), BlasInt[1], x == 0 ? one(BlasInt) : zero(BlasInt))
 lufact(F::LU) = F
 
-lu(x::Number) = (one(x), x, 1)
-
-"""
-    lu(A, pivot=Val(true)) -> L, U, p
-
-Compute the LU factorization of `A`, such that `A[p,:] = L*U`.
-By default, pivoting is used. This can be overridden by passing
-`Val(false)` for the second argument.
-
-See also [`lufact`](@ref).
-
-# Examples
-```jldoctest
-julia> A = [4. 3.; 6. 3.]
-2×2 Array{Float64,2}:
- 4.0  3.0
- 6.0  3.0
-
-julia> L, U, p = lu(A)
-([1.0 0.0; 0.666667 1.0], [6.0 3.0; 0.0 1.0], [2, 1])
-
-julia> A[p, :] == L * U
-true
-```
-"""
-function lu(A::AbstractMatrix, pivot::Union{Val{false}, Val{true}} = Val(true))
-    F = lufact(A, pivot)
-    F.L, F.U, F.p
-end
-
 function LU{T}(F::LU) where T
     M = convert(AbstractMatrix{T}, F.factors)
     LU{T,typeof(M)}(M, F.ipiv, F.info)
diff --git a/stdlib/LinearAlgebra/test/lu.jl b/stdlib/LinearAlgebra/test/lu.jl
index bf6de1cb381270..0a1c42a103e817 100644
--- a/stdlib/LinearAlgebra/test/lu.jl
+++ b/stdlib/LinearAlgebra/test/lu.jl
@@ -42,7 +42,7 @@ dimg = randn(n)/2
     if eltya <: BlasFloat
         @testset "LU factorization for Number" begin
             num = rand(eltya)
-            @test lu(num) == (one(eltya),num,1)
+            @test (lufact(num)...,) == (hcat(one(eltya)), hcat(num), [1])
             @test convert(Array, lufact(num)) ≈ eltya[num]
         end
         @testset "Balancing in eigenvector calculations" begin
@@ -67,7 +67,7 @@ dimg = randn(n)/2
         lua = factorize(a)
         @test_throws ErrorException lua.Z
         l,u,p = lua.L, lua.U, lua.p
-        ll,ul,pl = lu(a)
+        ll,ul,pl = lufact(a)
         @test ll * ul ≈ a[pl,:]
         @test l*u ≈ a[p,:]
         @test (l*u)[invperm(p),:] ≈ a
diff --git a/stdlib/SparseArrays/src/linalg.jl b/stdlib/SparseArrays/src/linalg.jl
index c304d3c3821fa5..23a33b39985214 100644
--- a/stdlib/SparseArrays/src/linalg.jl
+++ b/stdlib/SparseArrays/src/linalg.jl
@@ -1009,7 +1009,6 @@ function factorize(A::LinearAlgebra.RealHermSymComplexHerm{Float64,<:SparseMatri
 end
 
 chol(A::SparseMatrixCSC) = error("Use cholfact() instead of chol() for sparse matrices.")
-lu(A::SparseMatrixCSC) = error("Use lufact() instead of lu() for sparse matrices.")
 eig(A::SparseMatrixCSC) = error("Use IterativeEigensolvers.eigs() instead of eig() for sparse matrices.")
 
 function Base.cov(X::SparseMatrixCSC; dims::Int=1, corrected::Bool=true)
diff --git a/stdlib/SparseArrays/test/sparse.jl b/stdlib/SparseArrays/test/sparse.jl
index 09d24e711fed82..b8e8991d4b6b13 100644
--- a/stdlib/SparseArrays/test/sparse.jl
+++ b/stdlib/SparseArrays/test/sparse.jl
@@ -1780,7 +1780,6 @@
     C, b = A[:, 1:4], fill(1., size(A, 1))
     @test !Base.USE_GPL_LIBS || factorize(C)\b ≈ Array(C)\b
     @test_throws ErrorException chol(A)
-    @test_throws ErrorException lu(A)
     @test_throws ErrorException eig(A)
     @test_throws ErrorException inv(A)
 end
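Below the patch, a quick sketch (not part of the diff) of how the deprecated calls map onto `lufact` after this change. It assumes a Julia 0.7-era build where these definitions are loaded; the variable names are illustrative only.

```julia
using LinearAlgebra

A = [4.0 3.0; 6.0 3.0]

# With the new start/next/done methods, an LU object iterates as (L, U, p),
# so it can be destructured directly, as the depwarn messages suggest.
F = lufact(A)
l, u, p = F
@assert l * u ≈ A[p, :]

# Drop-in replacement for the old tuple-returning `lu(A)`.
l2, u2, p2 = (lufact(A)...,)
@assert (l2, u2, p2) == (F.L, F.U, F.p)

# For scalars, `lufact(x)` returns 1×1 factors; broadcasting `first` recovers
# the old `lu(x::Number)` tuple of numbers.
x = 3.0
@assert first.((lufact(x)...,)) == (one(x), x, 1)
```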