diff --git a/src/factorization.jl b/src/factorization.jl
index 2ab063cfb..7156991fb 100644
--- a/src/factorization.jl
+++ b/src/factorization.jl
@@ -29,7 +29,7 @@ end
 `LUFactorization(pivot=LinearAlgebra.RowMaximum())`
 
 Julia's built in `lu`. Equivalent to calling `lu!(A)`
-    
+
 * On dense matrices, this uses the current BLAS implementation of the user's computer,
   which by default is OpenBLAS but will use MKL if the user does `using MKL` in their
   system.
@@ -135,7 +135,7 @@ end
 `QRFactorization(pivot=LinearAlgebra.NoPivot(),blocksize=16)`
 
 Julia's built in `qr`. Equivalent to calling `qr!(A)`.
-    
+
 * On dense matrices, this uses the current BLAS implementation of the user's computer
   which by default is OpenBLAS but will use MKL if the user does `using MKL` in their
   system.
@@ -242,7 +242,7 @@ end
 function do_factorization(alg::CholeskyFactorization, A, b, u)
     A = convert(AbstractMatrix, A)
     if A isa SparseMatrixCSC
-        fact = cholesky!(A; shift = alg.shift, check = false, perm = alg.perm)
+        fact = cholesky(A; shift = alg.shift, check = false, perm = alg.perm)
     elseif alg.pivot === Val(false) || alg.pivot === NoPivot()
        fact = cholesky!(A, alg.pivot; check = false)
     else
@@ -346,7 +346,7 @@ end
 `SVDFactorization(full=false,alg=LinearAlgebra.DivideAndConquer())`
 
 Julia's built in `svd`. Equivalent to `svd!(A)`.
-    
+
 * On dense matrices, this uses the current BLAS implementation of the user's computer
   which by default is OpenBLAS but will use MKL if the user does `using MKL` in their
   system.
@@ -444,7 +444,7 @@ end
 `GenericFactorization(;fact_alg=LinearAlgebra.factorize)`: Constructs a linear solver
 from a generic factorization algorithm `fact_alg` which complies with the Base.LinearAlgebra
 factorization API. Quoting from Base:
-    
+
 * If `A` is upper or lower triangular (or diagonal), no factorization of `A` is required.
   The system is then solved with either forward or backward substitution.
   For non-triangular square matrices, an LU factorization is used.
@@ -666,7 +666,7 @@ end
 """
 `UMFPACKFactorization(;reuse_symbolic=true, check_pattern=true)`
 
-A fast sparse multithreaded LU-factorization which specializes on sparsity 
+A fast sparse multithreaded LU-factorization which specializes on sparsity
 patterns with “more structure”.
 
 !!! note
@@ -850,7 +850,7 @@ Only supports sparse matrices.
 
 ## Keyword Arguments
 
-* shift: the shift argument in CHOLMOD. 
+* shift: the shift argument in CHOLMOD.
 * perm: the perm argument in CHOLMOD
 """
 Base.@kwdef struct CHOLMODFactorization{T} <: AbstractFactorization
@@ -916,12 +916,12 @@ end
 ## RFLUFactorization
 
 """
-`RFLUFactorization()` 
+`RFLUFactorization()`
 
 A fast pure Julia LU-factorization implementation
 using RecursiveFactorization.jl. This is by far the fastest LU-factorization
 implementation, usually outperforming OpenBLAS and MKL for smaller matrices
-(<500x500), but currently optimized only for Base `Array` with `Float32` or `Float64`. 
+(<500x500), but currently optimized only for Base `Array` with `Float32` or `Float64`.
 Additional optimization for complex matrices is in the works.
 """
 struct RFLUFactorization{P, T} <: AbstractFactorization
@@ -1179,7 +1179,7 @@ end
 # But I'm not sure it makes sense as a GenericFactorization
 # since it just uses `LAPACK.getrf!`.
 """
-`FastLUFactorization()` 
+`FastLUFactorization()`
 
 The FastLapackInterface.jl version of the LU factorization. Notably,
 this version does not allow for choice of pivoting method.
@@ -1210,7 +1210,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::FastLUFactorization; kwargs...)
 end
 
 """
-`FastQRFactorization()` 
+`FastQRFactorization()`
 
 The FastLapackInterface.jl version of the QR factorization.
 """
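The one behavioral change in `src/factorization.jl` is in the sparse branch of `do_factorization(::CholeskyFactorization, ...)`, which now calls the out-of-place `cholesky` instead of `cholesky!` on a `SparseMatrixCSC` (the in-place variant is not the entry point CHOLMOD provides for a raw sparse matrix with `shift`/`perm` keywords). A minimal standalone sketch of that call pattern, assuming only the stock SparseArrays/CHOLMOD `cholesky(A; shift, check, perm)` method and a small hand-built SPD matrix that is not taken from this diff:

```julia
using LinearAlgebra, SparseArrays

# Small symmetric positive definite sparse matrix (illustrative stand-in).
A = sparse([4.0 1.0 0.0;
            1.0 3.0 0.0;
            0.0 0.0 2.0])
b = [1.0, 2.0, 3.0]

# Out-of-place sparse Cholesky, mirroring the replaced call
# `fact = cholesky(A; shift = alg.shift, check = false, perm = alg.perm)`.
fact = cholesky(A; shift = 0.0, check = false, perm = nothing)
issuccess(fact) || error("Cholesky factorization failed")

x = fact \ b
@assert A * x ≈ b
```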
end """ -`FastQRFactorization()` +`FastQRFactorization()` The FastLapackInterface.jl version of the QR factorization. """ diff --git a/test/sparse_vector.jl b/test/sparse_vector.jl index d5c9bbda9..7f80ae1a5 100644 --- a/test/sparse_vector.jl +++ b/test/sparse_vector.jl @@ -2,7 +2,7 @@ using SparseArrays using LinearSolve using LinearAlgebra -# Constructing sparse array +# Constructing sparse array function hess_sparse(x::Vector{T}) where {T} return [ -sin(x[1] + x[2]) + 1, @@ -37,7 +37,12 @@ n = length(x0) hess_mat = sparse(rowval, colval, hess_sparse(x0), n, n) grad_vec = sparsevec(gradinds, grad_sparse(x0), n) -# # Converting grad_vec to dense succeeds in solving +# Converting grad_vec to dense succeeds in solving prob = LinearProblem(hess_mat, grad_vec) -linsolve = init(prob) +linsolve = init(prob); @test solve!(linsolve).u ≈ hess_mat \ Array(grad_vec) + +H = hess_mat' * hess_mat +prob = LinearProblem(H, hess_mat' * grad_vec) +linsolve = init(prob, CholeskyFactorization()) +VERSION >= v"1.8" && @test solve!(linsolve).u ≈ H \ Array(hess_mat' * grad_vec)