From a538b3a1a508948b3ae6428f26ad0b932bcc4c80 Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Sat, 14 Oct 2023 22:53:03 -0400 Subject: [PATCH 1/3] Hacky patch --- src/factorization.jl | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/src/factorization.jl b/src/factorization.jl index 2ab063cfb..7e3b6d63c 100644 --- a/src/factorization.jl +++ b/src/factorization.jl @@ -29,7 +29,7 @@ end `LUFactorization(pivot=LinearAlgebra.RowMaximum())` Julia's built in `lu`. Equivalent to calling `lu!(A)` - + * On dense matrices, this uses the current BLAS implementation of the user's computer, which by default is OpenBLAS but will use MKL if the user does `using MKL` in their system. @@ -135,7 +135,7 @@ end `QRFactorization(pivot=LinearAlgebra.NoPivot(),blocksize=16)` Julia's built in `qr`. Equivalent to calling `qr!(A)`. - + * On dense matrices, this uses the current BLAS implementation of the user's computer which by default is OpenBLAS but will use MKL if the user does `using MKL` in their system. @@ -242,7 +242,9 @@ end function do_factorization(alg::CholeskyFactorization, A, b, u) A = convert(AbstractMatrix, A) if A isa SparseMatrixCSC - fact = cholesky!(A; shift = alg.shift, check = false, perm = alg.perm) + # fact = cholesky!(A; shift = alg.shift, check = false, perm = alg.perm) + # fact = @time cholesky!(A; check = false) + fact = cholesky(A; shift = alg.shift, check = false, perm = alg.perm) elseif alg.pivot === Val(false) || alg.pivot === NoPivot() fact = cholesky!(A, alg.pivot; check = false) else @@ -268,6 +270,7 @@ function init_cacheval(alg::CholeskyFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) ArrayInterface.cholesky_instance(convert(AbstractMatrix, A), alg.pivot) + # cholesky!(similar(A, 1, 1); check=false) end @static if VERSION < v"1.8beta" @@ -346,7 +349,7 @@ end `SVDFactorization(full=false,alg=LinearAlgebra.DivideAndConquer())` Julia's built in `svd`. Equivalent to `svd!(A)`. - + * On dense matrices, this uses the current BLAS implementation of the user's computer which by default is OpenBLAS but will use MKL if the user does `using MKL` in their system. @@ -444,7 +447,7 @@ end `GenericFactorization(;fact_alg=LinearAlgebra.factorize)`: Constructs a linear solver from a generic factorization algorithm `fact_alg` which complies with the Base.LinearAlgebra factorization API. Quoting from Base: - + * If `A` is upper or lower triangular (or diagonal), no factorization of `A` is required. The system is then solved with either forward or backward substitution. For non-triangular square matrices, an LU factorization is used. @@ -666,7 +669,7 @@ end """ `UMFPACKFactorization(;reuse_symbolic=true, check_pattern=true)` -A fast sparse multithreaded LU-factorization which specializes on sparsity +A fast sparse multithreaded LU-factorization which specializes on sparsity patterns with “more structure”. !!! note @@ -850,7 +853,7 @@ Only supports sparse matrices. ## Keyword Arguments -* shift: the shift argument in CHOLMOD. +* shift: the shift argument in CHOLMOD. * perm: the perm argument in CHOLMOD """ Base.@kwdef struct CHOLMODFactorization{T} <: AbstractFactorization @@ -916,12 +919,12 @@ end ## RFLUFactorization """ -`RFLUFactorization()` +`RFLUFactorization()` A fast pure Julia LU-factorization implementation using RecursiveFactorization.jl. 
This is by far the fastest LU-factorization implementation, usually outperforming OpenBLAS and MKL for smaller matrices -(<500x500), but currently optimized only for Base `Array` with `Float32` or `Float64`. +(<500x500), but currently optimized only for Base `Array` with `Float32` or `Float64`. Additional optimization for complex matrices is in the works. """ struct RFLUFactorization{P, T} <: AbstractFactorization @@ -1179,7 +1182,7 @@ end # But I'm not sure it makes sense as a GenericFactorization # since it just uses `LAPACK.getrf!`. """ -`FastLUFactorization()` +`FastLUFactorization()` The FastLapackInterface.jl version of the LU factorization. Notably, this version does not allow for choice of pivoting method. @@ -1210,7 +1213,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::FastLUFactorization; kwargs.. end """ -`FastQRFactorization()` +`FastQRFactorization()` The FastLapackInterface.jl version of the QR factorization. """ From f28b102c766876ab163826f18246c9884877990c Mon Sep 17 00:00:00 2001 From: Avik Pal Date: Sun, 15 Oct 2023 13:16:08 -0400 Subject: [PATCH 2/3] Add test for sparse cholesky --- src/factorization.jl | 3 --- test/sparse_vector.jl | 11 ++++++++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/src/factorization.jl b/src/factorization.jl index 7e3b6d63c..7156991fb 100644 --- a/src/factorization.jl +++ b/src/factorization.jl @@ -242,8 +242,6 @@ end function do_factorization(alg::CholeskyFactorization, A, b, u) A = convert(AbstractMatrix, A) if A isa SparseMatrixCSC - # fact = cholesky!(A; shift = alg.shift, check = false, perm = alg.perm) - # fact = @time cholesky!(A; check = false) fact = cholesky(A; shift = alg.shift, check = false, perm = alg.perm) elseif alg.pivot === Val(false) || alg.pivot === NoPivot() fact = cholesky!(A, alg.pivot; check = false) @@ -270,7 +268,6 @@ function init_cacheval(alg::CholeskyFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) ArrayInterface.cholesky_instance(convert(AbstractMatrix, A), alg.pivot) - # cholesky!(similar(A, 1, 1); check=false) end @static if VERSION < v"1.8beta" diff --git a/test/sparse_vector.jl b/test/sparse_vector.jl index d5c9bbda9..145745528 100644 --- a/test/sparse_vector.jl +++ b/test/sparse_vector.jl @@ -2,7 +2,7 @@ using SparseArrays using LinearSolve using LinearAlgebra -# Constructing sparse array +# Constructing sparse array function hess_sparse(x::Vector{T}) where {T} return [ -sin(x[1] + x[2]) + 1, @@ -37,7 +37,12 @@ n = length(x0) hess_mat = sparse(rowval, colval, hess_sparse(x0), n, n) grad_vec = sparsevec(gradinds, grad_sparse(x0), n) -# # Converting grad_vec to dense succeeds in solving +# Converting grad_vec to dense succeeds in solving prob = LinearProblem(hess_mat, grad_vec) -linsolve = init(prob) +linsolve = init(prob); @test solve!(linsolve).u ≈ hess_mat \ Array(grad_vec) + +H = hess_mat' * hess_mat +prob = LinearProblem(H, hess_mat' * grad_vec) +linsolve = init(prob, CholeskyFactorization()) +@test solve!(linsolve).u ≈ H \ Array(hess_mat' * grad_vec) From 0f197b08b25e5b405ec7ce6f8b144a68dc232c28 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Sun, 15 Oct 2023 13:48:26 -0400 Subject: [PATCH 3/3] Update test/sparse_vector.jl --- test/sparse_vector.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/sparse_vector.jl b/test/sparse_vector.jl index 145745528..7f80ae1a5 100644 --- a/test/sparse_vector.jl +++ b/test/sparse_vector.jl @@ -45,4 +45,4 @@ linsolve = init(prob); H = hess_mat' * 
hess_mat prob = LinearProblem(H, hess_mat' * grad_vec) linsolve = init(prob, CholeskyFactorization()) -@test solve!(linsolve).u ≈ H \ Array(hess_mat' * grad_vec) +VERSION >= v"1.8" && @test solve!(linsolve).u ≈ H \ Array(hess_mat' * grad_vec)
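
For context on what the series changes: on `SparseMatrixCSC` input, `CholeskyFactorization` now reaches CHOLMOD through the out-of-place `cholesky` (forwarding the `shift` and `perm` keywords) rather than `cholesky!`, and the new test exercises that path on a normal-equations system. Below is a minimal usage sketch of that path, with an illustrative random matrix standing in for the sparse Hessian built in test/sparse_vector.jl; it assumes only LinearSolve, LinearAlgebra, and SparseArrays, as in the patches.

    using LinearSolve, LinearAlgebra, SparseArrays

    # Illustrative sparse SPD system from normal equations (not the
    # hess_sparse/grad_sparse data used in the actual test).
    A = sprand(20, 20, 0.3) + 20I
    H = A' * A                      # SparseMatrixCSC, symmetric positive definite
    b = A' * rand(20)

    # For sparse A, do_factorization now calls the out-of-place
    # cholesky(A; shift, check = false, perm) instead of cholesky!.
    prob = LinearProblem(H, b)
    linsolve = init(prob, CholeskyFactorization())
    sol = solve!(linsolve)

    sol.u ≈ H \ b                   # agrees with the backslash reference solve

As in the final patch, the corresponding test assertion is only run on Julia 1.8 and later.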