diff --git a/Project.toml b/Project.toml index 10c787304..564f4af05 100644 --- a/Project.toml +++ b/Project.toml @@ -23,6 +23,7 @@ Preferences = "21216c6a-2e73-6563-6e65-726566657250" RecursiveArrayTools = "731186ca-8d62-57ce-b412-fbd966d074cd" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" +SciMLLogging = "a6db7da4-7206-11f0-1eab-35f2a5dbe1d1" SciMLOperators = "c0aeaf25-5076-4817-a8d5-81caf7dfa961" Setfield = "efcf1570-3423-57d1-acb7-fd33fddbac46" StaticArraysCore = "1e83bf80-4336-4d27-bf5d-d5a4f845583c" @@ -128,6 +129,7 @@ Reexport = "1.2.2" SafeTestsets = "0.1" SciMLBase = "2.70" SciMLOperators = "1.7.1" +SciMLLogging = "1.3.0" Setfield = "1.1.1" SparseArrays = "1.10" Sparspak = "0.3.9" diff --git a/docs/src/advanced/developing.md b/docs/src/advanced/developing.md index 31b7a5d1f..ea70d32e1 100644 --- a/docs/src/advanced/developing.md +++ b/docs/src/advanced/developing.md @@ -19,7 +19,7 @@ struct MyLUFactorization{P} <: LinearSolve.SciMLLinearSolveAlgorithm end function LinearSolve.init_cacheval( alg::MyLUFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assump::LinearSolve.OperatorAssumptions) + verbose::LinearVerbosity, assump::LinearSolve.OperatorAssumptions) lu!(convert(AbstractMatrix, A)) end diff --git a/docs/src/basics/common_solver_opts.md b/docs/src/basics/common_solver_opts.md index 80c994621..e8e534c82 100644 --- a/docs/src/basics/common_solver_opts.md +++ b/docs/src/basics/common_solver_opts.md @@ -26,3 +26,92 @@ solve completely. Error controls only apply to iterative solvers. - `maxiters`: The number of iterations allowed. Defaults to `length(prob.b)` - `Pl,Pr`: The left and right preconditioners, respectively. For more information, see [the Preconditioners page](@ref prec). 
+ +## Verbosity Controls + +The verbosity system in LinearSolve.jl provides fine-grained control over the diagnostic messages, warnings, and errors that are displayed during the solution of linear systems. + +The verbosity system is organized hierarchically into three main categories: + +1. Error Control - Messages related to fallbacks and error handling +2. Performance - Messages related to performance considerations +3. Numerical - Messages related to numerical solvers and iterations + +Each category can be configured independently, and individual settings can be adjusted to suit your needs. + +### Verbosity Levels +The following verbosity levels are available: + +#### Individual Settings +These settings are meant for individual settings within a category. These can also be used to set all of the individual settings in a group to the same value. +- SciMLLogging.None() - Suppress all messages +- SciMLLogging.Info() - Show message as log message at info level +- SciMLLogging.Warn() - Show warnings (default for most settings) +- SciMLLogging.Error() - Throw errors instead of warnings +- SciMLLogging.Level(n) - Show messages with a log level setting of n + +#### Group Settings +These settings are meant for controlling a group of settings. 
+- SciMLLogging.Default() - Use the default settings +- SciMLLogging.All() - Show all possible messages + +### Basic Usage + +#### Global Verbosity Control + +```julia +using LinearSolve + +# Suppress all messages +verbose = LinearVerbosity(SciMLLogging.None()) +prob = LinearProblem(A, b) +sol = solve(prob; verbose=verbose) + +# Show all messages +verbose = LinearVerbosity(SciMLLogging.All()) +sol = solve(prob; verbose=verbose) + +# Use default settings +verbose = LinearVerbosity(SciMLLogging.Default()) +sol = solve(prob; verbose=verbose) +``` + +#### Group Level Control + +```julia +# Customize by category +verbose = LinearVerbosity( + error_control = SciMLLogging.Warn(), # Show warnings for error control related issues + performance = SciMLLogging.None(), # Suppress performance messages + numerical = SciMLLogging.Info() # Show all numerical related log messages at info level +) + +sol = solve(prob; verbose=verbose) +``` + +#### Fine-grained Control +The constructor for `LinearVerbosity` allows you to set verbosity for each specific message toggle, giving you fine-grained control. +The verbosity settings for the toggles are automatically passed to the group objects. 
+```julia +# Set specific message types +verbose = LinearVerbosity( + default_lu_fallback = SciMLLogging.Info(), # Show info when LU fallback is used + KrylovJL_verbosity = SciMLLogging.Warn(), # Show warnings from KrylovJL + no_right_preconditioning = SciMLLogging.None(), # Suppress right preconditioning messages + KrylovKit_verbosity = SciMLLogging.Level(KrylovKit.WARN_LEVEL) # Set KrylovKit verbosity level using KrylovKit's own verbosity levels +) + +sol = solve(prob; verbose=verbose) + +``` + +#### Message Toggle Reference +##### Error Control Settings +- default_lu_fallback: Controls messages when falling back to LU factorization (default: Warn) +##### Performance Settings +- no_right_preconditioning: Controls messages when right preconditioning is not used (default: Warn) +##### Numerical Settings +- using_IterativeSolvers: Controls messages when using the IterativeSolvers.jl package (default: Warn) +- IterativeSolvers_iterations: Controls messages about iteration counts from IterativeSolvers.jl (default: Warn) +- KrylovKit_verbosity: Controls messages from the KrylovKit.jl package (default: Warn) +- KrylovJL_verbosity: Controls verbosity of the Krylov.jl solvers used via the `KrylovJL` algorithms (default: None) \ No newline at end of file diff --git a/ext/LinearSolveAMDGPUExt.jl b/ext/LinearSolveAMDGPUExt.jl index 4fad3d9f3..2b9dae94d 100644 --- a/ext/LinearSolveAMDGPUExt.jl +++ b/ext/LinearSolveAMDGPUExt.jl @@ -2,7 +2,7 @@ module LinearSolveAMDGPUExt using AMDGPU using LinearSolve: LinearSolve, LinearCache, AMDGPUOffloadLUFactorization, - AMDGPUOffloadQRFactorization, init_cacheval, OperatorAssumptions + AMDGPUOffloadQRFactorization, init_cacheval, OperatorAssumptions, LinearVerbosity using LinearSolve.LinearAlgebra, LinearSolve.SciMLBase # LU Factorization @@ -25,7 +25,7 @@ function SciMLBase.solve!(cache::LinearSolve.LinearCache, alg::AMDGPUOffloadLUFa end function LinearSolve.init_cacheval(alg::AMDGPUOffloadLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, +
maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) AMDGPU.rocSOLVER.getrf!(AMDGPU.ROCArray(A)) end @@ -57,7 +57,7 @@ function SciMLBase.solve!(cache::LinearSolve.LinearCache, alg::AMDGPUOffloadQRFa end function LinearSolve.init_cacheval(alg::AMDGPUOffloadQRFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) A_gpu = AMDGPU.ROCArray(A) tau = AMDGPU.ROCVector{eltype(A_gpu)}(undef, min(size(A_gpu)...)) diff --git a/ext/LinearSolveBLISExt.jl b/ext/LinearSolveBLISExt.jl index dc8d1aa93..48e5b9112 100644 --- a/ext/LinearSolveBLISExt.jl +++ b/ext/LinearSolveBLISExt.jl @@ -9,7 +9,8 @@ using LinearSolve using LinearAlgebra: BlasInt, LU using LinearAlgebra.LAPACK: require_one_based_indexing, chkfinite, chkstride1, @blasfunc, chkargsok -using LinearSolve: ArrayInterface, BLISLUFactorization, @get_cacheval, LinearCache, SciMLBase +using LinearSolve: ArrayInterface, BLISLUFactorization, @get_cacheval, LinearCache, SciMLBase, LinearVerbosity, get_blas_operation_info, blas_info_msg +using SciMLLogging: SciMLLogging, @SciMLMessage using SciMLBase: ReturnCode const global libblis = blis_jll.blis @@ -206,13 +207,13 @@ const PREALLOCATED_BLIS_LU = begin end function LinearSolve.init_cacheval(alg::BLISLUFactorization, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_BLIS_LU end function LinearSolve.init_cacheval(alg::BLISLUFactorization, A::AbstractMatrix{<:Union{Float32,ComplexF32,ComplexF64}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) A = rand(eltype(A), 0, 0) ArrayInterface.lu_instance(A), Ref{BlasInt}() @@ -222,12 +223,45 @@ function SciMLBase.solve!(cache::LinearCache, 
alg::BLISLUFactorization; kwargs...) A = cache.A A = convert(AbstractMatrix, A) + verbose = cache.verbose if cache.isfresh cacheval = @get_cacheval(cache, :BLISLUFactorization) res = getrf!(A; ipiv = cacheval[1].ipiv, info = cacheval[2]) fact = LU(res[1:3]...), res[4] cache.cacheval = fact + info_value = res[3] + + if info_value != 0 + if !isa(verbose.blas_info, SciMLLogging.Silent) || !isa(verbose.blas_errors, SciMLLogging.Silent) || + !isa(verbose.blas_invalid_args, SciMLLogging.Silent) + op_info = get_blas_operation_info(:dgetrf, A, cache.b, condition = !isa(verbose.condition_number, SciMLLogging.Silent)) + @SciMLMessage(cache.verbose, :condition_number) do + if op_info[:condition_number] === nothing + return "Matrix condition number calculation failed." + else + return "Matrix condition number: $(round(op_info[:condition_number], sigdigits=4)) for $(size(A, 1))×$(size(A, 2)) matrix in dgetrf" + end + end + verb_option, message = blas_info_msg( + :dgetrf, info_value; extra_context = op_info) + @SciMLMessage(message, verbose, verb_option) + end + else + @SciMLMessage(cache.verbose, :blas_success) do + op_info = get_blas_operation_info(:dgetrf, A, cache.b, + condition = !isa(verbose.condition_number, SciMLLogging.Silent)) + @SciMLMessage(cache.verbose, :condition_number) do + if op_info[:condition_number] === nothing + return "Matrix condition number calculation failed." 
+ else + return "Matrix condition number: $(round(op_info[:condition_number], sigdigits=4)) for $(size(A, 1))×$(size(A, 2)) matrix in dgetrf" + end + end + return "BLAS LU factorization (dgetrf) completed successfully for $(op_info[:matrix_size]) matrix" + end + end + if !LinearAlgebra.issuccess(fact[1]) return SciMLBase.build_linear_solution( alg, cache.u, nothing, cache; retcode = ReturnCode.Failure) diff --git a/ext/LinearSolveBandedMatricesExt.jl b/ext/LinearSolveBandedMatricesExt.jl index deb85e25a..376556202 100644 --- a/ext/LinearSolveBandedMatricesExt.jl +++ b/ext/LinearSolveBandedMatricesExt.jl @@ -3,7 +3,7 @@ module LinearSolveBandedMatricesExt using BandedMatrices, LinearAlgebra, LinearSolve import LinearSolve: defaultalg, do_factorization, init_cacheval, DefaultLinearSolver, - DefaultAlgorithmChoice + DefaultAlgorithmChoice, LinearVerbosity # Defaults for BandedMatrices function defaultalg(A::BandedMatrix, b, oa::OperatorAssumptions{Bool}) @@ -41,14 +41,14 @@ for alg in (:SVDFactorization, :MKLLUFactorization, :DiagonalFactorization, :AppleAccelerateLUFactorization, :CholeskyFactorization) @eval begin function init_cacheval(::$(alg), ::BandedMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return nothing end end end function init_cacheval(::LUFactorization, A::BandedMatrix{T}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) where {T} + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T} (T <: BigFloat) && return qr(similar(A, 0, 0)) return lu(similar(A, 0, 0)) end @@ -61,7 +61,7 @@ for alg in (:SVDFactorization, :MKLLUFactorization, :DiagonalFactorization, :AppleAccelerateLUFactorization, :QRFactorization, :LUFactorization) @eval begin function init_cacheval(::$(alg), ::Symmetric{<:Number, <:BandedMatrix}, b, u, Pl, - Pr, maxiters::Int, 
abstol, reltol, verbose::Bool, + Pr, maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return nothing end diff --git a/ext/LinearSolveCUDAExt.jl b/ext/LinearSolveCUDAExt.jl index 767a5119d..3e4531e02 100644 --- a/ext/LinearSolveCUDAExt.jl +++ b/ext/LinearSolveCUDAExt.jl @@ -7,7 +7,7 @@ using LinearSolve: LinearSolve, is_cusparse, defaultalg, cudss_loaded, DefaultLi error_no_cudss_lu, init_cacheval, OperatorAssumptions, CudaOffloadFactorization, CudaOffloadLUFactorization, CudaOffloadQRFactorization, CUDAOffload32MixedLUFactorization, - SparspakFactorization, KLUFactorization, UMFPACKFactorization + SparspakFactorization, KLUFactorization, UMFPACKFactorization, LinearVerbosity using LinearSolve.LinearAlgebra, LinearSolve.SciMLBase, LinearSolve.ArrayInterface using SciMLBase: AbstractSciMLOperator @@ -53,7 +53,7 @@ function SciMLBase.solve!(cache::LinearSolve.LinearCache, alg::CudaOffloadLUFact end function LinearSolve.init_cacheval(alg::CudaOffloadLUFactorization, A::AbstractArray, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) # Check if CUDA is functional before creating CUDA arrays if !CUDA.functional() @@ -81,7 +81,7 @@ function SciMLBase.solve!(cache::LinearSolve.LinearCache, alg::CudaOffloadQRFact end function LinearSolve.init_cacheval(alg::CudaOffloadQRFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) # Check if CUDA is functional before creating CUDA arrays if !CUDA.functional() @@ -105,26 +105,26 @@ function SciMLBase.solve!(cache::LinearSolve.LinearCache, alg::CudaOffloadFactor end function LinearSolve.init_cacheval(alg::CudaOffloadFactorization, A::AbstractArray, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, 
assumptions::OperatorAssumptions) qr(CUDA.CuArray(A)) end function LinearSolve.init_cacheval( ::SparspakFactorization, A::CUDA.CUSPARSE.CuSparseMatrixCSR, b, u, - Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + Pl, Pr, maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function LinearSolve.init_cacheval( ::KLUFactorization, A::CUDA.CUSPARSE.CuSparseMatrixCSR, b, u, - Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + Pl, Pr, maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function LinearSolve.init_cacheval( ::UMFPACKFactorization, A::CUDA.CUSPARSE.CuSparseMatrixCSR, b, u, - Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + Pl, Pr, maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -158,7 +158,7 @@ function SciMLBase.solve!(cache::LinearSolve.LinearCache, alg::CUDAOffload32Mixe end function LinearSolve.init_cacheval(alg::CUDAOffload32MixedLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) # Pre-allocate with Float32 arrays m, n = size(A) diff --git a/ext/LinearSolveCUSOLVERRFExt.jl b/ext/LinearSolveCUSOLVERRFExt.jl index 68b72c604..0522d5e1a 100644 --- a/ext/LinearSolveCUSOLVERRFExt.jl +++ b/ext/LinearSolveCUSOLVERRFExt.jl @@ -10,7 +10,7 @@ using SciMLBase: SciMLBase, LinearProblem, ReturnCode function LinearSolve.init_cacheval(alg::LinearSolve.CUSOLVERRFFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -18,7 +18,7 @@ function LinearSolve.init_cacheval(alg::LinearSolve.CUSOLVERRFFactorization, A::Union{CuSparseMatrixCSR{Float64, 
Int32}, SparseMatrixCSC{Float64, <:Integer}}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) # Create initial factorization with appropriate options nrhs = b isa AbstractMatrix ? size(b, 2) : 1 symbolic = alg.symbolic diff --git a/ext/LinearSolveCliqueTreesExt.jl b/ext/LinearSolveCliqueTreesExt.jl index 4c4530baf..d06a4e3fa 100644 --- a/ext/LinearSolveCliqueTreesExt.jl +++ b/ext/LinearSolveCliqueTreesExt.jl @@ -22,7 +22,7 @@ end function LinearSolve.init_cacheval( alg::CliqueTreesFactorization, A::AbstractMatrix, b, u, Pl, Pr, maxiters::Int, abstol, - reltol, verbose::Bool, assumptions::OperatorAssumptions) + reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) symbfact = _symbolic(A, alg) cholfact, cholwork = cholinit(A, symbfact) linwork = lininit(1, cholfact) diff --git a/ext/LinearSolveFastAlmostBandedMatricesExt.jl b/ext/LinearSolveFastAlmostBandedMatricesExt.jl index 1ceff10c5..572693b2b 100644 --- a/ext/LinearSolveFastAlmostBandedMatricesExt.jl +++ b/ext/LinearSolveFastAlmostBandedMatricesExt.jl @@ -3,7 +3,7 @@ module LinearSolveFastAlmostBandedMatricesExt using FastAlmostBandedMatrices, LinearAlgebra, LinearSolve import LinearSolve: defaultalg, do_factorization, init_cacheval, DefaultLinearSolver, - DefaultAlgorithmChoice + DefaultAlgorithmChoice, LinearVerbosity function defaultalg(A::AlmostBandedMatrix, b, oa::OperatorAssumptions{Bool}) if oa.issq @@ -21,7 +21,7 @@ for alg in (:SVDFactorization, :MKLLUFactorization, :DiagonalFactorization, :AppleAccelerateLUFactorization, :CholeskyFactorization, :LUFactorization) @eval begin function init_cacheval(::$(alg), ::AlmostBandedMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return nothing end end diff --git a/ext/LinearSolveFastLapackInterfaceExt.jl 
b/ext/LinearSolveFastLapackInterfaceExt.jl index 45b690037..f924cc8cd 100644 --- a/ext/LinearSolveFastLapackInterfaceExt.jl +++ b/ext/LinearSolveFastLapackInterfaceExt.jl @@ -1,6 +1,7 @@ module LinearSolveFastLapackInterfaceExt using LinearSolve, LinearAlgebra +using LinearSolve: LinearVerbosity using FastLapackInterface struct WorkspaceAndFactors{W, F} @@ -9,7 +10,7 @@ struct WorkspaceAndFactors{W, F} end function LinearSolve.init_cacheval(::FastLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ws = LUWs(A) return WorkspaceAndFactors( @@ -36,7 +37,7 @@ end function LinearSolve.init_cacheval( alg::FastQRFactorization{NoPivot}, A::AbstractMatrix, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ws = QRWYWs(A; blocksize = alg.blocksize) return WorkspaceAndFactors(ws, @@ -44,7 +45,7 @@ function LinearSolve.init_cacheval( end function LinearSolve.init_cacheval( ::FastQRFactorization{ColumnNorm}, A::AbstractMatrix, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ws = QRpWs(A) return WorkspaceAndFactors(ws, @@ -52,10 +53,10 @@ function LinearSolve.init_cacheval( end function LinearSolve.init_cacheval(alg::FastQRFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return init_cacheval(alg, convert(AbstractMatrix, A), b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) end diff --git a/ext/LinearSolveForwardDiffExt.jl b/ext/LinearSolveForwardDiffExt.jl index d9feefeb6..a29e94787 100644 --- 
a/ext/LinearSolveForwardDiffExt.jl +++ b/ext/LinearSolveForwardDiffExt.jl @@ -1,12 +1,13 @@ module LinearSolveForwardDiffExt using LinearSolve -using LinearSolve: SciMLLinearSolveAlgorithm, __init, DefaultLinearSolver, DefaultAlgorithmChoice, defaultalg +using LinearSolve: SciMLLinearSolveAlgorithm, __init, LinearVerbosity, DefaultLinearSolver, DefaultAlgorithmChoice, defaultalg using LinearAlgebra using ForwardDiff using ForwardDiff: Dual, Partials using SciMLBase using RecursiveArrayTools +using SciMLLogging const DualLinearProblem = LinearProblem{ <:Union{Number, <:AbstractArray, Nothing}, iip, @@ -221,7 +222,7 @@ function __dual_init( abstol = LinearSolve.default_tol(real(eltype(prob.b))), reltol = LinearSolve.default_tol(real(eltype(prob.b))), maxiters::Int = length(prob.b), - verbose::Bool = false, + verbose = LinearVerbosity(SciMLLogging.None()), Pl = nothing, Pr = nothing, assumptions = OperatorAssumptions(issquare(prob.A)), diff --git a/ext/LinearSolveHYPREExt.jl b/ext/LinearSolveHYPREExt.jl index ad7d98333..eae9d07d6 100644 --- a/ext/LinearSolveHYPREExt.jl +++ b/ext/LinearSolveHYPREExt.jl @@ -5,7 +5,8 @@ using HYPRE.LibHYPRE: HYPRE_Complex using HYPRE: HYPRE, HYPREMatrix, HYPRESolver, HYPREVector using LinearSolve: HYPREAlgorithm, LinearCache, LinearProblem, LinearSolve, OperatorAssumptions, default_tol, init_cacheval, __issquare, - __conditioning, LinearSolveAdjoint + __conditioning, LinearSolveAdjoint, LinearVerbosity +using SciMLLogging: SciMLLogging, verbosity_to_int, @SciMLMessage using SciMLBase: LinearProblem, LinearAliasSpecifier, SciMLBase using UnPack: @unpack using Setfield: @set! 
@@ -22,7 +23,7 @@ end function LinearSolve.init_cacheval(alg::HYPREAlgorithm, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) return HYPRECache(nothing, nothing, nothing, nothing, true, true, true) end @@ -64,7 +65,7 @@ function SciMLBase.init(prob::LinearProblem, alg::HYPREAlgorithm, eltype(prob.A)), # TODO: Implement length() for HYPREVector in HYPRE.jl? maxiters::Int = prob.b isa HYPREVector ? 1000 : length(prob.b), - verbose::Bool = false, + verbose = LinearVerbosity(SciMLLogging.None()), Pl = LinearAlgebra.I, Pr = LinearAlgebra.I, assumptions = OperatorAssumptions(), @@ -111,6 +112,18 @@ function SciMLBase.init(prob::LinearProblem, alg::HYPREAlgorithm, alias_b = aliases.alias_b end + if verbose isa Bool + #@warn "Using `true` or `false` for `verbose` is being deprecated. Please use a `LinearVerbosity` type to specify verbosity settings. + # For details see the verbosity section of the common solver options documentation page." + if verbose + verbose = LinearVerbosity() + else + verbose = LinearVerbosity(SciMLLogging.None()) + end + elseif verbose isa SciMLLogging.AbstractMessageLevel + verbose = LinearVerbosity(verbose) + end + A = A isa HYPREMatrix ? A : HYPREMatrix(A) b = b isa HYPREVector ? b : HYPREVector(b) u0 = u0 isa HYPREVector ? u0 : (u0 === nothing ? 
nothing : HYPREVector(u0)) @@ -159,10 +172,11 @@ function create_solver(alg::HYPREAlgorithm, cache::LinearCache) solver = create_solver(alg.solver, comm) # Construct solver options + verbose = verbosity_to_int(cache.verbose.HYPRE_verbosity) solver_options = (; AbsoluteTol = cache.abstol, MaxIter = cache.maxiters, - PrintLevel = Int(cache.verbose), + PrintLevel = verbose, Tol = cache.reltol) # Preconditioner (uses Pl even though it might not be a *left* preconditioner just *a* diff --git a/ext/LinearSolveIterativeSolversExt.jl b/ext/LinearSolveIterativeSolversExt.jl index 901b6bf74..76598474d 100644 --- a/ext/LinearSolveIterativeSolversExt.jl +++ b/ext/LinearSolveIterativeSolversExt.jl @@ -1,8 +1,9 @@ module LinearSolveIterativeSolversExt using LinearSolve, LinearAlgebra -using LinearSolve: LinearCache, DEFAULT_PRECS +using LinearSolve: LinearCache, DEFAULT_PRECS, LinearVerbosity import LinearSolve: IterativeSolversJL +using SciMLLogging: SciMLLogging, @SciMLMessage using IterativeSolvers @@ -47,7 +48,7 @@ LinearSolve.default_alias_b(::IterativeSolversJL, ::Any, ::Any) = true function LinearSolve.init_cacheval(alg::IterativeSolversJL, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) restart = (alg.gmres_restart == 0) ? min(20, size(A, 1)) : alg.gmres_restart s = :idrs_s in keys(alg.kwargs) ? alg.kwargs.idrs_s : 4 # shadow space @@ -56,7 +57,8 @@ function LinearSolve.init_cacheval(alg::IterativeSolversJL, A, b, u, Pl, Pr, max iterable = if alg.generate_iterator === IterativeSolvers.cg_iterator! !LinearSolve._isidentity_struct(Pr) && - @warn "$(alg.generate_iterator) doesn't support right preconditioning" + @SciMLMessage("$(alg.generate_iterator) doesn't support right preconditioning", + verbose, :no_right_preconditioning) alg.generate_iterator(u, A, b, Pl; kwargs...) elseif alg.generate_iterator === IterativeSolvers.gmres_iterable! 
@@ -64,7 +66,8 @@ function LinearSolve.init_cacheval(alg::IterativeSolversJL, A, b, u, Pl, Pr, max kwargs...) elseif alg.generate_iterator === IterativeSolvers.idrs_iterable! !!LinearSolve._isidentity_struct(Pr) && - @warn "$(alg.generate_iterator) doesn't support right preconditioning" + @SciMLMessage("$(alg.generate_iterator) doesn't support right preconditioning", + verbose, :no_right_preconditioning) history = IterativeSolvers.ConvergenceHistory(partial = true) history[:abstol] = abstol history[:reltol] = reltol @@ -72,7 +75,8 @@ function LinearSolve.init_cacheval(alg::IterativeSolversJL, A, b, u, Pl, Pr, max alg.kwargs...) elseif alg.generate_iterator === IterativeSolvers.bicgstabl_iterator! !!LinearSolve._isidentity_struct(Pr) && - @warn "$(alg.generate_iterator) doesn't support right preconditioning" + @SciMLMessage("$(alg.generate_iterator) doesn't support right preconditioning", + verbose, :no_right_preconditioning) alg.generate_iterator(u, A, b, alg.args...; Pl = Pl, abstol = abstol, reltol = reltol, max_mv_products = maxiters * 2, @@ -103,14 +107,15 @@ function SciMLBase.solve!(cache::LinearCache, alg::IterativeSolversJL; kwargs... end purge_history!(cache.cacheval, cache.u, cache.b) - cache.verbose && println("Using IterativeSolvers.$(alg.generate_iterator)") + @SciMLMessage("Using IterativeSolvers.$(alg.generate_iterator)", + cache.verbose, :using_IterativeSolvers) i = 0 for iter in enumerate(cache.cacheval) i += 1 - cache.verbose && println("Iter: $(iter[1]), residual: $(iter[2])") + @SciMLMessage("Iter: $(iter[1]), residual: $(iter[2])", + cache.verbose, :IterativeSolvers_iterations) # TODO inject callbacks KSP into solve! cb!(cache.cacheval) end - cache.verbose && println() resid = cache.cacheval isa IterativeSolvers.IDRSIterable ? 
cache.cacheval.R : cache.cacheval.residual diff --git a/ext/LinearSolveKrylovKitExt.jl b/ext/LinearSolveKrylovKitExt.jl index 1aa1e5d52..12b69c8fd 100644 --- a/ext/LinearSolveKrylovKitExt.jl +++ b/ext/LinearSolveKrylovKitExt.jl @@ -2,6 +2,7 @@ module LinearSolveKrylovKitExt using LinearSolve, KrylovKit, LinearAlgebra using LinearSolve: LinearCache, DEFAULT_PRECS +using SciMLLogging: SciMLLogging, @SciMLMessage, verbosity_to_int function LinearSolve.KrylovKitJL(args...; KrylovAlg = KrylovKit.GMRES, gmres_restart = 0, @@ -25,7 +26,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::KrylovKitJL; kwargs...) atol = float(cache.abstol) rtol = float(cache.reltol) maxiter = cache.maxiters - verbosity = cache.verbose ? 1 : 0 + verbosity = verbosity_to_int(cache.verbose.KrylovKit_verbosity) krylovdim = (alg.gmres_restart == 0) ? min(20, size(cache.A, 1)) : alg.gmres_restart kwargs = (atol = atol, rtol = rtol, maxiter = maxiter, verbosity = verbosity, diff --git a/ext/LinearSolveMetalExt.jl b/ext/LinearSolveMetalExt.jl index 1035117d4..3e29a3e96 100644 --- a/ext/LinearSolveMetalExt.jl +++ b/ext/LinearSolveMetalExt.jl @@ -4,7 +4,7 @@ using Metal, LinearSolve using LinearAlgebra, SciMLBase using SciMLBase: AbstractSciMLOperator using LinearSolve: ArrayInterface, MKLLUFactorization, MetalOffload32MixedLUFactorization, - @get_cacheval, LinearCache, SciMLBase, OperatorAssumptions + @get_cacheval, LinearCache, SciMLBase, OperatorAssumptions, LinearVerbosity @static if Sys.isapple() @@ -16,7 +16,7 @@ default_alias_A(::MetalLUFactorization, ::Any, ::Any) = false default_alias_b(::MetalLUFactorization, ::Any, ::Any) = false function LinearSolve.init_cacheval(alg::MetalLUFactorization, A::AbstractArray, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(convert(AbstractMatrix, A)) end @@ -40,7 +40,7 @@ 
default_alias_A(::MetalOffload32MixedLUFactorization, ::Any, ::Any) = false default_alias_b(::MetalOffload32MixedLUFactorization, ::Any, ::Any) = false function LinearSolve.init_cacheval(alg::MetalOffload32MixedLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) # Pre-allocate with Float32 arrays m, n = size(A) diff --git a/ext/LinearSolvePardisoExt.jl b/ext/LinearSolvePardisoExt.jl index 5b459d8cc..61a368306 100644 --- a/ext/LinearSolvePardisoExt.jl +++ b/ext/LinearSolvePardisoExt.jl @@ -3,8 +3,8 @@ module LinearSolvePardisoExt using Pardiso, LinearSolve using SparseArrays using SparseArrays: nonzeros, rowvals, getcolptr -using LinearSolve: PardisoJL, @unpack - +using LinearSolve: PardisoJL, @unpack, LinearVerbosity +using SciMLLogging: SciMLLogging, @SciMLMessage, verbosity_to_bool using LinearSolve.SciMLBase LinearSolve.needs_concrete_A(alg::PardisoJL) = true @@ -20,7 +20,7 @@ function LinearSolve.init_cacheval(alg::PardisoJL, maxiters::Int, abstol, reltol, - verbose::Bool, + verbose::LinearVerbosity, assumptions::LinearSolve.OperatorAssumptions) @unpack nprocs, solver_type, matrix_type, cache_analysis, iparm, dparm, vendor = alg A = convert(AbstractMatrix, A) @@ -73,8 +73,10 @@ function LinearSolve.init_cacheval(alg::PardisoJL, error("Number type not supported by Pardiso") end end - verbose && Pardiso.set_msglvl!(solver, Pardiso.MESSAGE_LEVEL_ON) - + + if verbosity_to_bool(verbose.pardiso_verbosity) + Pardiso.set_msglvl!(solver, Pardiso.MESSAGE_LEVEL_ON) + end #= Note: It is recommended to use IPARM(11)=1 (scaling) and IPARM(13)=1 (matchings) for highly indefinite symmetric matrices e.g. from interior point optimizations or saddle point problems. 
diff --git a/ext/LinearSolveRecursiveFactorizationExt.jl b/ext/LinearSolveRecursiveFactorizationExt.jl index 947dd8020..765c895c5 100644 --- a/ext/LinearSolveRecursiveFactorizationExt.jl +++ b/ext/LinearSolveRecursiveFactorizationExt.jl @@ -2,7 +2,7 @@ module LinearSolveRecursiveFactorizationExt using LinearSolve: LinearSolve, userecursivefactorization, LinearCache, @get_cacheval, RFLUFactorization, RF32MixedLUFactorization, default_alias_A, - default_alias_b + default_alias_b, LinearVerbosity using LinearSolve.LinearAlgebra, LinearSolve.ArrayInterface, RecursiveFactorization using SciMLBase: SciMLBase, ReturnCode @@ -42,7 +42,7 @@ const PREALLOCATED_RF32_LU = begin end function LinearSolve.init_cacheval(alg::RF32MixedLUFactorization{P, T}, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::LinearSolve.OperatorAssumptions) where {P, T} # Pre-allocate appropriate 32-bit arrays based on input type m, n = size(A) diff --git a/ext/LinearSolveSparseArraysExt.jl b/ext/LinearSolveSparseArraysExt.jl index d5018394e..7753de4c6 100644 --- a/ext/LinearSolveSparseArraysExt.jl +++ b/ext/LinearSolveSparseArraysExt.jl @@ -4,7 +4,7 @@ using LinearSolve: LinearSolve, BLASELTYPES, pattern_changed, ArrayInterface, @get_cacheval, CHOLMODFactorization, GenericFactorization, GenericLUFactorization, KLUFactorization, LUFactorization, NormalCholeskyFactorization, - OperatorAssumptions, + OperatorAssumptions, LinearVerbosity, QRFactorization, RFLUFactorization, UMFPACKFactorization, solve using ArrayInterface: ArrayInterface using LinearAlgebra: LinearAlgebra, I, Hermitian, Symmetric, cholesky, ldiv!, lu, lu!, QR @@ -13,7 +13,7 @@ using SparseArrays: SparseArrays, AbstractSparseArray, AbstractSparseMatrixCSC, nonzeros, rowvals, getcolptr, sparse, sprand @static if Base.USE_GPL_LIBS -using SparseArrays.UMFPACK: UMFPACK_OK + using SparseArrays.UMFPACK: UMFPACK_OK end using Base: /, \, convert using 
SciMLBase: SciMLBase, LinearProblem, ReturnCode @@ -37,7 +37,7 @@ end function LinearSolve.init_cacheval(alg::RFLUFactorization, A::Union{AbstractSparseArray, LinearSolve.SciMLOperators.AbstractSciMLOperator}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing, nothing end @@ -51,15 +51,15 @@ function LinearSolve.handle_sparsematrixcsc_lu(A::AbstractSparseMatrixCSC) end @static if Base.USE_GPL_LIBS -function LinearSolve.defaultalg( - A::Symmetric{<:BLASELTYPES, <:SparseMatrixCSC}, b, ::OperatorAssumptions{Bool}) - LinearSolve.DefaultLinearSolver(LinearSolve.DefaultAlgorithmChoice.CHOLMODFactorization) -end + function LinearSolve.defaultalg( + A::Symmetric{<:BLASELTYPES, <:SparseMatrixCSC}, b, ::OperatorAssumptions{Bool}) + LinearSolve.DefaultLinearSolver(LinearSolve.DefaultAlgorithmChoice.CHOLMODFactorization) + end else -function LinearSolve.defaultalg( - A::Symmetric{<:BLASELTYPES, <:SparseMatrixCSC}, b, ::OperatorAssumptions{Bool}) - LinearSolve.DefaultLinearSolver(LinearSolve.DefaultAlgorithmChoice.CholeskyFactorization) -end + function LinearSolve.defaultalg( + A::Symmetric{<:BLASELTYPES, <:SparseMatrixCSC}, b, ::OperatorAssumptions{Bool}) + LinearSolve.DefaultLinearSolver(LinearSolve.DefaultAlgorithmChoice.CholeskyFactorization) + end end # @static if Base.USE_GPL_LIBS function LinearSolve.defaultalg(A::AbstractSparseMatrixCSC{Tv, Ti}, b, @@ -79,23 +79,22 @@ end function LinearSolve.init_cacheval(alg::GenericFactorization, A::Union{Hermitian{T, <:SparseMatrixCSC}, Symmetric{T, <:SparseMatrixCSC}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T} newA = copy(convert(AbstractMatrix, A)) LinearSolve.do_factorization(alg, newA, b, u) end @static if Base.USE_GPL_LIBS -const PREALLOCATED_UMFPACK = 
SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC(0, 0, [1], - Int[], Float64[])) - + const PREALLOCATED_UMFPACK = SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC(0, 0, [1], + Int[], Float64[])) end # @static if Base.USE_GPL_LIBS function LinearSolve.init_cacheval( alg::LUFactorization, A::AbstractSparseArray{<:Number, <:Integer}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -103,7 +102,7 @@ function LinearSolve.init_cacheval( alg::GenericLUFactorization, A::AbstractSparseArray{<:Number, <:Integer}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -111,53 +110,51 @@ function LinearSolve.init_cacheval( alg::UMFPACKFactorization, A::AbstractArray, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @static if Base.USE_GPL_LIBS - -function LinearSolve.init_cacheval( - alg::LUFactorization, A::AbstractSparseArray{Float64, Int64}, b, u, - Pl, Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) - PREALLOCATED_UMFPACK -end - -function LinearSolve.init_cacheval( - alg::LUFactorization, A::AbstractSparseArray{T, Int64}, b, u, - Pl, Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} - if LinearSolve.is_cusparse(A) - ArrayInterface.lu_instance(A) - else - SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC{T, Int64}( - zero(Int64), zero(Int64), [Int64(1)], Int64[], T[])) + function LinearSolve.init_cacheval( + alg::LUFactorization, A::AbstractSparseArray{Float64, Int64}, b, u, + Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::LinearVerbosity, assumptions::OperatorAssumptions) + PREALLOCATED_UMFPACK end -end -function 
LinearSolve.init_cacheval( - alg::LUFactorization, A::AbstractSparseArray{T, Int32}, b, u, - Pl, Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} - if LinearSolve.is_cusparse(A) - ArrayInterface.lu_instance(A) - else - SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC{T, Int32}( - zero(Int32), zero(Int32), [Int32(1)], Int32[], T[])) + function LinearSolve.init_cacheval( + alg::LUFactorization, A::AbstractSparseArray{T, Int64}, b, u, + Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} + if LinearSolve.is_cusparse(A) + ArrayInterface.lu_instance(A) + else + SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC{T, Int64}( + zero(Int64), zero(Int64), [Int64(1)], Int64[], T[])) + end end -end + function LinearSolve.init_cacheval( + alg::LUFactorization, A::AbstractSparseArray{T, Int32}, b, u, + Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} + if LinearSolve.is_cusparse(A) + ArrayInterface.lu_instance(A) + else + SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC{T, Int32}( + zero(Int32), zero(Int32), [Int32(1)], Int32[], T[])) + end + end end # @static if Base.USE_GPL_LIBS function LinearSolve.init_cacheval( alg::LUFactorization, A::LinearSolve.GPUArraysCore.AnyGPUArray, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end @@ -165,89 +162,87 @@ function LinearSolve.init_cacheval( alg::UMFPACKFactorization, A::LinearSolve.GPUArraysCore.AnyGPUArray, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @static if Base.USE_GPL_LIBS + function LinearSolve.init_cacheval( + alg::UMFPACKFactorization, 
A::AbstractSparseArray{Float64, Int}, b, u, Pl, Pr, + maxiters::Int, abstol, + reltol, + verbose::LinearVerbosity, assumptions::OperatorAssumptions) + PREALLOCATED_UMFPACK + end -function LinearSolve.init_cacheval( - alg::UMFPACKFactorization, A::AbstractSparseArray{Float64, Int}, b, u, Pl, Pr, - maxiters::Int, abstol, - reltol, - verbose::Bool, assumptions::OperatorAssumptions) - PREALLOCATED_UMFPACK -end - -function LinearSolve.init_cacheval( - alg::UMFPACKFactorization, A::AbstractSparseArray{T, Int64}, b, u, - Pl, Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} - SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC{T, Int64}( - zero(Int64), zero(Int64), [Int64(1)], Int64[], T[])) -end + function LinearSolve.init_cacheval( + alg::UMFPACKFactorization, A::AbstractSparseArray{T, Int64}, b, u, + Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} + SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC{T, Int64}( + zero(Int64), zero(Int64), [Int64(1)], Int64[], T[])) + end -function LinearSolve.init_cacheval( - alg::UMFPACKFactorization, A::AbstractSparseArray{T, Int32}, b, u, - Pl, Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} - SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC{T, Int32}( - zero(Int32), zero(Int32), [Int32(1)], Int32[], T[])) -end + function LinearSolve.init_cacheval( + alg::UMFPACKFactorization, A::AbstractSparseArray{T, Int32}, b, u, + Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} + SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC{T, Int32}( + zero(Int32), zero(Int32), [Int32(1)], Int32[], T[])) + end -function SciMLBase.solve!( - cache::LinearSolve.LinearCache, alg::UMFPACKFactorization; kwargs...) 
- A = cache.A - A = convert(AbstractMatrix, A) - if cache.isfresh - cacheval = LinearSolve.@get_cacheval(cache, :UMFPACKFactorization) - if alg.reuse_symbolic - # Caches the symbolic factorization: https://github.com/JuliaLang/julia/pull/33738 - if alg.check_pattern && pattern_changed(cacheval, A) + function SciMLBase.solve!( + cache::LinearSolve.LinearCache, alg::UMFPACKFactorization; kwargs...) + A = cache.A + A = convert(AbstractMatrix, A) + if cache.isfresh + cacheval = LinearSolve.@get_cacheval(cache, :UMFPACKFactorization) + if alg.reuse_symbolic + # Caches the symbolic factorization: https://github.com/JuliaLang/julia/pull/33738 + if alg.check_pattern && pattern_changed(cacheval, A) + fact = lu( + SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), + nonzeros(A)), + check = false) + else + fact = lu!(cacheval, + SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), + nonzeros(A)), check = false) + end + else fact = lu( - SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), - nonzeros(A)), + SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), nonzeros(A)), check = false) - else - fact = lu!(cacheval, - SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), - nonzeros(A)), check = false) end - else - fact = lu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), nonzeros(A)), - check = false) + cache.cacheval = fact + cache.isfresh = false end - cache.cacheval = fact - cache.isfresh = false - end - F = LinearSolve.@get_cacheval(cache, :UMFPACKFactorization) - if F.status == UMFPACK_OK - y = ldiv!(cache.u, F, cache.b) - SciMLBase.build_linear_solution( - alg, y, nothing, cache; retcode = ReturnCode.Success) - else - SciMLBase.build_linear_solution( - alg, cache.u, nothing, cache; retcode = ReturnCode.Infeasible) + F = LinearSolve.@get_cacheval(cache, :UMFPACKFactorization) + if F.status == UMFPACK_OK + y = ldiv!(cache.u, F, cache.b) + SciMLBase.build_linear_solution( + alg, y, nothing, cache; retcode = ReturnCode.Success) + else + 
SciMLBase.build_linear_solution( + alg, cache.u, nothing, cache; retcode = ReturnCode.Infeasible) + end end -end else - -function SciMLBase.solve!( - cache::LinearSolve.LinearCache, alg::UMFPACKFactorization; kwargs...) - error("UMFPACKFactorization requires GPL libraries (UMFPACK). Rebuild Julia with USE_GPL_LIBS=1 or use an alternative algorithm like SparspakFactorization") -end - + function SciMLBase.solve!( + cache::LinearSolve.LinearCache, alg::UMFPACKFactorization; kwargs...) + error("UMFPACKFactorization requires GPL libraries (UMFPACK). Rebuild Julia with USE_GPL_LIBS=1 or use an alternative algorithm like SparspakFactorization") + end end # @static if Base.USE_GPL_LIBS function LinearSolve.init_cacheval( alg::KLUFactorization, A::AbstractArray, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -255,7 +250,7 @@ function LinearSolve.init_cacheval( alg::KLUFactorization, A::LinearSolve.GPUArraysCore.AnyGPUArray, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -266,7 +261,7 @@ function LinearSolve.init_cacheval( alg::KLUFactorization, A::AbstractSparseArray{Float64, Int64}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_KLU end @@ -274,7 +269,7 @@ function LinearSolve.init_cacheval( alg::KLUFactorization, A::AbstractSparseArray{Float64, Int32}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) KLU.KLUFactorization(SparseMatrixCSC{Float64, Int32}( 0, 0, [Int32(1)], Int32[], Float64[])) end @@ -317,38 +312,36 @@ function LinearSolve.init_cacheval(alg::CHOLMODFactorization, A::AbstractArray, 
b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @static if Base.USE_GPL_LIBS + const PREALLOCATED_CHOLMOD = cholesky(sparse(reshape([1.0], 1, 1))) + + function LinearSolve.init_cacheval(alg::CHOLMODFactorization, + A::Union{SparseMatrixCSC{T, Int}, Symmetric{T, SparseMatrixCSC{T, Int}}}, b, u, + Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T <: + Float64} + PREALLOCATED_CHOLMOD + end -const PREALLOCATED_CHOLMOD = cholesky(sparse(reshape([1.0], 1, 1))) - -function LinearSolve.init_cacheval(alg::CHOLMODFactorization, - A::Union{SparseMatrixCSC{T, Int}, Symmetric{T, SparseMatrixCSC{T, Int}}}, b, u, - Pl, Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) where {T <: - Float64} - PREALLOCATED_CHOLMOD -end - -function LinearSolve.init_cacheval(alg::CHOLMODFactorization, - A::Union{SparseMatrixCSC{T, Int}, Symmetric{T, SparseMatrixCSC{T, Int}}}, b, u, - Pl, Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) where {T <: - BLASELTYPES} - cholesky(sparse(reshape([one(T)], 1, 1))) -end - + function LinearSolve.init_cacheval(alg::CHOLMODFactorization, + A::Union{SparseMatrixCSC{T, Int}, Symmetric{T, SparseMatrixCSC{T, Int}}}, b, u, + Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T <: + BLASELTYPES} + cholesky(sparse(reshape([one(T)], 1, 1))) + end end # @static if Base.USE_GPL_LIBS function LinearSolve.init_cacheval(alg::NormalCholeskyFactorization, A::Union{AbstractSparseArray{T}, LinearSolve.GPUArraysCore.AnyGPUArray, Symmetric{T, <:AbstractSparseArray{T}}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} 
ArrayInterface.cholesky_instance(convert(AbstractMatrix, A)) end @@ -378,27 +371,27 @@ function LinearSolve._ldiv!(::SVector, end @static if Base.USE_GPL_LIBS -# SPQR and CHOLMOD Factor support -function LinearSolve._ldiv!(x::Vector, - A::Union{SparseArrays.SPQR.QRSparse, SparseArrays.CHOLMOD.Factor}, b::Vector) - x .= A \ b -end + # SPQR and CHOLMOD Factor support + function LinearSolve._ldiv!(x::Vector, + A::Union{SparseArrays.SPQR.QRSparse, SparseArrays.CHOLMOD.Factor}, b::Vector) + x .= A \ b + end -function LinearSolve._ldiv!(x::AbstractVector, - A::Union{SparseArrays.SPQR.QRSparse, SparseArrays.CHOLMOD.Factor}, b::AbstractVector) - x .= A \ b -end + function LinearSolve._ldiv!(x::AbstractVector, + A::Union{SparseArrays.SPQR.QRSparse, SparseArrays.CHOLMOD.Factor}, b::AbstractVector) + x .= A \ b + end -function LinearSolve._ldiv!(::SVector, - A::Union{SparseArrays.CHOLMOD.Factor, SparseArrays.SPQR.QRSparse}, - b::AbstractVector) - (A \ b) -end -function LinearSolve._ldiv!(::SVector, - A::Union{SparseArrays.CHOLMOD.Factor, SparseArrays.SPQR.QRSparse}, - b::SVector) - (A \ b) -end + function LinearSolve._ldiv!(::SVector, + A::Union{SparseArrays.CHOLMOD.Factor, SparseArrays.SPQR.QRSparse}, + b::AbstractVector) + (A \ b) + end + function LinearSolve._ldiv!(::SVector, + A::Union{SparseArrays.CHOLMOD.Factor, SparseArrays.SPQR.QRSparse}, + b::SVector) + (A \ b) + end end # @static if Base.USE_GPL_LIBS function LinearSolve.pattern_changed(fact, A::SparseArrays.SparseMatrixCSC) @@ -408,29 +401,29 @@ function LinearSolve.pattern_changed(fact, A::SparseArrays.SparseMatrixCSC) end @static if Base.USE_GPL_LIBS -function LinearSolve.defaultalg( - A::AbstractSparseMatrixCSC{<:Union{Float64, ComplexF64}, Ti}, b, - assump::OperatorAssumptions{Bool}) where {Ti} - if assump.issq - if length(b) <= 10_000 && length(nonzeros(A)) / length(A) < 2e-4 - LinearSolve.DefaultLinearSolver(LinearSolve.DefaultAlgorithmChoice.KLUFactorization) + function LinearSolve.defaultalg( + 
A::AbstractSparseMatrixCSC{<:Union{Float64, ComplexF64}, Ti}, b, + assump::OperatorAssumptions{Bool}) where {Ti} + if assump.issq + if length(b) <= 10_000 && length(nonzeros(A)) / length(A) < 2e-4 + LinearSolve.DefaultLinearSolver(LinearSolve.DefaultAlgorithmChoice.KLUFactorization) + else + LinearSolve.DefaultLinearSolver(LinearSolve.DefaultAlgorithmChoice.UMFPACKFactorization) + end else - LinearSolve.DefaultLinearSolver(LinearSolve.DefaultAlgorithmChoice.UMFPACKFactorization) + LinearSolve.DefaultLinearSolver(LinearSolve.DefaultAlgorithmChoice.QRFactorization) end - else - LinearSolve.DefaultLinearSolver(LinearSolve.DefaultAlgorithmChoice.QRFactorization) end -end else -function LinearSolve.defaultalg( - A::AbstractSparseMatrixCSC{<:Union{Float64, ComplexF64}, Ti}, b, - assump::OperatorAssumptions{Bool}) where {Ti} - if assump.issq - LinearSolve.DefaultLinearSolver(LinearSolve.DefaultAlgorithmChoice.KLUFactorization) - elseif !assump.issq - LinearSolve.DefaultLinearSolver(LinearSolve.DefaultAlgorithmChoice.QRFactorization) + function LinearSolve.defaultalg( + A::AbstractSparseMatrixCSC{<:Union{Float64, ComplexF64}, Ti}, b, + assump::OperatorAssumptions{Bool}) where {Ti} + if assump.issq + LinearSolve.DefaultLinearSolver(LinearSolve.DefaultAlgorithmChoice.KLUFactorization) + elseif !assump.issq + LinearSolve.DefaultLinearSolver(LinearSolve.DefaultAlgorithmChoice.QRFactorization) + end end -end end # @static if Base.USE_GPL_LIBS # SPQR Handling @@ -438,20 +431,20 @@ function LinearSolve.init_cacheval( alg::QRFactorization, A::AbstractSparseArray{<:Number, <:Integer}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function LinearSolve.init_cacheval( alg::QRFactorization, A::SparseMatrixCSC{Float64, <:Integer}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, 
assumptions::OperatorAssumptions) ArrayInterface.qr_instance(convert(AbstractMatrix, A), alg.pivot) end function LinearSolve.init_cacheval( alg::QRFactorization, A::Symmetric{<:Number, <:SparseMatrixCSC}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return nothing end diff --git a/ext/LinearSolveSparspakExt.jl b/ext/LinearSolveSparspakExt.jl index 4cf36ce35..380e682ca 100644 --- a/ext/LinearSolveSparspakExt.jl +++ b/ext/LinearSolveSparspakExt.jl @@ -1,6 +1,7 @@ module LinearSolveSparspakExt using LinearSolve, LinearAlgebra +using LinearSolve: LinearVerbosity using Sparspak using Sparspak.SparseCSCInterface.SparseArrays using SparseArrays: AbstractSparseMatrixCSC, nonzeros, rowvals, getcolptr @@ -12,14 +13,14 @@ function LinearSolve.init_cacheval( ::SparspakFactorization, A::SparseMatrixCSC{Float64, Int}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_SPARSEPAK end function LinearSolve.init_cacheval( ::SparspakFactorization, A::AbstractSparseMatrixCSC{Tv, Ti}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) where {Tv, Ti} + verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {Tv, Ti} if size(A, 1) == size(A, 2) A = convert(AbstractMatrix, A) if A isa SparseArrays.AbstractSparseArray diff --git a/src/LinearSolve.jl b/src/LinearSolve.jl index 62ef4026f..fa2eadaa0 100644 --- a/src/LinearSolve.jl +++ b/src/LinearSolve.jl @@ -8,7 +8,7 @@ import PrecompileTools using ArrayInterface: ArrayInterface using Base: Bool, convert, copyto!, adjoint, transpose, /, \, require_one_based_indexing using LinearAlgebra: LinearAlgebra, BlasInt, LU, Adjoint, BLAS, Bidiagonal, BunchKaufman, - ColumnNorm, Diagonal, Factorization, Hermitian, I, LAPACK, NoPivot, + ColumnNorm, cond, Diagonal, 
Factorization, Hermitian, I, LAPACK, NoPivot, RowMaximum, RowNonZero, SymTridiagonal, Symmetric, Transpose, Tridiagonal, UniformScaling, axpby!, axpy!, bunchkaufman, bunchkaufman!, @@ -21,6 +21,8 @@ using SciMLBase: SciMLBase, LinearAliasSpecifier, AbstractSciMLOperator, using SciMLOperators: SciMLOperators, AbstractSciMLOperator, IdentityOperator, MatrixOperator, has_ldiv!, issquare +using SciMLLogging: SciMLLogging, @SciMLMessage, verbosity_to_int, AbstractVerbositySpecifier, AbstractMessageLevel, AbstractVerbosityPreset, + Silent, InfoLevel, WarnLevel, ErrorLevel, CustomLevel, None, Minimal, Standard, Detailed, All using Setfield: @set, @set! using UnPack: @unpack using DocStringExtensions: DocStringExtensions @@ -384,6 +386,8 @@ const BLASELTYPES = Union{Float32, Float64, ComplexF32, ComplexF64} function defaultalg_symbol end +include("verbosity.jl") +include("blas_logging.jl") include("generic_lufact.jl") include("common.jl") include("extension_algs.jl") @@ -516,4 +520,6 @@ export OperatorAssumptions, OperatorCondition export LinearSolveAdjoint +export LinearVerbosity + end diff --git a/src/appleaccelerate.jl b/src/appleaccelerate.jl index be0aebb05..a7bda5f86 100644 --- a/src/appleaccelerate.jl +++ b/src/appleaccelerate.jl @@ -233,14 +233,14 @@ const PREALLOCATED_APPLE_LU = begin end function LinearSolve.init_cacheval(alg::AppleAccelerateLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_APPLE_LU end function LinearSolve.init_cacheval(alg::AppleAccelerateLUFactorization, A::AbstractMatrix{<:Union{Float32, ComplexF32, ComplexF64}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) A = rand(eltype(A), 0, 0) luinst = ArrayInterface.lu_instance(A) @@ -253,12 +253,45 @@ function SciMLBase.solve!(cache::LinearCache, 
alg::AppleAccelerateLUFactorizatio error("Error, AppleAccelerate binary is missing but solve is being called. Report this issue") A = cache.A A = convert(AbstractMatrix, A) + verbose = cache.verbose if cache.isfresh cacheval = @get_cacheval(cache, :AppleAccelerateLUFactorization) res = aa_getrf!(A; ipiv = cacheval[1].ipiv, info = cacheval[2]) fact = LU(res[1:3]...), res[4] cache.cacheval = fact + info_value = res[3] + + if info_value != 0 + if !isa(verbose.blas_info, SciMLLogging.Silent) || !isa(verbose.blas_errors, SciMLLogging.Silent) || + !isa(verbose.blas_invalid_args, SciMLLogging.Silent) + op_info = get_blas_operation_info(:dgetrf, A, cache.b, condition = !isa(verbose.condition_number, SciMLLogging.Silent)) + @SciMLMessage(cache.verbose, :condition_number) do + if op_info[:condition_number] === nothing + return "Matrix condition number calculation failed." + else + return "Matrix condition number: $(round(op_info[:condition_number], sigdigits=4)) for $(size(A, 1))×$(size(A, 2)) matrix in dgetrf" + end + end + verb_option, message = blas_info_msg( + :dgetrf, info_value; extra_context = op_info) + @SciMLMessage(message, verbose, verb_option) + end + else + @SciMLMessage(cache.verbose, :blas_success) do + op_info = get_blas_operation_info(:dgetrf, A, cache.b, + condition = !isa(verbose.condition_number, SciMLLogging.Silent)) + @SciMLMessage(cache.verbose, :condition_number) do + if op_info[:condition_number] === nothing + return "Matrix condition number calculation failed." 
+ else + return "Matrix condition number: $(round(op_info[:condition_number], sigdigits=4)) for $(size(A, 1))×$(size(A, 2)) matrix in dgetrf" + end + end + return "BLAS LU factorization (dgetrf) completed successfully for $(op_info[:matrix_size]) matrix" + end + end + if !LinearAlgebra.issuccess(fact[1]) return SciMLBase.build_linear_solution( alg, cache.u, nothing, cache; retcode = ReturnCode.Failure) @@ -293,7 +326,7 @@ const PREALLOCATED_APPLE32_LU = begin end function LinearSolve.init_cacheval(alg::AppleAccelerate32MixedLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) # Pre-allocate appropriate 32-bit arrays based on input type m, n = size(A) diff --git a/src/blas_logging.jl b/src/blas_logging.jl new file mode 100644 index 000000000..0180c4fae --- /dev/null +++ b/src/blas_logging.jl @@ -0,0 +1,173 @@ + +""" + interpret_blas_code(func::Symbol, info::Integer) + +Interpret BLAS/LAPACK return codes (info parameter) to provide human-readable error messages. +Returns a tuple of (category::Symbol, message::String, details::String) +""" +function interpret_blas_code(func::Symbol, info::Integer) + if info == 0 + return (:success, "Operation completed successfully", "") + elseif info < 0 + return (:invalid_argument, + "Invalid argument error", + "Argument $(-info) had an illegal value") + else + # info > 0 means different things for different functions + return interpret_positive_info(func, info) + end +end + +function interpret_positive_info(func::Symbol, info::Integer) + func_str = string(func) + + # LU factorization routines + if occursin("getrf", func_str) + return (:singular_matrix, + "Matrix is singular", + "U($info,$info) is exactly zero. 
The factorization has been completed, but U is singular and division by U will produce infinity.") + + # Cholesky factorization routines + elseif occursin("potrf", func_str) + return (:not_positive_definite, + "Matrix is not positive definite", + "The leading minor of order $info is not positive definite, and the factorization could not be completed.") + + # QR factorization routines + elseif occursin("geqrf", func_str) || occursin("geqrt", func_str) + return (:numerical_issue, + "Numerical issue in QR factorization", + "Householder reflector $info could not be formed properly.") + + # SVD routines + elseif occursin("gesdd", func_str) || occursin("gesvd", func_str) + return (:convergence_failure, + "SVD did not converge", + "The algorithm failed to compute singular values. $info off-diagonal elements of an intermediate bidiagonal form did not converge to zero.") + + # Symmetric/Hermitian eigenvalue routines + elseif occursin("syev", func_str) || occursin("heev", func_str) + return (:convergence_failure, + "Eigenvalue computation did not converge", + "$info off-diagonal elements of an intermediate tridiagonal form did not converge to zero.") + + # Bunch-Kaufman factorization + elseif occursin("sytrf", func_str) || occursin("hetrf", func_str) + return (:singular_matrix, + "Matrix is singular", + "D($info,$info) is exactly zero. 
The factorization has been completed, but the block diagonal matrix D is singular.") + + # Solve routines (should not have positive info) + elseif occursin("getrs", func_str) || occursin("potrs", func_str) || + occursin("sytrs", func_str) || occursin("hetrs", func_str) + return (:unexpected_error, + "Unexpected positive return code from solve routine", + "Solve routine $func returned info=$info which should not happen.") + + # General eigenvalue problem + elseif occursin("ggev", func_str) || occursin("gges", func_str) + if info <= size # FIXME(review): `size` here is the Base function, so this comparison throws a MethodError; LAPACK ggev/gges bounds info by the matrix order N, which is not in scope — pass N in or drop the branch + return (:convergence_failure, + "QZ iteration failed", + "The QZ iteration failed to compute all eigenvalues. Elements 1:$(info-1) converged.") + else + return (:unexpected_error, + "Unexpected error in generalized eigenvalue problem", + "Info value $info is unexpected for $func.") + end + + # LDLT factorization + elseif occursin("ldlt", func_str) + return (:singular_matrix, + "Matrix is singular", + "The $(info)-th pivot is zero. The factorization has been completed but division will produce infinity.") + + # Default case + else + return (:unknown_error, + "Unknown positive return code", + "Function $func returned info=$info. Consult LAPACK documentation for details.") + end +end + + + +""" + blas_info_msg(func::Symbol, info::Integer; + extra_context::Dict{Symbol,Any} = Dict()) + +Interpret a BLAS/LAPACK return code and return a `(verbosity_field, message)` tuple for the caller to log. +""" +function blas_info_msg(func::Symbol, info::Integer; + extra_context::Dict{Symbol, Any} = Dict()) + category, message, details = interpret_blas_code(func, info) + + verbosity_field = if category in [:singular_matrix, :not_positive_definite, :convergence_failure] + :blas_errors + elseif category == :invalid_argument + :blas_invalid_args + else + :blas_info + end + + # Build structured message components + msg_main = "BLAS/LAPACK $func: $message" + msg_details = !isempty(details) ?
details : nothing + msg_info = info + + # Build complete message with all details + full_msg = if !isempty(extra_context) || msg_details !== nothing + parts = String[msg_main] + if msg_details !== nothing + push!(parts, "Details: $msg_details") + end + push!(parts, "Return code (info): $msg_info") + if !isempty(extra_context) + for (key, value) in extra_context + push!(parts, "$key: $value") + end + end + join(parts, "\n ") + else + "$msg_main (info=$msg_info)" + end + + verbosity_field, full_msg +end + + +function get_blas_operation_info(func::Symbol, A, b; condition = false) + info = Dict{Symbol, Any}() + + # Matrix properties + info[:matrix_size] = size(A) + info[:matrix_type] = typeof(A) + info[:element_type] = eltype(A) + + # Condition number (based on verbosity setting) + if condition && size(A, 1) == size(A, 2) + try + cond_num = cond(A) + info[:condition_number] = cond_num + + # Log the condition number if enabled + cond_msg = "Matrix condition number: $(round(cond_num, sigdigits=4)) for $(size(A, 1))×$(size(A, 2)) matrix in $func" + + catch + # Skip if condition number computation fails + info[:condition_number] = nothing + end + end + + # RHS properties if provided + if b !== nothing + info[:rhs_size] = size(b) + info[:rhs_type] = typeof(b) + end + + # Memory usage estimate + mem_bytes = prod(size(A)) * sizeof(eltype(A)) + info[:memory_usage_MB] = round(mem_bytes / 1024^2, digits = 2) + + return info +end \ No newline at end of file diff --git a/src/common.jl b/src/common.jl index 7a29b521e..977a72f9a 100644 --- a/src/common.jl +++ b/src/common.jl @@ -89,7 +89,7 @@ solving and caching of factorizations and intermediate results. - `abstol::Ttol`: Absolute tolerance for iterative solvers. - `reltol::Ttol`: Relative tolerance for iterative solvers. - `maxiters::Int`: Maximum number of iterations for iterative solvers. -- `verbose::Bool`: Whether to print verbose output during solving. 
+- `verbose::LinearVerbosity`: Whether to print verbose output during solving. - `assumptions::OperatorAssumptions{issq}`: Assumptions about the operator properties. - `sensealg::S`: Sensitivity analysis algorithm for automatic differentiation. @@ -119,7 +119,7 @@ mutable struct LinearCache{TA, Tb, Tu, Tp, Talg, Tc, Tl, Tr, Ttol, issq, S} abstol::Ttol reltol::Ttol maxiters::Int - verbose::Bool + verbose::LinearVerbosity assumptions::OperatorAssumptions{issq} sensealg::S end @@ -267,7 +267,7 @@ function __init(prob::LinearProblem, alg::SciMLLinearSolveAlgorithm, abstol = default_tol(real(eltype(prob.b))), reltol = default_tol(real(eltype(prob.b))), maxiters::Int = length(prob.b), - verbose::Bool = false, + verbose = LinearVerbosity(), Pl = nothing, Pr = nothing, assumptions = OperatorAssumptions(issquare(prob.A)), @@ -324,6 +324,20 @@ function __init(prob::LinearProblem, alg::SciMLLinearSolveAlgorithm, copy(A) end + if verbose isa Bool + #@warn "Using `true` or `false` for `verbose` is being deprecated. Please use a `LinearVerbosity` type to specify verbosity settings. + # For details see the verbosity section of the common solver options documentation page." + if verbose + verbose_spec = LinearVerbosity() + else + verbose_spec = LinearVerbosity(SciMLLogging.None()) + end + elseif verbose isa SciMLLogging.AbstractVerbosityPreset + verbose_spec = LinearVerbosity(verbose) + else + verbose_spec = verbose + end + b = if issparsematrix(b) && !(A isa Diagonal) Array(b) # the solution to a linear solve will always be dense! elseif alias_b || b isa SVector @@ -361,7 +375,7 @@ function __init(prob::LinearProblem, alg::SciMLLinearSolveAlgorithm, # TODO: deprecate once all docs are updated to the new form #@warn "passing Preconditioners at `init`/`solve` time is deprecated. Instead add a `precs` function to your algorithm." 
end - cacheval = init_cacheval(alg, A, b, u0_, Pl, Pr, maxiters, abstol, reltol, verbose, + cacheval = init_cacheval(alg, A, b, u0_, Pl, Pr, maxiters, abstol, reltol, verbose_spec, assumptions) isfresh = true precsisfresh = false @@ -371,7 +385,7 @@ function __init(prob::LinearProblem, alg::SciMLLinearSolveAlgorithm, typeof(Pl), typeof(Pr), typeof(reltol), typeof(assumptions.issq), typeof(sensealg)}( A, b, u0_, p, alg, cacheval, isfresh, precsisfresh, Pl, Pr, abstol, reltol, - maxiters, verbose, assumptions, sensealg) + maxiters, verbose_spec, assumptions, sensealg) return cache end diff --git a/src/default.jl b/src/default.jl index c5997c037..cf91aeb5c 100644 --- a/src/default.jl +++ b/src/default.jl @@ -450,7 +450,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::Nothing, end function init_cacheval(alg::Nothing, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assump::OperatorAssumptions) + verbose::LinearVerbosity, assump::OperatorAssumptions) init_cacheval(defaultalg(A, b, assump), A, b, u, Pl, Pr, maxiters, abstol, reltol, verbose, assump) @@ -461,7 +461,7 @@ cache.cacheval = NamedTuple(LUFactorization = cache of LUFactorization, ...) """ @generated function init_cacheval(alg::DefaultLinearSolver, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assump::OperatorAssumptions) + verbose::LinearVerbosity, assump::OperatorAssumptions) caches = map(first.(EnumX.symbol_map(DefaultAlgorithmChoice.T))) do alg if alg === :KrylovJL_GMRES || alg === :KrylovJL_CRAIGMR || alg === :KrylovJL_LSMR quote @@ -513,7 +513,8 @@ end newex = quote sol = SciMLBase.solve!(cache, $(algchoice_to_alg(alg)), args...; kwargs...) if sol.retcode === ReturnCode.Failure && alg.safetyfallback - ## TODO: Add verbosity logging here about using the fallback + @SciMLMessage("LU factorization failed, falling back to QR factorization. 
`A` is potentially rank-deficient.", + cache.verbose, :default_lu_fallback) sol = SciMLBase.solve!( cache, QRFactorization(ColumnNorm()), args...; kwargs...) SciMLBase.build_linear_solution(alg, sol.u, sol.resid, sol.cache; @@ -533,7 +534,8 @@ end sol = SciMLBase.solve!(cache, $(algchoice_to_alg(alg)), args...; kwargs...) if sol.retcode === ReturnCode.Failure && alg.safetyfallback - ## TODO: Add verbosity logging here about using the fallback + @SciMLMessage("LU factorization failed, falling back to QR factorization. `A` is potentially rank-deficient.", + cache.verbose, :default_lu_fallback) sol = SciMLBase.solve!( cache, QRFactorization(ColumnNorm()), args...; kwargs...) SciMLBase.build_linear_solution(alg, sol.u, sol.resid, sol.cache; @@ -553,7 +555,8 @@ end sol = SciMLBase.solve!(cache, $(algchoice_to_alg(alg)), args...; kwargs...) if sol.retcode === ReturnCode.Failure && alg.safetyfallback - ## TODO: Add verbosity logging here about using the fallback + @SciMLMessage("LU factorization failed, falling back to QR factorization. `A` is potentially rank-deficient.", + cache.verbose, :default_lu_fallback) sol = SciMLBase.solve!( cache, QRFactorization(ColumnNorm()), args...; kwargs...) SciMLBase.build_linear_solution(alg, sol.u, sol.resid, sol.cache; @@ -573,7 +576,8 @@ end sol = SciMLBase.solve!(cache, $(algchoice_to_alg(alg)), args...; kwargs...) if sol.retcode === ReturnCode.Failure && alg.safetyfallback - ## TODO: Add verbosity logging here about using the fallback + @SciMLMessage("LU factorization failed, falling back to QR factorization. `A` is potentially rank-deficient.", + cache.verbose, :default_lu_fallback) sol = SciMLBase.solve!( cache, QRFactorization(ColumnNorm()), args...; kwargs...) SciMLBase.build_linear_solution(alg, sol.u, sol.resid, sol.cache; @@ -593,7 +597,8 @@ end sol = SciMLBase.solve!(cache, $(algchoice_to_alg(alg)), args...; kwargs...) 
if sol.retcode === ReturnCode.Failure && alg.safetyfallback - ## TODO: Add verbosity logging here about using the fallback + @SciMLMessage("LU factorization failed, falling back to QR factorization. `A` is potentially rank-deficient.", + cache.verbose, :default_lu_fallback) sol = SciMLBase.solve!( cache, QRFactorization(ColumnNorm()), args...; kwargs...) SciMLBase.build_linear_solution(alg, sol.u, sol.resid, sol.cache; diff --git a/src/factorization.jl b/src/factorization.jl index 9f9065c4a..ca335d0ed 100644 --- a/src/factorization.jl +++ b/src/factorization.jl @@ -50,14 +50,14 @@ end # RF Bad fallback: will fail if `A` is just a stand-in # This should instead just create the factorization type. function init_cacheval(alg::AbstractFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, - reltol, verbose::Bool, assumptions::OperatorAssumptions) + reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) do_factorization(alg, convert(AbstractMatrix, A), b, u) end ## RFLU Factorization function LinearSolve.init_cacheval(alg::RFLUFactorization, A, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ipiv = Vector{LinearAlgebra.BlasInt}(undef, min(size(A)...)) ArrayInterface.lu_instance(convert(AbstractMatrix, A)), ipiv end @@ -65,14 +65,14 @@ end function LinearSolve.init_cacheval( alg::RFLUFactorization, A::Matrix{Float64}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_LU, PREALLOCATED_IPIV end function LinearSolve.init_cacheval(alg::RFLUFactorization, A::Union{Diagonal, SymTridiagonal, Tridiagonal}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing, nothing end @@ -171,7 
+171,7 @@ end function init_cacheval( alg::GenericLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ipiv = Vector{LinearAlgebra.BlasInt}(undef, min(size(A)...)) ArrayInterface.lu_instance(convert(AbstractMatrix, A)), ipiv @@ -179,7 +179,7 @@ end function init_cacheval( alg::GenericLUFactorization, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_LU, PREALLOCATED_IPIV end @@ -211,21 +211,21 @@ end function init_cacheval( alg::LUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(convert(AbstractMatrix, A)) end function init_cacheval(alg::LUFactorization, A::Union{<:Adjoint, <:Transpose}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) error_no_cudss_lu(A) return lu(A; check = false) end function init_cacheval(alg::GenericLUFactorization, A::Union{<:Adjoint, <:Transpose}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) error_no_cudss_lu(A) A isa GPUArraysCore.AnyGPUArray && return nothing ipiv = Vector{LinearAlgebra.BlasInt}(undef, 0) @@ -236,21 +236,21 @@ const PREALLOCATED_LU = ArrayInterface.lu_instance(rand(1, 1)) function init_cacheval(alg::LUFactorization, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_LU end function init_cacheval(alg::LUFactorization, A::AbstractSciMLOperator, b, u, Pl, Pr, - 
maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function init_cacheval(alg::GenericLUFactorization, A::AbstractSciMLOperator, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -300,13 +300,13 @@ function do_factorization(alg::QRFactorization, A, b, u) end function init_cacheval(alg::QRFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.qr_instance(convert(AbstractMatrix, A), alg.pivot) end function init_cacheval(alg::QRFactorization, A::Symmetric{<:Number, <:Array}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return qr(convert(AbstractMatrix, A), alg.pivot) end @@ -314,13 +314,13 @@ end const PREALLOCATED_QR_ColumnNorm = ArrayInterface.qr_instance(rand(1, 1), ColumnNorm()) function init_cacheval(alg::QRFactorization{ColumnNorm}, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return PREALLOCATED_QR_ColumnNorm end function init_cacheval( alg::QRFactorization, A::Union{<:Adjoint, <:Transpose}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) A isa GPUArraysCore.AnyGPUArray && return qr(A) return qr(A, alg.pivot) end @@ -328,12 +328,12 @@ end const PREALLOCATED_QR_NoPivot = ArrayInterface.qr_instance(rand(1, 1)) function init_cacheval(alg::QRFactorization{NoPivot}, A::Matrix{Float64}, b, u, Pl, Pr, - 
maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return PREALLOCATED_QR_NoPivot end function init_cacheval(alg::QRFactorization, A::AbstractSciMLOperator, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -379,33 +379,33 @@ function do_factorization(alg::CholeskyFactorization, A, b, u) end function init_cacheval(alg::CholeskyFactorization, A::SMatrix{S1, S2}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {S1, S2} cholesky(A) end function init_cacheval(alg::CholeskyFactorization, A::GPUArraysCore.AnyGPUArray, b, u, Pl, - Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + Pr, maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) cholesky(A; check = false) end function init_cacheval( alg::CholeskyFactorization, A::AbstractArray{<:BLASELTYPES}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.cholesky_instance(convert(AbstractMatrix, A), alg.pivot) end const PREALLOCATED_CHOLESKY = ArrayInterface.cholesky_instance(rand(1, 1), NoPivot()) function init_cacheval(alg::CholeskyFactorization, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_CHOLESKY end function init_cacheval(alg::CholeskyFactorization, A::Union{Diagonal, AbstractSciMLOperator, AbstractArray}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, 
verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -433,12 +433,12 @@ end function init_cacheval(alg::LDLtFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function init_cacheval(alg::LDLtFactorization, A::SymTridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.ldlt_instance(convert(AbstractMatrix, A)) end @@ -472,7 +472,7 @@ function do_factorization(alg::SVDFactorization, A, b, u) end function init_cacheval(alg::SVDFactorization, A::Union{Matrix, SMatrix}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.svd_instance(convert(AbstractMatrix, A)) end @@ -480,13 +480,13 @@ end const PREALLOCATED_SVD = ArrayInterface.svd_instance(rand(1, 1)) function init_cacheval(alg::SVDFactorization, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_SVD end function init_cacheval(alg::SVDFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -515,7 +515,7 @@ end function init_cacheval(alg::BunchKaufmanFactorization, A::Symmetric{<:Number, <:Matrix}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.bunchkaufman_instance(convert(AbstractMatrix, A)) end @@ -525,13 +525,13 @@ const PREALLOCATED_BUNCHKAUFMAN = ArrayInterface.bunchkaufman_instance(Symmetric function 
init_cacheval(alg::BunchKaufmanFactorization, A::Symmetric{Float64, Matrix{Float64}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_BUNCHKAUFMAN end function init_cacheval(alg::BunchKaufmanFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -572,58 +572,58 @@ end function init_cacheval( alg::GenericFactorization{typeof(lu)}, A::AbstractMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval( alg::GenericFactorization{typeof(lu!)}, A::AbstractMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(lu)}, A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(lu!)}, A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(lu)}, A::Diagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function 
init_cacheval(alg::GenericFactorization{typeof(lu)}, A::Tridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(lu!)}, A::Diagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval( alg::GenericFactorization{typeof(lu!)}, A::Tridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval( alg::GenericFactorization{typeof(lu!)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end function init_cacheval( alg::GenericFactorization{typeof(lu)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end @@ -631,24 +631,24 @@ end function init_cacheval( alg::GenericFactorization{typeof(qr)}, A::AbstractMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.qr_instance(A) end function init_cacheval( alg::GenericFactorization{typeof(qr!)}, A::AbstractMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) 
ArrayInterface.qr_instance(A) end function init_cacheval( alg::GenericFactorization{typeof(qr)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end function init_cacheval( alg::GenericFactorization{typeof(qr!)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end @@ -656,33 +656,33 @@ end function init_cacheval(alg::GenericFactorization{typeof(qr)}, A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.qr_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(qr!)}, A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.qr_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(qr)}, A::Diagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval(alg::GenericFactorization{typeof(qr)}, A::Tridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.qr_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(qr!)}, A::Diagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, 
reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval( alg::GenericFactorization{typeof(qr!)}, A::Tridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.qr_instance(A) end @@ -690,87 +690,87 @@ end function init_cacheval( alg::GenericFactorization{typeof(svd)}, A::AbstractMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.svd_instance(A) end function init_cacheval( alg::GenericFactorization{typeof(svd!)}, A::AbstractMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.svd_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(svd)}, A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.svd_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(svd!)}, A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.svd_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(svd)}, A::Diagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval( alg::GenericFactorization{typeof(svd)}, A::Tridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, 
verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.svd_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(svd!)}, A::Diagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval(alg::GenericFactorization{typeof(svd!)}, A::Tridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.svd_instance(A) end function init_cacheval( alg::GenericFactorization{typeof(svd!)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end function init_cacheval( alg::GenericFactorization{typeof(svd)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end function init_cacheval(alg::GenericFactorization, A::Diagonal, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval(alg::GenericFactorization, A::Tridiagonal, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval(alg::GenericFactorization, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, 
verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end function init_cacheval(alg::GenericFactorization, A, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) init_cacheval(alg, convert(AbstractMatrix, A), b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) end function init_cacheval(alg::GenericFactorization, A::AbstractMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) do_factorization(alg, A, b, u) end @@ -778,7 +778,7 @@ function init_cacheval( alg::Union{GenericFactorization{typeof(bunchkaufman!)}, GenericFactorization{typeof(bunchkaufman)}}, A::Union{Hermitian, Symmetric}, b, u, Pl, Pr, maxiters::Int, abstol, - reltol, verbose::Bool, assumptions::OperatorAssumptions) + reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) BunchKaufman(A.data, Array(1:size(A, 1)), A.uplo, true, false, 0) end @@ -787,7 +787,7 @@ function init_cacheval( GenericFactorization{typeof(bunchkaufman)}}, A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) if eltype(A) <: Complex return bunchkaufman!(Hermitian(A)) else @@ -801,49 +801,49 @@ end # Cholesky needs the posdef matrix, for GenericFactorization assume structure is needed function init_cacheval( alg::GenericFactorization{typeof(cholesky)}, A::AbstractMatrix, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) newA = 
copy(convert(AbstractMatrix, A)) do_factorization(alg, newA, b, u) end function init_cacheval( alg::GenericFactorization{typeof(cholesky!)}, A::AbstractMatrix, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) newA = copy(convert(AbstractMatrix, A)) do_factorization(alg, newA, b, u) end function init_cacheval(alg::GenericFactorization{typeof(cholesky!)}, A::Diagonal, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval( alg::GenericFactorization{typeof(cholesky!)}, A::Tridiagonal, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval( alg::GenericFactorization{typeof(cholesky!)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end function init_cacheval(alg::GenericFactorization{typeof(cholesky)}, A::Diagonal, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval( alg::GenericFactorization{typeof(cholesky)}, A::Tridiagonal, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval( alg::GenericFactorization{typeof(cholesky)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, 
verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end @@ -875,7 +875,7 @@ end function init_cacheval(alg::UMFPACKFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -901,7 +901,7 @@ end function init_cacheval(alg::KLUFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -929,7 +929,7 @@ end function init_cacheval(alg::CHOLMODFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -986,13 +986,13 @@ default_alias_b(::NormalCholeskyFactorization, ::Any, ::Any) = true const PREALLOCATED_NORMALCHOLESKY = ArrayInterface.cholesky_instance(rand(1, 1), NoPivot()) function init_cacheval(alg::NormalCholeskyFactorization, A::SMatrix, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return cholesky(Symmetric((A)' * A)) end function init_cacheval(alg::NormalCholeskyFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) A_ = convert(AbstractMatrix, A) return ArrayInterface.cholesky_instance( @@ -1003,13 +1003,13 @@ const PREALLOCATED_NORMALCHOLESKY_SYMMETRIC = ArrayInterface.cholesky_instance( Symmetric(rand(1, 1)), NoPivot()) function init_cacheval(alg::NormalCholeskyFactorization, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, 
verbose::LinearVerbosity, assumptions::OperatorAssumptions) return PREALLOCATED_NORMALCHOLESKY_SYMMETRIC end function init_cacheval(alg::NormalCholeskyFactorization, A::Union{Diagonal, AbstractSciMLOperator}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -1070,7 +1070,7 @@ default_alias_A(::NormalBunchKaufmanFactorization, ::Any, ::Any) = true default_alias_b(::NormalBunchKaufmanFactorization, ::Any, ::Any) = true function init_cacheval(alg::NormalBunchKaufmanFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.bunchkaufman_instance(convert(AbstractMatrix, A)) end @@ -1098,7 +1098,7 @@ A special implementation only for solving `Diagonal` matrices fast. struct DiagonalFactorization <: AbstractDenseFactorization end function init_cacheval(alg::DiagonalFactorization, A, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -1149,12 +1149,12 @@ end function init_cacheval(alg::SparspakFactorization, A::Union{AbstractMatrix, Nothing, AbstractSciMLOperator}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function init_cacheval(::SparspakFactorization, ::StaticArray, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -1194,39 +1194,39 @@ struct CliqueTreesFactorization{A, S} <: AbstractSparseFactorization end function init_cacheval(::CliqueTreesFactorization, ::Union{AbstractMatrix, Nothing, AbstractSciMLOperator}, 
b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function init_cacheval(::CliqueTreesFactorization, ::StaticArray, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end # Fallback init_cacheval for extension-based algorithms when extensions aren't loaded # These return nothing since the actual implementations are in the extensions function init_cacheval(::BLISLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function init_cacheval(::CudaOffloadLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function init_cacheval(::MetalLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end for alg in vcat(InteractiveUtils.subtypes(AbstractDenseFactorization), InteractiveUtils.subtypes(AbstractSparseFactorization)) @eval function init_cacheval(alg::$alg, A::MatrixOperator, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) init_cacheval(alg, A.A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) end end diff --git a/src/iterative_wrappers.jl b/src/iterative_wrappers.jl index 
16d28d908..35a904c3d 100644 --- a/src/iterative_wrappers.jl +++ b/src/iterative_wrappers.jl @@ -185,7 +185,7 @@ end # zeroinit allows for init_cacheval to start by initing with A (0,0) function init_cacheval(alg::KrylovJL, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions; zeroinit = true) + verbose::LinearVerbosity, assumptions::OperatorAssumptions; zeroinit = true) KS = get_KrylovJL_solver(alg.KrylovAlg) if zeroinit @@ -240,7 +240,7 @@ end # Krylov.jl tries to init with `ArrayPartition(undef, ...)`. Avoid hitting that! function init_cacheval( alg::LinearSolve.KrylovJL, A, b::RecursiveArrayTools.ArrayPartition, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, ::LinearSolve.OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, ::LinearSolve.OperatorAssumptions) return nothing end @@ -268,7 +268,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::KrylovJL; kwargs...) atol = float(cache.abstol) rtol = float(cache.reltol) itmax = cache.maxiters - verbose = cache.verbose ? 1 : 0 + verbose = cache.verbose cacheval = if cache.alg isa DefaultLinearSolver if alg.KrylovAlg === Krylov.gmres! @@ -284,13 +284,16 @@ function SciMLBase.solve!(cache::LinearCache, alg::KrylovJL; kwargs...) cache.cacheval end + krylovJL_verbose = verbosity_to_int(verbose.KrylovJL_verbosity) + args = (cacheval, cache.A, cache.b) - kwargs = (atol = atol, rtol, itmax, verbose, + kwargs = (atol = atol, rtol, itmax, verbose = krylovJL_verbose, ldiv = true, history = true, alg.kwargs...) if cache.cacheval isa Krylov.CgWorkspace N !== I && - @warn "$(alg.KrylovAlg) doesn't support right preconditioning." + @SciMLMessage("$(alg.KrylovAlg) doesn't support right preconditioning.", + verbose, :no_right_preconditioning) Krylov.krylov_solve!(args...; M, kwargs...) elseif cache.cacheval isa Krylov.GmresWorkspace Krylov.krylov_solve!(args...; M, N, restart = alg.gmres_restart > 0, kwargs...) 
@@ -298,7 +301,8 @@ function SciMLBase.solve!(cache::LinearCache, alg::KrylovJL; kwargs...) Krylov.krylov_solve!(args...; M, N, kwargs...) elseif cache.cacheval isa Krylov.MinresWorkspace N !== I && - @warn "$(alg.KrylovAlg) doesn't support right preconditioning." + @SciMLMessage("$(alg.KrylovAlg) doesn't support right preconditioning.", + verbose, :no_right_preconditioning) Krylov.krylov_solve!(args...; M, kwargs...) else Krylov.krylov_solve!(args...; kwargs...) diff --git a/src/mkl.jl b/src/mkl.jl index 84f517d0c..5882fa2ef 100644 --- a/src/mkl.jl +++ b/src/mkl.jl @@ -223,14 +223,14 @@ const PREALLOCATED_MKL_LU = begin end function LinearSolve.init_cacheval(alg::MKLLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_MKL_LU end function LinearSolve.init_cacheval(alg::MKLLUFactorization, A::AbstractMatrix{<:Union{Float32, ComplexF32, ComplexF64}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) A = rand(eltype(A), 0, 0) ArrayInterface.lu_instance(A), Ref{BlasInt}() @@ -242,12 +242,45 @@ function SciMLBase.solve!(cache::LinearCache, alg::MKLLUFactorization; error("Error, MKL binary is missing but solve is being called. 
Report this issue") A = cache.A A = convert(AbstractMatrix, A) + verbose = cache.verbose if cache.isfresh cacheval = @get_cacheval(cache, :MKLLUFactorization) res = getrf!(A; ipiv = cacheval[1].ipiv, info = cacheval[2]) fact = LU(res[1:3]...), res[4] cache.cacheval = fact + info_value = res[3] + + if info_value != 0 + if !isa(verbose.blas_info, SciMLLogging.Silent) || !isa(verbose.blas_errors, SciMLLogging.Silent) || + !isa(verbose.blas_invalid_args, SciMLLogging.Silent) + op_info = get_blas_operation_info(:dgetrf, A, cache.b, condition = !isa(verbose.condition_number, SciMLLogging.Silent)) + @SciMLMessage(cache.verbose, :condition_number) do + if op_info[:condition_number] === nothing + return "Matrix condition number calculation failed." + else + return "Matrix condition number: $(round(op_info[:condition_number], sigdigits=4)) for $(size(A, 1))×$(size(A, 2)) matrix in dgetrf" + end + end + verb_option, message = blas_info_msg( + :dgetrf, info_value; extra_context = op_info) + @SciMLMessage(message, verbose, verb_option) + end + else + @SciMLMessage(cache.verbose, :blas_success) do + op_info = get_blas_operation_info(:dgetrf, A, cache.b, + condition = !isa(verbose.condition_number, SciMLLogging.Silent)) + @SciMLMessage(cache.verbose, :condition_number) do + if op_info[:condition_number] === nothing + return "Matrix condition number calculation failed." 
+ else + return "Matrix condition number: $(round(op_info[:condition_number], sigdigits=4)) for $(size(A, 1))×$(size(A, 2)) matrix in dgetrf" + end + end + return "BLAS LU factorization (dgetrf) completed successfully for $(op_info[:matrix_size]) matrix" + end + end + if !LinearAlgebra.issuccess(fact[1]) return SciMLBase.build_linear_solution( alg, cache.u, nothing, cache; retcode = ReturnCode.Failure) @@ -281,7 +314,7 @@ const PREALLOCATED_MKL32_LU = begin end function LinearSolve.init_cacheval(alg::MKL32MixedLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) # Pre-allocate appropriate 32-bit arrays based on input type m, n = size(A) diff --git a/src/openblas.jl b/src/openblas.jl index 3134c9812..0c701ff1e 100644 --- a/src/openblas.jl +++ b/src/openblas.jl @@ -245,14 +245,14 @@ const PREALLOCATED_OPENBLAS_LU = begin end function LinearSolve.init_cacheval(alg::OpenBLASLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_OPENBLAS_LU end function LinearSolve.init_cacheval(alg::OpenBLASLUFactorization, A::AbstractMatrix{<:Union{Float32, ComplexF32, ComplexF64}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) A = rand(eltype(A), 0, 0) ArrayInterface.lu_instance(A), Ref{BlasInt}() @@ -264,12 +264,45 @@ function SciMLBase.solve!(cache::LinearCache, alg::OpenBLASLUFactorization; error("Error, OpenBLAS binary is missing but solve is being called. 
Report this issue") A = cache.A A = convert(AbstractMatrix, A) + verbose = cache.verbose if cache.isfresh cacheval = @get_cacheval(cache, :OpenBLASLUFactorization) res = openblas_getrf!(A; ipiv = cacheval[1].ipiv, info = cacheval[2]) fact = LU(res[1:3]...), res[4] cache.cacheval = fact + info_value = res[3] + + if info_value != 0 + if !isa(verbose.blas_info, SciMLLogging.Silent) || !isa(verbose.blas_errors, SciMLLogging.Silent) || + !isa(verbose.blas_invalid_args, SciMLLogging.Silent) + op_info = get_blas_operation_info(:dgetrf, A, cache.b, condition = !isa(verbose.condition_number, SciMLLogging.Silent)) + @SciMLMessage(cache.verbose, :condition_number) do + if op_info[:condition_number] === nothing + return "Matrix condition number calculation failed." + else + return "Matrix condition number: $(round(op_info[:condition_number], sigdigits=4)) for $(size(A, 1))×$(size(A, 2)) matrix in dgetrf" + end + end + verb_option, message = blas_info_msg( + :dgetrf, info_value; extra_context = op_info) + @SciMLMessage(message, verbose, verb_option) + end + else + @SciMLMessage(cache.verbose, :blas_success) do + op_info = get_blas_operation_info(:dgetrf, A, cache.b, + condition = !isa(verbose.condition_number, SciMLLogging.Silent)) + @SciMLMessage(cache.verbose, :condition_number) do + if op_info[:condition_number] === nothing + return "Matrix condition number calculation failed." 
+ else + return "Matrix condition number: $(round(op_info[:condition_number], sigdigits=4)) for $(size(A, 1))×$(size(A, 2)) matrix in dgetrf" + end + end + return "BLAS LU factorization (dgetrf) completed successfully for $(op_info[:matrix_size]) matrix" + end + end + if !LinearAlgebra.issuccess(fact[1]) return SciMLBase.build_linear_solution( alg, cache.u, nothing, cache; retcode = ReturnCode.Failure) @@ -303,7 +336,7 @@ const PREALLOCATED_OPENBLAS32_LU = begin end function LinearSolve.init_cacheval(alg::OpenBLAS32MixedLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) # Pre-allocate appropriate 32-bit arrays based on input type m, n = size(A) diff --git a/src/simplegmres.jl b/src/simplegmres.jl index a21826c9f..644a62b61 100644 --- a/src/simplegmres.jl +++ b/src/simplegmres.jl @@ -161,7 +161,7 @@ function init_cacheval(alg::SimpleGMRES{UDB}, args...; kwargs...) where {UDB} end function _init_cacheval(::Val{false}, alg::SimpleGMRES, A, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, ::Bool, ::OperatorAssumptions; zeroinit = true, kwargs...) + abstol, reltol, ::LinearVerbosity, ::OperatorAssumptions; zeroinit = true, kwargs...) 
@unpack memory, restart, blocksize, warm_start = alg if zeroinit @@ -392,7 +392,7 @@ function SciMLBase.solve!(cache::SimpleGMRESCache{false}, lincache::LinearCache) end function _init_cacheval(::Val{true}, alg::SimpleGMRES, A, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, ::Bool, ::OperatorAssumptions; zeroinit = true, + abstol, reltol, ::LinearVerbosity, ::OperatorAssumptions; zeroinit = true, blocksize = alg.blocksize) @unpack memory, restart, warm_start = alg diff --git a/src/simplelu.jl b/src/simplelu.jl index 9917f5869..78d6775ab 100644 --- a/src/simplelu.jl +++ b/src/simplelu.jl @@ -218,6 +218,6 @@ function SciMLBase.solve!(cache::LinearCache, alg::SimpleLUFactorization; kwargs end function init_cacheval(alg::SimpleLUFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, - reltol, verbose::Bool, assumptions::OperatorAssumptions) + reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) LUSolver(convert(AbstractMatrix, A)) end diff --git a/src/verbosity.jl b/src/verbosity.jl new file mode 100644 index 000000000..9258ba299 --- /dev/null +++ b/src/verbosity.jl @@ -0,0 +1,217 @@ +LinearSolve.@concrete struct LinearVerbosity <: + AbstractVerbositySpecifier + # Error control + default_lu_fallback + # Performance + no_right_preconditioning + # Numerical + using_IterativeSolvers + IterativeSolvers_iterations + KrylovKit_verbosity + KrylovJL_verbosity + HYPRE_verbosity + pardiso_verbosity + blas_errors + blas_invalid_args + blas_info + blas_success + condition_number +end + +function LinearVerbosity(; + error_control = nothing, performance = nothing, numerical = nothing, kwargs...) 
+ # Validate group arguments + if error_control !== nothing && !(error_control isa AbstractMessageLevel) + throw(ArgumentError("error_control must be a SciMLLogging.AbstractMessageLevel, got $(typeof(error_control))")) + end + if performance !== nothing && !(performance isa AbstractMessageLevel) + throw(ArgumentError("performance must be a SciMLLogging.AbstractMessageLevel, got $(typeof(performance))")) + end + if numerical !== nothing && !(numerical isa AbstractMessageLevel) + throw(ArgumentError("numerical must be a SciMLLogging.AbstractMessageLevel, got $(typeof(numerical))")) + end + + # Validate individual kwargs + for (key, value) in kwargs + if !(key in error_control_options || key in performance_options || + key in numerical_options) + throw(ArgumentError("Unknown verbosity option: $key. Valid options are: $(tuple(error_control_options..., performance_options..., numerical_options...))")) + end + if !(value isa AbstractMessageLevel) + throw(ArgumentError("$key must be a SciMLLogging.AbstractMessageLevel, got $(typeof(value))")) + end + end + + # Build arguments using NamedTuple for type stability + default_args = ( + default_lu_fallback = WarnLevel(), + no_right_preconditioning = WarnLevel(), + using_IterativeSolvers = Silent(), + IterativeSolvers_iterations = Silent(), + KrylovKit_verbosity = CustomLevel(1), # WARN_LEVEL in KrylovKit.jl + KrylovJL_verbosity = Silent(), + HYPRE_verbosity = InfoLevel(), + pardiso_verbosity = Silent(), + blas_errors = ErrorLevel(), + blas_invalid_args = ErrorLevel(), + blas_info = Silent(), + blas_success = Silent(), + condition_number=Silent() + ) + + # Apply group-level settings + final_args = if error_control !== nothing || performance !== nothing || + numerical !== nothing + NamedTuple{keys(default_args)}( + _resolve_arg_value( + key, default_args[key], error_control, performance, numerical) + for key in keys(default_args) + ) + else + default_args + end + + # Apply individual overrides + if !isempty(kwargs) + final_args 
= merge(final_args, NamedTuple(kwargs)) + end + + LinearVerbosity(values(final_args)...) +end + +# Constructor for verbosity presets following the hierarchical levels: +# None < Minimal < Standard < Detailed < All +# Each level includes all messages from levels below it plus additional ones +function LinearVerbosity(verbose::AbstractVerbosityPreset) + if verbose isa Minimal + # Minimal: Only fatal errors and critical warnings (BLAS errors/invalid args) + LinearVerbosity( + default_lu_fallback = Silent(), + no_right_preconditioning = Silent(), + using_IterativeSolvers = Silent(), + IterativeSolvers_iterations = Silent(), + KrylovKit_verbosity = Silent(), + KrylovJL_verbosity = Silent(), + HYPRE_verbosity = Silent(), + pardiso_verbosity = Silent(), + blas_errors = ErrorLevel(), + blas_invalid_args = ErrorLevel(), + blas_info = Silent(), + blas_success = Silent(), + condition_number = Silent() + ) + elseif verbose isa Standard + # Standard: Everything from Minimal + non-fatal warnings + LinearVerbosity() + elseif verbose isa Detailed + # Detailed: Everything from Standard + debugging/solver behavior + LinearVerbosity( + default_lu_fallback = WarnLevel(), + no_right_preconditioning = InfoLevel(), + using_IterativeSolvers = InfoLevel(), + IterativeSolvers_iterations = Silent(), + KrylovKit_verbosity = CustomLevel(2), # STARTSTOP_LEVEL in KrylovKit.jl + KrylovJL_verbosity = CustomLevel(1), # verbose = true in Krylov.jl + HYPRE_verbosity = InfoLevel(), + pardiso_verbosity = CustomLevel(1), # verbose = true in Pardiso.jl + blas_errors = ErrorLevel(), + blas_invalid_args = ErrorLevel(), + blas_info = InfoLevel(), + blas_success = InfoLevel(), + condition_number = Silent() + ) + elseif verbose isa All + # All: Maximum verbosity - every possible logging message at InfoLevel + LinearVerbosity( + default_lu_fallback = WarnLevel(), + no_right_preconditioning = InfoLevel(), + using_IterativeSolvers = InfoLevel(), + IterativeSolvers_iterations = InfoLevel(), + KrylovKit_verbosity 
= CustomLevel(3), # EACHITERATION_LEVEL in KrylovKit.jl + KrylovJL_verbosity = CustomLevel(1), + HYPRE_verbosity = InfoLevel(), + pardiso_verbosity = CustomLevel(1), # verbose = true in Pardiso.jl + blas_errors = ErrorLevel(), + blas_invalid_args = ErrorLevel(), + blas_info = InfoLevel(), + blas_success = InfoLevel(), + condition_number = InfoLevel() + ) + end +end + +@inline function LinearVerbosity(verbose::None) + LinearVerbosity( + Silent(), + Silent(), + Silent(), + Silent(), + Silent(), + Silent(), + Silent(), + Silent(), + Silent(), + Silent(), + Silent(), + Silent(), + Silent()) +end + +# Helper function to resolve argument values based on group membership +@inline function _resolve_arg_value(key::Symbol, default_val, error_control, performance, numerical) + if key in error_control_options && error_control !== nothing + return error_control + elseif key in performance_options && performance !== nothing + return performance + elseif key in numerical_options && numerical !== nothing + return numerical + else + return default_val + end +end + +# Group classifications +const error_control_options = (:default_lu_fallback, :blas_errors, :blas_invalid_args) +const performance_options = (:no_right_preconditioning,) +const numerical_options = (:using_IterativeSolvers, :IterativeSolvers_iterations, + :KrylovKit_verbosity, :KrylovJL_verbosity, :HYPRE_verbosity, :pardiso_verbosity, + :blas_info, :blas_success, :condition_number) + +function option_group(option::Symbol) + if option in error_control_options + return :error_control + elseif option in performance_options + return :performance + elseif option in numerical_options + return :numerical + else + error("Unknown verbosity option: $option") + end +end + +# Get all options in a group +function group_options(verbosity::LinearVerbosity, group::Symbol) + if group === :error_control + return NamedTuple{error_control_options}(getproperty(verbosity, opt) for opt in error_control_options) + elseif group === :performance + 
return NamedTuple{performance_options}(getproperty(verbosity, opt) for opt in performance_options) + elseif group === :numerical + return NamedTuple{numerical_options}(getproperty(verbosity, opt) for opt in numerical_options) + else + error("Unknown group: $group") + end +end + +function Base.getproperty(verbosity::LinearVerbosity, name::Symbol) + # Check if this is a group name + if name === :error_control + return group_options(verbosity, :error_control) + elseif name === :performance + return group_options(verbosity, :performance) + elseif name === :numerical + return group_options(verbosity, :numerical) + else + # Fall back to default field access + return getfield(verbosity, name) + end +end \ No newline at end of file diff --git a/test/runtests.jl b/test/runtests.jl index 1362908bb..c39f973e3 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -17,6 +17,7 @@ if GROUP == "All" || GROUP == "Core" @time @safetestset "Adjoint Sensitivity" include("adjoint.jl") @time @safetestset "ForwardDiff Overloads" include("forwarddiff_overloads.jl") @time @safetestset "Traits" include("traits.jl") + @time @safetestset "Verbosity" include("verbosity.jl") @time @safetestset "BandedMatrices" include("banded.jl") @time @safetestset "Mixed Precision" include("test_mixed_precision.jl") end diff --git a/test/verbosity.jl b/test/verbosity.jl new file mode 100644 index 000000000..249c1abe6 --- /dev/null +++ b/test/verbosity.jl @@ -0,0 +1,505 @@ +using LinearSolve +using LinearSolve: LinearVerbosity, option_group, group_options, BLISLUFactorization, + __appleaccelerate_isavailable, __mkl_isavailable, __openblas_isavailable +using SciMLLogging +using Test + +@testset "LinearVerbosity Tests" begin + @testset "Default constructor" begin + v1 = LinearVerbosity() + @test v1 isa LinearVerbosity + @test v1.default_lu_fallback isa SciMLLogging.WarnLevel + @test v1.KrylovKit_verbosity == SciMLLogging.CustomLevel(1) + end + @testset "LinearVerbosity constructors" begin + v3_none = 
LinearVerbosity(SciMLLogging.None()) + v3_all = LinearVerbosity(SciMLLogging.All()) + v3_minimal = LinearVerbosity(SciMLLogging.Minimal()) + v3_standard = LinearVerbosity(SciMLLogging.Standard()) + v3_detailed = LinearVerbosity(SciMLLogging.Detailed()) + + @test v3_all.default_lu_fallback isa SciMLLogging.WarnLevel + @test v3_minimal.default_lu_fallback isa SciMLLogging.Silent + @test v3_minimal.KrylovKit_verbosity isa SciMLLogging.Silent + @test v3_detailed.KrylovKit_verbosity == SciMLLogging.CustomLevel(2) + end + + @testset "Group-level keyword constructors" begin + v4_error = LinearVerbosity(error_control = ErrorLevel()) + @test v4_error.default_lu_fallback isa SciMLLogging.ErrorLevel + + v4_numerical = LinearVerbosity(numerical = Silent()) + @test v4_numerical.KrylovKit_verbosity isa SciMLLogging.Silent + @test v4_numerical.using_IterativeSolvers isa SciMLLogging.Silent + @test v4_numerical.pardiso_verbosity isa SciMLLogging.Silent + + v4_performance = LinearVerbosity(performance = InfoLevel()) + @test v4_performance.no_right_preconditioning isa SciMLLogging.InfoLevel + end + + @testset "Mixed group and individual settings" begin + v5_mixed = LinearVerbosity( + numerical = Silent(), + KrylovKit_verbosity = WarnLevel(), + performance = InfoLevel() + ) + # Individual override should take precedence + @test v5_mixed.KrylovKit_verbosity isa SciMLLogging.WarnLevel + # Other numerical options should use group setting + @test v5_mixed.using_IterativeSolvers isa SciMLLogging.Silent + # Performance group setting should apply + @test v5_mixed.no_right_preconditioning isa SciMLLogging.InfoLevel + end + + @testset "Individual keyword arguments" begin + v6_individual = LinearVerbosity( + default_lu_fallback = ErrorLevel(), + KrylovKit_verbosity = InfoLevel(), + pardiso_verbosity = Silent() + ) + @test v6_individual.default_lu_fallback isa SciMLLogging.ErrorLevel + @test v6_individual.KrylovKit_verbosity isa SciMLLogging.InfoLevel + @test v6_individual.pardiso_verbosity isa 
SciMLLogging.Silent + # Unspecified options should use defaults + @test v6_individual.no_right_preconditioning isa SciMLLogging.WarnLevel + end + + @testset "Group classification functions" begin + @test option_group(:default_lu_fallback) == :error_control + @test option_group(:KrylovKit_verbosity) == :numerical + @test option_group(:no_right_preconditioning) == :performance + + # Test error for unknown option + @test_throws ErrorException option_group(:unknown_option) + end + + @testset "Group options function" begin + v8 = LinearVerbosity(numerical = WarnLevel()) + numerical_opts = group_options(v8, :numerical) + @test numerical_opts isa NamedTuple + @test :KrylovKit_verbosity in keys(numerical_opts) + @test :using_IterativeSolvers in keys(numerical_opts) + @test numerical_opts.KrylovKit_verbosity isa SciMLLogging.WarnLevel + + error_opts = group_options(v8, :error_control) + @test :default_lu_fallback in keys(error_opts) + + performance_opts = group_options(v8, :performance) + @test :no_right_preconditioning in keys(performance_opts) + + # Test error for unknown group + @test_throws ErrorException group_options(v8, :unknown_group) + end + + @testset "Group getproperty access" begin + v = LinearVerbosity() + + # Test getting groups returns NamedTuples + error_group = v.error_control + performance_group = v.performance + numerical_group = v.numerical + + @test error_group isa NamedTuple + @test performance_group isa NamedTuple + @test numerical_group isa NamedTuple + + # Test correct keys are present + @test :default_lu_fallback in keys(error_group) + @test :no_right_preconditioning in keys(performance_group) + @test :KrylovKit_verbosity in keys(numerical_group) + @test :using_IterativeSolvers in keys(numerical_group) + @test :pardiso_verbosity in keys(numerical_group) + + # Test values are AbstractMessageLevel types + @test error_group.default_lu_fallback isa SciMLLogging.AbstractMessageLevel + @test performance_group.no_right_preconditioning isa 
SciMLLogging.AbstractMessageLevel + @test numerical_group.KrylovKit_verbosity isa SciMLLogging.AbstractMessageLevel + + # Individual field access should still work + @test v.default_lu_fallback isa SciMLLogging.WarnLevel + @test v.KrylovKit_verbosity == SciMLLogging.CustomLevel(1) + end +end + + +@testset "LinearVerbosity Logs Tests" begin + A = [1.0 0 0 0 + 0 1 0 0 + 0 0 1 0 + 0 0 0 0] + b = rand(4) + prob = LinearProblem(A, b) + + @test_logs (:warn, + "LU factorization failed, falling back to QR factorization. `A` is potentially rank-deficient.") solve( + prob, + verbose = LinearVerbosity(default_lu_fallback = WarnLevel())) + + @test_logs (:warn, + "LU factorization failed, falling back to QR factorization. `A` is potentially rank-deficient.") solve( + prob, verbose = true) + + @test_logs min_level=SciMLLogging.Logging.Warn solve(prob, verbose = false) + + @test_logs (:info, + "LU factorization failed, falling back to QR factorization. `A` is potentially rank-deficient.") solve( + prob, + verbose = LinearVerbosity(default_lu_fallback = InfoLevel())) + + verb = LinearVerbosity(default_lu_fallback = WarnLevel()) + + @test_logs (:warn, + "LU factorization failed, falling back to QR factorization. 
`A` is potentially rank-deficient.") solve( + prob, + verbose = verb) + +end + +@testset "BLAS Return Code Interpretation" begin + # Test interpretation of various BLAS return codes + @testset "Return Code Interpretation" begin + # Test successful operation + category, message, details = LinearSolve.interpret_blas_code(:dgetrf, 0) + @test category == :success + @test message == "Operation completed successfully" + + # Test invalid argument + category, message, details = LinearSolve.interpret_blas_code(:dgetrf, -3) + @test category == :invalid_argument + @test occursin("Argument 3", details) + + # Test singular matrix in LU + category, message, details = LinearSolve.interpret_blas_code(:dgetrf, 2) + @test category == :singular_matrix + @test occursin("U(2,2)", details) + + # Test not positive definite in Cholesky + category, message, details = LinearSolve.interpret_blas_code(:dpotrf, 3) + @test category == :not_positive_definite + @test occursin("minor of order 3", details) + + # Test SVD convergence failure + category, message, details = LinearSolve.interpret_blas_code(:dgesvd, 5) + @test category == :convergence_failure + @test occursin("5 off-diagonal", details) + end + + @testset "BLAS Operation Info" begin + # Test getting operation info without condition number + A = rand(10, 10) + b = rand(10) + + # Test with condition_number disabled (default) + info = LinearSolve.get_blas_operation_info(:dgetrf, A, b) + + @test info[:matrix_size] == (10, 10) + @test info[:element_type] == Float64 + @test !haskey(info, :condition_number) # Should not compute by default + @test info[:memory_usage_MB] >= 0 # Memory can be 0 for very small matrices + + # Test with condition number computation enabled via verbosity + verbose_with_cond = LinearVerbosity(condition_number = InfoLevel()) + info_with_cond = LinearSolve.get_blas_operation_info( + :dgetrf, A, b, condition = !isa(verbose_with_cond.condition_number, SciMLLogging.Silent)) + @test haskey(info_with_cond, :condition_number) 
+ end + + @testset "Error Categories" begin + # Test different error categories are properly identified + test_cases = [ + (:dgetrf, 1, :singular_matrix), + (:dpotrf, 2, :not_positive_definite), + (:dgeqrf, 3, :numerical_issue), + (:dgesdd, 4, :convergence_failure), + (:dsyev, 5, :convergence_failure), + (:dsytrf, 6, :singular_matrix), + (:dgetrs, 1, :unexpected_error), + (:unknown_func, 1, :unknown_error) + ] + + for (func, code, expected_category) in test_cases + category, _, _ = LinearSolve.interpret_blas_code(func, code) + @test category == expected_category + end + end +end + +# Try to load BLIS extension +try + using blis_jll, LAPACK_jll +catch LoadError + # BLIS dependencies not available, tests will be skipped +end + +@testset "BLIS Verbosity Integration Tests" begin + @testset "BLIS solver with verbosity logging" begin + # Test basic BLIS solver functionality with verbosity + if Base.get_extension(LinearSolve, :LinearSolveBLISExt) === nothing + # Only test if BLIS is available + @info "Skipping BLIS tests - BLIS not available" + else + # Test successful solve with success logging enabled + A_good = [2.0 1.0; 1.0 2.0] + b_good = [3.0, 4.0] + prob_good = LinearProblem(A_good, b_good) + + verbose_success = LinearVerbosity( + blas_success = InfoLevel(), + blas_errors = Silent(), + blas_info = Silent() + ) + + @test_logs (:info, r"BLAS LU factorization.*completed successfully") solve( + prob_good, BLISLUFactorization(); verbose = verbose_success) + + # Test singular matrix with error logging + A_singular = [1.0 2.0; 2.0 4.0] + b_singular = [1.0, 2.0] + prob_singular = LinearProblem(A_singular, b_singular) + + verbose_errors = LinearVerbosity( + blas_errors = WarnLevel(), + blas_success = Silent(), + blas_info = Silent() + ) + + @test_logs (:warn, r"BLAS/LAPACK.*Matrix is singular") solve( + prob_singular, BLISLUFactorization(); verbose = verbose_errors) + + # Test with info logging enabled + verbose_info = LinearVerbosity( + blas_info = InfoLevel(), + 
blas_errors = InfoLevel(), + blas_success = Silent() + ) + + @test_logs (:info, r"BLAS/LAPACK.*Matrix is singular") solve( + prob_singular, BLISLUFactorization(); verbose = verbose_info) + + # Test with all BLAS logging disabled - should produce no logs + verbose_silent = LinearVerbosity( + blas_errors = Silent(), + blas_invalid_args = Silent(), + blas_info = Silent(), + blas_success = Silent() + ) + + @test_logs min_level=SciMLLogging.Logging.Warn solve( + prob_singular, BLISLUFactorization(); verbose = verbose_silent) + + # Test condition number logging if enabled + verbose_with_cond = LinearVerbosity( + condition_number = InfoLevel(), + blas_success = InfoLevel(), + blas_errors = Silent() + ) + + @test_logs (:info, r"Matrix condition number:.*for.*matrix") match_mode=:any solve( + prob_good, BLISLUFactorization(); verbose = verbose_with_cond) + end + end +end + +@testset "OpenBLAS Verbosity Integration Tests" begin + @testset "OpenBLAS solver with verbosity logging" begin + # Test basic OpenBLAS solver functionality with verbosity + if __openblas_isavailable() + # Test successful solve with success logging enabled + A_good = [2.0 1.0; 1.0 2.0] + b_good = [3.0, 4.0] + prob_good = LinearProblem(A_good, b_good) + + verbose_success = LinearVerbosity( + blas_success = InfoLevel(), + blas_errors = Silent(), + blas_info = Silent() + ) + + @test_logs (:info, r"BLAS LU factorization.*completed successfully") solve( + prob_good, OpenBLASLUFactorization(); verbose = verbose_success) + + # Test singular matrix with error logging + A_singular = [1.0 2.0; 2.0 4.0] + b_singular = [1.0, 2.0] + prob_singular = LinearProblem(A_singular, b_singular) + + verbose_errors = LinearVerbosity( + blas_errors = WarnLevel(), + blas_success = Silent(), + blas_info = Silent() + ) + + @test_logs (:warn, r"BLAS/LAPACK.*Matrix is singular") solve( + prob_singular, OpenBLASLUFactorization(); verbose = verbose_errors) + + # Test with info logging enabled + verbose_info = LinearVerbosity( + 
blas_info = InfoLevel(), + blas_errors = InfoLevel(), + blas_success = Silent() + ) + + @test_logs (:info, r"BLAS/LAPACK.*Matrix is singular") solve( + prob_singular, OpenBLASLUFactorization(); verbose = verbose_info) + + # Test with all BLAS logging disabled - should produce no logs + verbose_silent = LinearVerbosity( + blas_errors = Silent(), + blas_invalid_args = Silent(), + blas_info = Silent(), + blas_success = Silent() + ) + + @test_logs min_level=SciMLLogging.Logging.Warn solve( + prob_singular, OpenBLASLUFactorization(); verbose = verbose_silent) + + # Test condition number logging if enabled + verbose_with_cond = LinearVerbosity( + condition_number = InfoLevel(), + blas_success = InfoLevel(), + blas_errors = Silent() + ) + + @test_logs (:info, r"Matrix condition number:.*for.*matrix") match_mode=:any solve( + prob_good, OpenBLASLUFactorization(); verbose = verbose_with_cond) + else + @info "Skipping OpenBLAS tests - OpenBLAS not available" + end + end +end + +@testset "AppleAccelerate Verbosity Integration Tests" begin + @testset "AppleAccelerate solver with verbosity logging" begin + # Test basic AppleAccelerate solver functionality with verbosity + if __appleaccelerate_isavailable() + # Test successful solve with success logging enabled + A_good = [2.0 1.0; 1.0 2.0] + b_good = [3.0, 4.0] + prob_good = LinearProblem(A_good, b_good) + + verbose_success = LinearVerbosity( + blas_success = InfoLevel(), + blas_errors = Silent(), + blas_info = Silent() + ) + + @test_logs (:info, r"BLAS LU factorization.*completed successfully") solve( + prob_good, AppleAccelerateLUFactorization(); verbose = verbose_success) + + # Test singular matrix with error logging + A_singular = [1.0 2.0; 2.0 4.0] + b_singular = [1.0, 2.0] + prob_singular = LinearProblem(A_singular, b_singular) + + verbose_errors = LinearVerbosity( + blas_errors = WarnLevel(), + blas_success = Silent(), + blas_info = Silent() + ) + + @test_logs (:warn, r"BLAS/LAPACK.*Matrix is singular") solve( + 
prob_singular, AppleAccelerateLUFactorization(); verbose = verbose_errors) + + # Test with info logging enabled + verbose_info = LinearVerbosity( + blas_info = InfoLevel(), + blas_errors = InfoLevel(), + blas_success = Silent() + ) + + @test_logs (:info, r"BLAS/LAPACK.*Matrix is singular") solve( + prob_singular, AppleAccelerateLUFactorization(); verbose = verbose_info) + + # Test with all BLAS logging disabled - should produce no logs + verbose_silent = LinearVerbosity( + blas_errors = Silent(), + blas_invalid_args = Silent(), + blas_info = Silent(), + blas_success = Silent() + ) + + @test_logs min_level=SciMLLogging.Logging.Warn solve( + prob_singular, AppleAccelerateLUFactorization(); verbose = verbose_silent) + + # Test condition number logging if enabled + verbose_with_cond = LinearVerbosity( + condition_number = InfoLevel(), + blas_success = InfoLevel(), + blas_errors = Silent() + ) + + @test_logs (:info, r"Matrix condition number:.*for.*matrix") match_mode=:any solve( + prob_good, AppleAccelerateLUFactorization(); verbose = verbose_with_cond) + else + @info "Skipping AppleAccelerate tests - AppleAccelerate not available" + end + end +end + +@testset "MKL Verbosity Integration Tests" begin + @testset "MKL solver with verbosity logging" begin + # Test basic MKL solver functionality with verbosity + if __mkl_isavailable() + # Test successful solve with success logging enabled + A_good = [2.0 1.0; 1.0 2.0] + b_good = [3.0, 4.0] + prob_good = LinearProblem(A_good, b_good) + + verbose_success = LinearVerbosity( + blas_success = InfoLevel(), + blas_errors = Silent(), + blas_info = Silent() + ) + + @test_logs (:info, r"BLAS LU factorization.*completed successfully") solve( + prob_good, MKLLUFactorization(); verbose = verbose_success) + + # Test singular matrix with error logging + A_singular = [1.0 2.0; 2.0 4.0] + b_singular = [1.0, 2.0] + prob_singular = LinearProblem(A_singular, b_singular) + + verbose_errors = LinearVerbosity( + blas_errors = WarnLevel(), + 
blas_success = Silent(), + blas_info = Silent() + ) + + @test_logs (:warn, r"BLAS/LAPACK.*Matrix is singular") solve( + prob_singular, MKLLUFactorization(); verbose = verbose_errors) + + # Test with info logging enabled + verbose_info = LinearVerbosity( + blas_info = InfoLevel(), + blas_errors = InfoLevel(), + blas_success = Silent() + ) + + @test_logs (:info, r"BLAS/LAPACK.*Matrix is singular") solve( + prob_singular, MKLLUFactorization(); verbose = verbose_info) + + # Test with all BLAS logging disabled - should produce no logs + verbose_silent = LinearVerbosity( + blas_errors = Silent(), + blas_invalid_args = Silent(), + blas_info = Silent(), + blas_success = Silent() + ) + + @test_logs min_level=SciMLLogging.Logging.Warn solve( + prob_singular, MKLLUFactorization(); verbose = verbose_silent) + + # Test condition number logging if enabled + verbose_with_cond = LinearVerbosity( + condition_number = InfoLevel(), + blas_success = InfoLevel(), + blas_errors = Silent() + ) + + @test_logs (:info, r"Matrix condition number:.*for.*matrix") match_mode=:any solve( + prob_good, MKLLUFactorization(); verbose = verbose_with_cond) + else + @info "Skipping MKL tests - MKL not available" + end + end +end \ No newline at end of file