diff --git a/docs/Project.toml b/docs/Project.toml
index 5363260e7..1a2caad91 100644
--- a/docs/Project.toml
+++ b/docs/Project.toml
@@ -46,6 +46,9 @@ SymbolicAnalysis = "4297ee4d-0239-47d8-ba5d-195ecdf594fe"
 Symbolics = "0c5d862f-8b57-4792-8d23-62f2024744c7"
 Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c"
 Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
+BlackBoxOptim = "a134a8b2-14d6-55f6-9291-3336d3ab0209"
+Metaheuristics = "bcdb8e00-2c21-11e9-3065-2b553b22f898"
+Evolutionary = "86b6b26d-c046-49b6-aa0b-5f0f74682bd6"

 [compat]
 AmplNLWriter = "1"
@@ -93,3 +96,7 @@ SymbolicAnalysis = "0.3"
 Symbolics = "6"
 Tracker = ">= 0.2"
 Zygote = ">= 0.5"
+BlackBoxOptim = "0.6"
+Metaheuristics = "3"
+Evolutionary = "0.11"
+
diff --git a/docs/src/optimization_packages/blackboxoptim.md b/docs/src/optimization_packages/blackboxoptim.md
index ca5b2385b..85f17cf93 100644
--- a/docs/src/optimization_packages/blackboxoptim.md
+++ b/docs/src/optimization_packages/blackboxoptim.md
@@ -67,3 +67,21 @@ prob = Optimization.OptimizationProblem(f, x0, p, lb = [-1.0, -1.0], ub = [1.0,
 sol = solve(prob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 100000,
     maxtime = 1000.0)
 ```
+
+## Multi-objective optimization
+The multi-objective optimizer is `BBO_borg_moea()`. The objective function should return a tuple (or vector) of objective values, and you should specify the fitness scheme (typically Pareto fitness) and the number of objectives. Usage is otherwise the same; for example:
+
+```@example MOO-BBO
+using OptimizationBBO, Optimization, BlackBoxOptim
+using SciMLBase: MultiObjectiveOptimizationFunction
+u0 = [0.25, 0.25]
+opt = OptimizationBBO.BBO_borg_moea()
+function multi_obj_func(x, p)
+    f1 = (1.0 - x[1])^2 + 100.0 * (x[2] - x[1]^2)^2  # Rosenbrock function
+    f2 = -20.0 * exp(-0.2 * sqrt(0.5 * (x[1]^2 + x[2]^2))) - exp(0.5 * (cos(2π * x[1]) + cos(2π * x[2]))) + exp(1) + 20.0  # Ackley function
+    return (f1, f2)
+end
+mof = MultiObjectiveOptimizationFunction(multi_obj_func)
+prob = Optimization.OptimizationProblem(mof, u0; lb = [0.0, 0.0], ub = [2.0, 2.0])
+sol = solve(prob, opt, num_dimensions = 2, fitness_scheme = ParetoFitnessScheme{2}(is_minimizing = true))
+```
diff --git a/docs/src/optimization_packages/evolutionary.md b/docs/src/optimization_packages/evolutionary.md
index 9fa582c74..86cf27b71 100644
--- a/docs/src/optimization_packages/evolutionary.md
+++ b/docs/src/optimization_packages/evolutionary.md
@@ -41,3 +41,20 @@ f = OptimizationFunction(rosenbrock)
 prob = Optimization.OptimizationProblem(f, x0, p, lb = [-1.0, -1.0], ub = [1.0, 1.0])
 sol = solve(prob, Evolutionary.CMAES(μ = 40, λ = 100))
 ```
+
+## Multi-objective optimization
+The Rosenbrock and Ackley functions can be optimized simultaneously using `Evolutionary.NSGA2()` as follows:
+
+```@example MOO-Evolutionary
+using Optimization, OptimizationEvolutionary, Evolutionary
+function func(x, p = nothing)::Vector{Float64}
+    f1 = (1.0 - x[1])^2 + 100.0 * (x[2] - x[1]^2)^2  # Rosenbrock function
+    f2 = -20.0 * exp(-0.2 * sqrt(0.5 * (x[1]^2 + x[2]^2))) - exp(0.5 * (cos(2π * x[1]) + cos(2π * x[2]))) + exp(1) + 20.0  # Ackley function
+    return [f1, f2]
+end
+initial_guess = [1.0, 1.0]
+obj_func = MultiObjectiveOptimizationFunction(func)
+algorithm = OptimizationEvolutionary.NSGA2()
+problem = OptimizationProblem(obj_func, initial_guess)
+result = solve(problem, algorithm)
+```
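+
+The objective can also be supplied in in-place form, with a `cost_prototype` indicating the number of objectives. Below is a minimal sketch mirroring the example above; the in-place constructor form follows the one exercised in the package's test suite:
+
+```@example MOO-Evolutionary
+function func!(cost, x, p = nothing)
+    cost[1] = (1.0 - x[1])^2 + 100.0 * (x[2] - x[1]^2)^2  # Rosenbrock function
+    cost[2] = -20.0 * exp(-0.2 * sqrt(0.5 * (x[1]^2 + x[2]^2))) - exp(0.5 * (cos(2π * x[1]) + cos(2π * x[2]))) + exp(1) + 20.0  # Ackley function
+    return nothing
+end
+obj_func_inplace = MultiObjectiveOptimizationFunction(func!; cost_prototype = zeros(2))
+result_inplace = solve(OptimizationProblem(obj_func_inplace, initial_guess), algorithm)
+```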
diff --git a/docs/src/optimization_packages/metaheuristics.md b/docs/src/optimization_packages/metaheuristics.md
index ae1694bcc..f95ad505a 100644
--- a/docs/src/optimization_packages/metaheuristics.md
+++ b/docs/src/optimization_packages/metaheuristics.md
@@ -70,3 +70,46 @@ sol = solve(prob, ECA(), use_initial = true, maxiters = 100000, maxtime = 1000.0
 ### With Constraint Equations

 While `Metaheuristics.jl` supports such constraints, `Optimization.jl` currently does not relay these constraints.
+
+
+## Multi-objective optimization
+The ZDT1 test function can be optimized using `Metaheuristics.jl` as follows:
+
+```@example MOO-Metaheuristics
+using Optimization, OptimizationMetaheuristics, Metaheuristics
+using Statistics: mean  # `mean` is not in Base
+function zdt1(x)
+    f1 = x[1]
+    g = 1 + 9 * mean(x[2:end])
+    h = 1 - sqrt(f1 / g)
+    f2 = g * h
+    # In this example, we have no constraints
+    gx = [0.0] # Inequality constraints (not used)
+    hx = [0.0] # Equality constraints (not used)
+    return [f1, f2], gx, hx
+end
+multi_obj_fun = MultiObjectiveOptimizationFunction((x, p) -> zdt1(x))
+
+# Define the problem bounds
+lower_bounds = [0.0, 0.0, 0.0]
+upper_bounds = [1.0, 1.0, 1.0]
+
+# Define the initial guess
+initial_guess = [0.5, 0.5, 0.5]
+
+# Create the optimization problem
+prob = OptimizationProblem(multi_obj_fun, initial_guess; lb = lower_bounds, ub = upper_bounds)
+
+nobjectives = 2
+npartitions = 100
+
+# Reference points (Das and Dennis's method)
+weights = Metaheuristics.gen_ref_dirs(nobjectives, npartitions)
+
+# Choose an algorithm and solve the problem
+sol1 = solve(prob, Metaheuristics.NSGA2(); maxiters = 100, use_initial = true)
+sol2 = solve(prob, Metaheuristics.NSGA3(); maxiters = 100, use_initial = true)
+sol3 = solve(prob, Metaheuristics.SPEA2(); maxiters = 100, use_initial = true)
+sol4 = solve(prob, Metaheuristics.CCMO(NSGA2(N = 100, p_m = 0.001)))
+sol5 = solve(prob, Metaheuristics.MOEAD_DE(weights, options = Options(debug = false, iterations = 250)); maxiters = 100, use_initial = true)
+sol6 = solve(prob, Metaheuristics.SMS_EMOA(); maxiters = 100, use_initial = true)
+```
diff --git a/docs/src/optimization_packages/ode.md b/docs/src/optimization_packages/ode.md
index f89d348dc..b3ab031bc 100644
--- a/docs/src/optimization_packages/ode.md
+++ b/docs/src/optimization_packages/ode.md
@@ -28,8 +28,8 @@ p = []
 f_manual = OptimizationFunction(f, SciMLBase.NoAD(); grad = g!)
 prob_manual = OptimizationProblem(f_manual, x0)

-opt = ODEGradientDescent(dt=0.01)
-sol = solve(prob_manual, opt; maxiters=50_000)
+opt = ODEGradientDescent()
+sol = solve(prob_manual, opt; dt=0.01, maxiters=50_000)

 @show sol.u
 @show sol.objective
@@ -39,7 +39,7 @@ sol = solve(prob_manual, opt; maxiters=50_000)
 All provided optimizers are **gradient-based local optimizers** that solve optimization problems by integrating gradient-based ODEs to convergence:

-* `ODEGradientDescent(dt=...)` — performs basic gradient descent using the explicit Euler method. This is a simple and efficient method suitable for small-scale or well-conditioned problems.
+* `ODEGradientDescent()` — performs basic gradient descent using the explicit Euler method. This is a simple and efficient method suitable for small-scale or well-conditioned problems.

 * `RKChebyshevDescent()` — uses the ROCK2 solver, a stabilized explicit Runge-Kutta method suitable for stiff problems. It allows larger step sizes while maintaining stability.

@@ -47,7 +47,15 @@ All provided optimizers are **gradient-based local optimizers** that solve optim
 * `HighOrderDescent()` — applies Vern7, a high-order (7th-order) explicit Runge-Kutta method for even more accurate integration. This can be beneficial for problems requiring high precision.

-You can also define a custom optimizer using the generic `ODEOptimizer(solver; dt=nothing)` constructor by supplying any ODE solver supported by [OrdinaryDiffEq.jl](https://docs.sciml.ai/DiffEqDocs/stable/solvers/ode_solve/).
+## DAE-based Optimizers
+
+In addition to the ODE-based optimizers, OptimizationODE.jl provides optimizers for differential-algebraic equation (DAE) constrained problems:
+
+* `DAEMassMatrix()` — uses the Rodas5 solver (from OrdinaryDiffEq.jl) for DAE problems with a mass matrix formulation.
+
+* `DAEIndexing()` — uses the IDA solver (from Sundials.jl) for DAE problems with index variable support.
+
+You can also define a custom optimizer using the generic `ODEOptimizer(solver)` or `DAEOptimizer(solver)` constructor by supplying any ODE or DAE solver supported by [OrdinaryDiffEq.jl](https://docs.sciml.ai/DiffEqDocs/stable/solvers/ode_solve/) or [Sundials.jl](https://github.com/SciML/Sundials.jl).
-## DAE-based Optimizers
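+
+For instance, the generic constructor pairs the same gradient-flow setup with any supported integrator. A minimal sketch reusing `prob_manual` from the example above (`Tsit5` is an arbitrary choice of solver):
+
+```julia
+using OptimizationODE, OrdinaryDiffEq
+
+opt = ODEOptimizer(Tsit5())  # plug in any supported ODE solver
+sol = solve(prob_manual, opt; maxiters = 50_000)
+
+@show sol.u
+@show sol.objective
+```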
diff --git a/lib/OptimizationBBO/src/OptimizationBBO.jl b/lib/OptimizationBBO/src/OptimizationBBO.jl
index 57f874356..c39465edf 100644
--- a/lib/OptimizationBBO/src/OptimizationBBO.jl
+++ b/lib/OptimizationBBO/src/OptimizationBBO.jl
@@ -55,13 +55,31 @@ function __map_optimizer_args(prob::Optimization.OptimizationCache, opt::BBO;
         abstol::Union{Number, Nothing} = nothing,
         reltol::Union{Number, Nothing} = nothing,
         verbose::Bool = false,
+        num_dimensions::Union{Integer, Nothing} = nothing,
+        fitness_scheme = nothing,  # e.g. a BlackBoxOptim.ParetoFitnessScheme
         kwargs...)
     if !isnothing(reltol)
         @warn "common reltol is currently not used by $(opt)"
     end
-    mapped_args = (; kwargs...)
-    mapped_args = (; mapped_args..., Method = opt.method,
-        SearchRange = [(prob.lb[i], prob.ub[i]) for i in 1:length(prob.lb)])
+
+    # Determine the number of objectives for multi-objective problems
+    if isa(prob.f, MultiObjectiveOptimizationFunction)
+        num_objectives = length(prob.f.cost_prototype)
+        mapped_args = (; kwargs...)
+        mapped_args = (; mapped_args..., Method = opt.method,
+            SearchRange = [(prob.lb[i], prob.ub[i]) for i in 1:length(prob.lb)],
+            NumDimensions = length(prob.lb),
+            NumObjectives = num_objectives)
+        # A FitnessScheme, if present, is carried by the optimizer, not the function
+        if hasproperty(opt, :FitnessScheme)
+            mapped_args = (; mapped_args..., FitnessScheme = opt.FitnessScheme)
+        end
+    else
+        mapped_args = (; kwargs...)
+        mapped_args = (; mapped_args..., Method = opt.method,
+            SearchRange = [(prob.lb[i], prob.ub[i]) for i in 1:length(prob.lb)],
+            NumDimensions = length(prob.lb))
+    end

     if !isnothing(callback)
         mapped_args = (; mapped_args..., CallbackFunction = callback,
@@ -86,6 +104,16 @@ function __map_optimizer_args(prob::Optimization.OptimizationCache, opt::BBO;
         mapped_args = (; mapped_args..., TraceMode = :silent)
     end

+    # Apply user-supplied or default NumDimensions/FitnessScheme for
+    # multi-objective runs; the defaults fall back to the search-space
+    # dimension and a minimizing Pareto scheme over `num_objectives`.
+    if isa(prob.f, MultiObjectiveOptimizationFunction)
+        if isnothing(num_dimensions)
+            num_dimensions = length(prob.lb)
+        end
+        if isnothing(fitness_scheme)
+            fitness_scheme = BlackBoxOptim.ParetoFitnessScheme{num_objectives}(is_minimizing = true)
+        end
+        mapped_args = (; mapped_args..., NumDimensions = num_dimensions,
+            FitnessScheme = fitness_scheme)
+    end
+
     return mapped_args
 end
@@ -116,8 +144,7 @@ function SciMLBase.__solve(cache::Optimization.OptimizationCache{
         LC, UC, S,
-        O <:
-        BBO,
+        O <: BBO,
         D, P, C
@@ -151,16 +178,37 @@ function SciMLBase.__solve(cache::Optimization.OptimizationCache{
     maxiters = Optimization._check_and_convert_maxiters(cache.solver_args.maxiters)
     maxtime = Optimization._check_and_convert_maxtime(cache.solver_args.maxtime)

-    _loss = function (θ)
-        cache.f(θ, cache.p)
+    # Multi-objective problems: evaluate the in-place or out-of-place form as appropriate
+    if isa(cache.f, MultiObjectiveOptimizationFunction)
+        if SciMLBase.isinplace(cache.f)
+            _loss = θ -> (cost = similar(cache.f.cost_prototype); cache.f.f(cost, θ, cache.p); cost)
+        else
+            _loss = θ -> cache.f.f(θ, cache.p)
+        end
+    else
+        _loss = θ -> cache.f(θ, cache.p)
     end

+    # `num_dimensions` and `fitness_scheme`, when passed to `solve`, arrive here
+    # through `cache.solver_args` and are mapped to BlackBoxOptim's
+    # `NumDimensions`/`FitnessScheme` inside `__map_optimizer_args`.
     opt_args = __map_optimizer_args(cache, cache.opt;
         callback = cache.callback === Optimization.DEFAULT_CALLBACK ?
                    nothing : _cb,
         cache.solver_args...,
         maxiters = maxiters,
         maxtime = maxtime)

     opt_setup = BlackBoxOptim.bbsetup(_loss; opt_args...)
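A minimal sketch of the caller-side use of the remapped keywords, mirroring the updated tests below (the two-objective `multi_obj_func` is a hypothetical cost; any pair of scalar objectives works):

```julia
using Optimization, OptimizationBBO, BlackBoxOptim
using Optimization.SciMLBase
using Optimization.SciMLBase: MultiObjectiveOptimizationFunction

# Hypothetical two-objective cost (sphere plus a shifted L1 term)
multi_obj_func(x, p) = (sum(x .^ 2), sum(abs.(x .- 1)))

mof = MultiObjectiveOptimizationFunction{false}(multi_obj_func, SciMLBase.NoAD();
    cost_prototype = zeros(2))
prob = OptimizationProblem(mof, [0.25, 0.25]; lb = [0.0, 0.0], ub = [2.0, 2.0])
sol = solve(prob, BBO_borg_moea(), num_dimensions = 2,
    fitness_scheme = ParetoFitnessScheme{2}(is_minimizing = true))
```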
diff --git a/lib/OptimizationBBO/test/runtests.jl b/lib/OptimizationBBO/test/runtests.jl
index 1295465fc..1a6938d48 100644
--- a/lib/OptimizationBBO/test/runtests.jl
+++ b/lib/OptimizationBBO/test/runtests.jl
@@ -1,4 +1,5 @@
 using OptimizationBBO, Optimization, BlackBoxOptim
+using Optimization.SciMLBase
 using Optimization.SciMLBase: MultiObjectiveOptimizationFunction
 using Test

@@ -34,6 +35,43 @@ using Test
         push!(loss_history, fitness)
         return false
     end
+
+    # Initial guess, bounds, and optimizer shared by the multi-objective tests below
+    u0 = [0.25, 0.25]
+    lb = [0.0, 0.0]
+    ub = [2.0, 2.0]
+    opt = OptimizationBBO.BBO_borg_moea()
+
+    @testset "In-place Multi-Objective Optimization" begin
+        function inplace_multi_obj!(cost, x, p)
+            cost[1] = sum(x .^ 2)
+            cost[2] = sum(x .^ 2 .- 10 .* cos.(2π .* x) .+ 10)
+            return nothing
+        end
+        cost_prototype = zeros(2)
+        mof_inplace = MultiObjectiveOptimizationFunction{true}(inplace_multi_obj!, SciMLBase.NoAD(); cost_prototype = cost_prototype)
+        prob_inplace = Optimization.OptimizationProblem(mof_inplace, u0; lb = lb, ub = ub)
+        sol_inplace = solve(prob_inplace, opt, num_dimensions = 2, fitness_scheme = ParetoFitnessScheme{2}(is_minimizing = true))
+        @test sol_inplace ≠ nothing
+        @test length(sol_inplace.objective) == 2
+        @test sol_inplace.objective[1] ≈ 6.9905986e-18 atol=1e-3
+        @test sol_inplace.objective[2] ≈ 1.7763568e-15 atol=1e-3
+    end
+
+    @testset "Custom coalesce for Multi-Objective" begin
+        function multi_obj_tuple(x, p)
+            f1 = sum(x .^ 2)
+            f2 = sum(x .^ 2 .- 10 .* cos.(2π .* x) .+ 10)
+            return (f1, f2)
+        end
+        mof_coalesce = MultiObjectiveOptimizationFunction{false}(multi_obj_tuple, SciMLBase.NoAD(); cost_prototype = zeros(2))
+        prob_coalesce = Optimization.OptimizationProblem(mof_coalesce, u0; lb = lb, ub = ub)
+        sol_coalesce = solve(prob_coalesce, opt, num_dimensions = 2, fitness_scheme = ParetoFitnessScheme{2}(is_minimizing = true))
+        @test sol_coalesce ≠ nothing
+        @test sol_coalesce.objective[1] ≈ 6.9905986e-18 atol=1e-3
+        @test sol_coalesce.objective[2] ≈ 1.7763568e-15 atol=1e-3
+    end
+
     sol = solve(prob, BBO_adaptive_de_rand_1_bin_radiuslimited(), callback = cb)
     # println(fitness_progress_history)
     @test !isempty(fitness_progress_history)

@@ -55,13 +93,7 @@ using Test
         maxtime = 5)
     end

-    # Define the initial guess and bounds
-    u0 = [0.25, 0.25]
-    lb = [0.0, 0.0]
-    ub = [2.0, 2.0]
-
-    # Define the optimizer
-    opt = OptimizationBBO.BBO_borg_moea()
@testset "Multi-Objective Optimization Tests" begin @@ -73,10 +105,10 @@ using Test return (f1, f2) end - mof_1 = MultiObjectiveOptimizationFunction(multi_obj_func_1) + mof_1 = MultiObjectiveOptimizationFunction{false}(multi_obj_func_1, SciMLBase.NoAD(); cost_prototype=zeros(2)) prob_1 = Optimization.OptimizationProblem(mof_1, u0; lb = lb, ub = ub) - sol_1 = solve(prob_1, opt, NumDimensions = 2, - FitnessScheme = ParetoFitnessScheme{2}(is_minimizing = true)) + sol_1 = solve(prob_1, opt, num_dimensions = 2, + fitness_scheme = ParetoFitnessScheme{2}(is_minimizing = true)) @test sol_1 ≠ nothing println("Solution for Sphere and Rastrigin: ", sol_1) @@ -100,7 +132,7 @@ using Test return false end - mof_1 = MultiObjectiveOptimizationFunction(multi_obj_func_1) + mof_1 = MultiObjectiveOptimizationFunction{false}(multi_obj_func_1, SciMLBase.NoAD(); cost_prototype=zeros(2)) prob_1 = Optimization.OptimizationProblem(mof_1, u0; lb = lb, ub = ub) sol_1 = solve(prob_1, opt, NumDimensions = 2, FitnessScheme = ParetoFitnessScheme{2}(is_minimizing = true), @@ -126,10 +158,10 @@ using Test return (f1, f2) end - mof_2 = MultiObjectiveOptimizationFunction(multi_obj_func_2) + mof_2 = MultiObjectiveOptimizationFunction{false}(multi_obj_func_2, SciMLBase.NoAD(); cost_prototype=zeros(2)) prob_2 = Optimization.OptimizationProblem(mof_2, u0; lb = lb, ub = ub) - sol_2 = solve(prob_2, opt, NumDimensions = 2, - FitnessScheme = ParetoFitnessScheme{2}(is_minimizing = true)) + sol_2 = solve(prob_2, opt, num_dimensions = 2, + fitness_scheme = ParetoFitnessScheme{2}(is_minimizing = true)) @test sol_2 ≠ nothing println("Solution for Rosenbrock and Ackley: ", sol_2) @@ -146,10 +178,10 @@ using Test return (f1, f2) end - mof_3 = MultiObjectiveOptimizationFunction(multi_obj_func_3) + mof_3 = SciMLBase.MultiObjectiveOptimizationFunction{false}(multi_obj_func_3, SciMLBase.NoAD(); cost_prototype=zeros(2)) prob_3 = Optimization.OptimizationProblem(mof_3, u0; lb = lb, ub = ub) - sol_3 = solve(prob_3, opt, NumDimensions = 2, - FitnessScheme = ParetoFitnessScheme{2}(is_minimizing = true)) + sol_3 = solve(prob_3, opt, num_dimensions = 2, + fitness_scheme = ParetoFitnessScheme{2}(is_minimizing = true)) @test sol_3 ≠ nothing println("Solution for ZDT1: ", sol_3) diff --git a/lib/OptimizationEvolutionary/test/runtests.jl b/lib/OptimizationEvolutionary/test/runtests.jl index 1bd810664..13c0f4fa3 100644 --- a/lib/OptimizationEvolutionary/test/runtests.jl +++ b/lib/OptimizationEvolutionary/test/runtests.jl @@ -40,6 +40,44 @@ Random.seed!(1234) if state.iter % 10 == 0 println(state.u) end + + @testset "In-place Multi-Objective Optimization" begin + function inplace_multi_obj!(cost, x, p) + cost[1] = sum(x .^ 2) + cost[2] = sum(x .^ 2 .- 10 .* cos.(2π .* x) .+ 10) + return nothing + end + u0 = [0.25, 0.25] + cost_prototype = zeros(2) + mof_inplace = MultiObjectiveOptimizationFunction(inplace_multi_obj!; cost_prototype=cost_prototype) + prob_inplace = OptimizationProblem(mof_inplace, u0) + sol_inplace = solve(prob_inplace, NSGA2()) + @test sol_inplace ≠ nothing + @test length(sol_inplace.objective) == 2 + end + + @testset "Custom coalesce for Multi-Objective" begin + function multi_obj_tuple(x, p) + f1 = sum(x .^ 2) + f2 = sum(x .^ 2 .- 10 .* cos.(2π .* x) .+ 10) + return (f1, f2) + end + coalesce_sum(cost, x, p) = sum(cost) + mof_coalesce = MultiObjectiveOptimizationFunction(multi_obj_tuple; coalesce=coalesce_sum) + prob_coalesce = OptimizationProblem(mof_coalesce, u0) + sol_coalesce = solve(prob_coalesce, NSGA2()) + @test sol_coalesce ≠ 
nothing + @test mof_coalesce.coalesce([1.0, 2.0], [0.0, 0.0], nothing) == 3.0 + end + + @testset "Error if in-place MultiObjectiveOptimizationFunction without cost_prototype" begin + function inplace_multi_obj_err!(cost, x, p) + cost[1] = sum(x .^ 2) + cost[2] = sum(x .^ 2 .- 10 .* cos.(2π .* x) .+ 10) + return nothing + end + @test_throws ArgumentError MultiObjectiveOptimizationFunction(inplace_multi_obj_err!) + end return false end solve(prob, CMAES(μ = 40, λ = 100), callback = cb, maxiters = 100)
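
For reference, a minimal sketch of the `coalesce` hook exercised above; the summing scalarization is only an illustrative choice:

```julia
using Optimization
using Optimization.SciMLBase: MultiObjectiveOptimizationFunction

multi_obj_tuple(x, p) = (sum(x .^ 2), sum(x .^ 2 .- 10 .* cos.(2π .* x) .+ 10))
coalesce_sum(cost, x, p) = sum(cost)  # collapse the objective vector to a scalar

mof = MultiObjectiveOptimizationFunction(multi_obj_tuple; coalesce = coalesce_sum)
mof.coalesce([1.0, 2.0], [0.0, 0.0], nothing)  # returns 3.0
```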