
Commit 79be897

Merge pull request #196 from jump-dev/jg/tests
Simplify Solvers
2 parents: 041cffe + 78c21c6

18 files changed, +163 -106 lines

Project.toml

Lines changed: 0 additions & 16 deletions
@@ -23,19 +23,3 @@ LazyArrays = "0.21, 0.22"
 MathOptInterface = "1"
 MathOptSetDistances = "0.2"
 julia = "1.6"
-
-[extras]
-DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab"
-Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
-GLPK = "60bf3e95-4087-53dc-ae20-288a0d20c6a6"
-Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9"
-MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458"
-OSQP = "ab2f91bb-94b4-55e3-9ba0-7f65df51de79"
-Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
-Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
-SCS = "c946c3f1-0d1f-5ce8-9dea-7daa1f7e2d13"
-Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
-Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
-
-[targets]
-test = ["Test", "DelimitedFiles", "SCS", "Random", "OSQP", "GLPK", "Ipopt", "Statistics", "Flux", "MLDatasets", "Plots"]

README.md

Lines changed: 2 additions & 2 deletions
@@ -23,9 +23,9 @@ julia> ]
 ```julia
 using JuMP
 import DiffOpt
-import GLPK
+import HiGHS
 
-model = JuMP.Model(() -> DiffOpt.diff_optimizer(GLPK.Optimizer))
+model = JuMP.Model(() -> DiffOpt.diff_optimizer(HiGHS.Optimizer))
 ```
 
 2. Define your model and solve it a single line.
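For orientation, a hedged end-to-end sketch of the quoted README snippet with the new HiGHS backend; the variable, constraint, and objective below are illustrative additions, not part of the README itself.

```julia
using JuMP
import DiffOpt
import HiGHS

# Wrap HiGHS in DiffOpt's differentiable optimizer, as in the updated README line.
model = JuMP.Model(() -> DiffOpt.diff_optimizer(HiGHS.Optimizer))
set_silent(model)

# Illustrative LP standing in for "define your model and solve it".
@variable(model, x >= 0)
@constraint(model, cons, 2x >= 1)
@objective(model, Min, x)
optimize!(model)
```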

docs/Project.toml

Lines changed: 1 addition & 2 deletions
@@ -2,14 +2,13 @@
 ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
 Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
 Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
-GLPK = "60bf3e95-4087-53dc-ae20-288a0d20c6a6"
+HiGHS = "87dc4568-4c63-4d18-b0c0-bb2238e4078b"
 Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9"
 JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 Literate = "98b081ad-f1c9-55d3-8b20-4c87d4299306"
 MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458"
 MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee"
-OSQP = "ab2f91bb-94b4-55e3-9ba0-7f65df51de79"
 Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
 SCS = "c946c3f1-0d1f-5ce8-9dea-7daa1f7e2d13"
 Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"

docs/src/examples/Thermal_Generation_Dispatch_Example.jl

Lines changed: 6 additions & 6 deletions
@@ -27,7 +27,7 @@ using Test
 using JuMP
 import DiffOpt
 import LinearAlgebra: dot
-import GLPK
+import HiGHS
 import MathOptInterface
 import Plots
 const MOI = MathOptInterface
@@ -36,7 +36,7 @@ const MOI = MathOptInterface
 
 function generate_model(d::Float64; g_sup::Vector{Float64}, c_g::Vector{Float64}, c_ϕ::Float64)
 ## Creation of the Model and Parameters
-model = Model(() -> DiffOpt.diff_optimizer(GLPK.Optimizer))
+model = Model(() -> DiffOpt.diff_optimizer(HiGHS.Optimizer))
 set_silent(model)
 I = length(g_sup)
 
@@ -116,20 +116,20 @@ d = 0.0:0.1:80
 d_size = length(d)
 c_g = [1.0, 3.0, 5.0]
 c_ϕ = 10.0
+;
 
 # Generate models for each demand `d`
-models = generate_model.(d; g_sup = g_sup, c_g = c_g, c_ϕ = c_ϕ)
+models = generate_model.(d; g_sup = g_sup, c_g = c_g, c_ϕ = c_ϕ);
 
 # Get the results of models with the DiffOpt Forward and Backward context
 
 result_forward = diff_forward.(models)
-
 optimize!.(models)
-result_backward = diff_backward.(models)
+result_backward = diff_backward.(models);
 
 # Organization of results to plot
 # Initialize data_results that will contain every result
-data_results = Array{Float64,3}(undef, 2, d_size, 2*(I+1))
+data_results = Array{Float64,3}(undef, 2, d_size, 2*(I+1));
 
 # Populate the data_results array
 for k in 1:d_size
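A note on the trailing semicolons added above, offered as a hedged reading of the change: these example scripts are rendered with Literate.jl (listed in docs/Project.toml earlier in this diff), and in the generated pages the value of a cell's last expression is echoed unless the line ends with `;`. The snippet below is purely illustrative:

```julia
# In a Literate.jl/Documenter example page:
d = 0.0:0.1:80
demand = collect(d)    # without `;`, the rendered page echoes all 801 entries
demand = collect(d);   # the trailing `;` keeps that long output out of the docs
```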

docs/src/examples/autotuning-ridge.jl

Lines changed: 4 additions & 4 deletions
@@ -26,7 +26,7 @@
 
 using JuMP # The mathematical programming modelling language
 import DiffOpt # JuMP extension for differentiable optimization
-import OSQP # Optimization solver that handles quadratic programs
+import Ipopt # Optimization solver that handles quadratic programs
 import Plots # Graphing tool
 import LinearAlgebra: norm, dot
 import Random
@@ -66,7 +66,7 @@ function fit_ridge(model, X, y, α)
 dot(err_term, err_term) / (2 * N * D) + α * dot(w, w) / (2 * D),
 )
 optimize!(model)
-@assert termination_status(model) == MOI.OPTIMAL
+@assert termination_status(model) in [MOI.OPTIMAL, MOI.LOCALLY_SOLVED, MOI.ALMOST_LOCALLY_SOLVED]
 return w
 end
 
@@ -76,7 +76,7 @@ end
 αs = 0.00:0.01:0.50
 mse_test = Float64[]
 mse_train = Float64[]
-model = Model(() -> DiffOpt.diff_optimizer(OSQP.Optimizer))
+model = Model(() -> DiffOpt.diff_optimizer(Ipopt.Optimizer))
 (Ntest, D) = size(X_test)
 (Ntrain, D) = size(X_train)
 for α in αs
@@ -146,7 +146,7 @@ function descent(α0, max_iters=100; fixed_step = 0.01, grad_tol=1e-3)
 test_loss = Float64[]
 α = α0
 N, D = size(X_test)
-model = Model(() -> DiffOpt.diff_optimizer(OSQP.Optimizer))
+model = Model(() -> DiffOpt.diff_optimizer(Ipopt.Optimizer))
 for iter in 1:max_iters
 w = fit_ridge(model, X_train, y_train, α)
 = value.(w)
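The broadened `@assert` above matches how Ipopt reports success: as an interior-point method it certifies local optimality, so a solved convex QP comes back as `LOCALLY_SOLVED` rather than `OPTIMAL`. A minimal hedged check, with a made-up one-variable quadratic objective standing in for the ridge model:

```julia
using JuMP
import DiffOpt
import Ipopt
import MathOptInterface
const MOI = MathOptInterface

model = Model(() -> DiffOpt.diff_optimizer(Ipopt.Optimizer))
set_silent(model)
@variable(model, w)
@objective(model, Min, (w - 1.0)^2)   # toy stand-in for the ridge objective
optimize!(model)
# Accept the same statuses the example now allows.
@assert termination_status(model) in
        [MOI.OPTIMAL, MOI.LOCALLY_SOLVED, MOI.ALMOST_LOCALLY_SOLVED]
```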

docs/src/examples/chainrules_unit.jl

Lines changed: 7 additions & 7 deletions
@@ -14,7 +14,7 @@ using JuMP
 import DiffOpt
 import Plots
 import LinearAlgebra:
-import GLPK
+import HiGHS
 import ChainRulesCore
 
 # ## Unit commitment problem
@@ -41,7 +41,7 @@ import ChainRulesCore
 
 function unit_commitment(
 load1_demand, load2_demand, gen_costs, noload_costs;
-model = Model(GLPK.Optimizer), silent=false)
+model = Model(HiGHS.Optimizer), silent=false)
 MOI.set(model, MOI.Silent(), silent)
 
 ## Problem data
@@ -96,7 +96,7 @@ function unit_commitment(
 return JuMP.value.(p.data)
 end
 
-m = Model(GLPK.Optimizer)
+m = Model(HiGHS.Optimizer)
 @show unit_commitment(
 [1.0, 1.2, 1.4, 1.6], [1.0, 1.2, 1.4, 1.6],
 [1000.0, 1500.0], [500.0, 1000.0],
@@ -142,9 +142,9 @@ function ChainRulesCore.frule(
 (_, Δload1_demand, Δload2_demand, Δgen_costs, Δnoload_costs),
 ::typeof(unit_commitment),
 load1_demand, load2_demand, gen_costs, noload_costs;
-optimizer=GLPK.Optimizer,
+optimizer=HiGHS.Optimizer,
 )
-## creating the UC model with a DiffOpt optimizer wrapper around GLPK
+## creating the UC model with a DiffOpt optimizer wrapper around HiGHS
 model = Model(() -> DiffOpt.diff_optimizer(optimizer))
 ## building and solving the main model
 pv = unit_commitment(
@@ -212,7 +212,7 @@ noload_costs = [500.0, 1000.0];
 function ChainRulesCore.rrule(
 ::typeof(unit_commitment),
 load1_demand, load2_demand, gen_costs, noload_costs;
-optimizer=GLPK.Optimizer,
+optimizer=HiGHS.Optimizer,
 silent=false)
 model = Model(() -> DiffOpt.diff_optimizer(optimizer))
 ## solve the forward UC problem
@@ -253,7 +253,7 @@ end
 (pv, pullback_unit_commitment) = ChainRulesCore.rrule(
 unit_commitment,
 load1_demand, load2_demand, gen_costs, noload_costs,
-optimizer=GLPK.Optimizer,
+optimizer=HiGHS.Optimizer,
 silent=true,
 )
 dpv = 0 * pv
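Since the hunks above switch the `frule`/`rrule` keyword default to HiGHS, here is a hedged sketch of calling the example's custom forward rule directly. It assumes the example file's `unit_commitment` and its `frule` are already defined; the data vectors reuse the values shown earlier in this diff, while the tangent choice (a unit tangent on the generation costs) is purely illustrative:

```julia
import ChainRulesCore
import HiGHS

load1_demand = [1.0, 1.2, 1.4, 1.6]
load2_demand = [1.0, 1.2, 1.4, 1.6]
gen_costs = [1000.0, 1500.0]
noload_costs = [500.0, 1000.0]

# Forward-mode call: returns the primal dispatch and its sensitivity to the tangents.
pv, Δpv = ChainRulesCore.frule(
    (ChainRulesCore.NoTangent(), zero.(load1_demand), zero.(load2_demand), ones(2), zeros(2)),
    unit_commitment,
    load1_demand, load2_demand, gen_costs, noload_costs;
    optimizer = HiGHS.Optimizer,
)
```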

docs/src/examples/custom-relu.jl

Lines changed: 3 additions & 3 deletions
@@ -11,7 +11,7 @@
 
 using JuMP
 import DiffOpt
-import OSQP
+import Ipopt
 import ChainRulesCore
 import Flux
 import Statistics
@@ -23,7 +23,7 @@ import Base.Iterators: repeated
 # Return the solution of the problem.
 function matrix_relu(
 y::AbstractArray{T};
-model = Model(() -> DiffOpt.diff_optimizer(OSQP.Optimizer))
+model = Model(() -> DiffOpt.diff_optimizer(Ipopt.Optimizer))
 ) where T
 _x = zeros(size(y))
 N = length(y[:, 1])
@@ -47,7 +47,7 @@ end
 function ChainRulesCore.rrule(
 ::typeof(matrix_relu),
 y::AbstractArray{T};
-model = Model(() -> DiffOpt.diff_optimizer(OSQP.Optimizer))
+model = Model(() -> DiffOpt.diff_optimizer(Ipopt.Optimizer))
 ) where T
 pv = matrix_relu(y, model = model)
 function pullback_matrix_relu(dl_dx)
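A hedged usage sketch for the updated example, assuming the file's `matrix_relu` helper (shown above) is defined; the input size is made up for illustration:

```julia
# matrix_relu computes ReLU(y) by solving a small nonnegative-projection QP
# through DiffOpt + Ipopt, so its output matches max.(y, 0) up to solver tolerance.
y = randn(10, 5)
x = matrix_relu(y)
@assert size(x) == size(y)
@assert all(x .>= -1e-6)
```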

docs/src/examples/sensitivity-analysis-ridge.jl

Lines changed: 4 additions & 4 deletions
@@ -35,7 +35,7 @@
 using JuMP
 import DiffOpt
 import Random
-import OSQP
+import Ipopt
 import Plots
 import LinearAlgebra: normalize!, dot
 
@@ -55,13 +55,13 @@ Y = w * X .+ b + 0.8 * randn(N);
 # The helper method `fit_ridge` defines and solves the corresponding model.
 # The ridge regression is modeled with quadratic programming
 # (quadratic objective and linear constraints) and solved in generic methods
-# of OSQP. This is not the standard way of solving the ridge regression problem
+# of Ipopt. This is not the standard way of solving the ridge regression problem
 # this is done here for didactic purposes.
 
 function fit_ridge(X, Y, alpha = 0.1)
 N = length(Y)
-## Initialize a JuMP Model with OSQP solver
-model = Model(() -> DiffOpt.diff_optimizer(OSQP.Optimizer))
+## Initialize a JuMP Model with Ipopt solver
+model = Model(() -> DiffOpt.diff_optimizer(Ipopt.Optimizer))
 set_silent(model)
 @variable(model, w) # angular coefficient
 @variable(model, b) # linear coefficient
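Following the comment quoted in the hunk above, a hedged sketch of how such a ridge QP can be written with a quadratic objective and linear constraints; this is an illustration under those assumptions, not the file's actual `fit_ridge` body, so it uses a different name:

```julia
using JuMP
import DiffOpt
import Ipopt
import LinearAlgebra: dot

function fit_ridge_sketch(X, Y, alpha = 0.1)
    N = length(Y)
    model = Model(() -> DiffOpt.diff_optimizer(Ipopt.Optimizer))
    set_silent(model)
    @variable(model, w)          # angular coefficient
    @variable(model, b)          # linear coefficient
    @variable(model, e[1:N])     # residuals, kept explicit so the constraints stay linear
    @constraint(model, [i in 1:N], e[i] == Y[i] - w * X[i] - b)
    @objective(model, Min, dot(e, e) / (2 * N) + alpha * w^2 / 2)
    optimize!(model)
    return value(w), value(b)
end
```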
