@@ -79,7 +79,7 @@ To make the example more realistic we add random normally distributed noise to t
 
 ``` {julia}
 sol = solve(prob, Tsit5(); saveat=0.1)
-odedata = Array(sol) + 0.8 * randn(size(Array(sol)))
+odedata = Array(sol) + 0.5 * randn(size(Array(sol)))
 
 # Plot simulation and noisy observations.
 plot(sol; alpha=0.3)
@@ -98,18 +98,18 @@ Therefore, we write the Lotka-Volterra parameter estimation problem using the Tu
 ``` {julia}
 @model function fitlv(data, prob)
     # Prior distributions.
-    σ ~ InverseGamma(2, 3)
-    α ~ truncated(Normal(1.5, 0.5); lower=0.5, upper=2.5)
-    β ~ truncated(Normal(1.2, 0.5); lower=0, upper=2)
-    γ ~ truncated(Normal(3.0, 0.5); lower=1, upper=4)
-    δ ~ truncated(Normal(1.0, 0.5); lower=0, upper=2)
+    σ ~ InverseGamma(3, 2)
+    α ~ truncated(Normal(1.5, 0.2); lower=0.5, upper=2.5)
+    β ~ truncated(Normal(1.1, 0.2); lower=0, upper=2)
+    γ ~ truncated(Normal(3.0, 0.2); lower=1, upper=4)
+    δ ~ truncated(Normal(1.0, 0.2); lower=0, upper=2)
 
     # Simulate Lotka-Volterra model.
     p = [α, β, γ, δ]
     predicted = solve(prob, Tsit5(); p=p, saveat=0.1)
 
     # Observations.
-    for i in 1:length(predicted)
+    for i in eachindex(predicted)
         data[:, i] ~ MvNormal(predicted[i], σ^2 * I)
     end
 
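For a quick sanity check of the tightened priors above, the model can be instantiated and sampled for a short chain. This is a minimal sketch, assuming the same `sample`/`NUTS` invocation style used in the `sensealg` hunk further down; the chain length and settings here are illustrative, not the tutorial's.

```julia
# Minimal sketch (assumed invocation): condition fitlv on the noisy data and
# draw a short NUTS chain with the default AD backend.
model = fitlv(odedata, prob)
chain = sample(model, NUTS(), 1000; progress=false)
```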
@@ -160,11 +160,11 @@ I.e., we fit the model only to the $y$ variable of the system without providing
 ``` {julia}
 @model function fitlv2(data::AbstractVector, prob)
     # Prior distributions.
-    σ ~ InverseGamma(2, 3)
-    α ~ truncated(Normal(1.5, 0.5); lower=0.5, upper=2.5)
-    β ~ truncated(Normal(1.2, 0.5); lower=0, upper=2)
-    γ ~ truncated(Normal(3.0, 0.5); lower=1, upper=4)
-    δ ~ truncated(Normal(1.0, 0.5); lower=0, upper=2)
+    σ ~ InverseGamma(3, 2)
+    α ~ truncated(Normal(1.5, 0.2); lower=0.5, upper=2.5)
+    β ~ truncated(Normal(1.1, 0.2); lower=0, upper=2)
+    γ ~ truncated(Normal(3.0, 0.2); lower=1, upper=4)
+    δ ~ truncated(Normal(1.0, 0.2); lower=0, upper=2)
 
     # Simulate Lotka-Volterra model but save only the second state of the system (predators).
     p = [α, β, γ, δ]
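The comment above mentions saving only the second state (predators); the corresponding `solve` call is not part of this hunk. Purely as an illustration, a sketch of how that simulation and the scalar observation model could look, assuming the standard `save_idxs` solver keyword from DifferentialEquations.jl:

```julia
# Sketch (assumed, not shown in this hunk): keep only state 2 of the solution
# and model each saved predator value with a univariate Normal.
predicted = solve(prob, Tsit5(); p=p, saveat=0.1, save_idxs=2)

for i in eachindex(predicted)
    data[i] ~ Normal(predicted[i], σ)
end
```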
@@ -260,18 +260,18 @@ Now we define the Turing model for the Lotka-Volterra model with delay and sampl
 ``` {julia}
 @model function fitlv_dde(data, prob)
     # Prior distributions.
-    σ ~ InverseGamma(2, 3)
-    α ~ truncated(Normal(1.5, 0.5); lower=0.5, upper=2.5)
-    β ~ truncated(Normal(1.2, 0.5); lower=0, upper=2)
-    γ ~ truncated(Normal(3.0, 0.5); lower=1, upper=4)
-    δ ~ truncated(Normal(1.0, 0.5); lower=0, upper=2)
+    σ ~ InverseGamma(3, 2)
+    α ~ truncated(Normal(1.5, 0.2); lower=0.5, upper=2.5)
+    β ~ truncated(Normal(1.1, 0.2); lower=0, upper=2)
+    γ ~ truncated(Normal(3.0, 0.2); lower=1, upper=4)
+    δ ~ truncated(Normal(1.0, 0.2); lower=0, upper=2)
 
     # Simulate Lotka-Volterra model.
     p = [α, β, γ, δ]
     predicted = solve(prob, MethodOfSteps(Tsit5()); p=p, saveat=0.1)
 
     # Observations.
-    for i in 1:length(predicted)
+    for i in eachindex(predicted)
         data[:, i] ~ MvNormal(predicted[i], σ^2 * I)
     end
 end
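The `MethodOfSteps(Tsit5())` call above is what makes this a delay differential equation fit. The tutorial's delayed Lotka-Volterra definition is not shown in this diff; purely as an illustration, a DDE problem of the kind `fitlv_dde` consumes could be set up with DelayDiffEq roughly as below (the lag of 1 on the prey term and the constant history are assumptions):

```julia
# Sketch (assumed): Lotka-Volterra with a fixed delay of 1 in the prey growth term.
using DelayDiffEq

function delay_lotka_volterra!(du, u, h, p, t)
    x, y = u
    α, β, γ, δ = p
    du[1] = α * h(p, t - 1)[1] - β * x * y  # prey growth driven by lagged prey
    du[2] = -γ * y + δ * x * y
    return nothing
end

h(p, t) = ones(2)  # constant history for t ≤ 0
prob_dde = DDEProblem(delay_lotka_volterra!, [1.0, 1.0], h, (0.0, 10.0), [1.5, 1.0, 3.0, 1.0])
sol_dde = solve(prob_dde, MethodOfSteps(Tsit5()); saveat=0.1)
```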
@@ -340,18 +340,18 @@ Here we will not choose a `sensealg` and let it use the default choice:
 ``` {julia}
 @model function fitlv_sensealg(data, prob)
     # Prior distributions.
-    σ ~ InverseGamma(2, 3)
-    α ~ truncated(Normal(1.5, 0.5); lower=0.5, upper=2.5)
-    β ~ truncated(Normal(1.2, 0.5); lower=0, upper=2)
-    γ ~ truncated(Normal(3.0, 0.5); lower=1, upper=4)
-    δ ~ truncated(Normal(1.0, 0.5); lower=0, upper=2)
+    σ ~ InverseGamma(3, 2)
+    α ~ truncated(Normal(1.5, 0.2); lower=0.5, upper=2.5)
+    β ~ truncated(Normal(1.1, 0.2); lower=0, upper=2)
+    γ ~ truncated(Normal(3.0, 0.2); lower=1, upper=4)
+    δ ~ truncated(Normal(1.0, 0.2); lower=0, upper=2)
 
     # Simulate Lotka-Volterra model and use a specific algorithm for computing sensitivities.
     p = [α, β, γ, δ]
     predicted = solve(prob; p=p, saveat=0.1)
 
     # Observations.
-    for i in 1:length(predicted)
+    for i in eachindex(predicted)
         data[:, i] ~ MvNormal(predicted[i], σ^2 * I)
     end
 
@@ -361,7 +361,7 @@
 model_sensealg = fitlv_sensealg(odedata, prob)
 
 # Sample a single chain with 1000 samples using Zygote.
-sample(model_sensealg, NUTS(;adtype=AutoZygote()), 1000; progress=false)
+sample(model_sensealg, NUTS(; adtype=AutoZygote()), 1000; progress=false)
 ```
 
 For more examples of adjoint usage on large parameter models, consult the [DiffEqFlux documentation](https://diffeqflux.sciml.ai/dev/).
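For contrast with the default sensitivity choice used above, an explicit `sensealg` could be passed to `solve` inside the model. A minimal sketch, assuming SciMLSensitivity is loaded; the `InterpolatingAdjoint`/`ZygoteVJP` combination is illustrative rather than the tutorial's setting:

```julia
# Sketch (assumed): explicitly request adjoint sensitivities with Zygote
# vector-Jacobian products rather than relying on the automatic default.
using SciMLSensitivity

predicted = solve(
    prob,
    Tsit5();
    p=p,
    saveat=0.1,
    sensealg=InterpolatingAdjoint(; autojacvec=ZygoteVJP()),
)
```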