diff --git a/.github/workflows/CI_ecosystem.yml b/.github/workflows/CI_ecosystem.yml
new file mode 100644
index 000000000..a3fa15887
--- /dev/null
+++ b/.github/workflows/CI_ecosystem.yml
@@ -0,0 +1,45 @@
+name: CI_ecosystem
+on:
+  #pull_request:
+  #  branches:
+  #    - main
+  #push:
+  #  branches:
+  #    - main
+  workflow_dispatch:
+jobs:
+  test:
+    name: Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }}
+    runs-on: ${{ matrix.os }}
+    env:
+      JULIA_NUM_THREADS: 8
+      JULIA_EXTENDED_TESTS: true
+    strategy:
+      fail-fast: false
+      matrix:
+        version:
+          - '1.7.3'
+        os:
+          - ubuntu-latest
+          - macos-latest
+          - windows-latest
+        arch:
+          - x64
+    steps:
+      - uses: actions/checkout@v2
+      - uses: julia-actions/setup-julia@v1
+        with:
+          version: ${{ matrix.version }}
+          arch: ${{ matrix.arch }}
+      - uses: actions/cache@v1
+        env:
+          cache-name: cache-artifacts
+        with:
+          path: ~/.julia/artifacts
+          key: ${{ runner.os }}-test_extended-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }}
+      - uses: julia-actions/julia-buildpkg@v1
+      - uses: julia-actions/julia-runtest@v1
+      - uses: julia-actions/julia-processcoverage@v1
+      - uses: codecov/codecov-action@v2
+        with:
+          file: lcov.info
\ No newline at end of file
diff --git a/.github/workflows/CI_extended.yml b/.github/workflows/CI_extended.yml
index ec2291910..f6857a517 100644
--- a/.github/workflows/CI_extended.yml
+++ b/.github/workflows/CI_extended.yml
@@ -1,4 +1,4 @@
-name: CI
+name: CI_extended
 on:
   pull_request:
     branches:
diff --git a/Project.toml b/Project.toml
index bbe7c3ea2..36f3226fc 100644
--- a/Project.toml
+++ b/Project.toml
@@ -5,6 +5,7 @@ version = "0.1.0"
 
 [deps]
 ChainRules = "082447d4-558c-5d27-93f4-14fc19e9eca2"
+ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
 DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
 DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab"
 Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
diff --git a/docs/make.jl b/docs/make.jl
index 36f370482..8bd4baa59 100644
--- a/docs/make.jl
+++ b/docs/make.jl
@@ -11,11 +11,13 @@ makedocs(
             "tutorials/specification/specification.md",
             "tutorials/specification/graph_interface.md",
             "tutorials/specification/ram_matrices.md",
-            "tutorials/specification/parameter_table.md"],
+            "tutorials/specification/parameter_table.md"
+            ],
         "Model Construction" => [
             "tutorials/construction/construction.md",
            "tutorials/construction/outer_constructor.md",
-            "tutorials/construction/build_by_parts.md"],
+            "tutorials/construction/build_by_parts.md"
+            ],
         "Optimization Backends" => [
             "tutorials/backends/optim.md",
             "tutorials/backends/nlopt.md"
diff --git a/docs/src/developer/loss.md b/docs/src/developer/loss.md
index 7ca142f95..30461bdb3 100644
--- a/docs/src/developer/loss.md
+++ b/docs/src/developer/loss.md
@@ -267,4 +267,6 @@ model_ml = SemFiniteDiff(
 )
 
 model_fit = sem_fit(model_ml)
-```
\ No newline at end of file
+```
+
+If you want to differentiate your own loss functions via automatic differentiation, check out the [AutoDiffSEM](https://github.com/StructuralEquationModels/AutoDiffSEM) package (spoiler alert: it's really easy).
\ No newline at end of file
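As context for the `loss.md` addition above: a custom loss function whose gradients you do not want to derive by hand can simply be wrapped in a `SemFiniteDiff` model, as described later in this patch. A minimal sketch, assuming `partable` and `data` are a specification and dataset you already have, and `MyLoss` is a purely hypothetical loss type defined following the rest of `loss.md`:

```julia
using StructuralEquationModels

# MyLoss is hypothetical; only an objective value is assumed to be implemented for it
model = SemFiniteDiff(
    specification = partable,
    data = data,
    loss = MyLoss()
)

model_fit = sem_fit(model)   # gradients fall back to finite difference approximation
```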
diff --git a/docs/src/developer/sem.md b/docs/src/developer/sem.md
index da16e6c3e..c6b9f0523 100644
--- a/docs/src/developer/sem.md
+++ b/docs/src/developer/sem.md
@@ -1,20 +1,18 @@
 # Custom model types
 
-The abstract supertype for all models is `AbstractSem`, which has two subtypes, `AbstractSemSingle{O, I, L, D}` and `AbstractSemCollection`. Currently, there are three subtypes of `AbstractSemSingle`: `Sem`, `SemFiniteDiff` and `SemForwardDiff`. All subtypes of `AbstractSemSingle` should have at least observed, imply, loss and optimizer fields, and share their types (`{O, I, L, D}`) with the parametric abstract supertype. For example, the `SemFiniteDiff` type is implemented as
+The abstract supertype for all models is `AbstractSem`, which has two subtypes, `AbstractSemSingle{O, I, L, D}` and `AbstractSemCollection`. Currently, there are two subtypes of `AbstractSemSingle`: `Sem` and `SemFiniteDiff`. All subtypes of `AbstractSemSingle` should have at least observed, imply, loss and optimizer fields, and share their types (`{O, I, L, D}`) with the parametric abstract supertype. For example, the `SemFiniteDiff` type is implemented as
 
 ```julia
 struct SemFiniteDiff{
     O <: SemObserved,
     I <: SemImply,
     L <: SemLoss,
-    D <: SemOptimizer,
-    G} <: AbstractSemSingle{O, I, L, D}
+    D <: SemOptimizer} <: AbstractSemSingle{O, I, L, D}
     observed::O
     imply::I
     loss::L
-    optimizer::D
-    has_gradient::G
-end
+    optimizer::D
+end
 ```
 
 Additionally, we need to define a method to compute at least the objective value, and if you want to use gradient based optimizers (which you most probably will), we need also to define a method to compute the gradient. For example, the respective fallback methods for all `AbstractSemSingle` models are defined as
diff --git a/docs/src/internals/types.md b/docs/src/internals/types.md
index 9b1763fe4..488127b29 100644
--- a/docs/src/internals/types.md
+++ b/docs/src/internals/types.md
@@ -6,7 +6,6 @@ The type hierarchy is implemented in `"src/types.jl"`.
 - `AbstractSemSingle{O, I, L, D} <: AbstractSem` is an abstract parametric type that is a supertype of all single models
     - `Sem`: models that do not need automatic differentiation or finite difference approximation
     - `SemFiniteDiff`: models whose gradients and/or hessians should be computed via finite difference approximation
-    - `SemForwardDiff`: models whose gradients and/or hessians should be computed via forward mode automatic differentiation
 - `AbstractSemCollection <: AbstractSem` is an abstract supertype of all models that contain multiple `AbstractSem` submodels
 
 Every `AbstractSemSingle` has to have `SemObserved`, `SemImply`, `SemLoss` and `SemOptimizer` fields (and can have additional fields).
diff --git a/docs/src/tutorials/collection/collection.md b/docs/src/tutorials/collection/collection.md
index 48cda618d..84fa00500 100644
--- a/docs/src/tutorials/collection/collection.md
+++ b/docs/src/tutorials/collection/collection.md
@@ -13,7 +13,7 @@ model_1 = Sem(...)
 
 model_2 = SemFiniteDiff(...)
 
-model_3 = SemForwardDiff(...)
+model_3 = Sem(...)
 
 model_ensemble = SemEnsemble(model_1, model_2, model_3; optimizer = ...)
 ```
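To make the updated `collection.md` example concrete, here is a sketch of an ensemble that mixes a model with analytic gradients and one that falls back to finite differences; `spec_1`, `spec_2`, `data_1`, and `data_2` are hypothetical specification objects and datasets:

```julia
model_1 = Sem(specification = spec_1, data = data_1)             # analytic gradients
model_2 = SemFiniteDiff(specification = spec_2, data = data_2)   # finite difference fallback

model_ensemble = SemEnsemble(model_1, model_2; optimizer = SemOptimizerOptim())
ensemble_fit = sem_fit(model_ensemble)
```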
diff --git a/docs/src/tutorials/construction/build_by_parts.md b/docs/src/tutorials/construction/build_by_parts.md
index 9bd7acad9..5a56f1ccf 100644
--- a/docs/src/tutorials/construction/build_by_parts.md
+++ b/docs/src/tutorials/construction/build_by_parts.md
@@ -64,10 +64,4 @@ optimizer = SemOptimizerOptim()
 
 # model ------------------------------------------------------------------------------------
 model_ml = Sem(observed, imply_ram, loss_ml, optimizer)
-```
-
-Different models may need additional arguments (just check the help of the specific model types):
-
-```@example build
-model_ml_fd = SemFiniteDiff(observed, imply_ram, loss_ml, optimizer, Val(false))
 ```
\ No newline at end of file
diff --git a/docs/src/tutorials/construction/outer_constructor.md b/docs/src/tutorials/construction/outer_constructor.md
index 47bc848a8..21f6bfd3f 100644
--- a/docs/src/tutorials/construction/outer_constructor.md
+++ b/docs/src/tutorials/construction/outer_constructor.md
@@ -115,37 +115,18 @@ Extended help is available with `??`
 
 ## Optimize loss functions without analytic gradient
 
-For loss functions without analytic gradients, it is possible to use finite difference approximation or forward mode automatic differentiation.
+For loss functions without analytic gradients, it is possible to use finite difference approximation or automatic differentiation.
 
 All loss functions provided in the package do have analytic gradients (and some even hessians or approximations thereof), so there is no need do use this feature if you are only working with them. However, if you implement your own loss function, you do not have to provide analytic gradients.
 
-In that case, you may construct your model just as before, but swap the `Sem` constructor for either `SemFiniteDiff` or `SemForwardDiff`. For example
+This page is about finite difference approximation. For information about how to use automatic differentiation, see the documentation of the [AutoDiffSEM](https://github.com/StructuralEquationModels/AutoDiffSEM) package.
 
-```julia
-model = SemFiniteDiff(
-    specification = partable,
-    data = data
-)
-```
-
-constructs a model that will use finite difference approximation if you estimate the parameters via `sem_fit(model)`.
-Both `SemFiniteDiff` and `SemForwardDiff` have an additional keyword argument, `has_gradient = ...` that can be set to `true` to indicate that the model has analytic gradients, and only the hessian should be computed via finite difference approximation / automatic differentiation.
-For example
+To use finite difference approximation, you may construct your model just as before, but swap the `Sem` constructor for `SemFiniteDiff`. For example
 
 ```julia
-using Optim, LineSearches
-
 model = SemFiniteDiff(
     specification = partable,
-    data = data,
-    has_gradient = true,
-    algorithm = Newton()
+    data = data
 )
 ```
 
-will construct a model that, when fitted, will use [Newton's Method](https://julianlsolvers.github.io/Optim.jl/stable/#algo/newton/) from the `Optim.jl` package with gradients computed analytically and hessians computed via finite difference approximation.
-
-
-!!! note "Using automatic differentiation"
-    You can construct a `SemForwardDiff` to use forward-mode automatic differentiation for the gradients.
-    However, at the moment, this does not work with the imply types in our package
-    (e.g. it only works with models that use `ImplyEmpty`).
\ No newline at end of file
+constructs a model that will use finite difference approximation if you estimate the parameters via `sem_fit(model)`.
\ No newline at end of file
diff --git a/src/StructuralEquationModels.jl b/src/StructuralEquationModels.jl
index 939351b6d..9249cd2aa 100644
--- a/src/StructuralEquationModels.jl
+++ b/src/StructuralEquationModels.jl
@@ -4,7 +4,7 @@ using LinearAlgebra, Optim,
     NLSolversBase, Statistics, SparseArrays, Symbolics,
     NLopt, FiniteDiff, ForwardDiff, PrettyTables,
     Distributions, StenoGraphs, LazyArtifacts, DelimitedFiles,
-    DataFrames
+    DataFrames, Zygote, ChainRulesCore
 
 import DataFrames: DataFrame
 export *, ==, @StenoGraph, AbstractEdge, AbstractNode, DirectedEdge, Edge, EdgeModifier,
@@ -82,10 +82,10 @@ include("frontend/fit/standard_errors/bootstrap.jl")
 
 export AbstractSem,
-    AbstractSemSingle, AbstractSemCollection, Sem, SemFiniteDiff, SemForwardDiff,
+    AbstractSemSingle, AbstractSemCollection, Sem, SemFiniteDiff,
     SemEnsemble,
     SemImply,
-    RAMSymbolic, RAM, ImplyEmpty, imply,
+    RAMSymbolic, RAMSymbolicZ, RAM, ImplyEmpty, imply,
     start_val, start_fabin3, start_simple, start_parameter_table,
     SemLoss,
diff --git a/src/additional_functions/start_val/start_fabin3.jl b/src/additional_functions/start_val/start_fabin3.jl
index fbc4f81e6..a15c3a7fe 100644
--- a/src/additional_functions/start_val/start_fabin3.jl
+++ b/src/additional_functions/start_val/start_fabin3.jl
@@ -8,7 +8,7 @@ function start_fabin3 end
 
 # splice model and loss functions
 function start_fabin3(
-    model::Union{Sem, SemForwardDiff, SemFiniteDiff};
+    model::AbstractSemSingle;
     kwargs...)
     return start_fabin3(
         model.observed,
@@ -20,12 +20,12 @@ end
 
 function start_fabin3(
     observed,
-    imply::Union{RAM, RAMSymbolic},
+    imply,
     optimizer,
     args...;
     kwargs...)
     return start_fabin3(
-        imply.ram_matrices,
+        ram_matrices(imply),
         obs_cov(observed),
         obs_mean(observed))
 end
@@ -33,7 +33,7 @@ end
 # SemObservedMissing
 function start_fabin3(
     observed::SemObservedMissing,
-    imply::Union{RAM, RAMSymbolic},
+    imply,
     optimizer,
     args...;
     kwargs...)
@@ -43,7 +43,7 @@ function start_fabin3(
     end
 
     return start_fabin3(
-        imply.ram_matrices,
+        ram_matrices(imply),
         observed.em_model.Σ,
         observed.em_model.μ)
 end
diff --git a/src/additional_functions/start_val/start_partable.jl b/src/additional_functions/start_val/start_partable.jl
index 956b1353b..78ac4eed9 100644
--- a/src/additional_functions/start_val/start_partable.jl
+++ b/src/additional_functions/start_val/start_partable.jl
@@ -6,7 +6,7 @@ Return a vector of starting values taken from `parameter_table`.
 function start_parameter_table end
 
 # splice model and loss functions
-function start_parameter_table(model::Union{Sem, SemForwardDiff, SemFiniteDiff}; kwargs...)
+function start_parameter_table(model::AbstractSemSingle; kwargs...)
     return start_parameter_table(
         model.observed,
         model.imply,
@@ -16,9 +16,9 @@ function start_parameter_table(model::Union{Sem, SemForwardDiff, SemFiniteDiff};
 end
 
 # RAM(Symbolic)
-function start_parameter_table(observed, imply::Union{RAM, RAMSymbolic}, optimizer, args...; kwargs...)
+function start_parameter_table(observed, imply, optimizer, args...; kwargs...)
     return start_parameter_table(
-        imply.ram_matrices;
+        ram_matrices(imply);
         kwargs...)
 end
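The `start_fabin3` and `start_parameter_table` methods above now dispatch on any imply type that provides a `ram_matrices` accessor, as added for `RAM` and `RAMSymbolic` further down in this patch. A sketch of what a hypothetical custom imply type would need to opt in (the type and field names are illustrative only):

```julia
# hypothetical imply type; only the ram_matrices accessor is required
# by the starting-value machinery shown above
struct MyImply{R} <: SemImply
    ram_matrices::R
    # ... whatever else the imply type needs
end

ram_matrices(imply::MyImply) = imply.ram_matrices
```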
diff --git a/src/additional_functions/start_val/start_simple.jl b/src/additional_functions/start_val/start_simple.jl
index a8d5fdcd3..248b517df 100644
--- a/src/additional_functions/start_val/start_simple.jl
+++ b/src/additional_functions/start_val/start_simple.jl
@@ -16,7 +16,7 @@ Return a vector of simple starting values.
 function start_simple end
 
 # Single Models ----------------------------------------------------------------------------
-function start_simple(model::Union{Sem, SemForwardDiff, SemFiniteDiff}; kwargs...)
+function start_simple(model::AbstractSemSingle; kwargs...)
     return start_simple(
         model.observed,
         model.imply,
@@ -25,7 +25,7 @@ function start_simple(model::Union{Sem, SemForwardDiff, SemFiniteDiff}; kwargs..
         kwargs...)
 end
 
-function start_simple(observed, imply::Union{RAM, RAMSymbolic}, optimizer, args...; kwargs...)
+function start_simple(observed, imply, optimizer, args...; kwargs...)
     return start_simple(imply.ram_matrices; kwargs...)
 end
diff --git a/src/additional_functions/start_val/start_val.jl b/src/additional_functions/start_val/start_val.jl
index 7c239e6c9..120212c49 100644
--- a/src/additional_functions/start_val/start_val.jl
+++ b/src/additional_functions/start_val/start_val.jl
@@ -9,7 +9,7 @@ function start_val end
 
 # Single Models ----------------------------------------------------------------------------
 # splice model and loss functions
-start_val(model::Union{Sem, SemFiniteDiff, SemForwardDiff}; kwargs...) =
+start_val(model::AbstractSemSingle; kwargs...) =
     start_val(
         model,
         model.observed,
@@ -22,7 +22,7 @@ start_val(model::Union{Sem, SemFiniteDiff, SemForwardDiff}; kwargs...) =
 start_val(
     model,
     observed,
-    imply::Union{RAM, RAMSymbolic},
+    imply,
     optimizer,
     args...;
     kwargs...) =
diff --git a/src/frontend/specification/Sem.jl b/src/frontend/specification/Sem.jl
index ac7634fe9..c91d8c677 100644
--- a/src/frontend/specification/Sem.jl
+++ b/src/frontend/specification/Sem.jl
@@ -25,7 +25,6 @@ function SemFiniteDiff(;
     imply::I = RAM,
     loss::L = SemML,
     optimizer::D = SemOptimizerOptim,
-    has_gradient = false,
     kwargs...) where {O, I, L, D}
 
     kwargs = Dict{Symbol, Any}(kwargs...)
@@ -34,30 +33,11 @@ function SemFiniteDiff(;
 
     observed, imply, loss, optimizer = get_fields!(kwargs, observed, imply, loss, optimizer)
 
-    sem = SemFiniteDiff(observed, imply, loss, optimizer, Val(has_gradient))
+    sem = SemFiniteDiff(observed, imply, loss, optimizer)
 
     return sem
 end
 
-function SemForwardDiff(;
-    observed::O = SemObservedData,
-    imply::I = RAM,
-    loss::L = SemML,
-    optimizer::D = SemOptimizerOptim,
-    has_gradient = false,
-    kwargs...) where {O, I, L, D}
-
-    kwargs = Dict{Symbol, Any}(kwargs...)
-
-    set_field_type_kwargs!(kwargs, observed, imply, loss, optimizer, O, I, D)
-
-    observed, imply, loss, optimizer = get_fields!(kwargs, observed, imply, loss, optimizer)
-
-    sem = SemForwardDiff(observed, imply, loss, optimizer, Val(has_gradient))
-
-    return sem
-end
-
 ############################################################################################
 # functions
 ############################################################################################
@@ -162,18 +142,6 @@ function Base.show(io::IO, sem::SemFiniteDiff{O, I, L, D}) where {O, I, L, D}
     print(io, "   optimizer: $(nameof(D)) \n")
 end
 
-function Base.show(io::IO, sem::SemForwardDiff{O, I, L, D}) where {O, I, L, D}
-    lossfuntypes = @. string(nameof(typeof(sem.loss.functions)))
-    lossfuntypes = "   ".*lossfuntypes.*("\n")
-    print(io, "Structural Equation Model : Forward Mode Autodiff\n")
-    print(io, "- Loss Functions \n")
-    print(io, lossfuntypes...)
-    print(io, "- Fields \n")
-    print(io, "   observed: $(nameof(O)) \n")
-    print(io, "   imply: $(nameof(I)) \n")
-    print(io, "   optimizer: $(nameof(D)) \n")
-end
-
 function Base.show(io::IO, loss::SemLoss)
     lossfuntypes = @. string(nameof(typeof(loss.functions)))
     lossfuntypes = "   ".*lossfuntypes.*("\n")
diff --git a/src/imply/RAM/generic.jl b/src/imply/RAM/generic.jl
index 7c25e0a09..95803b858 100644
--- a/src/imply/RAM/generic.jl
+++ b/src/imply/RAM/generic.jl
@@ -315,6 +315,8 @@ I_A⁻¹(imply::RAM) = imply.I_A⁻¹ # only for gradient available!
 
 has_meanstructure(imply::RAM) = imply.has_meanstructure
 
+ram_matrices(imply::RAM) = imply.ram_matrices
+
 ############################################################################################
 ### additional functions
 ############################################################################################
diff --git a/src/imply/RAM/symbolic.jl b/src/imply/RAM/symbolic.jl
index 51c977407..bcb4c3b67 100644
--- a/src/imply/RAM/symbolic.jl
+++ b/src/imply/RAM/symbolic.jl
@@ -262,6 +262,8 @@ end
 
 has_meanstructure(imply::RAMSymbolic) = imply.has_meanstructure
 
+ram_matrices(imply::RAMSymbolic) = imply.ram_matrices
+
 ############################################################################################
 ### additional functions
 ############################################################################################
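The `SemFiniteDiff` methods in `src/objective_gradient_hessian.jl` below keep the existing `FiniteDiff.jl` fallback: the gradient and hessian are approximated from repeated objective evaluations. A self-contained sketch of that pattern on a toy objective standing in for `objective!(model, x)`:

```julia
using FiniteDiff

f(x) = sum(abs2, x)   # stand-in for x -> objective!(model, x)

x = [1.0, 2.0, 3.0]
g = similar(x)
H = zeros(length(x), length(x))

FiniteDiff.finite_difference_gradient!(g, f, x)   # g ≈ [2.0, 4.0, 6.0]
FiniteDiff.finite_difference_hessian!(H, f, x)    # H ≈ 2I
```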
diff --git a/src/objective_gradient_hessian.jl b/src/objective_gradient_hessian.jl
index 7a704522a..8d33ad804 100644
--- a/src/objective_gradient_hessian.jl
+++ b/src/objective_gradient_hessian.jl
@@ -46,62 +46,20 @@ function objective_gradient_hessian!(gradient, hessian, model::AbstractSemSingle
 end
 
 ############################################################################################
-# methods for SemFiniteDiff and SemForwardDiff
+# methods for SemFiniteDiff
 ############################################################################################
 
-# gradient methods call themselves with the additional model.has_gradient argument
-
-gradient!(gradient, model::Union{SemFiniteDiff, SemForwardDiff}, par) =
-    gradient!(gradient, model, par, model.has_gradient)
-
-objective_gradient!(gradient, model::Union{SemFiniteDiff, SemForwardDiff}, par) =
-    objective_gradient!(gradient, model, par, model.has_gradient)
-
-# methods where autodiff takes place
-# - these are specific to the method of automatic differentiation
-
-# FiniteDiff
-gradient!(gradient, model::SemFiniteDiff, par, has_gradient::Val{false}) =
+gradient!(gradient, model::SemFiniteDiff, par) =
     FiniteDiff.finite_difference_gradient!(gradient, x -> objective!(model, x), par)
 
 hessian!(hessian, model::SemFiniteDiff, par) =
     FiniteDiff.finite_difference_hessian!(hessian, x -> objective!(model, x), par)
 
-# ForwardDiff
-gradient!(gradient, model::SemForwardDiff, par, has_gradient::Val{false}) =
-    ForwardDiff.gradient!(gradient, x -> objective!(model, x), par)
-
-hessian!(hessian, model::SemForwardDiff, par) =
-    ForwardDiff.hessian!(hessian, x -> objective!(model, x), par)
-
-# gradient!
-function gradient!(
-    gradient,
-    model::Union{SemFiniteDiff, SemForwardDiff},
-    parameters,
-    has_gradient::Val{true})
-    fill!(gradient, zero(eltype(gradient)))
-    gradient!(imply(model), parameters, model)
-    gradient!(gradient, loss(model), parameters, model)
-end
-
-# objective_gradient!
-function objective_gradient!(
-    gradient,
-    model::Union{SemFiniteDiff, SemForwardDiff},
-    parameters,
-    has_gradient::Val{true})
-    fill!(gradient, zero(eltype(gradient)))
-    objective_gradient!(imply(model), parameters, model)
-    return objective_gradient!(gradient, loss(model), parameters, model)
-end
 
 function objective_gradient!(
     gradient,
-    model::Union{SemFiniteDiff, SemForwardDiff},
-    parameters,
-    has_gradient::Val{false})
-    fill!(gradient, zero(eltype(gradient)))
+    model::SemFiniteDiff,
+    parameters)
     gradient!(gradient, model, parameters)
     return objective!(model, parameters)
 end
@@ -110,16 +68,13 @@ end
 function gradient_hessian!(
     gradient,
     hessian,
-    model::Union{SemFiniteDiff, SemForwardDiff},
+    model::SemFiniteDiff,
     parameters)
-    fill!(gradient, zero(eltype(gradient)))
-    fill!(hessian, zero(eltype(hessian)))
     gradient!(gradient, model, parameters)
     hessian!(hessian, model, parameters)
 end
 
-function objective_hessian!(hessian, model::Union{SemFiniteDiff, SemForwardDiff}, parameters)
-    fill!(hessian, zero(eltype(hessian)))
+function objective_hessian!(hessian, model::SemFiniteDiff, parameters)
     hessian!(hessian, model, parameters)
     return objective!(model, parameters)
 end
@@ -127,10 +82,8 @@ end
 function objective_gradient_hessian!(
     gradient,
     hessian,
-    model::Union{SemFiniteDiff, SemForwardDiff},
+    model::SemFiniteDiff,
     parameters)
-    fill!(gradient, zero(eltype(gradient)))
-    fill!(hessian, zero(eltype(hessian)))
     hessian!(hessian, model, parameters)
     return objective_gradient!(gradient, model, parameters)
 end
diff --git a/src/types.jl b/src/types.jl
index 895d00632..e506f65e5 100644
--- a/src/types.jl
+++ b/src/types.jl
@@ -111,7 +111,7 @@ end
 # automatic differentiation
 ############################################################################################
 """
-    SemFiniteDiff(;observed = SemObservedData, imply = RAM, loss = SemML, optimizer = SemOptimizerOptim, has_gradient = false, kwargs...)
+    SemFiniteDiff(;observed = SemObservedData, imply = RAM, loss = SemML, optimizer = SemOptimizerOptim, kwargs...)
 
 Constructor for `SemFiniteDiff`.
 All additional kwargs are passed down to the constructors for the observed, imply, loss and optimizer fields.
@@ -121,49 +121,18 @@ All additional kwargs are passed down to the constructors for the observed, impl
 - `imply`: object of subtype `SemImply` or a constructor.
 - `loss`: object of subtype `SemLossFunction`s or constructor; or a tuple of such.
 - `optimizer`: object of subtype `SemOptimizer` or a constructor.
-- `has_gradient::Bool`: are analytic gradients available for this model.
 
 Returns a Sem with fields
 - `observed::SemObserved`: Stores observed data, sample statistics, etc. See also [`SemObserved`](@ref).
 - `imply::SemImply`: Computes model implied statistics, like Σ, μ, etc. See also [`SemImply`](@ref).
 - `loss::SemLoss`: Computes the objective and gradient of a sum of loss functions. See also [`SemLoss`](@ref).
 - `optimizer::SemOptimizer`: Connects the model to the optimizer. See also [`SemOptimizer`](@ref).
-- `has_gradient::Val{Bool}`: signifies if analytic gradients are available for this model.
""" -struct SemFiniteDiff{O <: SemObserved, I <: SemImply, L <: SemLoss, D <: SemOptimizer, G} <: AbstractSemSingle{O, I, L, D} +struct SemFiniteDiff{O <: SemObserved, I <: SemImply, L <: SemLoss, D <: SemOptimizer} <: AbstractSemSingle{O, I, L, D} observed::O imply::I loss::L optimizer::D - has_gradient::G -end - -""" - SemForwardDiff(;observed = SemObservedData, imply = RAM, loss = SemML, optimizer = SemOptimizerOptim, has_gradient = false, kwargs...) - -Constructor for `SemForwardDiff`. -All additional kwargs are passed down to the constructors for the observed, imply, loss and optimizer fields. - -# Arguments -- `observed`: object of subtype `SemObserved` or a constructor. -- `imply`: object of subtype `SemImply` or a constructor. -- `loss`: object of subtype `SemLossFunction`s or constructor; or a tuple of such. -- `optimizer`: object of subtype `SemOptimizer` or a constructor. -- `has_gradient::Bool`: are analytic gradients available for this model. - -Returns a Sem with fields -- `observed::SemObserved`: Stores observed data, sample statistics, etc. See also [`SemObserved`](@ref). -- `imply::SemImply`: Computes model implied statistics, like Σ, μ, etc. See also [`SemImply`](@ref). -- `loss::SemLoss`: Computes the objective and gradient of a sum of loss functions. See also [`SemLoss`](@ref). -- `optimizer::SemOptimizer`: Connects the model to the optimizer. See also [`SemOptimizer`](@ref). -- `has_gradient::Val{Bool}`: signifies if analytic gradients are available for this model. -""" -struct SemForwardDiff{O <: SemObserved, I <: SemImply, L <: SemLoss, D <: SemOptimizer, G} <: AbstractSemSingle{O, I, L, D} - observed::O - imply::I - loss::L - optimizer::D - has_gradient::G end ############################################################################################ @@ -285,12 +254,4 @@ loss(model::AbstractSemSingle) = model.loss Returns the optimizer part of a model. """ -optimizer(model::AbstractSemSingle) = model.optimizer - -""" - has_gradient(model::AbstractSemSingle) -> Val{bool} - -Returns whether the model has analytic gradients. -""" -has_gradient(model::SemForwardDiff) = model.has_gradient -has_gradient(model::SemFiniteDiff) = model.has_gradient \ No newline at end of file +optimizer(model::AbstractSemSingle) = model.optimizer \ No newline at end of file