diff --git a/src/linearization.jl b/src/linearization.jl
index 411620e8f3..62cb2b3db0 100644
--- a/src/linearization.jl
+++ b/src/linearization.jl
@@ -157,7 +157,7 @@ end
 """
 $(TYPEDSIGNATURES)
 
-Linearize the wrapped system at the point given by `(u, p, t)`.
+Linearize the wrapped system at the point given by `(unknowns, p, t)`.
 """
 function (linfun::LinearizationFunction)(u, p, t)
     if eltype(p) <: Pair
@@ -182,7 +182,7 @@ function (linfun::LinearizationFunction)(u, p, t)
             linfun.prob, integ, fun, linfun.initializealg, Val(true);
             linfun.initialize_kwargs...)
         if !success
-            error("Initialization algorithm $(linfun.initializealg) failed with `u = $u` and `p = $p`.")
+            error("Initialization algorithm $(linfun.initializealg) failed with `unknowns = $u` and `p = $p`.")
         end
         uf = SciMLBase.UJacobianWrapper(fun, t, p)
         fg_xz = ForwardDiff.jacobian(uf, u)
@@ -211,7 +211,10 @@ function (linfun::LinearizationFunction)(u, p, t)
         g_u = fg_u[linfun.alge_idxs, :],
         h_x = h_xz[:, linfun.diff_idxs],
         h_z = h_xz[:, linfun.alge_idxs],
-        h_u = h_u)
+        h_u = h_u,
+        x = u,
+        p,
+        t)
 end
 
 """
@@ -319,7 +322,7 @@ function CommonSolve.solve(prob::LinearizationProblem; allow_input_derivatives =
     p = parameter_values(prob)
     t = current_time(prob)
     linres = prob.f(u0, p, t)
-    f_x, f_z, g_x, g_z, f_u, g_u, h_x, h_z, h_u = linres
+    f_x, f_z, g_x, g_z, f_u, g_u, h_x, h_z, h_u, x, p, t = linres
 
     nx, nu = size(f_u)
     nz = size(f_z, 2)
@@ -356,7 +359,7 @@ function CommonSolve.solve(prob::LinearizationProblem; allow_input_derivatives =
         end
     end
 
-    (; A, B, C, D)
+    (; A, B, C, D), (; x, p, t)
 end
 
 """
@@ -487,8 +490,8 @@ function markio!(state, orig_inputs, inputs, outputs; check = true)
 end
 
 """
-    (; A, B, C, D), simplified_sys = linearize(sys, inputs, outputs; t=0.0, op = Dict(), allow_input_derivatives = false, zero_dummy_der=false, kwargs...)
-    (; A, B, C, D) = linearize(simplified_sys, lin_fun; t=0.0, op = Dict(), allow_input_derivatives = false, zero_dummy_der=false)
+    (; A, B, C, D), simplified_sys, extras = linearize(sys, inputs, outputs; t=0.0, op = Dict(), allow_input_derivatives = false, zero_dummy_der=false, kwargs...)
+    (; A, B, C, D), extras = linearize(simplified_sys, lin_fun; t=0.0, op = Dict(), allow_input_derivatives = false, zero_dummy_der=false)
 
 Linearize `sys` between `inputs` and `outputs`, both vectors of variables. Return a NamedTuple with the matrices of a linear statespace representation
 on the form
@@ -510,6 +513,8 @@ If `allow_input_derivatives = false`, an error will be thrown if input derivati
 
 `zero_dummy_der` can be set to automatically set the operating point to zero for all dummy derivatives.
 
+The return value `extras` is a NamedTuple `(; x, p, t)` containing the result of the initialization problem that was solved to determine the operating point.
+
 See also [`linearization_function`](@ref) which provides a lower-level interface, [`linearize_symbolic`](@ref) and [`ModelingToolkit.reorder_unknowns`](@ref).
 
 See extended help for an example.
@@ -616,7 +621,8 @@ function linearize(sys, inputs, outputs; op = Dict(), t = 0.0,
         zero_dummy_der,
         op,
         kwargs...)
-    linearize(ssys, lin_fun; op, t, allow_input_derivatives), ssys
+    mats, extras = linearize(ssys, lin_fun; op, t, allow_input_derivatives)
+    mats, ssys, extras
 end
 
 """
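To illustrate the new return values introduced above, here is a minimal sketch of the updated `linearize` / `LinearizationProblem` usage. The toy system is patterned on `test/downstream/linearize.jl`; the variable names, default values, and the `CommonSolve` import are illustrative assumptions, not part of this diff.

```julia
# Minimal sketch (assumed setup, modeled on the downstream linearize test).
using ModelingToolkit
using ModelingToolkit: t_nounits as t, D_nounits as D
using CommonSolve: solve  # `solve(::LinearizationProblem)` is defined via CommonSolve above

# Simple first-order plant with proportional feedback.
@variables x(t)=0 y(t)=0 u(t)=0 r(t)=0
@parameters kp = 1
eqs = [u ~ kp * (r - y)
       D(x) ~ -x + u
       y ~ x]
@named sys = ODESystem(eqs, t)

# `linearize` now returns the statespace matrices, the simplified system, and
# an `extras` NamedTuple `(; x, p, t)` with the operating point found by the
# initialization problem.
lsys, ssys, extras = linearize(sys, [r], [y])
lsys.A, lsys.B, lsys.C, lsys.D   # linear statespace matrices
extras.x, extras.p, extras.t     # unknowns, parameters, and time at the operating point

# The problem-based interface now returns the matrices together with `extras`.
lprob = LinearizationProblem(sys, [r], [y])
lsys2, extras2 = solve(lprob)
```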
diff --git a/src/systems/analysis_points.jl b/src/systems/analysis_points.jl
index 135bf81729..375753bb1b 100644
--- a/src/systems/analysis_points.jl
+++ b/src/systems/analysis_points.jl
@@ -862,7 +862,8 @@ for f in [:get_sensitivity, :get_comp_sensitivity, :get_looptransfer]
             sys, ap, args...; loop_openings = [], system_modifier = identity, kwargs...)
         lin_fun, ssys = $(utility_fun)(
             sys, ap, args...; loop_openings, system_modifier, kwargs...)
-        ModelingToolkit.linearize(ssys, lin_fun), ssys
+        mats, extras = ModelingToolkit.linearize(ssys, lin_fun)
+        mats, ssys, extras
     end
 end
 
diff --git a/test/downstream/inversemodel.jl b/test/downstream/inversemodel.jl
index 32d5ee87ec..1308655da2 100644
--- a/test/downstream/inversemodel.jl
+++ b/test/downstream/inversemodel.jl
@@ -131,13 +131,13 @@ sol = solve(prob, Rodas5P())
 
 # we need to provide `op` so the initialization system knows what to hold constant
 # the values don't matter
-Sf, simplified_sys = Blocks.get_sensitivity_function(model, :y; op); # This should work without providing an operating opint containing a dummy derivative
+Sf, simplified_sys = get_sensitivity_function(model, :y; op); # This should work without providing an operating opint containing a dummy derivative
 x = state_values(Sf)
 p = parameter_values(Sf)
 # If this somehow passes, mention it on
 # https://github.com/SciML/ModelingToolkit.jl/issues/2786
 matrices1 = Sf(x, p, 0)
-matrices2, _ = Blocks.get_sensitivity(model, :y; op); # Test that we get the same result when calling the higher-level API
+matrices2, _ = get_sensitivity(model, :y; op); # Test that we get the same result when calling the higher-level API
 @test matrices1.f_x ≈ matrices2.A[1:6, 1:6]
 nsys = get_named_sensitivity(model, :y; op) # Test that we get the same result when calling an even higher-level API
 @test matrices2.A ≈ nsys.A
@@ -145,14 +145,14 @@ nsys = get_named_sensitivity(model, :y; op) # Test that we get the same result w
 
 # Test the same thing for comp sensitivities
 # This should work without providing an operating opint containing a dummy derivative
-Sf, simplified_sys = Blocks.get_comp_sensitivity_function(model, :y; op);
+Sf, simplified_sys = get_comp_sensitivity_function(model, :y; op);
 x = state_values(Sf)
 p = parameter_values(Sf)
 # If this somehow passes, mention it on
 # https://github.com/SciML/ModelingToolkit.jl/issues/2786
 matrices1 = Sf(x, p, 0)
 # Test that we get the same result when calling the higher-level API
-matrices2, _ = Blocks.get_comp_sensitivity(model, :y; op)
+matrices2, _ = get_comp_sensitivity(model, :y; op)
 @test matrices1.f_x ≈ matrices2.A[1:6, 1:6]
 # Test that we get the same result when calling an even higher-level API
 nsys = get_named_comp_sensitivity(model, :y; op)
@@ -172,15 +172,16 @@ nsys = get_named_comp_sensitivity(model, :y; op)
     output = :y
     # we need to provide `op` so the initialization system knows which
     # values to hold constant
-    lin_fun, ssys = Blocks.get_sensitivity_function(model, output; op = op1)
-    matrices1 = linearize(ssys, lin_fun, op = op1)
-    matrices2 = linearize(ssys, lin_fun, op = op2)
+    lin_fun, ssys = get_sensitivity_function(model, output; op = op1)
+    matrices1, extras1 = linearize(ssys, lin_fun, op = op1)
+    matrices2, extras2 = linearize(ssys, lin_fun, op = op2)
+    @test extras1.x != extras2.x
     S1f = ss(matrices1...)
     S2f = ss(matrices2...)
     @test S1f != S2f
 
-    matrices1, ssys = Blocks.get_sensitivity(model, output; op = op1)
-    matrices2, ssys = Blocks.get_sensitivity(model, output; op = op2)
+    matrices1, ssys = get_sensitivity(model, output; op = op1)
+    matrices2, ssys = get_sensitivity(model, output; op = op2)
     S1 = ss(matrices1...)
     S2 = ss(matrices2...)
     @test S1 != S2
diff --git a/test/downstream/linearize.jl b/test/downstream/linearize.jl
index 898040fb4f..5c9a81ef32 100644
--- a/test/downstream/linearize.jl
+++ b/test/downstream/linearize.jl
@@ -14,14 +14,15 @@ eqs = [u ~ kp * (r - y)
 @named sys = ODESystem(eqs, t)
 
-lsys, ssys = linearize(sys, [r], [y])
+lsys, ssys, extras = linearize(sys, [r], [y])
 lprob = LinearizationProblem(sys, [r], [y])
-lsys2 = solve(lprob)
+lsys2, extras2 = solve(lprob)
 
 @test lsys.A[] == lsys2.A[] == -2
 @test lsys.B[] == lsys2.B[] == 1
 @test lsys.C[] == lsys2.C[] == 1
 @test lsys.D[] == lsys2.D[] == 0
+@test extras == extras2
 
 lsys, ssys = linearize(sys, [r], [r])