Skip to content

Commit

Permalink
Added implicit midpoint discretization (#228)
Browse files Browse the repository at this point in the history
* first draft for midpoint

* build

* first test ok

* updated tests (improved checks for initial guess)

* tests ok for midpoint

* fix prof

* types seem fine

* new file midpoint.jl

* tests ok

* ok

* sync from main

* sync from trapeze

* file

* trapz

* tests ok

* sync

* pseudo args

* final sync

* midpoint ready

* retrigger checks

* starting gauss legendre 2

* more tests

* prof

* prof

* use args for pathconstraints

* updated midpoint with args for pathconstraints

* prof

* prof

* NB multiple dispatch DOES seem to increase allocations...

* getter for full time grid

* cleanup

* use disc-specific Args structs

* midpoint ok; trapeze perf closer to main branch

* try to parametrize docp with the discretization ?

* perf for trapeze is back to main branch level; the cause apparently was insufficient specialization in multiple dispatch

* bench

* prof

* better call to get_optim_variable

* @. is not better than individual dot operators for state equation

* added todo for AD in constraints

* fix
  • Loading branch information
PierreMartinon authored Sep 2, 2024
1 parent 9a8af29 commit a050125
Show file tree
Hide file tree
Showing 19 changed files with 912 additions and 504 deletions.
2 changes: 1 addition & 1 deletion Project.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
name = "CTDirect"
uuid = "790bbbee-bee9-49ee-8912-a9de031322d5"
authors = ["Olivier Cots <olivier.cots@enseeiht.fr>"]
version = "0.12.0"
version = "0.12.1"

[deps]
ADNLPModels = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a"
Expand Down
15 changes: 9 additions & 6 deletions benchmark/benchmark.jl
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ function bench(;
precompile = true,
display = false,
verbose = true,
discretization = :trapeze
)

#######################################################
Expand Down Expand Up @@ -66,6 +67,7 @@ function bench(;
linear_solver = linear_solver,
max_iter = 0,
display = display,
discretization = discretization
)
t_precomp += t
end
Expand All @@ -84,6 +86,7 @@ function bench(;
linear_solver = linear_solver,
grid_size = grid_size,
tol = tol,
discretization = discretization
)
if !isnothing(problem[:obj]) && !isapprox(sol.objective, problem[:obj], rtol = 5e-2)
error(
Expand Down Expand Up @@ -115,29 +118,29 @@ function bench(;
end

# +++ put repeat directly in bench()
function bench_average(; repeat = 2, verbose = false, kwargs...)
function bench_average(; repeat = 4, verbose = false, kwargs...)

# execute series of benchmark runs
t_list = []
for i = 1:repeat
t = bench(; verbose = verbose, kwargs...)
append!(t_list, t)
@printf("Run %d / %d: time (s) = %6.2f\n", i, repeat, t)
verbose && @printf("Run %d / %d: time (s) = %6.2f\n", i, repeat, t)
end

# print / return average total time
avg_time = sum(t_list) / length(t_list)
@printf("Average time (s): %6.2f\n", avg_time)
verbose && @printf("Average time (s): %6.2f\n", avg_time)
return avg_time
end

function bench_series(; grid_size_list = [250, 500, 1000, 2500, 5000, 10000], kwargs...)
function bench_series(; grid_size_list = [250, 500, 1000, 2500, 5000], kwargs...)
println(grid_size_list)
t_list = []
for grid_size in grid_size_list
t = bench_average(; grid_size = grid_size, kwargs...)
append!(t_list, t)
@printf("Grid size %d: time (s) = %6.2f\n\n", grid_size, t)
@printf("Grid size %d: time (s) = %6.1f\n", grid_size, t)
end
return t_list
#return t_list
end
46 changes: 26 additions & 20 deletions benchmark/prof.jl
Original file line number Diff line number Diff line change
@@ -1,6 +1,10 @@
# +++ TODO: make function with bools as args ?
# Profiling
include("../test/deps.jl")
using CTDirect
using CTBase

using LinearAlgebra
using NLPModelsIpopt
using BenchmarkTools
#using Traceur
#using Profile
Expand All @@ -25,20 +29,31 @@ println("Load problem ", prob[:name])

if precompile
println("Precompilation")
direct_solve(ocp, grid_size = grid_size, display = false, max_iter = 2)
CTDirect.DOCP_objective(CTDirect.DOCP_initial_guess(docp), docp)
CTDirect.DOCP_constraints!(
zeros(docp.dim_NLP_constraints),
CTDirect.DOCP_initial_guess(docp),
docp,
)
if test_solve
direct_solve(ocp, grid_size = grid_size, display = false, max_iter = 2)
end
if test == :objective
CTDirect.DOCP_objective(CTDirect.DOCP_initial_guess(docp), docp)
else
CTDirect.DOCP_constraints!(zeros(docp.dim_NLP_constraints), CTDirect.DOCP_initial_guess(docp), docp)
end
end

if test == :objective
println("Timed objective")
#@timev CTDirect.DOCP_objective(CTDirect.DOCP_initial_guess(docp), docp)
@btime CTDirect.DOCP_objective(CTDirect.DOCP_initial_guess(docp), docp)
else
println("Timed constraints")
#@timev CTDirect.DOCP_constraints!(zeros(docp.dim_NLP_constraints), CTDirect.DOCP_initial_guess(docp), docp)
@btime CTDirect.DOCP_constraints!(zeros(docp.dim_NLP_constraints), CTDirect.DOCP_initial_guess(docp), docp)
end

# full solve
if test_solve
println("Timed solve")
@timev sol = direct_solve(ocp, grid_size = grid_size, display = false)
@btime sol = direct_solve(ocp, grid_size = grid_size, display = false)
println("Timed full solve")
#@timev sol = direct_solve(ocp, grid_size = grid_size, display=false)
@btime sol = direct_solve(ocp, grid_size = grid_size, display=false)
end

if test_code_warntype
Expand Down Expand Up @@ -71,12 +86,3 @@ if test_jet
end
end

#=
# ipopt statistics
if test_ipopt
docp = directTranscription(ocp, grid_size=200, init=(state=[1,0.2,0.5], control=0.5))
dsol = solve(docp, print_level=5, tol=1e-12, print_timing_statistics="yes")
println("\n Discrete solution")
println(dsol)
end
=#
4 changes: 3 additions & 1 deletion src/CTDirect.jl
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,9 @@ const matrix2vec = CTBase.matrix2vec
# includes
include("utils.jl")
include("default.jl")
include("problem.jl")
include("problem.jl") # rename as docp.jl ?
include("midpoint.jl")
include("trapeze.jl")
include("solution.jl")
include("solve.jl")

Expand Down
8 changes: 8 additions & 0 deletions src/default.jl
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,14 @@
"""
$(TYPEDSIGNATURES)

Return the default discretization method, namely `trapeze`.

NOTE(review): this default is the String "trapeze" while callers in the
benchmarks pass the Symbol `:trapeze` — confirm the consumer normalizes both.
"""
function __discretization()
    return "trapeze"
end

"""
$(TYPEDSIGNATURES)
Used to set the default grid size.
The default value is `250`.
"""
Expand Down
105 changes: 105 additions & 0 deletions src/gauss.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,105 @@
#= Functions for implicit gauss-legendre 2 discretization scheme
Internal layout for NLP variables:
[X_0,U_0,K_0, X_1,U_1,K_1 .., X_N-1,U_N-1,K_N-1, XN, V]
with the conventions U_i constant per step, K_i := [K_i1 K_i2]
NB. try both control_stage and control_step versions !
Q. if taking controls at stages, how to define the control at 'step' (for path constraints or tf). Unless we evaluate path constraints at stages, but then we have to determine the state at stages (a possibility is to take the state argument as for the stage dynamics)
=#

# Later adjust this one for generic IRK scheme ! (use stage value)
# define several tags for different methods
# use intermediate abstract type ImplicitRungeKuttaTag

# Dispatch tag for the implicit Gauss-Legendre 2 (2-stage, order 4) scheme.
# Fields hold the Butcher tableau; they are given concrete types so the struct
# is fully inferrable (untyped fields are `Any` and cause boxing/allocations).
struct GaussLegendre2Tag <: DiscretizationTag
    stage::Int                  # number of stages s = 2
    additional_controls::Int    # extra controls beyond one per step
    butcher_a::Matrix{Float64}  # stage coefficient matrix A (s x s)
    butcher_b::Vector{Float64}  # quadrature weights b (length s)
    butcher_c::Vector{Float64}  # stage abscissae c (length s)
    GaussLegendre2Tag() = new(
        2,
        0,
        [0.25 (0.25 - sqrt(3)/6); (0.25 + sqrt(3)/6) 0.25],
        [0.5, 0.5],
        [(0.5 - sqrt(3)/6), (0.5 + sqrt(3)/6)],
    )
end


"""
$(TYPEDSIGNATURES)

Retrieve state and control variables at given time step from the NLP variables
(Gauss-Legendre 2 discretization).

NLP block layout per step is `[X_i U_i K_i1 K_i2]` (cf. file header).
Returns `(xi, ui, xli, ki)` where `xi` is the OCP state (scalar if
one-dimensional), `ui` the control (scalar if one-dimensional), `xli` the
Lagrange-cost state component (`nothing` when the problem has no Lagrange
cost), and `ki` the pair of stage variables (`nothing` at the final step,
which carries no stages).
"""
function get_variables_at_time_step(xu, docp, i, tag::GaussLegendre2Tag)
    # NB. original annotation was the typo `GuassLegendre2Tag`, which made this
    # method undispatchable; fixed to the declared type name.

    # block: [X_i U_i K_i1 K_i2]
    nx = docp.dim_NLP_x
    n = docp.dim_OCP_x
    m = docp.dim_NLP_u
    N = docp.dim_NLP_steps
    offset = (nx*3 + m) * i

    # retrieve scalar/vector OCP state (w/o lagrange state)
    if n == 1
        xi = xu[offset + 1]
    else
        xi = xu[(offset + 1):(offset + n)]
    end
    if docp.has_lagrange
        xli = xu[offset + nx]
    else
        xli = nothing # dummy. use xu type ?
    end

    # retrieve scalar/vector controls
    # (final step has no control of its own: reuse the last step's control)
    if i < N
        offset_u = offset
    else
        offset_u = (nx*3 + m) * (i-1)
    end
    if m == 1
        ui = xu[offset_u + nx + 1]
    else
        ui = xu[(offset_u + nx + 1):(offset_u + nx + m)]
    end

    # retrieve vector stage variable (except at final time)
    if i < N
        ki = (xu[(offset + nx + m + 1):(offset + nx + m + nx)],
              xu[(offset + nx*2 + m + 1):(offset + nx*2 + m + nx)])
    else
        ki = nothing
    end

    return xi, ui, xli, ki
end


# internal NLP version for solution parsing
# could be fused with one above if
# - using extended dynamics that include lagrange cost
# - scalar case is handled at OCP level
"""
$(TYPEDSIGNATURES)

Retrieve full NLP state, control and stage variables at given time step
(Gauss-Legendre 2 discretization), without the scalar/Lagrange handling of
the OCP-level getter.

Returns `(xi, ui, ki)`; `ki` is the pair of stage variables, `nothing` at
the final step.
"""
function get_NLP_variables_at_time_step(xu, docp, i, tag::GaussLegendre2Tag)

    nx = docp.dim_NLP_x
    m = docp.dim_NLP_u
    N = docp.dim_NLP_steps
    # block layout per step: [X_i U_i K_i1 K_i2] => block size nx*3 + m
    # (original draft used nx*2 + m and a single stage, inconsistent with the
    # file-header layout and with get_variables_at_time_step; also removed a
    # stray `+++` placeholder that broke parsing)
    offset = (nx*3 + m) * i

    # state
    xi = xu[(offset + 1):(offset + nx)]
    # control (final step reuses the previous step's control)
    if i < N
        offset_u = offset
    else
        offset_u = (nx*3 + m) * (i-1)
    end
    ui = xu[(offset_u + nx + 1):(offset_u + nx + m)]
    # stage variables (none at final time)
    if i < N
        ki = (xu[(offset + nx + m + 1):(offset + nx + m + nx)],
              xu[(offset + nx*2 + m + 1):(offset + nx*2 + m + nx)])
    else
        ki = nothing
    end

    return xi, ui, ki
end


Loading

0 comments on commit a050125

Please sign in to comment.