14 changes: 14 additions & 0 deletions .gitignore
@@ -17,6 +17,20 @@ deps/src/
docs/build/
docs/site/

# Literate.jl-generated markdown — regenerated by docs/make.jl on every build.
# Hand-written .md files (core.md, index.md, simd.md, develop.md, patterns.md,
# constraint_augmentation.md, ref.md, twostage.md, upgrade.md) stay tracked.
docs/src/distillation.md
docs/src/gpu.md
docs/src/guide.md
docs/src/jump.md
docs/src/opf.md
docs/src/oracle.md
docs/src/parameters.md
docs/src/performance.md
docs/src/quad.md
docs/src/two_stage.md

# File generated by Pkg, the package manager, based on a corresponding Project.toml
# It records a fixed state of all packages used by the project. As such, it should not be
# committed for packages, but should be committed for applications that require a static
10 changes: 5 additions & 5 deletions Project.toml
@@ -5,6 +5,7 @@ version = "0.10.0"
[deps]
Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6"
NLPModelsJuMP = "792afdf1-32c1-5681-94e0-d7bf7a5df49e"
Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
SolverCore = "ff4d7338-4cf1-434d-91df-b86cb86fb843"

@@ -19,20 +20,18 @@ Metal = "dde4c033-4e86-420c-a63e-0dd931031962"
NLPModelsIpopt = "f4238b75-b362-5c4c-b852-0801c9a21d71"
OpenCL = "08131aa3-fb12-5dee-8b74-c09406e224a2"
SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b"
oneAPI = "8f75cd03-7ff8-4ecb-9b8f-daf728133b1b" # oneAPI version >= 2.6 is needed to use sort! on GPU arrays
# OptimalControl = "5f98b655-cc9a-415a-b60e-744165666948"
oneAPI = "8f75cd03-7ff8-4ecb-9b8f-daf728133b1b"

[extensions]
ExaModelsIpopt = ["MathOptInterface", "NLPModelsIpopt"]
ExaModelsJuMP = "JuMP"
ExaModelsKernelAbstractions = "KernelAbstractions"
ExaModelsMOI = "MathOptInterface"
ExaModelsMadNLP = ["MadNLP", "MathOptInterface"]
ExaModelsMetal = "Metal"
ExaModelsOneAPI = "oneAPI"
ExaModelsOpenCL = "OpenCL"
ExaModelsSpecialFunctions = "SpecialFunctions"

[compat]
Adapt = "4"
@@ -44,8 +43,9 @@ MathOptInterface = "1.19"
Metal = "1.9"
NLPModels = "0.21"
NLPModelsIpopt = "0.11"
NLPModelsJuMP = "0.13.5"
OpenCL = "0.10"
SolverCore = "0.3"
SpecialFunctions = "2"
julia = "1.9"
oneAPI = "2"
oneAPI = "2"
4 changes: 3 additions & 1 deletion docs/make.jl
@@ -22,6 +22,7 @@ if !(@isdefined _PAGES)
"opf.md",
"upgrade.md",
],
"Oracle Constraints" => "oracle.md",
"JuMP Interface (experimental)" => "jump.md",
"API Manual" => "core.md",
"References" => "ref.md",
@@ -38,7 +39,8 @@ if !(@isdefined _JL_FILENAMES)
"gpu.jl",
"performance.jl",
"parameters.jl",
"two_stage.jl",
"oracle.jl",
]
end

86 changes: 86 additions & 0 deletions docs/src/oracle.jl
@@ -0,0 +1,86 @@
# # [Oracle Constraints](@id oracle)

# ExaModels can combine its SIMD symbolic constraints with user-supplied
# "oracle" constraint blocks via [`VectorNonlinearOracle`](@ref). An oracle
# encapsulates an opaque nonlinear function together with derivative
# callbacks — either sparse coordinate matrices (`jac!`/`hess!`) or
# matrix-free products (`jvp!`/`vjp!`/`hvp!`).
#
# This is useful when part of the model comes from an external source that
# ExaModels cannot trace symbolically: a neural network, a PDE solver,
# a GPU simulation, etc.

# ## Low-level API: `VectorNonlinearOracle`
#
# At the lowest level, you construct a `VectorNonlinearOracle` with callbacks
# that operate on the **full** problem variable vector, then register it with
# `constraint(core, oracle)`. This gives full control over sparsity patterns
# and variable indexing.
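#
# As a rough sketch of the shape of this low-level API (the constructor keyword
# names and buffer layouts below are assumptions for illustration; only
# `jac!`/`hess!`, `jvp!`/`vjp!`/`hvp!`, and `constraint(core, oracle)` are
# named in this guide):
#
# ```julia
# oracle = VectorNonlinearOracle(;
#     m = 1,                                                 # one constraint row (assumed kwarg)
#     f!    = (y, x) -> (y[1] = x[1]^2 + x[2]; nothing),     # residual over the FULL variable vector
#     jac!  = (J, x) -> (J[1] = 2x[1]; J[2] = 1.0; nothing), # values for a fixed sparsity pattern
#     hess! = (H, x, w) -> (H[1] = 2w[1]; nothing),          # weighted Hessian values
# )
# constraint(core, oracle)
# ```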

# ## High-level API: `embed_oracle`
#
# For the common pattern of embedding an opaque function `f: ℝⁿ → ℝᵐ` into
# a model, `embed_oracle` handles all the plumbing automatically:
#
# 1. Creates auxiliary output variables `z ∈ ℝᵐ`
# 2. Registers an oracle constraint `z − f(x) = 0`
# 3. Returns the updated core and `z` as a regular `Variable` for use in any SIMD expression
#
# The callbacks operate on **local** vectors — no offset management needed:
#
# - `f!(y, x)`: `y[1:m] = f(x[1:n])`
# - `jvp!(Jv, x, v)`: `Jv[1:m] = J_f(x) * v[1:n]`
# - `vjp!(Jtv, x, w)`: `Jtv[1:n] = J_f(x)' * w[1:m]`
# - `hvp!(Hv, x, w, v)`: `Hv[1:n] = (Σ wᵢ ∇²fᵢ(x)) * v[1:n]` (optional)
#
# When `adapt=Val(false)` (the default), all callbacks receive device arrays (e.g. `CuArray`)
# and must use broadcast operations — no scalar indexing. Use `adapt=Val(true)` to have
# arrays automatically copied to CPU before each callback invocation.
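#
# For instance, a callback written with a scalar loop rather than broadcasts is
# only safe on the CPU, so it would be paired with `adapt = Val(true)` (this
# `f_cpu!` is a hypothetical variant of the `f!` used in the example below):
#
# ```julia
# function f_cpu!(y, xv)   # xv arrives as a host array when adapt = Val(true)
#     for i in eachindex(y)
#         y[i] = xv[i]^2   # scalar indexing is fine here, on the CPU
#     end
#     return nothing
# end
# ```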

# ## Example
#
# We embed an opaque element-wise squaring function `f(x)ᵢ = xᵢ²` and mix
# its output `z` with symbolic ExaModels expressions in the objective and
# constraints. Using broadcast in the callbacks makes them work on both
# CPU and GPU without changes.
#
# ```math
# \min_{x} \sum_i \bigl(z_i + \sin(x_i)\bigr) \quad
# \text{s.t.} \quad z = f(x),\; x_i \ge 1 \;\forall i
# ```

using ExaModels
using CUDA
using MadNLP
using MadNLPGPU

N = 100

core = ExaCore(Float64; concrete = Val(true), backend = CUDABackend())
@add_var(core, x, N; start = (1.0 for _ in 1:N))

core, z, _ = embed_oracle(
core, x, N;
f! = (y, xv) -> (y .= xv .^ 2; nothing),
jvp! = (Jv, xv, v) -> (Jv .= 2 .* xv .* v; nothing),
vjp! = (Jtv, xv, w) -> (Jtv .= 2 .* xv .* w; nothing),
hvp! = (Hv, xv, w, v) -> (Hv .= 2 .* w .* v; nothing),
adapt = Val(false),
)

# The oracle output `z` can now be used in SIMD generator expressions,
# freely mixed with symbolic operations on `x`:

@add_obj(core, z[i] + sin(x[i]) for i in 1:N)
@add_con(core, x[i] for i in 1:N; lcon = 1.0, ucon = Inf)

model = ExaModel(core)
result = madnlp(model; print_level = MadNLP.INFO, linear_solver = LapackCUDASolver)

println("\nStatus: ", result.status)
println("Objective: ", result.objective)
println("x[1:5]: ", round.(result.solution[1:5]; digits = 6))
println("z[1:5]: ", round.(result.solution[(N + 1):(N + 5)]; digits = 6))

# The optimal solution is xᵢ = 1, zᵢ = 1 for all i, with objective
# N × (1 + sin(1)) ≈ 184.15.
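
# As a quick sanity check, the objective reported by the solver should agree
# with the closed-form value computed directly:

println("Reference objective: ", N * (1 + sin(1.0)))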
42 changes: 26 additions & 16 deletions docs/src/two_stage.jl
@@ -13,23 +13,33 @@ nv = 2 ## recourse variables per scenario
nd = 1 ## design variables
weight = 1.0 / ns

# To annotate the scenario for each variable and constraint, we need to start with a special
# ExaCore that supports such scenario annotations. Pass `ns` as the first argument to set
# the number of scenarios.
core = TwoStageExaCore(ns, concrete = Val(true))

# Now we can define the first-stage (design) variable `d`, shared across all scenarios.
# These are added without `EachScenario()`.
@add_var(core, d, nd; start = 1.0, lvar = 0.0, uvar = Inf)

# Second-stage (recourse) variables are added with `EachScenario()` as the first argument
# after the core. This creates `nv * ns` variables in total — one block of `nv` per scenario.
# Variables for scenario `s` occupy flat indices `(s-1)*nv+1 : s*nv`.
v = @add_var(core, EachScenario(), nv; start = 1.0, lvar = 0.0, uvar = Inf)
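
# To make the flat-index arithmetic concrete, here is a small helper used only
# for illustration (it is not part of the ExaModels API):

scenario_indices(s) = ((s - 1) * nv + 1):(s * nv)
scenario_indices(3) ## with nv = 2, scenario 3 owns flat indices 5:6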

# Second-stage constraints are also added with `EachScenario()`. Here we add the constraint
# v[s,1] - v[s,2]^2 = 0 for each scenario s, using flat variable indices.
con_data = [(s, (s - 1) * nv + 1, (s - 1) * nv + 2) for s in 1:ns]
@add_con(core, EachScenario(), (v[i1] - v[i2]^2 for (s, i1, i2) in con_data); lcon = 0.0, ucon = 0.0)

# The objective can mix first- and second-stage variables. Here we minimize
# d[1]^2 + weight * sum_{s,i} (v[s,i] - d[1])^2.
@add_obj(core, d[1]^2)
obj_data = [(i, j, (i - 1) * nv + j) for i in 1:ns for j in 1:nv]
@add_obj(core, weight * (v[vidx] - d[1])^2 for (i, j, vidx) in obj_data)

m = ExaModel(core)

# Now we can solve the model as usual.
ipopt(m)
# If the solver understands the scenario annotations, it can apply a structure-exploiting solution method to this model.