12 changes: 12 additions & 0 deletions .gitignore
@@ -127,3 +127,15 @@ dmypy.json

# Pyre type checker
.pyre/

# JSON files
*.json

# Output files
*.npy
*.jpg
*.png
*.pickle
*.pkl
*.npz

12 changes: 12 additions & 0 deletions examples/linear_SDE/config.yaml
@@ -0,0 +1,12 @@
T: 1.0
nsteps: 5
# Model parameters
# For the linear SDE dx = -A*x*dt + D*dW
A: 1.0
D: 1.0
# Number of ranks (processes)
nranks: 8
# Number of particles per rank
p_per_rank: 50
# Verbose output
verbose: True
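
The parameters above define the scalar linear (Ornstein-Uhlenbeck) SDE dx = -A*x*dt + D*dW. As a rough illustration of what T, nsteps, A and D control (a sketch only, not how LSDEModel integrates the equation), an Euler-Maruyama loop over one observation window could look like:

# Sketch: Euler-Maruyama steps for dx = -A*x*dt + D*dW, illustrative only;
# the actual time stepping is done by LSDEModel in the nudging package.
import numpy as np

T, nsteps, A, D = 1.0, 5, 1.0, 1.0             # values mirror config.yaml
dt = T / nsteps
rng = np.random.default_rng(7123)
x = rng.normal(0.0, np.sqrt(D**2 / (2 * A)))   # draw from the invariant distribution
for _ in range(nsteps):
    dW = rng.normal(0.0, np.sqrt(dt))          # Brownian increment over one step
    x = x - A * x * dt + D * dW
print("state after one window:", x)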
152 changes: 107 additions & 45 deletions examples/linear_SDE/linear_SDE_filter.py
@@ -1,54 +1,106 @@
from firedrake import dx
from nudging import LSDEModel, \
jittertemp_filter, base_diagnostic, Stage
from nudging import LSDEModel, jittertemp_filter, base_diagnostic, Stage
import numpy as np
from firedrake.petsc import PETSc
import yaml
import sys
import os
from rich.table import Table

Print = PETSc.Sys.Print # Set up printing only on the first rank
# Read the configuration from a YAML file. If no YAML file is provided, use default settings.
opts = PETSc.Options()
filename = opts.getString("-f", default=None)
final_time = opts.getReal("-T", default=1.0)
nsteps = opts.getInt("-n", default=5)
parameter_A = opts.getReal("-A", default=1.0)
parameter_D = opts.getReal("-D", default=1.0)
nranks = opts.getInt("-r", default=8)
particles_per_rank = opts.getInt("-p", default=10)
verbose = opts.getBool("-v", default=False)
if filename is not None:
with open(filename, "r") as f:
config = yaml.safe_load(f)
Print("Configuration loaded from", filename)
else:
config = {}
config["T"] = final_time
config["nsteps"] = nsteps
config["A"] = parameter_A
config["D"] = parameter_D
config["nranks"] = nranks
config["p_per_rank"] = particles_per_rank
config["verbose"] = verbose
Print("Using configuration from command line arguments or default values.")

Print = PETSc.Sys.Print
# model
# The one-dimensional linear SDE dx = -A*x*dt + D*dW
# with drift coefficient A and noise amplitude D
T = 1.
nsteps = 5
dt = T/nsteps
A = 1.
D = 1.0
model = LSDEModel(A=A, D=D, nsteps=nsteps, dt=dt, lambdas=True, seed=7123)
T = config["T"]
nsteps = config["nsteps"]
dt = T / nsteps
A = config["A"] # positive, constant parameter
D = config["D"] # positive, constant parameter

p_per_rank = 10
nranks = 30
nensemble = [p_per_rank]*nranks
# Instantiate the model
model = LSDEModel(A=A, D=D, nsteps=nsteps, dt=dt, lambdas=True, seed=7123)

myfilter = jittertemp_filter(n_jitt=0, delta=0.15,
verbose=2, MALA=False,
visualise_tape=False, nudging=False, sigma=0.01)
myfilter.setup(nensemble=nensemble, model=model,
residual=False)
p_per_rank = config["p_per_rank"] # number of particles per rank
nranks = config["nranks"] # total number of ranks
max_n_rank = os.cpu_count()
if nranks > max_n_rank:
Print(
f"Requested nranks {nranks} exceeds available cpu count {max_n_rank}, setting nranks to {max_n_rank}"
)
nranks = max_n_rank
nensemble = [p_per_rank] * nranks # list with ensemble size per rank


myfilter = jittertemp_filter(
n_jitt=0,
delta=0.15,
verbose=2,
MALA=False,
visualise_tape=False,
nudging=False,
sigma=0.01,
)
myfilter.setup(nensemble=nensemble, model=model, residual=False)

# data
# The ensemble is initialised as samples from N(0, D^2/(2A))
c = 0.0
d = D**2/2/A
d = D**2 / 2 / A
y0 = np.random.normal(loc=c, scale=np.sqrt(d))
Print("Initial observation value:", y0)
if config["verbose"]:
Print("Initial observation value:", y0)


y = model.obs()
y0 = -0.05563397349186569 # need to update from invariant distribution
y.dat.data[:] = y0


# prepare the initial ensemble
c = 0.
d = D**2/2/A
# Ensemble is initialized as samples from N(0, D^2/(2A))

c = 0.0
d = D**2 / 2 / A

# draw an initial condition for each particle on this rank
for i in range(nensemble[myfilter.ensemble_rank]):
dx0 = model.rg.normal(model.R, c, d)
Print(f"dx0: {dx0}")
u = myfilter.ensemble[i][0]
Print(f"u : {u}")
u.assign(dx0)

# observation noise standard deviation
S = 0.1


def log_likelihood(y, Y):
ll = (y-Y)**2/S**2/2*dx
ll = (y - Y) ** 2 / S**2 / 2 * dx
return ll
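
The quadratic form in log_likelihood is, up to an additive constant, the negative log-density of Gaussian observation noise with standard deviation S; the script integrates it over the domain with the UFL measure dx. A plain NumPy analogue of the pointwise expression (a sketch, not how Firedrake assembles the form):

# Sketch: pointwise NumPy analogue of (y - Y)**2 / (2 * S**2), the Gaussian
# negative log-likelihood up to a constant; Firedrake assembles the integrated
# UFL form instead.
import numpy as np

S = 0.1

def neg_log_likelihood(y_obs, y_model, S=S):
    return (y_obs - y_model) ** 2 / (2.0 * S**2)

print(neg_log_likelihood(np.array([0.1, -0.2]), np.array([0.0, 0.0])))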


@@ -60,22 +112,16 @@ def compute_diagnostic(self, particle):


# without nudging
nolambdasamples = samples(Stage.WITHOUT_LAMBDAS,
myfilter.subcommunicators,
nensemble)
nolambdasamples = samples(Stage.WITHOUT_LAMBDAS, myfilter.subcommunicators, nensemble)

# with nudging
nudgingsamples = samples(Stage.AFTER_NUDGING,
myfilter.subcommunicators,
nensemble)
nudgingsamples = samples(Stage.AFTER_NUDGING, myfilter.subcommunicators, nensemble)
# after computing the filtering step
resamplingsamples = samples(Stage.AFTER_ASSIMILATION_STEP,
myfilter.subcommunicators,
nensemble)
resamplingsamples = samples(
Stage.AFTER_ASSIMILATION_STEP, myfilter.subcommunicators, nensemble
)

diagnostics = [nudgingsamples,
resamplingsamples,
nolambdasamples]
diagnostics = [nudgingsamples, resamplingsamples, nolambdasamples]

tao_params = {
"tao_type": "lmvm",
@@ -87,11 +133,14 @@ def compute_diagnostic(self, particle):
}


myfilter.assimilation_step(y, log_likelihood,
diagnostics=diagnostics,
ess_tol=-9000.8,
taylor_test=False,
tao_params=tao_params)
myfilter.assimilation_step(
y,
log_likelihood,
diagnostics=diagnostics,
ess_tol=-9000.8,
taylor_test=False,
tao_params=tao_params,
)

if myfilter.subcommunicators.global_comm.rank == 0:
before, descriptors = nolambdasamples.get_archive()
@@ -104,9 +153,22 @@ def compute_diagnostic(self, particle):
bs_mean = np.mean(resampled)
bs_var = np.var(resampled)

sigsq = D**2/2/A*(1 - np.exp(-2*A*T))
Sigsq = sigsq + np.exp(-2*A*T)*d
tmean = (Sigsq*y0 + np.exp(-A*T)*S**2*c)/(Sigsq + S**2)
tvar = Sigsq*S**2/(Sigsq + S**2)

print('True mean', tmean, 'ensemble mean', bs_mean, 'true var', tvar, 'ensemble var', bs_var, 'diffmean' , abs(tmean - bs_mean), 'diffvar' ,abs(tvar - bs_var))
sigsq = D**2 / 2 / A * (1 - np.exp(-2 * A * T))
Sigsq = sigsq + np.exp(-2 * A * T) * d
tmean = (Sigsq * y0 + np.exp(-A * T) * S**2 * c) / (Sigsq + S**2)
tvar = Sigsq * S**2 / (Sigsq + S**2)

print(
"True mean",
tmean,
"ensemble mean",
bs_mean,
"true var",
tvar,
"ensemble var",
bs_var,
"diffmean",
abs(tmean - bs_mean),
"diffvar",
abs(tvar - bs_var),
)
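
The tmean/tvar expressions above are the standard conjugate Gaussian update for a prior N(exp(-A*T)*c, Sigsq) and an observation with noise variance S^2. A standalone NumPy re-derivation with the same parameter values (a sketch only, separate from the script):

# Sketch: re-derive tmean/tvar from the generic Gaussian conjugate update,
# using the example's parameter values and the fixed y0 from the script.
import numpy as np

A, D, T, S = 1.0, 1.0, 1.0, 0.1
c, d = 0.0, D**2 / (2 * A)                 # initial-condition mean and variance
y0 = -0.05563397349186569                  # observation value used above

m0 = np.exp(-A * T) * c                                        # prior mean of x(T)
Sigsq = np.exp(-2 * A * T) * d + D**2 / (2 * A) * (1 - np.exp(-2 * A * T))
post_mean = (Sigsq * y0 + S**2 * m0) / (Sigsq + S**2)          # conjugate update
post_var = Sigsq * S**2 / (Sigsq + S**2)
print("posterior mean:", post_mean, "posterior var:", post_var)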
39 changes: 22 additions & 17 deletions examples/stochastic_CH/assimilate_data_jittertemp.py
@@ -16,29 +16,33 @@

nensemble = [5, 5, 5, 5]
jtfilter.setup(nensemble, model)
x, = fd.SpatialCoordinate(model.mesh)
(x,) = fd.SpatialCoordinate(model.mesh)

# prepare the initial ensemble
exp = fd.exp
for i in range(nensemble[jtfilter.ensemble_rank]):
dx0 = model.rg.normal(model.R, 0., 0.05)
dx1 = model.rg.normal(model.R, 0., 0.05)
a = model.rg.uniform(model.R, 0., 1.0)
b = model.rg.uniform(model.R, 0., 1.0)
u0_exp = (1+a)*0.2*2/(exp(x-403./15.+dx0)+exp(-x+403./15.+dx0))
u0_exp += (1+b)*0.5*2/(exp(x-203./15.+dx1)+exp(-x+203./15.+dx1))
dx0 = model.rg.normal(model.R, 0.0, 0.05)
dx1 = model.rg.normal(model.R, 0.0, 0.05)
a = model.rg.uniform(model.R, 0.0, 1.0)
b = model.rg.uniform(model.R, 0.0, 1.0)
u0_exp = (
(1 + a) * 0.2 * 2 / (exp(x - 403.0 / 15.0 + dx0) + exp(-x + 403.0 / 15.0 + dx0))
)
u0_exp += (
(1 + b) * 0.5 * 2 / (exp(x - 203.0 / 15.0 + dx1) + exp(-x + 203.0 / 15.0 + dx1))
)
_, u = jtfilter.ensemble[i][0].split()
u.interpolate(u0_exp)


def log_likelihood(y, Y):
ll = (y-Y)**2/0.05**2/2*fd.dx
ll = (y - Y) ** 2 / 0.05**2 / 2 * fd.dx
return ll


# Load data
y_exact = np.load('y_true.npy')
y = np.load('y_obs.npy')
y_exact = np.load("y_true.npy")
y = np.load("y_obs.npy")
N_obs = y.shape[0]

yVOM = fd.Function(model.VVOM)
@@ -47,11 +51,11 @@ def log_likelihood(y, Y):
y_e_list = []
y_sim_obs_list = []
for m in range(y.shape[1]):
y_e_shared = ndg.SharedArray(partition=nensemble,
comm=jtfilter.subcommunicators.ensemble_comm)
y_e_shared = ndg.SharedArray(
partition=nensemble, comm=jtfilter.subcommunicators.ensemble_comm
)
ecomm = jtfilter.subcommunicators.ensemble_comm
y_sim_obs_shared = ndg.SharedArray(partition=nensemble,
comm=ecomm)
y_sim_obs_shared = ndg.SharedArray(partition=nensemble, comm=ecomm)
y_e_list.append(y_e_shared)
y_sim_obs_list.append(y_sim_obs_shared)

@@ -60,7 +64,7 @@ def log_likelihood(y, Y):
y_e = np.zeros((np.sum(nensemble), ys[0], ys[1]))
y_e_asmfwd = np.zeros((np.sum(nensemble), ys[0], ys[1]))
y_sim_obs_alltime_step = np.zeros((np.sum(nensemble), nsteps, ys[1]))
y_sim_obs_allobs_step = np.zeros((np.sum(nensemble), nsteps*N_obs, ys[1]))
y_sim_obs_allobs_step = np.zeros((np.sum(nensemble), nsteps * N_obs, ys[1]))

# do the assimilation step
for k in range(N_obs):
@@ -88,8 +92,9 @@ def log_likelihood(y, Y):
y_sim_obs_list[m].synchronise()
if fd.COMM_WORLD.rank == 0:
y_sim_obs_alltime_step[:, step, m] = y_sim_obs_list[m].data()
y_sim_obs_allobs_step[:, nsteps*k+step, m] = \
y_sim_obs_alltime_step[:, step, m]
y_sim_obs_allobs_step[:, nsteps * k + step, m] = y_sim_obs_alltime_step[
:, step, m
]

# actually do the data assimilation step
jtfilter.assimilation_step(yVOM, log_likelihood)
8 changes: 5 additions & 3 deletions examples/stochastic_CH/generate_data.py
@@ -13,10 +13,12 @@
model.setup()
X_truth = model.allocate()
_, u0 = X_truth[0].split()
x, = fd.SpatialCoordinate(model.mesh)
(x,) = fd.SpatialCoordinate(model.mesh)
exp = fd.exp
u0.interpolate(0.2*2/(exp(x-403./15.) + exp(-x+403./15.))
+ 0.5*2/(exp(x-203./15.) + exp(-x+203./15.)))
u0.interpolate(
0.2 * 2 / (exp(x - 403.0 / 15.0) + exp(-x + 403.0 / 15.0))
+ 0.5 * 2 / (exp(x - 203.0 / 15.0) + exp(-x + 203.0 / 15.0))
)
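
The two bumps in this initial condition are sech profiles, since 2/(exp(z) + exp(-z)) = 1/cosh(z). A quick standalone NumPy check of that identity (a sketch, unrelated to the Firedrake interpolation above):

# Sketch: 2/(exp(z) + exp(-z)) equals sech(z) = 1/cosh(z), so the initial
# condition is 0.2*sech(x - 403/15) + 0.5*sech(x - 203/15).
import numpy as np

x = np.linspace(0.0, 40.0, 201)
u0 = (0.2 * 2 / (np.exp(x - 403.0 / 15.0) + np.exp(-x + 403.0 / 15.0))
      + 0.5 * 2 / (np.exp(x - 203.0 / 15.0) + np.exp(-x + 203.0 / 15.0)))
u0_sech = 0.2 / np.cosh(x - 403.0 / 15.0) + 0.5 / np.cosh(x - 203.0 / 15.0)
assert np.allclose(u0, u0_sech)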
N_obs = 50

y_true = model.obs().dat.data[:]