Skip to content

Commit ab4c42f

Browse files
authored
Merge pull request #168 from florisvb/robust-optimization
Addressing #167
2 parents 6310343 + 4be8161 commit ab4c42f

13 files changed

Lines changed: 231 additions & 277 deletions

File tree

pynumdiff/basis_fit/_basis_fit.py

Lines changed: 14 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -19,9 +19,8 @@ def spectraldiff(x, dt, params=None, options=None, high_freq_cutoff=None, even_e
1919
:param bool pad_to_zero_dxdt: if True, extend the data with extra regions that smoothly force the derivative to
2020
zero before taking FFT.
2121
22-
:return: tuple[np.array, np.array] of\n
23-
- **x_hat** -- estimated (smoothed) x
24-
- **dxdt_hat** -- estimated derivative of x
22+
:return: - **x_hat** (np.array) -- estimated (smoothed) x
23+
- **dxdt_hat** (np.array) -- estimated derivative of x
2524
"""
2625
if params != None: # Warning to support old interface for a while. Remove these lines along with params in a future release.
2726
warn("`params` and `options` parameters will be removed in a future version. Use `high_freq_cutoff`, " +
@@ -52,27 +51,25 @@ def spectraldiff(x, dt, params=None, options=None, high_freq_cutoff=None, even_e
5251
if even_extension is True:
5352
x = np.hstack((x, x[::-1]))
5453

55-
# If odd, make N even, and pad x
54+
# Form wavenumbers
5655
N = len(x)
57-
58-
# Define the frequency range.
5956
k = np.concatenate((np.arange(N//2 + 1), np.arange(-N//2 + 1, 0)))
6057
if N % 2 == 0: k[N//2] = 0 # odd derivatives get the Nyquist element zeroed out
61-
omega = k*2*np.pi/(dt*N) # turn wavenumbers into frequencies in radians/s
6258

63-
# Frequency based smoothing: remove signals with a frequency higher than high_freq_cutoff
59+
# Filter to zero out higher wavenumbers
6460
discrete_cutoff = int(high_freq_cutoff*N/2) # Nyquist is at N/2 location, and we're cutting off as a fraction of that
65-
omega[discrete_cutoff:N-discrete_cutoff] = 0
61+
filt = np.ones(k.shape); filt[discrete_cutoff:N-discrete_cutoff] = 0
62+
63+
# Smoothed signal
64+
X = np.fft.fft(x)
65+
x_hat = np.real(np.fft.ifft(filt * X))
66+
x_hat = x_hat[padding:L+padding]
6667

6768
# Derivative = 90 deg phase shift
68-
dxdt_hat = np.real(np.fft.ifft(1.0j * omega * np.fft.fft(x)))
69+
omega = 2*np.pi/(dt*N) # factor of 2pi/T turns wavenumbers into frequencies in radians/s
70+
dxdt_hat = np.real(np.fft.ifft(1j * k * omega * filt * X))
6971
dxdt_hat = dxdt_hat[padding:L+padding]
7072

71-
# Integrate to get x_hat
72-
x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
73-
x0 = utility.estimate_integration_constant(x[padding:L+padding], x_hat)
74-
x_hat = x_hat + x0
75-
7673
return x_hat, dxdt_hat
7774

7875

@@ -88,9 +85,8 @@ def rbfdiff(x, _t, sigma=1, lmbd=0.01):
8885
:param float sigma: controls width of radial basis functions
8986
:param float lmbd: controls smoothness
9087
91-
:return: tuple[np.array, np.array] of\n
92-
- **x_hat** -- estimated (smoothed) x
93-
- **dxdt_hat** -- estimated derivative of x
88+
:return: - **x_hat** (np.array) -- estimated (smoothed) x
89+
- **dxdt_hat** (np.array) -- estimated derivative of x
9490
"""
9591
if np.isscalar(_t):
9692
t = np.arange(len(x))*_t

pynumdiff/finite_difference/_finite_difference.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,9 @@ def finitediff(x, dt, num_iterations=1, order=2):
1313
:param int num_iterations: number of iterations. If >1, the derivative is integrated with trapezoidal
1414
rule, that result is finite-differenced again, and the cycle is repeated num_iterations-1 times
1515
:param int order: 1, 2, or 4, controls which finite differencing scheme to employ
16+
17+
:return: - **x_hat** (np.array) -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat
18+
- **dxdt_hat** (np.array) -- estimated derivative of x
1619
"""
1720
if num_iterations < 1: raise ValueError("num_iterations must be >0")
1821
if order not in [1, 2, 4]: raise ValueError("order must be 1, 2, or 4")
@@ -59,7 +62,7 @@ def finitediff(x, dt, num_iterations=1, order=2):
5962
dxdt_hat /= dt # don't forget to scale by dt, can't skip it this time
6063

6164
if num_iterations > 1: # We've lost a constant of integration in the above
62-
x_hat += utility.estimate_integration_constant(x, x_hat) # uses least squares
65+
x_hat += utility.estimate_integration_constant(x, x_hat)
6366

6467
return x_hat, dxdt_hat
6568

@@ -75,9 +78,8 @@ def first_order(x, dt, params=None, options={}, num_iterations=1):
7578
:param int num_iterations: number of iterations. If >1, the derivative is integrated with trapezoidal
7679
rule, that result is finite-differenced again, and the cycle is repeated num_iterations-1 times
7780
78-
:return: tuple[np.array, np.array] of\n
79-
- **x_hat** -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat
80-
- **dxdt_hat** -- estimated derivative of x
81+
:return: - **x_hat** (np.array) -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat
82+
- **dxdt_hat** (np.array) -- estimated derivative of x
8183
"""
8284
warn("`first_order` in past releases was actually calculating a second-order FD. Use `second_order` to achieve " +
8385
"approximately the same behavior. Note that odd-order methods have asymmetrical stencils, which causes " +
@@ -99,9 +101,8 @@ def second_order(x, dt, num_iterations=1):
99101
:param int num_iterations: number of iterations. If >1, the derivative is integrated with trapezoidal
100102
rule, that result is finite-differenced again, and the cycle is repeated num_iterations-1 times
101103
102-
:return: tuple[np.array, np.array] of\n
103-
- **x_hat** -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat
104-
- **dxdt_hat** -- estimated derivative of x
104+
:return: - **x_hat** (np.array) -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat
105+
- **dxdt_hat** (np.array) -- estimated derivative of x
105106
"""
106107
warn("`second_order` is deprecated. Call `finitediff` with order 2 instead.", DeprecationWarning)
107108
return finitediff(x, dt, num_iterations, 2)
@@ -116,9 +117,8 @@ def fourth_order(x, dt, num_iterations=1):
116117
:param int num_iterations: number of iterations. If >1, the derivative is integrated with trapezoidal
117118
rule, that result is finite-differenced again, and the cycle is repeated num_iterations-1 times
118119
119-
:return: tuple[np.array, np.array] of\n
120-
- **x_hat** -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat
121-
- **dxdt_hat** -- estimated derivative of x
120+
:return: - **x_hat** (np.array) -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat
121+
- **dxdt_hat** (np.array) -- estimated derivative of x
122122
"""
123123
warn("`fourth_order` is deprecated. Call `finitediff` with order 4 instead.", DeprecationWarning)
124124
return finitediff(x, dt, num_iterations, 4)

pynumdiff/kalman_smooth/_kalman_smooth.py

Lines changed: 32 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -25,12 +25,11 @@ def kalman_filter(y, _t, xhat0, P0, A, Q, C, R, B=None, u=None, save_P=True):
2525
:param bool save_P: whether to save history of error covariance and a priori state estimates, used with rts
2626
smoothing but nonstandard to compute for ordinary filtering
2727
28-
:return: tuple[np.array, np.array, np.array, np.array] of\n
29-
- **xhat_pre** -- a priori estimates of xhat, with axis=0 the batch dimension, so xhat[n] gets the nth step
30-
- **xhat_post** -- a posteriori estimates of xhat
31-
- **P_pre** -- a priori estimates of P
32-
- **P_post** -- a posteriori estimates of P
33-
if :code:`save_P` is :code:`True` else only **xhat_post** to save memory
28+
:return: - **xhat_pre** (np.array) -- a priori estimates of xhat, with axis=0 the batch dimension, so xhat[n] gets the nth step
29+
- **xhat_post** (np.array) -- a posteriori estimates of xhat
30+
- **P_pre** (np.array) -- a priori estimates of P
31+
- **P_post** (np.array) -- a posteriori estimates of P
32+
if :code:`save_P` is :code:`True` else only **xhat_post** to save memory
3433
"""
3534
N = y.shape[0]
3635
m = xhat0.shape[0] # dimension of the state
@@ -84,16 +83,15 @@ def rts_smooth(_t, A, xhat_pre, xhat_post, P_pre, P_post, compute_P_smooth=True)
8483
:param float or array[float] _t: This function supports variable step size. This parameter is either the constant
8584
step size if given as a single float, or sample locations if given as an array of same length as the state histories.
8685
:param np.array A: state transition matrix, in discrete time if constant dt, in continuous time if variable dt
87-
:param list[np.array] xhat_pre: a priori estimates of xhat from a kalman_filter forward pass
88-
:param list[np.array] xhat_post: a posteriori estimates of xhat from a kalman_filter forward pass
89-
:param list[np.array] P_pre: a priori estimates of P from a kalman_filter forward pass
90-
:param list[np.array] P_post: a posteriori estimates of P from a kalman_filter forward pass
86+
:param np.array xhat_pre: a priori estimates of xhat from a kalman_filter forward pass
87+
:param np.array xhat_post: a posteriori estimates of xhat from a kalman_filter forward pass
88+
:param np.array P_pre: a priori estimates of P from a kalman_filter forward pass
89+
:param np.array P_post: a posteriori estimates of P from a kalman_filter forward pass
9190
:param bool compute_P_smooth: it's extra work if you don't need it
9291
93-
:return: tuple[np.array, np.array] of\n
94-
- **xhat_smooth** -- RTS smoothed xhat
95-
- **P_smooth** -- RTS smoothed P
96-
if :code:`compute_P_smooth` is :code:`True` else only **xhat_smooth**
92+
:return: - **xhat_smooth** (np.array) -- RTS smoothed xhat
93+
- **P_smooth** (np.array) -- RTS smoothed P estimates
94+
if :code:`compute_P_smooth` is :code:`True` else only **xhat_smooth**
9795
"""
9896
xhat_smooth = np.empty(xhat_post.shape); xhat_smooth[-1] = xhat_post[-1] # preallocate arrays
9997
if compute_P_smooth: P_smooth = np.empty(P_post.shape); P_smooth[-1] = P_post[-1]
@@ -123,9 +121,8 @@ def rtsdiff(x, _t, order, log_qr_ratio, forwardbackward):
123121
:param bool forwardbackward: indicates whether to run smoother forwards and backwards
124122
(usually achieves better estimate at end points)
125123
126-
:return: tuple[np.array, np.array] of\n
127-
- **x_hat** -- estimated (smoothed) x
128-
- **dxdt_hat** -- estimated derivative of x
124+
:return: - **x_hat** (np.array) -- estimated (smoothed) x
125+
- **dxdt_hat** (np.array) -- estimated derivative of x
129126
"""
130127
if not np.isscalar(_t) and len(x) != len(_t):
131128
raise ValueError("If `_t` is given as array-like, must have same length as `x`.")
@@ -185,9 +182,8 @@ def constant_velocity(x, dt, params=None, options=None, r=None, q=None, forwardb
185182
:param bool forwardbackward: indicates whether to run smoother forwards and backwards
186183
(usually achieves better estimate at end points)
187184
188-
:return: tuple[np.array, np.array] of\n
189-
- **x_hat** -- estimated (smoothed) x
190-
- **dxdt_hat** -- estimated derivative of x
185+
:return: - **x_hat** (np.array) -- estimated (smoothed) x
186+
- **dxdt_hat** (np.array) -- estimated derivative of x
191187
"""
192188
if params != None: # boilerplate backwards compatibility code
193189
warn("`params` and `options` parameters will be removed in a future version. Use `r`, " +
@@ -216,9 +212,8 @@ def constant_acceleration(x, dt, params=None, options=None, r=None, q=None, forw
216212
:param bool forwardbackward: indicates whether to run smoother forwards and backwards
217213
(usually achieves better estimate at end points)
218214
219-
:return: tuple[np.array, np.array] of\n
220-
- **x_hat** -- estimated (smoothed) x
221-
- **dxdt_hat** -- estimated derivative of x
215+
:return: - **x_hat** (np.array) -- estimated (smoothed) x
216+
- **dxdt_hat** (np.array) -- estimated derivative of x
222217
"""
223218
if params != None: # boilerplate backwards compatibility code
224219
warn("`params` and `options` parameters will be removed in a future version. Use `r`, " +
@@ -247,9 +242,8 @@ def constant_jerk(x, dt, params=None, options=None, r=None, q=None, forwardbackw
247242
:param bool forwardbackward: indicates whether to run smoother forwards and backwards
248243
(usually achieves better estimate at end points)
249244
250-
:return: tuple[np.array, np.array] of\n
251-
- **x_hat** -- estimated (smoothed) x
252-
- **dxdt_hat** -- estimated derivative of x
245+
:return: - **x_hat** (np.array) -- estimated (smoothed) x
246+
- **dxdt_hat** (np.array) -- estimated derivative of x
253247
"""
254248
if params != None: # boilerplate backwards compatibility code
255249
warn("`params` and `options` parameters will be removed in a future version. Use `r`, " +
@@ -290,13 +284,9 @@ def robustdiff(x, dt, order, log_q, log_r, proc_huberM=6, meas_huberM=0):
290284
:param float proc_huberM: quadratic-to-linear transition point for process loss
291285
:param float meas_huberM: quadratic-to-linear transition point for measurement loss
292286
293-
:return: tuple[np.array, np.array] of\n
294-
- **x_hat** -- estimated (smoothed) x
295-
- **dxdt_hat** -- estimated derivative of x
287+
:return: - **x_hat** (np.array) -- estimated (smoothed) x
288+
- **dxdt_hat** (np.array) -- estimated derivative of x
296289
"""
297-
#q = 1e4 # I found q too small worsened condition number of the Q matrix, so fixing it at a biggish value
298-
#r = q/qr_ratio
299-
300290
A_c = np.diag(np.ones(order), 1) # continuous-time A just has 1s on the first diagonal (where 0th is main diagonal)
301291
Q_c = np.zeros(A_c.shape); Q_c[-1,-1] = 10**log_q # continuous-time uncertainty around the last derivative
302292
C = np.zeros((1, order+1)); C[0,0] = 1 # we measure only y = noisy x
@@ -324,31 +314,31 @@ def convex_smooth(y, A, Q, C, R, proc_huberM, meas_huberM):
324314
:param float proc_huberM: Huber loss parameter. :math:`M=0` reduces to :math:`\\sqrt{2}\\ell_1`.
325315
:param float meas_huberM: Huber loss parameter. :math:`M=\\infty` reduces to :math:`\\frac{1}{2}\\ell_2^2`.
326316
327-
:return: np.array -- state estimates (state_dim x N)
317+
:return: (np.array) -- state estimates (state_dim x N)
328318
"""
329319
N = len(y)
330320
x_states = cvxpy.Variable((A.shape[0], N)) # each column is [position, velocity, acceleration, ...] at step n
331321

332322
def huber_const(M): # from https://jmlr.org/papers/volume14/aravkin13a/aravkin13a.pdf, with correction for missing sqrt
333-
a = 2*np.exp(-M**2 / 2)/M
323+
a = 2*np.exp(-M**2 / 2)/M # huber_const smoothly transitions Huber between 1-norm and 2-norm squared cases
334324
b = np.sqrt(2*np.pi)*(2*norm.cdf(M) - 1)
335325
return np.sqrt((2*a*(1 + M**2)/M**2 + b)/(a + b))
336326

337327
# It is extremely important to run time that CVXPY expressions be in vectorized form
338328
proc_resids = np.linalg.inv(sqrtm(Q)) @ (x_states[:,1:] - A @ x_states[:,:-1]) # all Q^(-1/2)(x_n - A x_{n-1})
339329
meas_resids = np.linalg.inv(sqrtm(R)) @ (y.reshape(C.shape[0],-1) - C @ x_states) # all R^(-1/2)(y_n - C x_n)
340330
# Process terms: sum of J(proc_resids)
341-
objective = 0.5*cvxpy.sum_squares(proc_resids) if huberM == float('inf') \
342-
else np.sqrt(2)*cvxpy.sum(cvxpy.abs(proc_resids)) if huberM < 1e-3 \
343-
else huber_const(huberM)*cvxpy.sum(cvxpy.huber(proc_resids, huberM)) # 1/2 l2^2, l1, or Huber
331+
objective = 0.5*cvxpy.sum_squares(proc_resids) if proc_huberM == float('inf') \
332+
else np.sqrt(2)*cvxpy.sum(cvxpy.abs(proc_resids)) if proc_huberM < 1e-3 \
333+
else huber_const(proc_huberM)*cvxpy.sum(cvxpy.huber(proc_resids, proc_huberM)) # 1/2 l2^2, l1, or Huber
344334
# Measurement terms: sum of V(meas_resids)
345-
objective += 0.5*cvxpy.sum_squares(meas_resids) if huberM == float('inf') \
346-
else np.sqrt(2)*cvxpy.sum(cvxpy.abs(meas_resids)) if huberM < 1e-3 \
347-
else huber_const(huberM)*cvxpy.sum(cvxpy.huber(meas_resids, huberM)) # CVXPY quirk: norm(, 1) != sum(abs()) for matrices
335+
objective += 0.5*cvxpy.sum_squares(meas_resids) if meas_huberM == float('inf') \
336+
else np.sqrt(2)*cvxpy.sum(cvxpy.abs(meas_resids)) if meas_huberM < 1e-3 \
337+
else huber_const(meas_huberM)*cvxpy.sum(cvxpy.huber(meas_resids, meas_huberM)) # CVXPY quirk: norm(, 1) != sum(abs()) for matrices
348338

349339
problem = cvxpy.Problem(cvxpy.Minimize(objective))
350340
try: problem.solve(solver=cvxpy.CLARABEL); print("CLARABEL succeeded")
351-
except cvxpy.error.SolverError: pass # Could try a different solver here, like SCS, but slows things down
341+
except cvxpy.error.SolverError: pass # Could try another solver here, like SCS, but slows things down
352342

353-
if x_states.value is None: return np.full((A.shape[0], N), np.nan) # There can be solver failure, even without exception
343+
if x_states.value is None: return np.full((A.shape[0], N), np.nan) # There can be solver failure, even without error
354344
return x_states.value

pynumdiff/linear_model/_linear_model.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -83,9 +83,8 @@ def lineardiff(x, dt, params=None, options=None, order=None, gamma=None, window_
8383
:param str kernel: name of kernel to use for weighting and smoothing windows ('gaussian' or 'friedrichs')
8484
:param str solver: CVXPY solver to use, one of :code:`cvxpy.installed_solvers()`
8585
86-
:return: tuple[np.array, np.array] of\n
87-
- **x_hat** -- estimated (smoothed) x
88-
- **dxdt_hat** -- estimated derivative of x
86+
:return: - **x_hat** (np.array) -- estimated (smoothed) x
87+
- **dxdt_hat** (np.array) -- estimated derivative of x
8988
"""
9089
if params != None:
9190
warn("`params` and `options` parameters will be removed in a future version. Use `order`, " +

0 commit comments

Comments (0)