@@ -25,12 +25,11 @@ def kalman_filter(y, _t, xhat0, P0, A, Q, C, R, B=None, u=None, save_P=True):
2525 :param bool save_P: whether to save history of error covariance and a priori state estimates, used with rts
2626 smoothing but nonstandard to compute for ordinary filtering
2727
28- :return: tuple[np.array, np.array, np.array, np.array] of\n
29- - **xhat_pre** -- a priori estimates of xhat, with axis=0 the batch dimension, so xhat[n] gets the nth step
30- - **xhat_post** -- a posteriori estimates of xhat
31- - **P_pre** -- a priori estimates of P
32- - **P_post** -- a posteriori estimates of P
33- if :code:`save_P` is :code:`True` else only **xhat_post** to save memory
28+ :return: - **xhat_pre** (np.array) -- a priori estimates of xhat, with axis=0 the batch dimension, so xhat[n] gets the nth step
29+ - **xhat_post** (np.array) -- a posteriori estimates of xhat
30+ - **P_pre** (np.array) -- a priori estimates of P
31+ - **P_post** (np.array) -- a posteriori estimates of P
32+ if :code:`save_P` is :code:`True` else only **xhat_post** to save memory
3433 """
3534 N = y .shape [0 ]
3635 m = xhat0 .shape [0 ] # dimension of the state
@@ -84,16 +83,15 @@ def rts_smooth(_t, A, xhat_pre, xhat_post, P_pre, P_post, compute_P_smooth=True)
8483 :param float or array[float] _t: This function supports variable step size. This parameter is either the constant
8584 step size if given as a single float, or sample locations if given as an array of same length as the state histories.
8685 :param np.array A: state transition matrix, in discrete time if constant dt, in continuous time if variable dt
87- :param list[ np.array] xhat_pre: a priori estimates of xhat from a kalman_filter forward pass
88- :param list[ np.array] xhat_post: a posteriori estimates of xhat from a kalman_filter forward pass
89- :param list[ np.array] P_pre: a priori estimates of P from a kalman_filter forward pass
90- :param list[ np.array] P_post: a posteriori estimates of P from a kalman_filter forward pass
86+ :param np.array xhat_pre: a priori estimates of xhat from a kalman_filter forward pass
87+ :param np.array xhat_post: a posteriori estimates of xhat from a kalman_filter forward pass
88+ :param np.array P_pre: a priori estimates of P from a kalman_filter forward pass
89+ :param np.array P_post: a posteriori estimates of P from a kalman_filter forward pass
9190 :param bool compute_P_smooth: it's extra work if you don't need it
9291
93- :return: tuple[np.array, np.array] of\n
94- - **xhat_smooth** -- RTS smoothed xhat
95- - **P_smooth** -- RTS smoothed P
96- if :code:`compute_P_smooth` is :code:`True` else only **xhat_smooth**
92+ :return: - **xhat_smooth** (np.array) -- RTS smoothed xhat
93+ - **P_smooth** (np.array) -- RTS smoothed P estimates
94+ if :code:`compute_P_smooth` is :code:`True` else only **xhat_smooth**
9795 """
9896 xhat_smooth = np .empty (xhat_post .shape ); xhat_smooth [- 1 ] = xhat_post [- 1 ] # preallocate arrays
9997 if compute_P_smooth : P_smooth = np .empty (P_post .shape ); P_smooth [- 1 ] = P_post [- 1 ]
@@ -123,9 +121,8 @@ def rtsdiff(x, _t, order, log_qr_ratio, forwardbackward):
123121 :param bool forwardbackward: indicates whether to run smoother forwards and backwards
124122 (usually achieves better estimate at end points)
125123
126- :return: tuple[np.array, np.array] of\n
127- - **x_hat** -- estimated (smoothed) x
128- - **dxdt_hat** -- estimated derivative of x
124+ :return: - **x_hat** (np.array) -- estimated (smoothed) x
125+ - **dxdt_hat** (np.array) -- estimated derivative of x
129126 """
130127 if not np .isscalar (_t ) and len (x ) != len (_t ):
131128 raise ValueError ("If `_t` is given as array-like, must have same length as `x`." )
@@ -185,9 +182,8 @@ def constant_velocity(x, dt, params=None, options=None, r=None, q=None, forwardb
185182 :param bool forwardbackward: indicates whether to run smoother forwards and backwards
186183 (usually achieves better estimate at end points)
187184
188- :return: tuple[np.array, np.array] of\n
189- - **x_hat** -- estimated (smoothed) x
190- - **dxdt_hat** -- estimated derivative of x
185+ :return: - **x_hat** (np.array) -- estimated (smoothed) x
186+ - **dxdt_hat** (np.array) -- estimated derivative of x
191187 """
192188 if params != None : # boilerplate backwards compatibility code
193189 warn ("`params` and `options` parameters will be removed in a future version. Use `r`, " +
@@ -216,9 +212,8 @@ def constant_acceleration(x, dt, params=None, options=None, r=None, q=None, forw
216212 :param bool forwardbackward: indicates whether to run smoother forwards and backwards
217213 (usually achieves better estimate at end points)
218214
219- :return: tuple[np.array, np.array] of\n
220- - **x_hat** -- estimated (smoothed) x
221- - **dxdt_hat** -- estimated derivative of x
215+ :return: - **x_hat** (np.array) -- estimated (smoothed) x
216+ - **dxdt_hat** (np.array) -- estimated derivative of x
222217 """
223218 if params != None : # boilerplate backwards compatibility code
224219 warn ("`params` and `options` parameters will be removed in a future version. Use `r`, " +
@@ -247,9 +242,8 @@ def constant_jerk(x, dt, params=None, options=None, r=None, q=None, forwardbackw
247242 :param bool forwardbackward: indicates whether to run smoother forwards and backwards
248243 (usually achieves better estimate at end points)
249244
250- :return: tuple[np.array, np.array] of\n
251- - **x_hat** -- estimated (smoothed) x
252- - **dxdt_hat** -- estimated derivative of x
245+ :return: - **x_hat** (np.array) -- estimated (smoothed) x
246+ - **dxdt_hat** (np.array) -- estimated derivative of x
253247 """
254248 if params != None : # boilerplate backwards compatibility code
255249 warn ("`params` and `options` parameters will be removed in a future version. Use `r`, " +
@@ -290,13 +284,9 @@ def robustdiff(x, dt, order, log_q, log_r, proc_huberM=6, meas_huberM=0):
290284 :param float proc_huberM: quadratic-to-linear transition point for process loss
291285 :param float meas_huberM: quadratic-to-linear transition point for measurement loss
292286
293- :return: tuple[np.array, np.array] of\n
294- - **x_hat** -- estimated (smoothed) x
295- - **dxdt_hat** -- estimated derivative of x
287+ :return: - **x_hat** (np.array) -- estimated (smoothed) x
288+ - **dxdt_hat** (np.array) -- estimated derivative of x
296289 """
297- #q = 1e4 # I found q too small worsened condition number of the Q matrix, so fixing it at a biggish value
298- #r = q/qr_ratio
299-
300290 A_c = np .diag (np .ones (order ), 1 ) # continuous-time A just has 1s on the first diagonal (where 0th is main diagonal)
301291 Q_c = np .zeros (A_c .shape ); Q_c [- 1 ,- 1 ] = 10 ** log_q # continuous-time uncertainty around the last derivative
302292 C = np .zeros ((1 , order + 1 )); C [0 ,0 ] = 1 # we measure only y = noisy x
@@ -324,31 +314,31 @@ def convex_smooth(y, A, Q, C, R, proc_huberM, meas_huberM):
324314 :param float proc_huberM: Huber loss parameter. :math:`M=0` reduces to :math:`\\ sqrt{2}\\ ell_1`.
325315 :param float meas_huberM: Huber loss parameter. :math:`M=\\ infty` reduces to :math:`\\ frac{1}{2}\\ ell_2^2`.
326316
327- :return: np.array -- state estimates (state_dim x N)
327- :return: (np.array) -- state estimates (state_dim x N)
328318 """
329319 N = len (y )
330320 x_states = cvxpy .Variable ((A .shape [0 ], N )) # each column is [position, velocity, acceleration, ...] at step n
331321
332322 def huber_const (M ): # from https://jmlr.org/papers/volume14/aravkin13a/aravkin13a.pdf, with correction for missing sqrt
333- a = 2 * np .exp (- M ** 2 / 2 )/ M
323+ a = 2 * np .exp (- M ** 2 / 2 )/ M # huber_const smoothly transitions Huber between 1-norm and 2-norm squared cases
334324 b = np .sqrt (2 * np .pi )* (2 * norm .cdf (M ) - 1 )
335325 return np .sqrt ((2 * a * (1 + M ** 2 )/ M ** 2 + b )/ (a + b ))
336326
337327 # It is extremely important for run time that CVXPY expressions be in vectorized form
338328 proc_resids = np .linalg .inv (sqrtm (Q )) @ (x_states [:,1 :] - A @ x_states [:,:- 1 ]) # all Q^(-1/2)(x_n - A x_{n-1})
339329 meas_resids = np .linalg .inv (sqrtm (R )) @ (y .reshape (C .shape [0 ],- 1 ) - C @ x_states ) # all R^(-1/2)(y_n - C x_n)
340330 # Process terms: sum of J(proc_resids)
341- objective = 0.5 * cvxpy .sum_squares (proc_resids ) if huberM == float ('inf' ) \
342- else np .sqrt (2 )* cvxpy .sum (cvxpy .abs (proc_resids )) if huberM < 1e-3 \
343- else huber_const (huberM )* cvxpy .sum (cvxpy .huber (proc_resids , huberM )) # 1/2 l2^2, l1, or Huber
331+ objective = 0.5 * cvxpy .sum_squares (proc_resids ) if proc_huberM == float ('inf' ) \
332+ else np .sqrt (2 )* cvxpy .sum (cvxpy .abs (proc_resids )) if proc_huberM < 1e-3 \
333+ else huber_const (proc_huberM )* cvxpy .sum (cvxpy .huber (proc_resids , proc_huberM )) # 1/2 l2^2, l1, or Huber
344334 # Measurement terms: sum of V(meas_resids)
345- objective += 0.5 * cvxpy .sum_squares (meas_resids ) if huberM == float ('inf' ) \
346- else np .sqrt (2 )* cvxpy .sum (cvxpy .abs (meas_resids )) if huberM < 1e-3 \
347- else huber_const (huberM )* cvxpy .sum (cvxpy .huber (meas_resids , huberM )) # CVXPY quirk: norm(, 1) != sum(abs()) for matrices
335+ objective += 0.5 * cvxpy .sum_squares (meas_resids ) if meas_huberM == float ('inf' ) \
336+ else np .sqrt (2 )* cvxpy .sum (cvxpy .abs (meas_resids )) if meas_huberM < 1e-3 \
337+ else huber_const (meas_huberM )* cvxpy .sum (cvxpy .huber (meas_resids , meas_huberM )) # CVXPY quirk: norm(, 1) != sum(abs()) for matrices
348338
349339 problem = cvxpy .Problem (cvxpy .Minimize (objective ))
350340 try : problem .solve (solver = cvxpy .CLARABEL ); print ("CLARABEL succeeded" )
351- except cvxpy .error .SolverError : pass # Could try a different solver here, like SCS, but slows things down
341+ except cvxpy .error .SolverError : pass # Could try another solver here, like SCS, but slows things down
352342
353- if x_states .value is None : return np .full ((A .shape [0 ], N ), np .nan ) # There can be solver failure, even without exception
343+ if x_states .value is None : return np .full ((A .shape [0 ], N ), np .nan ) # There can be solver failure, even without error
354344 return x_states .value
0 commit comments