@@ -261,10 +261,11 @@ def spectraldiff(x, dt, params=None, options=None, high_freq_cutoff=None, even_e
261261 :param list[float] or float params: (**deprecated**, prefer :code:`high_freq_cutoff`)
262262 :param dict options: (**deprecated**, prefer :code:`even_extension`
263263 and :code:`pad_to_zero_dxdt`) a dictionary consisting of {'even_extension': (bool), 'pad_to_zero_dxdt': (bool)}
264- :param float high_freq_cutoff: The high frequency cutoff. Frequencies below this threshold will be kept, and above will be zeroed.
264+ :param float high_freq_cutoff: The high frequency cutoff as a fraction of the sampling frequency: should be between 0
265+ and 0.5 (0.5 corresponds to the Nyquist frequency). Frequencies below this threshold will be kept, and above will be zeroed.
265266 :param bool even_extension: if True, extend the data with an even extension so signal starts and ends at the same value.
266- :param bool pad_to_zero_dxdt: if True, extend the data with extensions that smoothly force the derivative to zero. This
267- allows the spectral derivative to fit data which does not start and end with derivatives equal to zero .
267+ :param bool pad_to_zero_dxdt: if True, extend the data with extra regions that smoothly force the derivative to
268+ zero before taking FFT .
268269
269270 :return: tuple[np.array, np.array] of\n
270271 - **x_hat** -- estimated (smoothed) x
@@ -280,16 +281,17 @@ def spectraldiff(x, dt, params=None, options=None, high_freq_cutoff=None, even_e
280281 elif high_freq_cutoff == None :
281282 raise ValueError ("`high_freq_cutoff` must be given." )
282283
283- original_L = len (x )
284+ L = len (x )
284285
285286 # make derivative go to zero at ends (optional)
286287 if pad_to_zero_dxdt :
287288 padding = 100
288- pre = x [0 ]* np .ones (padding )
289+ pre = x [0 ]* np .ones (padding ) # extend the edges
289290 post = x [- 1 ]* np .ones (padding )
290291 x = np .hstack ((pre , x , post ))
291- x_hat , _ = smooth_finite_difference .meandiff (x , dt , window_size = int (padding / 2 ))
292- x_hat [padding :- padding ] = x [padding :- padding ]
292+ kernel = utility .mean_kernel (padding // 2 )
293+ x_hat = utility .convolutional_smoother (x , kernel ) # smooth the edges in
294+ x_hat [padding :- padding ] = x [padding :- padding ] # replace middle with original signal
293295 x = x_hat
294296 else :
295297 padding = 0
@@ -299,65 +301,24 @@ def spectraldiff(x, dt, params=None, options=None, high_freq_cutoff=None, even_e
299301 x = np .hstack ((x , x [::- 1 ]))
300302
301303 # If odd, make N even, and pad x
302- L = len (x )
303- if L % 2 != 0 :
304- N = L + 1
305- x = np .hstack ((x , x [- 1 ] + dt * (x [- 1 ]- x [- 1 ])))
306- else :
307- N = L
304+ N = len (x )
308305
309306 # Define the frequency range.
310- k = np .asarray (list (range (0 , int (N / 2 ))) + [0 ] + list (range (int (- N / 2 ) + 1 ,0 )))
311- k = k * 2 * np .pi / (dt * N )
307+ k = np .concatenate ((np .arange (N // 2 + 1 ), np .arange (- N // 2 + 1 , 0 )))
308+ if N % 2 == 0 : k [N // 2 ] = 0 # odd derivatives get the Nyquist element zeroed out
309+ omega = k * 2 * np .pi / (dt * N ) # turn wavenumbers into frequencies in radians/s
312310
313311 # Frequency based smoothing: remove signals with a frequency higher than high_freq_cutoff
314- discrete_high_freq_cutoff = int (high_freq_cutoff * N )
315- k [ discrete_high_freq_cutoff :N - discrete_high_freq_cutoff ] = 0
312+ discrete_cutoff = int (high_freq_cutoff * N )
313+ omega [ discrete_cutoff :N - discrete_cutoff ] = 0
316314
317315 # Derivative = 90 deg phase shift
318- dxdt_hat = np .real (np .fft .ifft (1.0j * k * np .fft .fft (x )))
319- dxdt_hat = dxdt_hat [padding :original_L + padding ]
316+ dxdt_hat = np .real (np .fft .ifft (1.0j * omega * np .fft .fft (x )))
317+ dxdt_hat = dxdt_hat [padding :L + padding ]
320318
321319 # Integrate to get x_hat
322320 x_hat = utility .integrate_dxdt_hat (dxdt_hat , dt )
323- x0 = utility .estimate_integration_constant (x [padding :original_L + padding ], x_hat )
321+ x0 = utility .estimate_integration_constant (x [padding :L + padding ], x_hat )
324322 x_hat = x_hat + x0
325323
326324 return x_hat , dxdt_hat
327-
328-
329- #############
330- # Chebychev #
331- #############
332- def chebydiff (x , dt , poly_order , window_size = None , step_size = 1 , kernel = 'friedrichs' ):
333- """Fit Chebyshev polynomials to the data, and differentiate those
334-
335- :param np.array[float] x: data to differentiate
336- :param float dt: step size
337- :param int poly_order: keep polynomials up to this order
338- :param int window_size: size of the sliding window, if not given no sliding
339-
340- :return: tuple[np.array, np.array] of\n
341- - **x_hat** -- estimated (smoothed) x
342- - **dxdt_hat** -- estimated derivative of x
343- """
344- if window_size % 2 == 0 :
345- window_size += 1
346- warn ("Kernel window size should be odd. Added 1 to length." )
347-
348- def _chebdiff (x , dt , poly_order , weights = None ):
349- t = np .arange (len (x ))* dt
350-
351- r = np .polynomial .chebyshev .chebfit (t , x , poly_order , w = weights ) # chebfit returns lowest order first
352- dr = np .polynomial .chebyshev .chebder (r ) # series derivative rule already implemented for us
353-
354- dxdt_hat = np .polynomial .chebyshev .chebval (t , dr ) # evaluate the derivative and original polynomials at points t
355- x_hat = np .polynomial .chebyshev .chebval (t , r ) # smoothed x
356-
357- return x_hat , dxdt_hat
358-
359- if not window_size :
360- return _chebdiff (x , dt , poly_order )
361-
362- kernel = {'gaussian' :utility .gaussian_kernel , 'friedrichs' :utility .friedrichs_kernel }[kernel ](window_size )
363- return utility .slide_function (_chebdiff , x , dt , kernel , poly_order , stride = step_size , pass_weights = True )
0 commit comments