|
2 | 2 | from pytest import mark |
3 | 3 | from warnings import warn |
4 | 4 |
|
5 | | -from ..finite_difference import first_order, second_order, fourth_order |
| 5 | +from ..finite_difference import finitediff, first_order, second_order, fourth_order |
6 | 6 | from ..linear_model import lineardiff |
7 | 7 | from ..basis_fit import spectraldiff, rbfdiff |
8 | 8 | from ..polynomial_fit import polydiff, savgoldiff, splinediff |
@@ -235,25 +235,16 @@ def spline_irreg_step(*args, **kwargs): return splinediff(*args, **kwargs) |
235 | 235 | @mark.parametrize("diff_method_and_params", diff_methods_and_params) # things like splinediff, with their parameters |
236 | 236 | @mark.parametrize("test_func_and_deriv", test_funcs_and_derivs) # analytic functions, with their true derivatives |
237 | 237 | def test_diff_method(diff_method_and_params, test_func_and_deriv, request): # request gives access to context |
| 238 | + """Ensure differentiation methods find accurate derivatives""" |
238 | 239 | # unpack |
239 | 240 | diff_method, params = diff_method_and_params[:2] |
240 | 241 | if len(diff_method_and_params) == 3: options = diff_method_and_params[2] # optionally pass old-style `options` dict |
241 | 242 | i, latex_name, f, df = test_func_and_deriv |
242 | 243 |
|
243 | | - # some methods rely on cvxpy, and we'd like to allow use of pynumdiff without convex optimization |
244 | | - if diff_method in [lineardiff, velocity, acceleration, jerk, smooth_acceleration, robustdiff]: |
245 | | - try: import cvxpy |
246 | | - except: warn(f"Cannot import cvxpy, skipping {diff_method} test."); return |
247 | | - |
248 | 244 | # sample the true function and true derivative, and make noisy samples |
249 | | - if diff_method in [spline_irreg_step, rbfdiff, rtsdiff]: # list that can handle variable dt |
250 | | - x = f(t_irreg) |
251 | | - dxdt = df(t_irreg) |
252 | | - _t = t_irreg |
253 | | - else: |
254 | | - x = f(t) |
255 | | - dxdt = df(t) |
256 | | - _t = dt |
| 245 | + x = f(t) if diff_method not in [spline_irreg_step, rbfdiff, rtsdiff] else f(t_irreg) |
| 246 | + dxdt = df(t) if diff_method not in [spline_irreg_step, rbfdiff, rtsdiff] else df(t_irreg) |
| 247 | + _t = dt if diff_method not in [spline_irreg_step, rbfdiff, rtsdiff] else t_irreg |
257 | 248 | x_noisy = x + noise |
258 | 249 |
|
259 | 250 | # differentiate without and with noise, accounting for new and old styles of calling functions |
@@ -305,3 +296,69 @@ def test_diff_method(diff_method_and_params, test_func_and_deriv, request): # re |
305 | 296 | # methods that get super duper close can converge to different very small limits on different runs |
306 | 297 | if 1e-18 < l2_error < 10**(log_l2_bound - 1) or 1e-18 < linf_error < 10**(log_linf_bound - 1): |
307 | 298 | print(f"Improvement detected for method {diff_method.__name__}") |
| 299 | + |
| 300 | + |
# 2D test fixture: a 101 x 101 uniform grid over [-1, 1] x [-1, 1]
_grid_1d = np.linspace(-1, 1, 101)
T1, T2 = np.meshgrid(_grid_1d, _grid_1d)
dt2 = 0.02  # distance between samples in the 2D T grids
x = T1**2 * np.sin(3/2 * np.pi * T2)  # sampled 2D function to differentiate

# When one day all or most methods support multidimensionality, and the legacy way of calling methods is
# gone, diff_methods_and_params can be used for the multidimensionality test as well
multidim_methods_and_params = [(finitediff, {})]

# Similar to the error_bounds table, index by method first. But then we test against only one 2D function,
# and only in the absence of noise, since the other test covers that. Instead, because multidimensional
# derivatives can be combined in interesting fashions, we find d^2 / dt_1 dt_2 and the Laplacian,
# d^2/dt_1^2 + d^2/dt_2^2. Tuples are again (L2,Linf) distances.
multidim_error_bounds = {
    finitediff: [(0, -1), (1, -1)],
}
| 316 | + |
@mark.parametrize("multidim_method_and_params", multidim_methods_and_params)
def test_multidimensionality(multidim_method_and_params, request):
    """Ensure methods with an axis parameter can successfully differentiate in independent directions.

    For each (method, params) pair, differentiate the sampled 2D function ``x`` along each grid axis
    and compare two combined quantities against their analytic forms: the mixed partial
    d^2/dt_1 dt_2 and the Laplacian d^2/dt_1^2 + d^2/dt_2^2. With ``--bounds`` the observed error
    magnitudes are printed instead of asserted; with ``--plot`` surfaces are drawn for inspection.
    """
    diff_method, params = multidim_method_and_params

    # d^2 / dt_1 dt_2: differentiate along one axis, then differentiate the result along the other
    analytic_d2 = 3 * T1 * np.pi * np.cos(3/2 * np.pi * T2)
    dxdt1 = diff_method(x, dt2, **params, axis=0)[1]
    computed_d2 = diff_method(dxdt1, dt2, **params, axis=1)[1]
    l2_error_d2 = np.linalg.norm(analytic_d2 - computed_d2)  # Frobenius norm (2 norm of vectorized array)
    linf_error_d2 = np.max(np.abs(analytic_d2 - computed_d2))

    # Laplacian: sum of the two second derivatives, each taken by differentiating twice along one axis
    analytic_laplacian = 2 * np.sin(3/2 * np.pi * T2) - 9/4 * np.pi**2 * T1**2 * np.sin(3/2 * np.pi * T2)
    dxdt2 = diff_method(x, dt2, **params, axis=1)[1]
    computed_laplacian = diff_method(dxdt1, dt2, **params, axis=0)[1] + diff_method(dxdt2, dt2, **params, axis=1)[1]
    l2_error_lap = np.linalg.norm(analytic_laplacian - computed_laplacian)
    linf_error_lap = np.max(np.abs(analytic_laplacian - computed_laplacian))

    if request.config.getoption("--bounds"):  # report observed order-of-magnitude bounds instead of asserting
        print([(int(np.ceil(np.log10(l2_error_d2))), int(np.ceil(np.log10(linf_error_d2)))), (int(np.ceil(np.log10(l2_error_lap))), int(np.ceil(np.log10(linf_error_lap))))])
    else:
        (log_l2_bound_d2, log_linf_bound_d2), (log_l2_bound_lap, log_linf_bound_lap) = multidim_error_bounds[diff_method]
        assert l2_error_d2 < 10**log_l2_bound_d2
        assert linf_error_d2 < 10**log_linf_bound_d2
        # Fix: the Laplacian bounds were unpacked but never checked, so half the test was a no-op
        assert l2_error_lap < 10**log_l2_bound_lap
        assert linf_error_lap < 10**log_linf_bound_lap

    if request.config.getoption("--plot"):
        from matplotlib import pyplot
        fig = pyplot.figure(figsize=(12, 5), constrained_layout=True)
        ax1 = fig.add_subplot(1, 3, 1, projection='3d')
        ax1.plot_surface(T1, T2, x, cmap='viridis', alpha=0.5)
        ax1.set_title(r'original function, $x$')
        ax1.set_xlabel(r'$t_1$')
        ax1.set_ylabel(r'$t_2$')
        ax2 = fig.add_subplot(1, 3, 2, projection='3d')
        ax2.plot_surface(T1, T2, analytic_d2, cmap='viridis', alpha=0.5)
        ax2.set_title(r'$\frac{\partial^2 x}{\partial t_1 \partial t_2}$')
        ax2.set_xlabel(r'$t_1$')
        ax2.set_ylabel(r'$t_2$')
        ax3 = fig.add_subplot(1, 3, 3, projection='3d')
        surf = ax3.plot_surface(T1, T2, analytic_laplacian, cmap='viridis', alpha=0.5, label='analytic')
        ax3.set_title(r'$\frac{\partial^2}{\partial t_1^2} + \frac{\partial^2}{\partial t_2^2}$')
        ax3.set_xlabel(r'$t_1$')
        ax3.set_ylabel(r'$t_2$')

        # wireframes of the computed quantities overlaid on the analytic surfaces
        ax2.plot_wireframe(T1, T2, computed_d2)
        ax3.plot_wireframe(T1, T2, computed_laplacian, label='computed')
        legend = ax3.legend(bbox_to_anchor=(0.7, 0.8)); legend.legend_handles[0].set_facecolor(pyplot.cm.viridis(0.6))
0 commit comments