|
| 1 | +import numpy as np |
| 2 | +from pytest import mark |
| 3 | +from warnings import warn |
| 4 | + |
| 5 | +from ..linear_model import lineardiff, polydiff, savgoldiff, spectraldiff |
| 6 | +from ..total_variation_regularization import velocity, acceleration, jerk, iterative_velocity |
| 7 | +from ..kalman_smooth import * # constant_velocity, constant_acceleration, constant_jerk, known_dynamics |
| 8 | +from ..smooth_finite_difference import * # mediandiff, meandiff, gaussiandiff, friedrichsdiff, butterdiff, splinediff |
| 9 | +from ..finite_difference import first_order, second_order |
| 10 | + |
| 11 | + |
# Shared fixtures: sampling grid and a fixed noise realization used by every test below.
dt = 0.01  # sample spacing of the time grid
t = np.arange(0, 3, dt)  # domain to test over
np.random.seed(7)  # for repeatability of the test, so we don't get random failures
noise = 0.05*np.random.randn(*t.shape)  # additive Gaussian noise, sigma = 0.05, same draw every run
| 16 | + |
# (method, parameter-dict) pairs to parametrize the tests with. Each params dict is splatted
# into the method as keyword arguments by test_diff_method.
diff_methods_and_params = [
    #(lineardiff, {'polynomial_order':3, 'gamma':5, 'window_size':10, 'solver':'CVXOPT'}),
    (polydiff, {'polynomial_order':2, 'window_size':3}),
    (savgoldiff, {'polynomial_order':2, 'window_size':4, 'smoothing_win':4}),
    (spectraldiff, {'high_freq_cutoff':0.1})
    ]
| 23 | + |
# Analytic (index, function, derivative) triples on which to test differentiation methods.
# The leading index selects the matching row of the error_bounds table.
test_funcs_and_derivs = [
    (0, lambda t: np.ones_like(t),      lambda t: np.zeros_like(t)),     # x(t) = 1
    (1, lambda t: t,                    lambda t: np.ones_like(t)),      # x(t) = t
    (2, lambda t: 2*t + 1,              lambda t: np.full_like(t, 2.0)), # x(t) = 2t + 1
    (3, lambda t: t**2 - t + 1,         lambda t: 2*t - 1),              # x(t) = t^2 - t + 1
    (4, lambda t: np.sin(t) + 0.5,      lambda t: np.cos(t)),            # x(t) = sin(t) + 1/2
]
| 31 | + |
# All the testing methodology follows the exact same pattern; the only thing that changes is the
# closeness to the right answer various methods achieve with the given parameterizations. So index a
# big ol' table by the method, then the test function, then the pair of quantities we're comparing.
# Layout: error_bounds[method][i][j] == (l2_bound, linf_bound), where i is the test-function index
# from test_funcs_and_derivs and j indexes the four comparisons made in test_diff_method:
# (x, x_hat), (dxdt, dxdt_hat), (x, x_hat_noisy), (dxdt, dxdt_hat_noisy).
error_bounds = {
    # NOTE(review): [[...]*4]*len(...) aliases one inner list across all rows; harmless here
    # because the table is only ever read. (lineardiff is currently disabled in
    # diff_methods_and_params, so these bounds are not exercised.)
    lineardiff: [[(1e-25, 1e-25)]*4]*len(test_funcs_and_derivs),
    polydiff: [[(1e-15, 1e-15), (1e-12, 1e-13), (1, 0.1), (100, 100)],
               [(1e-14, 1e-14), (1e-12, 1e-13), (1, 0.1), (100, 100)],
               [(1e-13, 1e-14), (1e-11, 1e-12), (1, 0.1), (100, 100)],
               [(1e-13, 1e-14), (1e-12, 1e-12), (1, 0.1), (100, 100)],
               [(1e-6, 1e-7), (0.001, 0.0001), (1, 0.1), (100, 100)]],
    savgoldiff: [[(1e-7, 1e-8), (1e-12, 1e-13), (1, 0.1), (100, 10)],
                 [(1e-5, 1e-7), (1e-12, 1e-13), (1, 0.1), (100, 10)],
                 [(1e-7, 1e-8), (1e-11, 1e-12), (1, 0.1), (100, 10)],
                 [(0.1, 0.01), (0.1, 0.01), (1, 0.1), (100, 10)],
                 [(0.01, 1e-3), (0.01, 1e-3), (1, 0.1), (100, 10)]],
    spectraldiff: [[(1e-7, 1e-8), ( 1e-25 , 1e-25), (1, 0.1), (100, 10)],
                   [(0.1, 0.1), (10, 10), (1, 0.1), (100, 10)],
                   [(0.1, 0.1), (10, 10), (1, 0.1), (100, 10)],
                   [(1, 1), (100, 10), (1, 1), (100, 10)],
                   [(0.1, 0.1), (10, 10), (1, 0.1), (100, 10)]]
}
| 53 | + |
| 54 | + |
@mark.parametrize("diff_method_and_params", diff_methods_and_params)
@mark.parametrize("test_func_and_deriv", test_funcs_and_derivs)
def test_diff_method(diff_method_and_params, test_func_and_deriv):
    """Differentiate one analytic test function, with and without noise, and check that both the
    reconstructed signal and the estimated derivative fall within the per-method tolerances
    recorded in ``error_bounds``.

    :param diff_method_and_params: (callable, params) pair; params is usually a kwargs dict
    :param test_func_and_deriv: (index, x(t), dx/dt(t)) triple of analytic functions
    """
    diff_method, params = diff_method_and_params  # unpack
    i, f, df = test_func_and_deriv

    # some methods rely on cvxpy, and we'd like to allow use of pynumdiff without convex optimization
    if diff_method in [lineardiff, velocity]:
        try:
            import cvxpy  # noqa: F401 -- availability probe only
        except ImportError:  # was a bare except:, which also swallowed KeyboardInterrupt etc.
            warn(f"Cannot import cvxpy, skipping {diff_method} test.")
            return

    x = f(t)  # sample the function
    x_noisy = x + noise  # add a little noise
    dxdt = df(t)  # true values of the derivative

    # differentiate without and with noise (some methods take a kwargs dict, others a params list)
    x_hat, dxdt_hat = diff_method(x, dt, **params) if isinstance(params, dict) else diff_method(x, dt, params)
    x_hat_noisy, dxdt_hat_noisy = diff_method(x_noisy, dt, **params) if isinstance(params, dict) else diff_method(x_noisy, dt, params)

    # check x_hat and x_hat_noisy are close to x, and dxdt_hat and dxdt_hat_noisy close to dxdt.
    # (To regenerate the bounds table, print 10**ceil(log10(error)) for each pair in this loop.)
    for j, (a, b) in enumerate([(x, x_hat), (dxdt, dxdt_hat), (x, x_hat_noisy), (dxdt, dxdt_hat_noisy)]):
        l2_error = np.linalg.norm(a - b)
        linf_error = np.max(np.abs(a - b))

        l2_bound, linf_bound = error_bounds[diff_method][i][j]
        # reuse the errors computed above instead of recomputing them inside the asserts,
        # and report which method/function/comparison failed
        assert l2_error < l2_bound, \
            f"{diff_method.__name__}: L2 error {l2_error} >= {l2_bound} (func {i}, comparison {j})"
        assert linf_error < linf_bound, \
            f"{diff_method.__name__}: Linf error {linf_error} >= {linf_bound} (func {i}, comparison {j})"