Clean the Trapping variable names #553

Merged
merged 15 commits on Sep 18, 2024
758 changes: 237 additions & 521 deletions examples/8_trapping_sindy_examples/example.ipynb

Large diffs are not rendered by default.

43 changes: 16 additions & 27 deletions examples/8_trapping_sindy_examples/example.py
@@ -43,7 +43,7 @@
sindy_library_no_bias,
make_fits,
make_lissajou,
-check_stability,
+check_local_stability,
trapping_region,
make_progress_plots,
galerkin_model,
@@ -133,7 +133,7 @@
print("Frobenius Error = ", E_pred)
mean_val = np.mean(x_test_pred, axis=0)
mean_val = np.sqrt(np.sum(mean_val**2))
-check_stability(r, Xi, sindy_opt, mean_val)
+check_local_stability(Xi, sindy_opt, mean_val)

# compute relative Frobenius error in the model coefficients
terms = sindy_library.get_feature_names()
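
A note on the new signature: check_local_stability drops the explicit r argument, since the model size is recoverable from the optimizer state itself. A minimal sketch of that idea (names and internals here are assumptions; the real helper lives in this example's trapping_utils module):

    import numpy as np

    def check_local_stability_sketch(Xi, sindy_opt, mean_val):
        # The symmetrized linear part A^S is r x r, so r no longer
        # needs to be passed in explicitly. (In the real helper,
        # mean_val enters the trapping-radius estimate.)
        A_S = sindy_opt.A_history_[-1]
        r = A_S.shape[0]
        max_eig = np.max(np.real(np.linalg.eigvals(A_S)))
        print("r =", r, "| largest eigenvalue of A^S =", max_eig)
        return bool(max_eig < 0)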
@@ -154,15 +154,6 @@
) / np.dot(xdot_test[i, :], xdot_test[i, :])
print("Time-averaged derivative error = ", np.nanmean(deriv_error))

-# %% [markdown]
-# Awesome! The trapping algorithm gets exactly the right model and produces a negative definite matrix,
-# $$\mathbf{A}^S = \begin{bmatrix}
-# -1.32 & 0 & 0 \\
-# 0 & -1.31 & 0 \\
-# 0 & 0 & -1
-# \end{bmatrix},$$
-# i.e. it identifies $\epsilon \approx 1.3$ from above. Note that with different algorithm hyperparameters it will produce different $\epsilon$, since the algorithm only cares that the matrix is negative definite (i.e. only cares about the largest eigenvalue), not the precise value of $\epsilon$. Moreover, these eigenvalues can change as the algorithm converges further.
-
# %% [markdown]
#
#
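
The deleted markdown cell above still makes a point worth keeping: only the sign of the largest eigenvalue of A^S matters, not the exact epsilon. A self-contained check using the matrix quoted in that cell:

    import numpy as np

    A_S = np.diag([-1.32, -1.31, -1.0])      # matrix from the deleted cell
    max_eig = np.linalg.eigvalsh(A_S).max()  # symmetric, so eigvalsh
    print("largest eigenvalue:", max_eig)    # -1.0
    print("negative definite:", max_eig < 0) # True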
@@ -235,13 +226,14 @@
# define hyperparameters
eta = 1.0e8

-# run trapping SINDy, reusing previous threshold, max_iter and constraints
+# run trapping SINDy, reusing previous reg_weight_lam, max_iter and constraints
sindy_opt = ps.TrappingSR3(
_n_tgts=3,
_include_bias=True,
reg_weight_lam=reg_weight_lam,
eta=eta,
max_iter=max_iter,
+verbose=True,
)
model = ps.SINDy(
optimizer=sindy_opt,
Expand Down Expand Up @@ -270,7 +262,7 @@
print("Frobenius error = ", E_pred)
mean_val = np.mean(x_test_pred, axis=0)
mean_val = np.sqrt(np.sum(mean_val**2))
-check_stability(r, Xi, sindy_opt, mean_val)
+check_local_stability(Xi, sindy_opt, mean_val)

# compute relative Frobenius error in the model coefficients
terms = sindy_library.get_feature_names()
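
This hunk shows the central rename of the PR in use: reg_weight_lam replaces the old threshold keyword of TrappingSR3, and a verbose flag is added. A construction-only sketch with the new names (values are illustrative; the surrounding data pipeline is omitted):

    import pysindy as ps

    sindy_opt = ps.TrappingSR3(
        _n_tgts=3,           # number of target states
        _include_bias=True,  # library carries a constant term
        reg_weight_lam=0.0,  # regularization weight, formerly `threshold`
        eta=1.0e8,           # trapping relaxation parameter
        max_iter=1000,
        verbose=True,
    )
    model = ps.SINDy(optimizer=sindy_opt)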
@@ -416,7 +408,7 @@

mean_val = np.mean(x_test_pred, axis=0)
mean_val = np.sqrt(np.sum(mean_val**2))
-check_stability(r, Xi, sindy_opt, mean_val)
+check_local_stability(Xi, sindy_opt, mean_val)
E_pred = np.linalg.norm(x_test - x_test_pred) / np.linalg.norm(x_test)
print("Frobenius error = ", E_pred)

@@ -501,9 +493,9 @@
nu = 0.0 # viscosity
mu = 0.0 # resistivity

-# define training and testing data
+# define training and testing data (low resolution)
dt = 0.02
-T = 50
+T = 40
t = np.arange(0, T + dt, dt)
t_span = (t[0], t[-1])
x0 = rng.random((6,)) - 0.5
@@ -515,7 +507,7 @@
reg_weight_lam = 0.0
max_iter = 1000
eta = 1.0e10
-alpha_m = 5.0e-1 * eta
+alpha_m = 9.0e-1 * eta


sindy_opt = ps.TrappingSR3(
@@ -551,7 +543,7 @@
make_lissajou(r, x_train, x_test, x_train_pred, x_test_pred, "mhd")
mean_val = np.mean(x_test_pred, axis=0)
mean_val = np.sqrt(np.sum(mean_val**2))
-check_stability(r, Xi, sindy_opt, mean_val)
+check_local_stability(Xi, sindy_opt, mean_val)
E_pred = np.linalg.norm(x_test - x_test_pred) / np.linalg.norm(x_test)
print(E_pred)

@@ -639,7 +631,6 @@
L, Q = burgers_galerkin(sigma, nu, U)
rhs = lambda t, a: galerkin_model(a, L, Q) # noqa: E731

-
# Generate initial condition from unstable eigenvectors
lamb, Phi = np.linalg.eig(L)
idx = np.argsort(-np.real(lamb))
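
Spelled out, the initial-condition step above sorts the spectrum of the linear operator by growth rate and seeds the trajectory along the most unstable direction. A self-contained sketch (L here is a random stand-in for the Galerkin linear term):

    import numpy as np

    rng = np.random.default_rng(0)
    L = rng.standard_normal((5, 5))       # stand-in linear operator
    lamb, Phi = np.linalg.eig(L)
    idx = np.argsort(-np.real(lamb))      # most unstable mode first
    a0 = 1e-3 * np.real(Phi[:, idx[0]])   # small kick along that eigenvector
    print("growth rates:", np.real(lamb[idx]))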
@@ -742,7 +733,6 @@
galerkin5["Q"] = galerkin9["Q"][inds5]
model5 = lambda t, a: galerkin_model(a, galerkin5["L"], galerkin5["Q"]) # noqa: E731

-
# make the 3D, 5D, and 9D POD-Galerkin trajectories
t_span = (t[0], t[-1])
a_galerkin5 = solve_ivp(model5, t_span, a0[:5], t_eval=t, **integrator_keywords).y.T
@@ -790,14 +780,13 @@
x_test = a

# define hyperparameters
-max_iter = 5000
-eta = 1.0e2
+max_iter = 10000
+eta = 1.0

-# don't need a threshold if eta is sufficiently small
+# don't need a reg_weight_lam if eta is sufficiently small
# which is good news because CVXPY is much slower
reg_weight_lam = 0
-alpha_m = 9e-1 * eta
-
+alpha_m = 1e-1 * eta
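
The comment in this hunk records a practical trade-off: leaving reg_weight_lam at zero sidesteps the much slower CVXPY-based solve, which matters at max_iter = 10000. A sketch of the resulting configuration (that a nonzero weight routes through CVXPY is taken from the comment above, and the values are illustrative):

    import pysindy as ps

    fast_opt = ps.TrappingSR3(
        _n_tgts=5,
        _include_bias=False,
        reg_weight_lam=0,   # zero weight avoids the CVXPY solve path
        eta=1.0,
        alpha_m=1e-1 * 1.0, # alpha_m kept a fixed fraction of eta
        max_iter=10000,
    )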

# run trapping SINDy
sindy_opt = ps.TrappingSR3(
@@ -824,7 +813,7 @@
Q = np.tensordot(PQ_tensor, Xi, axes=([4, 3], [0, 1]))
Q_sum = np.max(np.abs((Q + np.transpose(Q, [1, 2, 0]) + np.transpose(Q, [2, 0, 1]))))
print("Max deviation from the constraints = ", Q_sum)
-if check_stability(r, Xi, sindy_opt, 1):
+if check_local_stability(Xi, sindy_opt, 1):
x_train_pred = model.simulate(x_train[0, :], t, integrator_kws=integrator_keywords)
x_test_pred = model.simulate(a0, t, integrator_kws=integrator_keywords)
make_progress_plots(r, sindy_opt)
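
The Q_sum printout above measures how far the identified quadratic coefficients are from the energy-preservation constraint Q_ijk + Q_jki + Q_kij = 0. A self-contained sketch of a tensor built to satisfy it exactly, using the same transpose pattern:

    import numpy as np

    rng = np.random.default_rng(2)
    Q0 = rng.standard_normal((4, 4, 4))
    # remove the cyclic-symmetric part so the constraint holds exactly
    Q = Q0 - (Q0 + np.transpose(Q0, [1, 2, 0]) + np.transpose(Q0, [2, 0, 1])) / 3
    Q_sum = np.max(np.abs(Q + np.transpose(Q, [1, 2, 0]) + np.transpose(Q, [2, 0, 1])))
    print("Max deviation from the constraints = ", Q_sum)  # ~1e-16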
@@ -834,7 +823,7 @@
make_lissajou(r, x_train, x_test, x_train_pred, x_test_pred, "VonKarman")
mean_val = np.mean(x_test_pred, axis=0)
mean_val = np.sqrt(np.sum(mean_val**2))
-check_stability(r, Xi, sindy_opt, mean_val)
+check_local_stability(Xi, sindy_opt, mean_val)
A_guess = sindy_opt.A_history_[-1]
m_guess = sindy_opt.m_history_[-1]
E_pred = np.linalg.norm(x_test - x_test_pred) / np.linalg.norm(x_test)
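
E_pred here is the same relative Frobenius error used throughout the example. A tiny self-contained version with dummy trajectories:

    import numpy as np

    rng = np.random.default_rng(1)
    x_test = rng.standard_normal((100, 3))
    x_test_pred = x_test + 0.01 * rng.standard_normal((100, 3))
    E_pred = np.linalg.norm(x_test - x_test_pred) / np.linalg.norm(x_test)
    print("Frobenius error = ", E_pred)  # about 0.01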