diff --git a/examples/example_burgers_inverse_efficient_kan.py b/examples/example_burgers_inverse_efficient_kan.py new file mode 100644 index 00000000..e4095ee9 --- /dev/null +++ b/examples/example_burgers_inverse_efficient_kan.py @@ -0,0 +1,109 @@ +import torch +import numpy as np +import scipy +import os +import sys + +os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE' +sys.path.append(os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..'))) + +from tedeous.data import Domain, Conditions, Equation +from tedeous.model import Model +from tedeous.callbacks import early_stopping, plot, inverse_task +from tedeous.optimizers.optimizer import Optimizer +from tedeous.device import solver_device +from tedeous.models import parameter_registr + +import efficient_kan + +solver_device('cuda') + +domain = Domain() + +domain.variable('x', [-1, 1], 60, dtype='float32') +domain.variable('t', [0, 1], 60, dtype='float32') + +boundaries = Conditions() + +data = scipy.io.loadmat(os.path.abspath(os.path.join(os.path.dirname( __file__ ), 'wolfram_sln/Burgers.mat'))) + +x = torch.tensor(data['x']).reshape(-1) +t = torch.tensor(data['t']).reshape(-1) + +usol = data['usol'] + +bnd1 = torch.cartesian_prod(x, t).float() +bndval1 = torch.tensor(usol).reshape(-1, 1) + +id_f = np.random.choice(len(bnd1), 2000, replace=False) + +bnd1 = bnd1[id_f] +bndval1 = bndval1[id_f] + +boundaries.data(bnd=bnd1, operator=None, value=bndval1) + +net = efficient_kan.KAN( + [2, 100, 1], + grid_size=5, + spline_order=3, + scale_noise=0.1, + scale_base=1.0, + scale_spline=1.0, + base_activation=torch.nn.Tanh, + grid_eps=0.02, + grid_range=[-1, 1] +) + +parameters = {'lam1': 2., 'lam2': 0.2} # true parameters: lam1 = 1, lam2 = -0.01*pi + +parameter_registr(net, parameters) + +equation = Equation() + +burgers_eq = { + 'du/dt**1': + { + 'coeff': 1., + 'du/dt': [1], + 'pow': 1, + 'var': 0 + }, + '+u*du/dx': + { + 'coeff': net.lam1, + 'u*du/dx': [[None], [0]], + 'pow': [1, 1], + 'var': [0, 0] + }, + '-mu*d2u/dx2': + { + 'coeff': net.lam2, + 'd2u/dx2': [0, 0], + 'pow': 1, + 'var': 0 + } +} + +equation.add(burgers_eq) + +model = Model(net, domain, equation, boundaries) + +model.compile('autograd', lambda_operator=1, lambda_bound=100) + +img_dir = os.path.join(os.path.dirname( __file__ ), 'burgers_eq_img_efficient_kan') + +cb_es = early_stopping.EarlyStopping(eps=1e-7, + loss_window=100, + no_improvement_patience=1000, + patience=3, + abs_loss=1e-5, + randomize_parameter=1e-5, + info_string_every=10) + +cb_plots = plot.Plots(save_every=500, print_every=500, img_dir=img_dir) + +cb_params = inverse_task.InverseTask(parameters=parameters, info_string_every=10) + +optimizer = Optimizer('Adam', {'lr': 1e-4}) + +model.train(optimizer, 25e3, save_model=False, callbacks=[cb_es, cb_plots, cb_params]) \ No newline at end of file diff --git a/examples/example_burgers_inverse_kan.py b/examples/example_burgers_inverse_kan.py new file mode 100644 index 00000000..c9f22bfc --- /dev/null +++ b/examples/example_burgers_inverse_kan.py @@ -0,0 +1,102 @@ +import torch +import numpy as np +import scipy +import os +import sys + +os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE' +sys.path.append(os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..'))) + +from tedeous.data import Domain, Conditions, Equation +from tedeous.model import Model +from tedeous.callbacks import early_stopping, plot, inverse_task +from tedeous.optimizers.optimizer import Optimizer +from tedeous.device import solver_device +from tedeous.models import parameter_registr + +import kan + 
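+# Note (added comment): `kan` here is the pykan package (github.com/KindXiaoming/pykan)
+# installed via requirements-kan.txt; the companion examples import `efficient_kan`
+# (github.com/Blealtan/efficient-kan) and `fastkan` (github.com/ZiyaoLi/fast-kan)
+# from the same requirements file.
+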
+solver_device('cuda') + +domain = Domain() + +domain.variable('x', [-1, 1], 60, dtype='float32') +domain.variable('t', [0, 1], 60, dtype='float32') + +boundaries = Conditions() + +data = scipy.io.loadmat(os.path.abspath(os.path.join(os.path.dirname( __file__ ), 'wolfram_sln/Burgers.mat'))) + +x = torch.tensor(data['x']).reshape(-1) +t = torch.tensor(data['t']).reshape(-1) + +usol = data['usol'] + +bnd1 = torch.cartesian_prod(x, t).float() +bndval1 = torch.tensor(usol).reshape(-1, 1) + +id_f = np.random.choice(len(bnd1), 2000, replace=False) + +bnd1 = bnd1[id_f] +bndval1 = bndval1[id_f] + +boundaries.data(bnd=bnd1, operator=None, value=bndval1) + +net = kan.KAN( + width=[2, 100, 1], + base_fun='silu' +) + +parameters = {'lam1': 2., 'lam2': 0.2} # true parameters: lam1 = 1, lam2 = -0.01*pi + +parameter_registr(net, parameters) + +equation = Equation() + +burgers_eq = { + 'du/dt**1': + { + 'coeff': 1., + 'du/dt': [1], + 'pow': 1, + 'var': 0 + }, + '+u*du/dx': + { + 'coeff': net.lam1, + 'u*du/dx': [[None], [0]], + 'pow': [1, 1], + 'var': [0, 0] + }, + '-mu*d2u/dx2': + { + 'coeff': net.lam2, + 'd2u/dx2': [0, 0], + 'pow': 1, + 'var': 0 + } +} + +equation.add(burgers_eq) + +model = Model(net, domain, equation, boundaries) + +model.compile('autograd', lambda_operator=1, lambda_bound=100) + +img_dir = os.path.join(os.path.dirname( __file__ ), 'burgers_eq_img_kan') + +cb_es = early_stopping.EarlyStopping(eps=1e-7, + loss_window=100, + no_improvement_patience=1000, + patience=3, + abs_loss=1e-5, + randomize_parameter=1e-5, + info_string_every=10) + +cb_plots = plot.Plots(save_every=500, print_every=500, img_dir=img_dir) + +cb_params = inverse_task.InverseTask(parameters=parameters, info_string_every=10) + +optimizer = Optimizer('Adam', {'lr': 1e-4}) + +model.train(optimizer, 25e3, save_model=False, callbacks=[cb_es, cb_plots, cb_params]) \ No newline at end of file diff --git a/examples/example_wave_physics_efficient_kan.py b/examples/example_wave_physics_efficient_kan.py new file mode 100644 index 00000000..019517f6 --- /dev/null +++ b/examples/example_wave_physics_efficient_kan.py @@ -0,0 +1,207 @@ +# -*- coding: utf-8 -*- +""" +Created on Mon May 31 12:33:44 2021 + +@author: user +""" +import torch +import numpy as np +import os +import sys +import time + +os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE' +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) + +from tedeous.data import Domain, Conditions, Equation +from tedeous.model import Model + +from tedeous.callbacks import early_stopping, plot, cache +from tedeous.optimizers.optimizer import Optimizer +from tedeous.device import solver_device + +import efficient_kan + + +""" +Preparing grid + +Grid is an essentially torch.Tensor of a n-D points where n is the problem +dimensionality +""" + +solver_device('gpu') + + +def func(grid): + x, t = grid[:, 0], grid[:, 1] + sln = torch.cos(2 * np.pi * t) * torch.sin(np.pi * x) + return sln + + +def wave_experiment(grid_res): + exp_dict_list = [] + + domain = Domain() + domain.variable('x', [0, 1], grid_res) + domain.variable('t', [0, 1], grid_res) + + """ + Preparing boundary conditions (BC) + + For every boundary we define three items + + bnd=torch.Tensor of a boundary n-D points where n is the problem + dimensionality + + bop=dict in form {'term1':term1,'term2':term2} -> term1+term2+...=0 + + NB! 
dictionary keys at the current time serve only for user-frienly + description/comments and are not used in model directly thus order of + items must be preserved as (coeff,op,pow) + + term is a dict term={coefficient:c1,[sterm1,sterm2],'pow': power} + + Meaning c1*u*d2u/dx2 has the form + + {'coefficient':c1, + 'u*d2u/dx2': [[None],[0,0]], + 'pow':[1,1]} + + None is for function without derivatives + + + bval=torch.Tensor prescribed values at every point in the boundary + """ + + boundaries = Conditions() + + # Initial conditions at t=0 + boundaries.dirichlet({'x': [0, 1], 't': 0}, value=func) + + # Initial conditions at t=1 + # u(1,x)=sin(pi*x) + bop2 = { + 'du/dt': + { + 'coeff': 1, + 'du/dx': [1], + 'pow': 1, + 'var': 0 + } + } + boundaries.operator({'x': [0, 1], 't': 0}, operator=bop2, value=0) + + # Boundary conditions at x=0 + boundaries.dirichlet({'x': 0, 't': [0, 1]}, value=func) + + # Boundary conditions at x=1 + boundaries.dirichlet({'x': 1, 't': [0, 1]}, value=func) + + + """ + Defining wave equation + + Operator has the form + + op=dict in form {'term1':term1,'term2':term2}-> term1+term2+...=0 + + NB! dictionary keys at the current time serve only for user-friendly + description/comments and are not used in model directly thus order of + items must be preserved as (coeff,op,pow) + + term is a dict term={coefficient:c1,[sterm1,sterm2],'pow': power} + + c1 may be integer, function of grid or tensor of dimension of grid + + Meaning c1*u*d2u/dx2 has the form + + {'coefficient':c1, + 'u*d2u/dx2': [[None],[0,0]], + 'pow':[1,1]} + + None is for function without derivatives + + """ + + equation = Equation() + + # operator is 4*d2u/dx2-1*d2u/dt2=0 + wave_eq = { + 'd2u/dt2**1': + { + 'coeff': 1, + 'd2u/dt2': [1, 1], + 'pow': 1 + }, + '-C*d2u/dx2**1': + { + 'coeff': -4., + 'd2u/dx2': [0, 0], + 'pow': 1 + } + } + + equation.add(wave_eq) + + net = efficient_kan.KAN( + [2, 100, 100, 100, 1], + grid_size=20, + spline_order=3, + scale_noise=0.1, + scale_base=1.0, + scale_spline=1.0, + base_activation=torch.nn.Tanh, + grid_eps=0.02, + grid_range=[-2, 2] + ) + + start = time.time() + + model = Model(net, domain, equation, boundaries) + + model.compile("autograd", lambda_operator=1, lambda_bound=100) + + cb_es = early_stopping.EarlyStopping(eps=1e-5, randomize_parameter=1e-6, info_string_every=10) + + cb_cache = cache.Cache(cache_verbose=True, model_randomize_parameter=1e-6) + + img_dir = os.path.join(os.path.dirname( __file__ ), 'wave_img_efficient_kan') + + cb_plots = plot.Plots(save_every=500, print_every=500, img_dir=img_dir) + + optimizer = Optimizer('Adam', {'lr': 1e-4}) + + model.train(optimizer, 5e6, save_model=True, callbacks=[cb_es, cb_plots, cb_cache]) + + end = time.time() + + grid = domain.build('NN').to('cuda') + net = net.to('cuda') + + error_rmse = torch.sqrt(torch.mean((func(grid).reshape(-1, 1) - net(grid)) ** 2)) + + exp_dict_list.append({'grid_res': grid_res, 'time': end - start, 'RMSE': error_rmse.detach().cpu().numpy(), + 'type': 'wave_eqn_physical', 'cache': True}) + + print('Time taken {} = {}'.format(grid_res, end - start)) + print('RMSE {} = {}'.format(grid_res, error_rmse)) + + return exp_dict_list + + +nruns = 10 + +exp_dict_list = [] + +for grid_res in range(10, 101, 10): + for _ in range(nruns): + exp_dict_list.append(wave_experiment(grid_res)) + +import pandas as pd + +exp_dict_list_flatten = [item for sublist in exp_dict_list for item in sublist] +df = pd.DataFrame(exp_dict_list_flatten) +# df.boxplot(by='grid_res',column='time',fontsize=42,figsize=(20,10)) +# 
df.boxplot(by='grid_res',column='RMSE',fontsize=42,figsize=(20,10),showfliers=False) +df.to_csv('examples/benchmarking_data/wave_experiment_physical_10_100_cache={}.csv'.format(str(True))) \ No newline at end of file diff --git a/examples/example_wave_physics_fast_kan.py b/examples/example_wave_physics_fast_kan.py new file mode 100644 index 00000000..e3e3aaf1 --- /dev/null +++ b/examples/example_wave_physics_fast_kan.py @@ -0,0 +1,206 @@ +# -*- coding: utf-8 -*- +""" +Created on Mon May 31 12:33:44 2021 + +@author: user +""" +import torch +import numpy as np +import os +import sys +import time +import torch.nn.functional as F + +os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE' +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) + +from tedeous.data import Domain, Conditions, Equation +from tedeous.model import Model + +from tedeous.callbacks import early_stopping, plot, cache +from tedeous.optimizers.optimizer import Optimizer +from tedeous.device import solver_device + +import fastkan + + +""" +Preparing grid + +Grid is an essentially torch.Tensor of a n-D points where n is the problem +dimensionality +""" + +solver_device('gpu') + + +def func(grid): + x, t = grid[:, 0], grid[:, 1] + sln = torch.cos(2 * np.pi * t) * torch.sin(np.pi * x) + return sln + + +def wave_experiment(grid_res): + exp_dict_list = [] + + domain = Domain() + domain.variable('x', [0, 1], grid_res) + domain.variable('t', [0, 1], grid_res) + + """ + Preparing boundary conditions (BC) + + For every boundary we define three items + + bnd=torch.Tensor of a boundary n-D points where n is the problem + dimensionality + + bop=dict in form {'term1':term1,'term2':term2} -> term1+term2+...=0 + + NB! dictionary keys at the current time serve only for user-frienly + description/comments and are not used in model directly thus order of + items must be preserved as (coeff,op,pow) + + term is a dict term={coefficient:c1,[sterm1,sterm2],'pow': power} + + Meaning c1*u*d2u/dx2 has the form + + {'coefficient':c1, + 'u*d2u/dx2': [[None],[0,0]], + 'pow':[1,1]} + + None is for function without derivatives + + + bval=torch.Tensor prescribed values at every point in the boundary + """ + + boundaries = Conditions() + + # Initial conditions at t=0 + boundaries.dirichlet({'x': [0, 1], 't': 0}, value=func) + + # Initial conditions at t=1 + # u(1,x)=sin(pi*x) + bop2 = { + 'du/dt': + { + 'coeff': 1, + 'du/dx': [1], + 'pow': 1, + 'var': 0 + } + } + boundaries.operator({'x': [0, 1], 't': 0}, operator=bop2, value=0) + + # Boundary conditions at x=0 + boundaries.dirichlet({'x': 0, 't': [0, 1]}, value=func) + + # Boundary conditions at x=1 + boundaries.dirichlet({'x': 1, 't': [0, 1]}, value=func) + + + """ + Defining wave equation + + Operator has the form + + op=dict in form {'term1':term1,'term2':term2}-> term1+term2+...=0 + + NB! 
dictionary keys at the current time serve only for user-friendly + description/comments and are not used in model directly thus order of + items must be preserved as (coeff,op,pow) + + term is a dict term={coefficient:c1,[sterm1,sterm2],'pow': power} + + c1 may be integer, function of grid or tensor of dimension of grid + + Meaning c1*u*d2u/dx2 has the form + + {'coefficient':c1, + 'u*d2u/dx2': [[None],[0,0]], + 'pow':[1,1]} + + None is for function without derivatives + + """ + + equation = Equation() + + # operator is 4*d2u/dx2-1*d2u/dt2=0 + wave_eq = { + 'd2u/dt2**1': + { + 'coeff': 1, + 'd2u/dt2': [1, 1], + 'pow': 1 + }, + '-C*d2u/dx2**1': + { + 'coeff': -4., + 'd2u/dx2': [0, 0], + 'pow': 1 + } + } + + equation.add(wave_eq) + + net = fastkan.FastKAN( + [2, 100, 100, 100, 1], + grid_min=-4., + grid_max=4., + num_grids=2, + use_base_update=True, + base_activation=F.tanh, + spline_weight_init_scale=0.05 + ) + + start = time.time() + + model = Model(net, domain, equation, boundaries) + + model.compile("autograd", lambda_operator=1, lambda_bound=100) + + cb_es = early_stopping.EarlyStopping(eps=1e-5, randomize_parameter=1e-6, info_string_every=50) + + cb_cache = cache.Cache(cache_verbose=True, model_randomize_parameter=1e-6) + + img_dir = os.path.join(os.path.dirname( __file__ ), 'wave_img_fast_kan') + + cb_plots = plot.Plots(save_every=1000, print_every=1000, img_dir=img_dir) + + optimizer = Optimizer('Adam', {'lr': 1e-4}) + + model.train(optimizer, 5e6, save_model=True, callbacks=[cb_es, cb_plots, cb_cache]) + + end = time.time() + + grid = domain.build('NN').to('cuda') + net = net.to('cuda') + + error_rmse = torch.sqrt(torch.mean((func(grid).reshape(-1, 1) - net(grid)) ** 2)) + + exp_dict_list.append({'grid_res': grid_res, 'time': end - start, 'RMSE': error_rmse.detach().cpu().numpy(), + 'type': 'wave_eqn_physical', 'cache': True}) + + print('Time taken {} = {}'.format(grid_res, end - start)) + print('RMSE {} = {}'.format(grid_res, error_rmse)) + + return exp_dict_list + + +nruns = 10 + +exp_dict_list = [] + +for grid_res in range(10, 101, 10): + for _ in range(nruns): + exp_dict_list.append(wave_experiment(grid_res)) + +import pandas as pd + +exp_dict_list_flatten = [item for sublist in exp_dict_list for item in sublist] +df = pd.DataFrame(exp_dict_list_flatten) +# df.boxplot(by='grid_res',column='time',fontsize=42,figsize=(20,10)) +# df.boxplot(by='grid_res',column='RMSE',fontsize=42,figsize=(20,10),showfliers=False) +df.to_csv('examples/benchmarking_data/wave_experiment_physical_10_100_cache={}.csv'.format(str(True))) \ No newline at end of file diff --git a/examples/example_wave_physics_kan.py b/examples/example_wave_physics_kan.py new file mode 100644 index 00000000..44f50509 --- /dev/null +++ b/examples/example_wave_physics_kan.py @@ -0,0 +1,211 @@ +# -*- coding: utf-8 -*- +""" +Created on Mon May 31 12:33:44 2021 + +@author: user +""" +import torch +import numpy as np +import os +import sys +import time +import torch.nn.functional as F + +os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE' +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) + +from tedeous.data import Domain, Conditions, Equation +from tedeous.model import Model + +from tedeous.callbacks import early_stopping, plot, cache +from tedeous.optimizers.optimizer import Optimizer +from tedeous.device import solver_device + +import kan + + +""" +Preparing grid + +Grid is an essentially torch.Tensor of a n-D points where n is the problem +dimensionality +""" + +solver_device('gpu') + + +def func(grid): 
+ x, t = grid[:, 0], grid[:, 1] + sln = torch.cos(2 * np.pi * t) * torch.sin(np.pi * x) + return sln + + +def wave_experiment(grid_res): + exp_dict_list = [] + + domain = Domain() + domain.variable('x', [0, 1], grid_res) + domain.variable('t', [0, 1], grid_res) + + """ + Preparing boundary conditions (BC) + + For every boundary we define three items + + bnd=torch.Tensor of a boundary n-D points where n is the problem + dimensionality + + bop=dict in form {'term1':term1,'term2':term2} -> term1+term2+...=0 + + NB! dictionary keys at the current time serve only for user-frienly + description/comments and are not used in model directly thus order of + items must be preserved as (coeff,op,pow) + + term is a dict term={coefficient:c1,[sterm1,sterm2],'pow': power} + + Meaning c1*u*d2u/dx2 has the form + + {'coefficient':c1, + 'u*d2u/dx2': [[None],[0,0]], + 'pow':[1,1]} + + None is for function without derivatives + + + bval=torch.Tensor prescribed values at every point in the boundary + """ + + boundaries = Conditions() + + # Initial conditions at t=0 + boundaries.dirichlet({'x': [0, 1], 't': 0}, value=func) + + # Initial conditions at t=1 + # u(1,x)=sin(pi*x) + bop2 = { + 'du/dt': + { + 'coeff': 1, + 'du/dx': [1], + 'pow': 1, + 'var': 0 + } + } + boundaries.operator({'x': [0, 1], 't': 0}, operator=bop2, value=0) + + # Boundary conditions at x=0 + boundaries.dirichlet({'x': 0, 't': [0, 1]}, value=func) + + # Boundary conditions at x=1 + boundaries.dirichlet({'x': 1, 't': [0, 1]}, value=func) + + + """ + Defining wave equation + + Operator has the form + + op=dict in form {'term1':term1,'term2':term2}-> term1+term2+...=0 + + NB! dictionary keys at the current time serve only for user-friendly + description/comments and are not used in model directly thus order of + items must be preserved as (coeff,op,pow) + + term is a dict term={coefficient:c1,[sterm1,sterm2],'pow': power} + + c1 may be integer, function of grid or tensor of dimension of grid + + Meaning c1*u*d2u/dx2 has the form + + {'coefficient':c1, + 'u*d2u/dx2': [[None],[0,0]], + 'pow':[1,1]} + + None is for function without derivatives + + """ + + equation = Equation() + + # operator is 4*d2u/dx2-1*d2u/dt2=0 + wave_eq = { + 'd2u/dt2**1': + { + 'coeff': 1, + 'd2u/dt2': [1, 1], + 'pow': 1 + }, + '-C*d2u/dx2**1': + { + 'coeff': -4., + 'd2u/dx2': [0, 0], + 'pow': 1 + } + } + + equation.add(wave_eq) + + net = kan.KAN( + width=[2, 3, 3, 3, 1], + grid=50, + k=3, + mult_arity=2, + noise_scale=1.0, + scale_base_mu=0.0, + scale_base_sigma=1.0, + base_fun='silu', + symbolic_enabled=True, + affine_trainable=False, + grid_eps=1.0, + grid_range=[-5, 5] + ) + + start = time.time() + + model = Model(net, domain, equation, boundaries) + + model.compile("autograd", lambda_operator=1, lambda_bound=100) + + cb_es = early_stopping.EarlyStopping(eps=1e-5, randomize_parameter=1e-6, info_string_every=10) + + cb_cache = cache.Cache(cache_verbose=True, model_randomize_parameter=1e-6) + + img_dir = os.path.join(os.path.dirname( __file__ ), 'wave_img_kan') + + cb_plots = plot.Plots(save_every=500, print_every=500, img_dir=img_dir) + + optimizer = Optimizer('Adam', {'lr': 5e-4}) + + model.train(optimizer, 5e6, save_model=True, callbacks=[cb_es, cb_plots, cb_cache]) + + end = time.time() + + grid = domain.build('NN').to('cuda') + net = net.to('cuda') + + error_rmse = torch.sqrt(torch.mean((func(grid).reshape(-1, 1) - net(grid)) ** 2)) + + exp_dict_list.append({'grid_res': grid_res, 'time': end - start, 'RMSE': error_rmse.detach().cpu().numpy(), + 'type': 
'wave_eqn_physical', 'cache': True}) + + print('Time taken {} = {}'.format(grid_res, end - start)) + print('RMSE {} = {}'.format(grid_res, error_rmse)) + + return exp_dict_list + + +nruns = 10 + +exp_dict_list = [] + +for grid_res in range(10, 101, 10): + for _ in range(nruns): + exp_dict_list.append(wave_experiment(grid_res)) + +import pandas as pd + +exp_dict_list_flatten = [item for sublist in exp_dict_list for item in sublist] +df = pd.DataFrame(exp_dict_list_flatten) +# df.boxplot(by='grid_res',column='time',fontsize=42,figsize=(20,10)) +# df.boxplot(by='grid_res',column='RMSE',fontsize=42,figsize=(20,10),showfliers=False) +df.to_csv('examples/benchmarking_data/wave_experiment_physical_10_100_cache={}.csv'.format(str(True))) diff --git a/requirements-kan.txt b/requirements-kan.txt new file mode 100644 index 00000000..0368d48b --- /dev/null +++ b/requirements-kan.txt @@ -0,0 +1,4 @@ +git+https://github.com/KindXiaoming/pykan.git +git+https://github.com/Blealtan/efficient-kan.git +git+https://github.com/ZiyaoLi/fast-kan.git + diff --git a/tedeous/callbacks/cache.py b/tedeous/callbacks/cache.py index 34c78c9f..01b137dc 100644 --- a/tedeous/callbacks/cache.py +++ b/tedeous/callbacks/cache.py @@ -134,14 +134,25 @@ def cache_lookup(self, for i in cache_n: file = files[i] - checkpoint = torch.load(file) + + try: + checkpoint = torch.load(file) + except Exception: + if cache_verbose: + print('Error loading file {}'.format(file)) + continue model = checkpoint['model'] model.load_state_dict(checkpoint['model_state_dict']) # this one for the input shape fix if needed - solver_model, cache_model = self._model_reform(self.solution_cls.model, model) + try: + solver_model, cache_model = self._model_reform(self.solution_cls.model, model) + except Exception: + if cache_verbose: + print('Error reforming file {}'.format(file)) + continue if cache_model[0].in_features != solver_model[0].in_features: continue diff --git a/tedeous/callbacks/plot.py b/tedeous/callbacks/plot.py index 8cf69a24..213d5daf 100644 --- a/tedeous/callbacks/plot.py +++ b/tedeous/callbacks/plot.py @@ -10,6 +10,7 @@ class Plots(Callback): """Class for ploting solutions.""" + def __init__(self, print_every: Union[int, None] = 500, save_every: Union[int, None] = 500, @@ -24,7 +25,7 @@ def __init__(self, """ super().__init__() self.print_every = print_every if print_every is not None else 0.1 - self.save_every = save_every if save_every is not None else 0.1 + self.save_every = save_every if save_every is not None else 0.1 self.title = title self.img_dir = img_dir @@ -34,10 +35,24 @@ def _print_nn(self): """ - try: - nvars_model = self.net[-1].out_features - except: - nvars_model = self.net.model[-1].out_features + attributes = {'model': ['out_features', 'output_dim', 'width_out'], + 'layers': ['out_features', 'output_dim', 'width_out']} + + nvars_model = None + + for key, values in attributes.items(): + for value in values: + try: + nvars_model = getattr(getattr(self.net, key)[-1], value) + break + except AttributeError: + pass + + if nvars_model is None: + try: + nvars_model = self.net[-1].out_features + except: + nvars_model = self.net.width_out[-1] nparams = self.grid.shape[1] fig = plt.figure(figsize=(15, 8)) @@ -55,9 +70,9 @@ def _print_nn(self): ax1.set_title(self.title + ' variable {}'.format(i)) ax1.plot_trisurf(self.grid[:, 0].detach().cpu().numpy(), - self.grid[:, 1].detach().cpu().numpy(), - self.net(self.grid)[:, i].detach().cpu().numpy(), - cmap=cm.jet, linewidth=0.2, alpha=1) + self.grid[:, 1].detach().cpu().numpy(), + 
self.net(self.grid)[:, i].detach().cpu().numpy(), + cmap=cm.jet, linewidth=0.2, alpha=1) ax1.set_xlabel("x1") ax1.set_ylabel("x2") @@ -68,23 +83,23 @@ def _print_mat(self): nparams = self.grid.shape[0] nvars_model = self.net.shape[0] - fig = plt.figure(figsize=(15,8)) + fig = plt.figure(figsize=(15, 8)) for i in range(nvars_model): if nparams == 1: - ax1 = fig.add_subplot(1, nvars_model, i+1) + ax1 = fig.add_subplot(1, nvars_model, i + 1) if self.title is not None: - ax1.set_title(self.title+' variable {}'.format(i)) + ax1.set_title(self.title + ' variable {}'.format(i)) ax1.scatter(self.grid.detach().cpu().numpy().reshape(-1), self.net[i].detach().cpu().numpy().reshape(-1)) else: - ax1 = fig.add_subplot(1, nvars_model, i+1, projection='3d') + ax1 = fig.add_subplot(1, nvars_model, i + 1, projection='3d') if self.title is not None: - ax1.set_title(self.title+' variable {}'.format(i)) + ax1.set_title(self.title + ' variable {}'.format(i)) ax1.plot_trisurf(self.grid[0].detach().cpu().numpy().reshape(-1), - self.grid[1].detach().cpu().numpy().reshape(-1), - self.net[i].detach().cpu().numpy().reshape(-1), - cmap=cm.jet, linewidth=0.2, alpha=1) + self.grid[1].detach().cpu().numpy().reshape(-1), + self.net[i].detach().cpu().numpy().reshape(-1), + cmap=cm.jet, linewidth=0.2, alpha=1) ax1.set_xlabel("x1") ax1.set_ylabel("x2") @@ -108,7 +123,7 @@ def _dir_path(self, save_dir: str) -> str: if not os.path.isdir(img_dir): os.mkdir(img_dir) directory = os.path.abspath(os.path.join(img_dir, - str(datetime.datetime.now().timestamp()) + '.png')) + str(datetime.datetime.now().timestamp()) + '.png')) else: if not os.path.isdir(save_dir): os.mkdir(save_dir) @@ -117,7 +132,7 @@ def _dir_path(self, save_dir: str) -> str: return directory def solution_print( - self): + self): """ printing or saving figures. """ print_flag = self.model.t % self.print_every == 0 @@ -136,6 +151,8 @@ def solution_print( if print_flag: plt.show() plt.close() - + def on_epoch_end(self, logs=None): self.solution_print() + + diff --git a/tedeous/model.py b/tedeous/model.py index 910b0c19..0eec3827 100644 --- a/tedeous/model.py +++ b/tedeous/model.py @@ -33,6 +33,7 @@ def __init__( self.domain = domain self.equation = equation self.conditions = conditions + self._check = None temp_dir = tempfile.gettempdir() folder_path = os.path.join(temp_dir, 'tedeous_cache/') @@ -88,9 +89,9 @@ def compile( self.equation_cls = Operator_bcond_preproc(grid, operator, bconds, h=h, inner_order=inner_order, boundary_order=boundary_order).set_strategy(mode) - + self.solution_cls = Solution(grid, self.equation_cls, self.net, mode, weak_form, - lambda_operator, lambda_bound, tol, derivative_points) + lambda_operator, lambda_bound, tol, derivative_points) def _model_save( self, @@ -118,7 +119,7 @@ def train(self, mixed_precision: bool = False, save_model: bool = False, model_name: Union[str, None] = None, - callbacks: Union[List, None]=None): + callbacks: Union[List, None] = None): """ train model. 
Args: @@ -151,7 +152,7 @@ def train(self, print('[{}] initial (min) loss is {}'.format( datetime.datetime.now(), self.min_loss.item())) - while self.t < epochs and self.stop_training == False: + while self.t < epochs and self.stop_training is False: callbacks.on_epoch_begin() self.optimizer.zero_grad() diff --git a/tedeous/models.py b/tedeous/models.py index d12af9ee..3c581bfc 100644 --- a/tedeous/models.py +++ b/tedeous/models.py @@ -1,5 +1,4 @@ """Module keeps custom models arctectures""" - from typing import List, Any import torch from torch import nn @@ -81,7 +80,6 @@ class FourierNN(nn.Module): def __init__(self, layers=[100, 100, 100, 1], L=[1], M=[1], activation=nn.Tanh(), ones=False): """ - Args: layers (list, optional): neurons quantity in each layer (exclusion input layer), the number of neurons in the hidden layers must match. Defaults to [100, 100, 100, 1]. @@ -103,6 +101,7 @@ def __init__(self, layers=[100, 100, 100, 1], L=[1], M=[1], self.activation = activation self.model = nn.ModuleList([FFL]) + for i in range(len(layers) - 1): self.model.append(nn.Linear(layers[i], layers[i + 1])) @@ -132,7 +131,7 @@ class FeedForward(nn.Module): """Simple MLP neural network""" def __init__(self, - layers: List=[2, 100, 100, 100, 1], + layers: List = [2, 100, 100, 100, 1], activation: nn.Module = nn.Tanh(), parameters: dict = None): """ @@ -144,13 +143,15 @@ def __init__(self, parameters (dict, optional): parameters initial values (for inverse task). Defaults to None. """ + super().__init__() - model = [] - for i in range(len(layers)-2): - model.append(nn.Linear(layers[i], layers[i+1])) - model.append(activation) - model.append(nn.Linear(layers[-2], layers[-1])) - self.net = torch.nn.Sequential(*model) + self.model = [] + + for i in range(len(layers) - 2): + self.model.append(nn.Linear(layers[i], layers[i + 1])) + self.model.append(activation) + self.model.append(nn.Linear(layers[-2], layers[-1])) + self.net = torch.nn.Sequential(*self.model) if parameters is not None: self.reg_param(parameters) @@ -165,7 +166,8 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: """ return self.net(x) - def reg_param(self, parameters: dict): + def reg_param(self, + parameters: dict): """ Parameters registration as neural network parameters. Should be used in inverse coefficients tasks. @@ -174,7 +176,7 @@ def reg_param(self, parameters: dict): """ for key, value in parameters.items(): parameters[key] = torch.nn.Parameter(torch.tensor([value], - requires_grad=True).float()) + requires_grad=True).float()) self.net.register_parameter(key, parameters[key]) @@ -189,7 +191,7 @@ def parameter_registr(model: torch.nn.Module, """ for key, value in parameters.items(): parameters[key] = torch.nn.Parameter(torch.tensor([value], - requires_grad=True).float()) + requires_grad=True).float()) model.register_parameter(key, parameters[key]) @@ -215,8 +217,7 @@ def mat_model(domain: Any, shape = [eq_num] + list(grid.shape)[1:] if nn_model is not None: - nn_grid = torch.vstack([grid[i].reshape(-1) for i in \ - range(grid.shape[0])]).T.float() + nn_grid = torch.vstack([grid[i].reshape(-1) for i in range(grid.shape[0])]).T.float() model = nn_model(nn_grid).detach() model = model.reshape(shape) else: diff --git a/tedeous/utils.py b/tedeous/utils.py index 1ecfbd3f..c4535b3b 100644 --- a/tedeous/utils.py +++ b/tedeous/utils.py @@ -9,6 +9,7 @@ import torch from tedeous.device import check_device + def create_random_fn(eps: float) -> callable: """ Create random tensors to add some variance to torch neural network. 
@@ -19,17 +20,18 @@ def create_random_fn(eps: float) -> callable: callable: creating random params function. """ def randomize_params(m): - if isinstance(m, torch.nn.Linear) or isinstance(m, torch.nn.Conv2d): + if (isinstance(m, torch.nn.Linear) or isinstance(m, torch.nn.Conv2d)) and m.bias is not None: m.weight.data = m.weight.data + \ (2 * torch.randn(m.weight.size()) - 1) * eps m.bias.data = m.bias.data + (2 * torch.randn(m.bias.size()) - 1) * eps return randomize_params + def samples_count(second_order_interactions: bool, sampling_N: int, op_length: list, - bval_length:list) -> Tuple[int, int]: + bval_length: list) -> Tuple[int, int]: """ Count samples for variance based sensitivity analysis. Args: @@ -54,6 +56,7 @@ def samples_count(second_order_interactions: bool, sampling_amount = sampling_N * (sampling_D + 2) return sampling_amount, sampling_D + def lambda_print(lam: torch.Tensor, keys: List) -> None: """ Print lambda value. @@ -66,6 +69,7 @@ def lambda_print(lam: torch.Tensor, keys: List) -> None: for val, key in zip(lam, keys): print('lambda_{}: {}'.format(key, val.item())) + def bcs_reshape( bval: torch.Tensor, true_bval: torch.Tensor, @@ -90,6 +94,7 @@ def bcs_reshape( return bcs + def remove_all_files(folder: str) -> None: """ Remove all files from folder. @@ -106,6 +111,7 @@ def remove_all_files(folder: str) -> None: except Exception as e: print('Failed to delete %s. Reason: %s' % (file_path, e)) + def mat_op_coeff(equation: Any) -> Any: """ Preparation of coefficients in the operator of the *mat* method to suit methods *NN, autograd*. @@ -127,6 +133,7 @@ def mat_op_coeff(equation: Any) -> Any: it may lead to wrong cache item choice") return equation + def model_mat(model: torch.Tensor, domain: Any, cache_model: torch.nn.Module=None) -> Tuple[torch.Tensor, torch.nn.Module]: @@ -159,6 +166,7 @@ def model_mat(model: torch.Tensor, return cache_model + def save_model_nn( cache_dir: str, model: torch.nn.Module, @@ -176,6 +184,7 @@ def save_model_nn( name = str(datetime.datetime.now().timestamp()) if not os.path.isdir(cache_dir): os.mkdir(cache_dir) + parameters_dict = {'model': model.to('cpu'), 'model_state_dict': model.state_dict()} @@ -184,11 +193,12 @@ def save_model_nn( print(f'model is saved in cache dir: {cache_dir}') except RuntimeError: torch.save(parameters_dict, cache_dir + '\\' + name + '.tar', - _use_new_zipfile_serialization=False) # cyrrilic in path + _use_new_zipfile_serialization=False) # cyrillic in path print(f'model is saved in cache: {cache_dir}') except: print(f'Cannot save model in cache: {cache_dir}') + def save_model_mat(cache_dir: str, model: torch.Tensor, domain: Any, diff --git a/tedeous/version.py b/tedeous/version.py index b703f5c9..c9435752 100644 --- a/tedeous/version.py +++ b/tedeous/version.py @@ -1 +1 @@ -__version__ = '0.4.1' \ No newline at end of file +__version__ = '0.4.2' \ No newline at end of file
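
Usage note for the new wave-equation KAN examples: the snippet below is a minimal sketch of how their benchmarking output can be inspected. It assumes the scripts were run from the repository root (so the CSV path used in their `df.to_csv(...)` call exists) and that pandas and matplotlib are available; the column names and boxplot calls are taken from the commented-out lines at the end of those scripts, not from any API added by this patch.

import pandas as pd
import matplotlib.pyplot as plt

# Path and cache flag match the df.to_csv(...) call at the end of each wave example.
csv_path = 'examples/benchmarking_data/wave_experiment_physical_10_100_cache=True.csv'
df = pd.read_csv(csv_path, index_col=0)

# RMSE is written as a numpy scalar per row; coerce it to float before plotting.
df['RMSE'] = pd.to_numeric(df['RMSE'], errors='coerce')

# Same plots that are left commented out in the example scripts.
df.boxplot(by='grid_res', column='time', fontsize=42, figsize=(20, 10))
df.boxplot(by='grid_res', column='RMSE', fontsize=42, figsize=(20, 10), showfliers=False)
plt.show()

The KAN dependencies themselves are installed with pip install -r requirements-kan.txt, as introduced by this patch.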