Normalized loss parameter switch #34

Merged 3 commits, Jul 28, 2023
examples/example_1d_navier_stokes.py (1 addition, 1 deletion)

```diff
@@ -7,7 +7,7 @@
 os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
 
-sys.path.pop()
+#sys.path.pop()
 sys.path.append(os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..')))
 
 from tedeous.input_preprocessing import Equation
```
examples/example_SODtest.py (2 additions, 2 deletions)

```diff
@@ -14,9 +14,9 @@
 os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
 
-sys.path.append('../')
+#sys.path.append('../')
 
-sys.path.pop()
+#sys.path.pop()
 sys.path.append(os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..')))
 
 from tedeous.input_preprocessing import Equation
```
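Both example scripts replace ad-hoc `sys.path` manipulation with a path computed from the script's own location: `sys.path.pop()` can silently discard an entry the interpreter still needs, and a relative `sys.path.append('../')` only resolves when the script is launched from inside `examples/`. A minimal sketch of the idiom the examples now share, runnable from any working directory:

```python
import os
import sys

# Make the repository root (one level above this script) importable so the
# in-tree `tedeous` package is found without installing it, independent of
# the current working directory.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from tedeous.input_preprocessing import Equation
```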
setup.py (1 addition, 1 deletion)

```diff
@@ -30,7 +30,7 @@ def get_requirements():
 
 setup(
     name = 'tedeous',
-    version= '0.3.0' ,
+    version= '0.3.1' ,
     description = 'TEDEouS - Torch Exhaustive Differential Equations Solver. Differential equation solver, based on pytorch library',
     long_description = 'Combine power of pytorch, numerical methods and math overall to conquer and solve ALL {O,P}DEs. There are some examples to provide a little insight to an operator form',
     author = 'Alexander Hvatov',
```
tedeous/cache.py (14 additions, 9 deletions)

```diff
@@ -132,7 +132,7 @@ def model_reform(init_model, model):
 
 
     def cache_lookup(self, lambda_operator: float = 1., lambda_bound: float = 0.001,
-                     nmodels: Union[int, None] = None, save_graph: bool = False, cache_verbose: bool = False) -> Tuple[dict, torch.Tensor]:
+                     nmodels: Union[int, None] = None, save_graph: bool = False, cache_verbose: bool = False, return_normalized_loss: bool = False) -> Tuple[dict, torch.Tensor]:
         """
         Looking for a saved cache.
         Args:
@@ -193,7 +193,9 @@ def cache_lookup(self, lambda_operator: float = 1., lambda_bound: float = 0.001,
         if best_checkpoint == {}:
             best_checkpoint = None
             min_loss = np.inf
-        return best_checkpoint, min_norm_loss
+        if return_normalized_loss:
+            min_loss = min_norm_loss
+        return best_checkpoint, min_loss
 
     def save_model(self, prep_model: Any, state: dict, optimizer_state: dict, name: Union[str, None] = None):
         """
@@ -326,7 +328,7 @@ def cache_retrain(self, cache_checkpoint, cache_verbose: bool = False) -> Union[
 
     def cache_nn(self, cache_dir: str, nmodels: Union[int, None], lambda_operator: float, lambda_bound: float,
                  cache_verbose: bool, model_randomize_parameter: Union[float, None],
-                 cache_model: torch.nn.Sequential, ):
+                 cache_model: torch.nn.Sequential, return_normalized_loss: bool = False):
         """
         Restores the model from the cache and uses it for retraining.
         Args:
@@ -345,7 +347,8 @@ def cache_nn(self, cache_dir: str, nmodels: Union[int, None], lambda_operator: f
         cache_checkpoint, min_loss = self.cache_lookup(nmodels=nmodels,
                                                        cache_verbose=cache_verbose,
                                                        lambda_operator=lambda_operator,
-                                                       lambda_bound=lambda_bound)
+                                                       lambda_bound=lambda_bound,
+                                                       return_normalized_loss=return_normalized_loss)
 
         self.model, optimizer_state = self.cache_retrain(cache_checkpoint,
                                                          cache_verbose=cache_verbose)
@@ -356,7 +359,7 @@ def cache_nn(self, cache_dir: str, nmodels: Union[int, None], lambda_operator: f
 
     def cache_mat(self, nmodels: Union[int, None], lambda_operator: float, lambda_bound: float,
                   cache_verbose: bool, model_randomize_parameter: Union[float, None],
-                  cache_model: torch.nn.Sequential):
+                  cache_model: torch.nn.Sequential, return_normalized_loss: bool = False):
         """
         Restores the model from the cache and uses it for retraining.
         Args:
@@ -389,7 +392,8 @@ def cache_mat(self, nmodels: Union[int, None], lambda_operator: float, lambda_bou
                                  cache_dir=self.cache_dir,
                                  nmodels=nmodels,
                                  cache_verbose=cache_verbose,
-                                 lambda_bound=lambda_bound)
+                                 lambda_bound=lambda_bound,
+                                 return_normalized_loss=return_normalized_loss)
         prepared_model, optimizer_state = model_cls.cache_retrain(
             cache_checkpoint,
             cache_verbose=cache_verbose)
@@ -411,7 +415,8 @@ def cache_mat(self, nmodels: Union[int, None], lambda_operator: float, lambda_bou
 
     def cache(self, nmodels: Union[int, None], lambda_operator, lambda_bound: float,
               cache_verbose: bool, model_randomize_parameter: Union[float, None],
-              cache_model: torch.nn.Sequential, ):
+              cache_model: torch.nn.Sequential,
+              return_normalized_loss: bool = False):
         """
         Restores the model from the cache and uses it for retraining.
         Args:
@@ -430,9 +435,9 @@ def cache(self, nmodels: Union[int, None], lambda_operator, lambda_bound: float,
         if self.mode != 'mat':
             return self.cache_nn(self.cache_dir, nmodels, lambda_operator, lambda_bound,
                                  cache_verbose, model_randomize_parameter,
-                                 cache_model)
+                                 cache_model, return_normalized_loss=return_normalized_loss)
         elif self.mode == 'mat':
             return self.cache_mat(self.cache_dir, nmodels, lambda_operator, lambda_bound,
                                   cache_verbose, model_randomize_parameter,
-                                  cache_model)
+                                  cache_model, return_normalized_loss=return_normalized_loss)
```
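The new flag threads from `cache()` through `cache_nn()`/`cache_mat()` down to `cache_lookup()`, which previously always returned the normalized minimum loss and now returns the raw minimum loss unless the caller opts in. A hedged sketch of the return switch, with stand-in arguments in place of what the real method computes while scanning saved checkpoints (`lookup_tail` is a hypothetical name for illustration):

```python
import numpy as np

def lookup_tail(best_checkpoint, min_loss, min_norm_loss,
                return_normalized_loss=False):
    # No cached model was usable: signal that with None and an infinite loss.
    if best_checkpoint == {}:
        best_checkpoint = None
        min_loss = np.inf
    # Opt-in switch added by this PR: report the normalized loss instead.
    if return_normalized_loss:
        min_loss = min_norm_loss
    return best_checkpoint, min_loss

# Default now yields the raw loss; opting in yields the normalized one.
assert lookup_tail({'m': 0}, 0.42, 0.05)[1] == 0.42
assert lookup_tail({'m': 0}, 0.42, 0.05, return_normalized_loss=True)[1] == 0.05
```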

tedeous/solver.py (7 additions, 3 deletions)

```diff
@@ -222,7 +222,8 @@ def solve(self,lambda_operator: Union[float, list] = 1,lambda_bound: Union[float
               patience: int = 5, loss_oscillation_window: int = 100,
               no_improvement_patience: int = 1000, model_randomize_parameter: Union[int, float] = 0,
               optimizer_mode: str = 'Adam', step_plot_print: Union[bool, int] = False,
-              step_plot_save: Union[bool, int] = False, image_save_dir: Union[str, None] = None, tol: float = 0,clear_cache: bool =False) -> Any:
+              step_plot_save: Union[bool, int] = False, image_save_dir: Union[str, None] = None, tol: float = 0,
+              clear_cache: bool = False, normalized_loss_stop: bool = False) -> Any:
         """
         High-level interface for solving equations.
@@ -277,7 +278,7 @@ def solve(self,lambda_operator: Union[float, list] = 1,lambda_bound: Union[float
                                             cache_verbose,
                                             model_randomize_parameter,
                                             cache_model,
-                                            )
+                                            return_normalized_loss=normalized_loss_stop)
 
         Solution_class = Solution(self.grid, self.equal_cls,
                                   self.model, self.mode, self.weak_form,
@@ -315,7 +316,10 @@ def closure():
                                tol=tol)
 
             loss.backward()
-            cur_loss = loss_normalized.item()
+            if normalized_loss_stop:
+                cur_loss = loss_normalized.item()
+            else:
+                cur_loss = loss.item()
             return loss
 
         stop_dings = 0
```
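On the solver side, `normalized_loss_stop` selects which scalar feeds the early-stopping bookkeeping inside `closure()`: the normalized loss when the flag is set (previously the only behavior), or the raw weighted loss otherwise (the new default). A hedged usage sketch; `grid`, `equation`, and `model` are assumed to be prepared as in the repository's examples, and the `Solver(...)` constructor arguments are an assumption inferred from the attributes `solver.py` uses (`grid`, `equal_cls`, `model`, `mode`):

```python
from tedeous.solver import Solver

# grid, equation, model: built beforehand as in examples/example_SODtest.py;
# that setup is omitted here.
solver = Solver(grid, equation, model, 'NN')

solution = solver.solve(lambda_bound=100,
                        optimizer_mode='Adam',
                        # New in this PR: the stopping criterion tracks
                        # loss_normalized.item() instead of loss.item().
                        normalized_loss_stop=True)
```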