Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Extend Server class #1679

Merged
merged 1 commit into the base branch from the source branch
Oct 23, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 24 additions & 10 deletions pyiron_base/jobs/job/extension/server/generic.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,13 @@ class Server(
cores (int): number of cores
run_mode (pyiron_base.server.runmode.Runmode): mode of the job ['modal', 'non_modal', 'queue', 'manual']
new_hdf (bool): create a new HDF5 file [True/False] - default=True
accept_crash (bool): ignore execution errors raised by external executables - default False
run_time (int): run time limit in seconds for the job to finish - required for HPC job schedulers
memory_limit (str): memory required by the job - passed to the HPC job scheduler
qid (int): Queuing system ID - ID received from the HPC job scheduler
additional_arguments (dict): Additional arguments for the HPC job scheduler
conda_environment_name (str): Name of the conda environment
conda_environment_path (str): Path to the conda environment

Attributes:

Expand Down Expand Up @@ -94,6 +101,13 @@ def __init__(
gpus: Optional[int] = None,
run_mode: str = "modal",
new_hdf: bool = True,
accept_crash: bool = False,
run_time: Optional[int] = None,
memory_limit: Optional[str] = None,
qid: Optional[int] = None,
additional_arguments: dict = {},
conda_environment_name: Optional[str] = None,
conda_environment_path: Optional[str] = None,
):
super().__init__()
self._data = ServerDataClass(
Expand All @@ -104,14 +118,14 @@ def __init__(
gpus=gpus,
threads=threads,
new_hdf=new_hdf,
accept_crash=False,
run_time=None,
memory_limit=None,
queue=None,
qid=None,
additional_arguments={},
conda_environment_name=None,
conda_environment_path=None,
accept_crash=accept_crash,
run_time=run_time,
memory_limit=memory_limit,
queue=queue,
qid=qid,
additional_arguments=additional_arguments,
conda_environment_name=conda_environment_name,
conda_environment_path=conda_environment_path,
)
self._run_mode = Runmode()
self._executor: Union[Executor, None] = None
Expand Down Expand Up @@ -339,12 +353,12 @@ def run_time(self, new_run_time: int) -> None:
self._data.run_time = new_run_time

@property
def memory_limit(self) -> int:
def memory_limit(self) -> str:
return self._data.memory_limit

@memory_limit.setter
@sentinel
def memory_limit(self, limit: int) -> None:
def memory_limit(self, limit: str) -> None:
if state.queue_adapter is not None and self._data.queue is not None:
memory_max = state.queue_adapter.check_queue_parameters(
queue=self.queue,
Expand Down
21 changes: 21 additions & 0 deletions pyiron_base/project/decorator.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,13 @@ def pyiron_job(
gpus: Optional[int] = None,
run_mode: str = "modal",
new_hdf: bool = True,
accept_crash: bool = False,
run_time: Optional[int] = None,
memory_limit: Optional[str] = None,
qid: Optional[int] = None,
additional_arguments: dict = {},
conda_environment_name: Optional[str] = None,
conda_environment_path: Optional[str] = None,
output_file_lst: list = [],
output_key_lst: list = [],
):
Expand All @@ -31,6 +38,13 @@ def pyiron_job(
gpus (int): the number of gpus selected for the current simulation.
run_mode (str): the run mode of the job ['modal', 'non_modal', 'queue', 'manual']
new_hdf (bool): defines whether a subjob should be stored in the same HDF5 file or in a new one.
accept_crash (bool): ignore execution errors raised by external executables - default False
run_time (int): run time limit in seconds for the job to finish - required for HPC job schedulers
memory_limit (str): memory required by the job - passed to the HPC job scheduler
qid (int): Queuing system ID - ID received from the HPC job scheduler
additional_arguments (dict): Additional arguments for the HPC job scheduler
conda_environment_name (str): Name of the conda environment
conda_environment_path (str): Path to the conda environment
output_file_lst (list):
output_key_lst (list):

Expand Down Expand Up @@ -100,6 +114,13 @@ def function(
"gpus": gpus,
"run_mode": run_mode,
"new_hdf": new_hdf,
"accept_crash": accept_crash,
"run_time": run_time,
"memory_limit": memory_limit,
"qid": qid,
"additional_arguments": additional_arguments,
"conda_environment_name": conda_environment_name,
"conda_environment_path": conda_environment_path,
}
return get_delayed_object(
*args,
Expand Down
Loading