From 9db7cedc5b6f39c128e2b2ac3ec2bc5c39a86f15 Mon Sep 17 00:00:00 2001 From: bastonero Date: Wed, 31 May 2023 13:31:35 +0000 Subject: [PATCH] Workflows: add the `skip_relax_iterations` logic We implement the logic to skip a certain number of relax iterations. For these iterations the check on convergence is also skipped. Some fixes are also added. --- .../functions/structure_relabel_kinds.py | 2 +- .../workflows/hubbard.py | 192 ++++++++---------- .../workflows/protocols/hubbard.yaml | 1 - tests/workflows/protocols/test_hubbard.py | 2 +- .../protocols/test_hubbard/test_default.yml | 1 - tests/workflows/test_hubbard.py | 76 +++++-- 6 files changed, 146 insertions(+), 128 deletions(-) diff --git a/src/aiida_quantumespresso_hp/calculations/functions/structure_relabel_kinds.py b/src/aiida_quantumespresso_hp/calculations/functions/structure_relabel_kinds.py index 176c81e..0c547e6 100644 --- a/src/aiida_quantumespresso_hp/calculations/functions/structure_relabel_kinds.py +++ b/src/aiida_quantumespresso_hp/calculations/functions/structure_relabel_kinds.py @@ -13,7 +13,7 @@ def structure_relabel_kinds( hubbard_structure: HubbardStructureData, hubbard: Dict, - magnetization: Dict | None = None, + magnetization: dict | None = None, ) -> Dict: """Create a clone of the given structure but with new kinds, based on the new hubbard sites. diff --git a/src/aiida_quantumespresso_hp/workflows/hubbard.py b/src/aiida_quantumespresso_hp/workflows/hubbard.py index f298304..60123a8 100644 --- a/src/aiida_quantumespresso_hp/workflows/hubbard.py +++ b/src/aiida_quantumespresso_hp/workflows/hubbard.py @@ -44,6 +44,12 @@ def get_separated_parameters( return onsites, intersites +def validate_positive(value, _): + """Validate that the value is positive.""" + if value.value < 0: + return 'the value must be positive.' + + def validate_inputs(inputs, _): """Validate the entire inputs.""" parameters = AttributeDict(inputs).scf.pw.parameters.get_dict() @@ -100,74 +106,36 @@ class SelfConsistentHubbardWorkChain(WorkChain, ProtocolMixin): @classmethod def define(cls, spec): """Define the specifications of the process.""" + # yapf: disable super().define(spec) - - spec.input('hubbard_structure', valid_type=HubbardStructureData) - spec.input( - 'tolerance_onsite', - valid_type=orm.Float, - default=lambda: orm.Float(0.1), - help=( - 'Tolerance value for self-consistent calculation of Hubbard U. ' - 'In case of DFT+U+V calculation, it refers to the diagonal elements (i.e. on-site).' - ) - ) - spec.input( - 'tolerance_intersite', - valid_type=orm.Float, - default=lambda: orm.Float(0.01), - help=( - 'Tolerance value for self-consistent DFT+U+V calculation. ' - 'It refers to the only off-diagonal elements V.' - ) - ) - spec.input( - 'skip_first_relax', - valid_type=orm.Bool, - default=lambda: orm.Bool(False), - help='If True, skip the first relaxation' - ) - spec.input( - 'relax_frequency', - valid_type=orm.Int, - required=False, - help='Integer value referring to the number of iterations to wait before performing the `relax` step.' - ) - spec.expose_inputs( - PwRelaxWorkChain, - namespace='relax', - exclude=( - 'clean_workdir', - 'structure', - ), - namespace_options={ - 'required': False, - 'populate_defaults': False, - 'help': 'Inputs for the `PwRelaxWorkChain` that, when defined, will iteratively relax the structure.' 
- } - ) - spec.expose_inputs(PwBaseWorkChain, namespace='scf', exclude=( - 'clean_workdir', - 'pw.structure', - )) - spec.expose_inputs( - HpWorkChain, - namespace='hubbard', - exclude=( - 'clean_workdir', - 'hp.parent_scf', - 'hp.parent_hp', - 'hp.hubbard_structure', - ) - ) - spec.input('max_iterations', valid_type=orm.Int, default=lambda: orm.Int(10)) - spec.input('meta_convergence', valid_type=orm.Bool, default=lambda: orm.Bool(False)) - spec.input( - 'clean_workdir', - valid_type=orm.Bool, - default=lambda: orm.Bool(True), - help='If `True`, work directories of all called calculation will be cleaned at the end of execution.' - ) + spec.input('hubbard_structure', valid_type=HubbardStructureData, + help=('The HubbardStructureData containing the initialized parameters for triggering ' + 'the Hubbard atoms which the `hp.x` code will perturbe.')) + spec.input('tolerance_onsite', valid_type=orm.Float, default=lambda: orm.Float(0.1), + help=('Tolerance value for self-consistent calculation of Hubbard U. ' + 'In case of DFT+U+V calculation, it refers to the diagonal elements (i.e. on-site).')) + spec.input('tolerance_intersite', valid_type=orm.Float, default=lambda: orm.Float(0.01), + help=('Tolerance value for self-consistent DFT+U+V calculation. ' + 'It refers to the only off-diagonal elements V.')) + spec.input('skip_relax_iterations', valid_type=orm.Int, required=False, validator=validate_positive, + help=('The number of iterations for skipping the `relax` ' + 'step without performing check on parameters convergence.')) + spec.input('relax_frequency', valid_type=orm.Int, required=False, validator=validate_positive, + help='Integer value referring to the number of iterations to wait before performing the `relax` step.') + spec.expose_inputs(PwRelaxWorkChain, namespace='relax', + exclude=('clean_workdir', 'structure', 'base_final_scf'), + namespace_options={'required': False, 'populate_defaults': False, + 'help': 'Inputs for the `PwRelaxWorkChain` that, when defined, will iteratively relax the structure.'}) + spec.expose_inputs(PwBaseWorkChain, namespace='scf', + exclude=('clean_workdir','pw.structure')) + spec.expose_inputs(HpWorkChain, namespace='hubbard', + exclude=('clean_workdir', 'hp.parent_scf', 'hp.parent_hp', 'hp.hubbard_structure')) + spec.input('max_iterations', valid_type=orm.Int, default=lambda: orm.Int(10), + help='Maximum number of iterations of the (relax-)scf-hp cycle.') + spec.input('meta_convergence', valid_type=orm.Bool, default=lambda: orm.Bool(False), + help='Whether performing the self-consistent cycle. If False, it will stop at the first iteration.') + spec.input('clean_workdir', valid_type=orm.Bool, default=lambda: orm.Bool(True), + help='If `True`, work directories of all called calculation will be cleaned at the end of execution.') spec.inputs.validator = validate_inputs spec.inputs['hubbard']['hp'].validator = None @@ -188,44 +156,30 @@ def define(cls, spec): ), cls.run_hp, cls.inspect_hp, - if_(cls.should_check_convergence)(cls.check_convergence,), + if_(cls.should_check_convergence)( + cls.check_convergence, + ), ), cls.run_results, ) - spec.output( - 'hubbard_structure', - valid_type=HubbardStructureData, - required=False, - help='The Hubbard structure containing the structure and associated Hubbard parameters.' - ) - - spec.exit_code( - 330, - 'ERROR_FAILED_TO_DETERMINE_PSEUDO_POTENTIAL', - message='Failed to determine the correct pseudo potential after the structure changed its kind names.' 
- ) - spec.exit_code( - 401, 'ERROR_SUB_PROCESS_FAILED_RECON', message='The reconnaissance PwBaseWorkChain sub process failed' - ) - spec.exit_code( - 402, - 'ERROR_SUB_PROCESS_FAILED_RELAX', - message='The PwRelaxWorkChain sub process failed in iteration {iteration}' - ) - spec.exit_code( - 403, - 'ERROR_SUB_PROCESS_FAILED_SCF', - message='The scf PwBaseWorkChain sub process failed in iteration {iteration}' - ) - spec.exit_code( - 404, 'ERROR_SUB_PROCESS_FAILED_HP', message='The HpWorkChain sub process failed in iteration {iteration}' - ) - spec.exit_code( - 405, 'ERROR_NON_INTEGER_TOT_MAGNETIZATION', + spec.output('hubbard_structure', valid_type=HubbardStructureData, required=False, + help='The Hubbard structure containing the structure and associated Hubbard parameters.') + + spec.exit_code(330, 'ERROR_FAILED_TO_DETERMINE_PSEUDO_POTENTIAL', + message='Failed to determine the correct pseudo potential after the structure changed its kind names.') + spec.exit_code(401, 'ERROR_SUB_PROCESS_FAILED_RECON', + message='The reconnaissance PwBaseWorkChain sub process failed') + spec.exit_code(402, 'ERROR_SUB_PROCESS_FAILED_RELAX', + message='The PwRelaxWorkChain sub process failed in iteration {iteration}') + spec.exit_code(403, 'ERROR_SUB_PROCESS_FAILED_SCF', + message='The scf PwBaseWorkChain sub process failed in iteration {iteration}') + spec.exit_code(404, 'ERROR_SUB_PROCESS_FAILED_HP', + message='The HpWorkChain sub process failed in iteration {iteration}') + spec.exit_code(405, 'ERROR_NON_INTEGER_TOT_MAGNETIZATION', message='The scf PwBaseWorkChain sub process in iteration {iteration}'\ - 'returned a non integer total magnetization (threshold exceeded).' - ) + 'returned a non integer total magnetization (threshold exceeded).') + # yapf: enable @classmethod def get_protocol_filepath(cls): @@ -289,12 +243,13 @@ def get_builder_from_protocol( if 'relax_frequency' in inputs: builder.relax_frequency = orm.Int(inputs['relax_frequency']) + if 'skip_relax_iterations' in inputs: + builder.skip_relax_iterations = orm.Int(inputs['skip_relax_iterations']) builder.hubbard_structure = hubbard_structure builder.relax = relax builder.scf = scf builder.hubbard = hubbard - builder.skip_first_relax = orm.Bool(inputs['skip_first_relax']) builder.tolerance_onsite = orm.Float(inputs['tolerance_onsite']) builder.tolerance_intersite = orm.Float(inputs['tolerance_intersite']) builder.max_iterations = orm.Int(inputs['max_iterations']) @@ -312,7 +267,9 @@ def setup(self): self.ctx.is_insulator = None self.ctx.is_magnetic = False self.ctx.iteration = 0 - self.ctx.skip_first_relax = self.inputs.skip_first_relax.value + self.ctx.skip_relax_iterations = 0 + if 'skip_relax_iterations' in self.inputs: + self.ctx.skip_relax_iterations = self.inputs.skip_relax_iterations.value self.ctx.relax_frequency = 1 if 'relax_frequency' in self.inputs: self.ctx.relax_frequency = self.inputs.relax_frequency.value @@ -342,23 +299,35 @@ def should_run_relax(self): if 'relax' not in self.inputs: return False - if self.ctx.skip_first_relax: - self.ctx.skip_first_relax = False # only the first one will be skipped - self.report('`skip_first_relax` is set to `True`. Skipping first relaxation.') + if self.ctx.iteration <= self.ctx.skip_relax_iterations: + self.report(( + f'`skip_relax_iterations` is set to {self.ctx.skip_relax_iterations}. ' + f'Skipping relaxation for iteration {self.ctx.iteration}.' + )) return False if self.ctx.iteration % self.ctx.relax_frequency != 0: self.report(( f'`relax_frequency` is set to {self.ctx.relax_frequency}. 
' - f'Skipping relaxation for iteration {self.ctx.iteration }.' + f'Skipping relaxation for iteration {self.ctx.iteration}.' )) return False - return 'relax' in self.inputs + return True def should_check_convergence(self): """Return whether to check the convergence of Hubbard parameters.""" - return self.inputs.meta_convergence.value + if not self.inputs.meta_convergence.value: + return False + + if self.ctx.iteration <= self.ctx.skip_relax_iterations: + self.report(( + f'`skip_relax_iterations` is set to {self.ctx.skip_relax_iterations}. ' + f'Skipping convergence check for iteration {self.ctx.iteration}.' + )) + return False + + return True def should_run_iteration(self): """Return whether a new process should be run.""" @@ -601,10 +570,11 @@ def inspect_hp(self): self.report(f'hp.x in iteration {self.ctx.iteration} failed with exit status {workchain.exit_status}') return self.exit_codes.ERROR_SUB_PROCESS_FAILED_HP.format(iteration=self.ctx.iteration) - if not self.inputs.meta_convergence: + if not self.should_check_convergence(): self.ctx.current_hubbard_structure = workchain.outputs.hubbard_structure - self.report('meta convergence is switched off, so not checking convergence of Hubbard parameters.') - self.ctx.is_converged = True + if not self.inputs.meta_convergence: + self.report('meta convergence is switched off, so not checking convergence of Hubbard parameters.') + self.ctx.is_converged = True def check_convergence(self): """Check the convergence of the Hubbard parameters.""" diff --git a/src/aiida_quantumespresso_hp/workflows/protocols/hubbard.yaml b/src/aiida_quantumespresso_hp/workflows/protocols/hubbard.yaml index b4aa3ad..cb32044 100644 --- a/src/aiida_quantumespresso_hp/workflows/protocols/hubbard.yaml +++ b/src/aiida_quantumespresso_hp/workflows/protocols/hubbard.yaml @@ -4,7 +4,6 @@ default_inputs: meta_convergence: True tolerance_onsite: 0.1 tolerance_intersite: 0.01 - skip_first_relax: False scf: kpoints_distance: 0.4 diff --git a/tests/workflows/protocols/test_hubbard.py b/tests/workflows/protocols/test_hubbard.py index 9da437b..37d0f21 100644 --- a/tests/workflows/protocols/test_hubbard.py +++ b/tests/workflows/protocols/test_hubbard.py @@ -39,7 +39,7 @@ def test_default(fixture_code, data_regression, generate_hubbard_structure, seri 'tolerance_intersite': 1 }, { - 'skip_first_relax': True + 'skip_relax_iterations': 2 }, { 'relax_frequency': 3 diff --git a/tests/workflows/protocols/test_hubbard/test_default.yml b/tests/workflows/protocols/test_hubbard/test_default.yml index 275708d..ee18383 100644 --- a/tests/workflows/protocols/test_hubbard/test_default.yml +++ b/tests/workflows/protocols/test_hubbard/test_default.yml @@ -92,6 +92,5 @@ scf: Co: Co Li: Li O: O -skip_first_relax: false tolerance_intersite: 0.01 tolerance_onsite: 0.1 diff --git a/tests/workflows/test_hubbard.py b/tests/workflows/test_hubbard.py index feda8fe..11b5152 100644 --- a/tests/workflows/test_hubbard.py +++ b/tests/workflows/test_hubbard.py @@ -115,6 +115,20 @@ def test_validate_inputs_invalid_inputs(generate_workchain_hubbard, generate_inp generate_workchain_hubbard(inputs=inputs) +@pytest.mark.parametrize('parameters', ('skip_relax_iterations', 'relax_frequency')) +@pytest.mark.usefixtures('aiida_profile') +def test_validate_invalid_positve_input(generate_workchain_hubbard, generate_inputs_hubbard, parameters): + """Test `SelfConsistentHubbardWorkChain` for invalid positive inputs.""" + from aiida.orm import Int + + inputs = AttributeDict(generate_inputs_hubbard()) + inputs.update({parameters: 
Int(-1)}) + + match = 'the value must be positive.' + with pytest.raises(ValueError, match=match): + generate_workchain_hubbard(inputs=inputs) + + @pytest.mark.usefixtures('aiida_profile') def test_setup(generate_workchain_hubbard, generate_inputs_hubbard): """Test `SelfConsistentHubbardWorkChain.setup`.""" @@ -124,12 +138,12 @@ def test_setup(generate_workchain_hubbard, generate_inputs_hubbard): assert process.ctx.iteration == 0 assert process.ctx.relax_frequency == 1 + assert process.ctx.skip_relax_iterations == 0 assert process.ctx.current_hubbard_structure == inputs['hubbard_structure'] assert process.ctx.current_magnetic_moments is None - assert process.ctx.is_converged is False + assert not process.ctx.is_converged assert process.ctx.is_insulator is None assert not process.ctx.is_magnetic - assert not process.ctx.skip_first_relax assert not process.should_check_convergence() @@ -162,19 +176,55 @@ def test_magnetic_setup(generate_workchain_hubbard, generate_inputs_hubbard): @pytest.mark.usefixtures('aiida_profile') -def test_skip_first_relax(generate_workchain_hubbard, generate_inputs_hubbard): - """Test `SelfConsistentHubbardWorkChain` when skipping only the first relax.""" - from aiida.orm import Bool +def test_skip_relax_iterations(generate_workchain_hubbard, generate_inputs_hubbard, generate_hp_workchain_node): + """Test `SelfConsistentHubbardWorkChain` when skipping the first relax iterations.""" + from aiida.orm import Bool, Int inputs = generate_inputs_hubbard() - inputs['skip_first_relax'] = Bool(True) + inputs['skip_relax_iterations'] = Int(1) + inputs['meta_convergence'] = Bool(True) process = generate_workchain_hubbard(inputs=inputs) - process.setup() + # 1 + process.update_iteration() + assert process.ctx.skip_relax_iterations == 1 + assert process.ctx.iteration == 1 + assert not process.should_run_relax() + assert not process.should_check_convergence() + process.ctx.workchains_hp = [generate_hp_workchain_node()] + process.inspect_hp() + assert process.ctx.current_hubbard_structure == process.ctx.workchains_hp[-1].outputs.hubbard_structure + # 2 + process.update_iteration() + assert process.should_run_relax() + assert process.should_check_convergence() + # 3 + process.update_iteration() + assert process.should_run_relax() + assert process.should_check_convergence() - assert not process.should_run_relax() # skip only first one - assert process.should_run_relax() # the second one not - assert process.should_run_relax() # and the third neither! 
+ inputs['skip_relax_iterations'] = Int(2) + process = generate_workchain_hubbard(inputs=inputs) + process.setup() + # 1 + process.update_iteration() + assert process.ctx.skip_relax_iterations == 2 + assert not process.should_run_relax() + assert not process.should_check_convergence() + process.ctx.workchains_hp = [generate_hp_workchain_node()] + process.inspect_hp() + assert process.ctx.current_hubbard_structure == process.ctx.workchains_hp[-1].outputs.hubbard_structure + # 2 + process.update_iteration() + assert not process.should_run_relax() + assert not process.should_check_convergence() + process.ctx.workchains_hp.append(generate_hp_workchain_node()) + process.inspect_hp() + assert process.ctx.current_hubbard_structure == process.ctx.workchains_hp[-1].outputs.hubbard_structure + # 3 + process.update_iteration() + assert process.should_run_relax() + assert process.should_check_convergence() @pytest.mark.usefixtures('aiida_profile') @@ -185,10 +235,9 @@ def test_relax_frequency(generate_workchain_hubbard, generate_inputs_hubbard): inputs = generate_inputs_hubbard() inputs['relax_frequency'] = Int(3) process = generate_workchain_hubbard(inputs=inputs) - process.setup() - process.update_iteration() # it updates first in the while of the outline + process.update_iteration() assert not process.should_run_relax() # skip process.update_iteration() assert not process.should_run_relax() # skip @@ -205,7 +254,8 @@ def test_should_check_convergence(generate_workchain_hubbard, generate_inputs_hu inputs = generate_inputs_hubbard() inputs['meta_convergence'] = Bool(True) process = generate_workchain_hubbard(inputs=inputs) - + process.setup() + process.update_iteration() assert process.should_check_convergence()
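
For reference, a minimal usage sketch of the new inputs, assuming a loaded AiiDA profile, configured `pw.x` and `hp.x` codes, an existing `HubbardStructureData` node, and the `pw_code`/`hp_code` keyword arguments of `get_builder_from_protocol`; the code labels and node identifier below are illustrative placeholders, and the override keys mirror those exercised in the protocol tests of this patch.

```python
from aiida import load_profile, orm
from aiida.engine import submit

from aiida_quantumespresso_hp.workflows.hubbard import SelfConsistentHubbardWorkChain

load_profile()

# Placeholders: adapt the code labels and load your own HubbardStructureData node.
pw_code = orm.load_code('pw@localhost')
hp_code = orm.load_code('hp@localhost')
hubbard_structure = orm.load_node('<HubbardStructureData identifier>')

builder = SelfConsistentHubbardWorkChain.get_builder_from_protocol(
    pw_code=pw_code,
    hp_code=hp_code,
    hubbard_structure=hubbard_structure,
    overrides={
        'meta_convergence': True,
        'skip_relax_iterations': 2,  # skip `relax` and the convergence check for the first two iterations
        'relax_frequency': 3,        # afterwards, run `relax` only every third iteration
    },
)
submit(builder)
```

With these settings, iterations 1 and 2 run only the scf and hp steps, while the relaxation and the convergence check start from iteration 3, matching the behaviour exercised in `test_skip_relax_iterations`.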