From dee667b0af75bf528378318704517ded07ec634a Mon Sep 17 00:00:00 2001 From: Lorenzo <79980269+bastonero@users.noreply.github.com> Date: Sun, 16 Feb 2025 19:36:08 +0000 Subject: [PATCH] :books: Docs: update tutorials, how-tos, and citations --- docs/source/1_computing_hubbard.ipynb | 2 +- docs/source/2_parallel_hubbard.ipynb | 13 +- docs/source/3_self_consistent.ipynb | 188 ++++------------------- docs/source/citeus.md | 23 +++ docs/source/howto/analyze.md | 45 ++++++ docs/source/howto/calculations/hp.md | 6 +- docs/source/howto/index.md | 1 + docs/source/local_module/temp_profile.py | 2 +- docs/source/topics/index.md | 2 + src/aiida_hubbard/workflows/hubbard.py | 27 ++-- 10 files changed, 129 insertions(+), 180 deletions(-) create mode 100644 docs/source/citeus.md create mode 100644 docs/source/howto/analyze.md diff --git a/docs/source/1_computing_hubbard.ipynb b/docs/source/1_computing_hubbard.ipynb index fa4109e..471c444 100644 --- a/docs/source/1_computing_hubbard.ipynb +++ b/docs/source/1_computing_hubbard.ipynb @@ -183,7 +183,7 @@ "from aiida_quantumespresso.workflows.pw.base import PwBaseWorkChain\n", "from aiida_quantumespresso.common.types import ElectronicType\n", "kpoints = KpointsData()\n", - "kpoints.set_kpoints_mesh([2,2,2])\n", + "kpoints.set_kpoints_mesh([1,1,1])\n", "\n", "builder = PwBaseWorkChain.get_builder_from_protocol(\n", " code=data.pw_code, # modify here if you downloaded the notebook\n", diff --git a/docs/source/2_parallel_hubbard.ipynb b/docs/source/2_parallel_hubbard.ipynb index 4a40197..a25f7f6 100644 --- a/docs/source/2_parallel_hubbard.ipynb +++ b/docs/source/2_parallel_hubbard.ipynb @@ -63,7 +63,7 @@ "from aiida_quantumespresso.workflows.pw.base import PwBaseWorkChain\n", "from aiida_quantumespresso.common.types import ElectronicType\n", "kpoints = KpointsData()\n", - "kpoints.set_kpoints_mesh([2,2,2])\n", + "kpoints.set_kpoints_mesh([1,1,1])\n", "\n", "builder = PwBaseWorkChain.get_builder_from_protocol(\n", " code=data.pw_code, # 
modify here if you downloaded the notebook\n", @@ -108,7 +108,7 @@ " \"parallelize_atoms\":True, \n", " \"parallelize_qpoints\":False, \n", " \"hp\":{\"hubbard_structure\":data.structure},\n", - " \"qpoints_distance\": 1000, # to get few q points\n", + " \"qpoints_distance\": 100.0, # to get few q points\n", " }\n", ")\n", "\n", @@ -139,11 +139,11 @@ "metadata": {}, "source": [ "The following just happened:\n", - "- A grid of q points is generated automatically using the distance (between points) in $\\AA$ we gave in input (of 1000 $\\AA$ to have very sparse - it is just a tutorial!).\n", + "- A grid of q points is generated automatically using the distance (between points) in $\\r{A}^{-1}$ we gave in input (of 100 $\\r{A}^{-1}$ to have very sparse - it is just a tutorial!).\n", "- The `HpParallelizeAtomsWorkChain` is called.\n", "- This work chain calls first a `HpBaseWorkChain` to get the independent atoms to perturb.\n", "- **Three** `HpBaseWorkChain` are submitted __simultaneously__, one for cobalt, and two for the two oxygen sites.\n", - "- The response matrices ($\\chi^{(0)}$,$\\chi$) of each atom are collected to post-process them and compute the final U/V values using $$V_{IJ} = (\\chi^{(0) -1} -\\chi^{-1})_{IJ}$$\n", + "- The response matrices ($\\chi^{(0)}$,$\\chi$) of each atom are collected to post-process them and compute the final U/V values using $V_{IJ} = (\\chi^{(0) -1} -\\chi^{-1})_{IJ}$\n", "\n", "As for the `HpBaseWorkChain`, we also have here the `hubbard_structure` output namespace, containing the same results as the serial execution:" ] @@ -193,7 +193,8 @@ " \"parallelize_qpoints\":True, \n", " \"hp\":{\"hubbard_structure\":data.structure},\n", " \"qpoints_distance\": 1000, # to get few q points\n", - " }\n", + " \"max_concurrent_base_workchains\": 2, # useful to not overload HPC or local computer\n", + " }\n", ")\n", "\n", "results, hp_node = run_get_node(builder)" @@ -214,7 +215,7 @@ "metadata": {}, "source": [ "The following just 
happened:\n", - "- A grid of q points was generated automatically using the distance (between points) in $\\AA$ we gave in input (of 1000 $\\AA$ to have very sparse - it is just a tutorial!).\n", + "- A grid of q points was generated automatically using the distance (between points) in $\\r{A}^{-1}$ we gave in input (of 1000 $\\r{A}^{-1}$ to have very sparse - it is just a tutorial!).\n", "- The `HpParallelizeAtomsWorkChain` is called.\n", "- This work chain calls first a `HpBaseWorkChain` to get the independent atoms to perturb.\n", "- For independent each atom (three in total) an `HpParallelizeQpointsWorkChain` is submitted __simultaneously__, one for cobalt, and two for the two oxygen sites.\n", diff --git a/docs/source/3_self_consistent.ipynb b/docs/source/3_self_consistent.ipynb index 4da89f1..13ec78d 100644 --- a/docs/source/3_self_consistent.ipynb +++ b/docs/source/3_self_consistent.ipynb @@ -15,24 +15,13 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": null, "metadata": { "tags": [ "hide-cell" ] }, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 26, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "from local_module import load_temp_profile\n", "from aiida_quantumespresso.data.hubbard_structure import HubbardStructureData\n", @@ -59,6 +48,7 @@ " ['O', 'O', (0, 0, 10.392)], \n", " ['Li', 'Li', (0, 0, 7.0268)],\n", "]\n", + "\n", "hubbard_structure = HubbardStructureData(cell=cell, sites=sites)\n", "hubbard_structure.initialize_onsites_hubbard(\"Co\", \"3d\")\n", "hubbard_structure.store()" @@ -108,93 +98,13 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": null, "metadata": { "tags": [ "hide-output" ] }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|setup]: system is treated to be non-magnetic because `nspin == 1` in 
`scf.pw.parameters` input.\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|run_relax]: launching PwRelaxWorkChain<444> iteration #1\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [444|PwRelaxWorkChain|run_relax]: launching PwBaseWorkChain<447>\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [447|PwBaseWorkChain|run_process]: launching PwCalculation<452> iteration #1\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [447|PwBaseWorkChain|results]: work chain completed after 1 iterations\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [447|PwBaseWorkChain|on_terminated]: remote folders will not be cleaned\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [444|PwRelaxWorkChain|inspect_relax]: after iteration 1 cell volume of relaxed structure is 31.592539105379053\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [444|PwRelaxWorkChain|run_relax]: launching PwBaseWorkChain<461>\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [461|PwBaseWorkChain|run_process]: launching PwCalculation<466> iteration #1\n", - "\u001b[31m\u001b[1mError\u001b[0m: Then ionic minimization cycle converged but the thresholds are exceeded in the final SCF.\n", - "\u001b[93m\u001b[1mWarning\u001b[0m: output parser returned exit code<501>: Then ionic minimization cycle converged but the thresholds are exceeded in the final SCF.\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [461|PwBaseWorkChain|report_error_handled]: PwCalculation<466> failed with exit status 501: Then ionic minimization cycle converged but the thresholds are exceeded in the final SCF.\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [461|PwBaseWorkChain|report_error_handled]: Action taken: ionic convergence thresholds met except in final scf: consider structure relaxed.\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [461|PwBaseWorkChain|results]: work chain completed after 1 iterations\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [461|PwBaseWorkChain|inspect_process]: PwCalculation<466> failed but a handler detected an unrecoverable problem, aborting\n", 
- "\u001b[34m\u001b[1mReport\u001b[0m: [461|PwBaseWorkChain|on_terminated]: remote folders will not be cleaned\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [444|PwRelaxWorkChain|inspect_relax]: after iteration 2 cell volume of relaxed structure is 31.592538691211796\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [444|PwRelaxWorkChain|inspect_relax]: relative cell volume difference 1.310965400891578e-08 smaller than threshold 0.05\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [444|PwRelaxWorkChain|results]: workchain completed after 2 iterations\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [444|PwRelaxWorkChain|on_terminated]: remote folders will not be cleaned\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|run_scf_smearing]: launching PwBaseWorkChain<475> with smeared occupations\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [475|PwBaseWorkChain|run_process]: launching PwCalculation<480> iteration #1\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [475|PwBaseWorkChain|results]: work chain completed after 1 iterations\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [475|PwBaseWorkChain|on_terminated]: remote folders will not be cleaned\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|recon_scf]: after relaxation, system is determined to be an insulator\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|run_scf_fixed]: launching PwBaseWorkChain<488> with fixed occupations\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [488|PwBaseWorkChain|run_process]: launching PwCalculation<493> iteration #1\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [488|PwBaseWorkChain|results]: work chain completed after 1 iterations\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [488|PwBaseWorkChain|on_terminated]: remote folders will not be cleaned\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|run_hp]: launching HpWorkChain<499> iteration #1\n", - "\u001b[34m\u001b[1mReport\u001b[0m: 
[499|HpWorkChain|run_base_workchain]: running in serial, launching HpBaseWorkChain<505>\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [505|HpBaseWorkChain|run_process]: launching HpCalculation<507> iteration #1\n", - "1\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [505|HpBaseWorkChain|results]: work chain completed after 1 iterations\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [505|HpBaseWorkChain|on_terminated]: remote folders will not be cleaned\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [505|HpBaseWorkChain|on_terminated]: remote folders will not be cleaned\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [499|HpWorkChain|on_terminated]: remote folders will not be cleaned\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|check_convergence]: Hubbard onsites parameters are not converged. Max difference is 8.14829999.\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|run_relax]: launching PwRelaxWorkChain<516> iteration #2\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [516|PwRelaxWorkChain|run_relax]: launching PwBaseWorkChain<519>\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [519|PwBaseWorkChain|run_process]: launching PwCalculation<524> iteration #1\n", - "\u001b[31m\u001b[1mError\u001b[0m: ERROR_IONIC_CYCLE_BFGS_HISTORY_FAILURE\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [519|PwBaseWorkChain|results]: work chain completed after 1 iterations\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [519|PwBaseWorkChain|on_terminated]: remote folders will not be cleaned\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [516|PwRelaxWorkChain|inspect_relax]: after iteration 1 cell volume of relaxed structure is 31.944624207488268\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [516|PwRelaxWorkChain|run_relax]: launching PwBaseWorkChain<533>\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [533|PwBaseWorkChain|run_process]: launching PwCalculation<538> iteration #1\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [533|PwBaseWorkChain|results]: work chain completed 
after 1 iterations\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [533|PwBaseWorkChain|on_terminated]: remote folders will not be cleaned\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [516|PwRelaxWorkChain|inspect_relax]: after iteration 2 cell volume of relaxed structure is 31.95904119405152\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [516|PwRelaxWorkChain|inspect_relax]: relative cell volume difference 0.0004513118222837555 smaller than threshold 0.05\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [516|PwRelaxWorkChain|results]: workchain completed after 2 iterations\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [516|PwRelaxWorkChain|on_terminated]: remote folders will not be cleaned\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|run_scf_smearing]: launching PwBaseWorkChain<547> with smeared occupations\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [547|PwBaseWorkChain|run_process]: launching PwCalculation<552> iteration #1\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [547|PwBaseWorkChain|results]: work chain completed after 1 iterations\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [547|PwBaseWorkChain|on_terminated]: remote folders will not be cleaned\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|recon_scf]: after relaxation, system is determined to be an insulator\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|run_scf_fixed]: launching PwBaseWorkChain<560> with fixed occupations\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [560|PwBaseWorkChain|run_process]: launching PwCalculation<565> iteration #1\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [560|PwBaseWorkChain|results]: work chain completed after 1 iterations\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [560|PwBaseWorkChain|on_terminated]: remote folders will not be cleaned\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|run_hp]: launching HpWorkChain<571> iteration #2\n", - 
"\u001b[34m\u001b[1mReport\u001b[0m: [571|HpWorkChain|run_base_workchain]: running in serial, launching HpBaseWorkChain<577>\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [577|HpBaseWorkChain|run_process]: launching HpCalculation<579> iteration #1\n", - "1\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [577|HpBaseWorkChain|results]: work chain completed after 1 iterations\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [577|HpBaseWorkChain|on_terminated]: remote folders will not be cleaned\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [577|HpBaseWorkChain|on_terminated]: remote folders will not be cleaned\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [571|HpWorkChain|on_terminated]: remote folders will not be cleaned\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|check_convergence]: Hubbard parameters are converged. Stopping the cycle.\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|run_results]: Hubbard parameters self-consistently converged in 2 iterations\n", - "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|on_terminated]: remote folders will not be cleaned\n" - ] - } - ], + "outputs": [], "source": [ "from aiida.engine import run_get_node\n", "from aiida_hubbard.workflows.hubbard import SelfConsistentHubbardWorkChain\n", @@ -208,9 +118,31 @@ " \"clean_workdir\": False,\n", " \"tolerance_onsite\": 0.5,\n", " \"tolerance_intersite\": 0.1,\n", - " \"relax\":{\"base\":{\"kpoints_distance\":1.4}}, # to speed up the tutorial\n", - " \"scf\":{\"kpoints_distance\":1.4}, # to speed up the tutorial\n", - " \"hubbard\":{\"qpoints_distance\":1000, \"parallelize_atoms\":False, \"parallelize_qpoints\":False}}, # to speed up the tutorial\n", + " \"relax\":{\n", + " \"base\":{\n", + " \"kpoints_distance\":100.0,\n", + " \"pw\":{\n", + " \"parameters\":{\n", + " \"SYSTEM\":{\n", + " \"ecutwfc\": 60.0, # to speed up the tutorial\n", + " \"ecutrho\": 60.0 * 8,\n", + " },\n", + " },\n", + " },\n", + " }\n", + " 
}, # to speed up the tutorial\n", + " \"scf\":{\n", + " \"kpoints_distance\":100.0, \n", + " \"pw\":{\n", + " \"parameters\":{\n", + " \"SYSTEM\":{\n", + " \"ecutwfc\": 30.0, # to speed up the tutorial\n", + " \"ecutrho\": 30.0 * 8,\n", + " },\n", + " },\n", + " },\n", + " }, \n", + " \"hubbard\":{\"qpoints_distance\":100.0, \"parallelize_atoms\":False, \"parallelize_qpoints\":False}}, # to speed up the tutorial\n", ")\n", "\n", "results, node = run_get_node(builder)" @@ -226,51 +158,9 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[22mSelfConsistentHubbardWorkChain<442> Finished [0] [2:run_results]\n", - " ├── PwRelaxWorkChain<444> Finished [0] [3:results]\n", - " │ ├── PwBaseWorkChain<447> Finished [0] [3:results]\n", - " │ │ ├── create_kpoints_from_distance<448> Finished [0]\n", - " │ │ └── PwCalculation<452> Finished [0]\n", - " │ └── PwBaseWorkChain<461> Finished [501] [2:while_(should_run_process)(2:inspect_process)]\n", - " │ ├── create_kpoints_from_distance<462> Finished [0]\n", - " │ └── PwCalculation<466> Finished [501]\n", - " ├── PwBaseWorkChain<475> Finished [0] [3:results]\n", - " │ ├── create_kpoints_from_distance<476> Finished [0]\n", - " │ └── PwCalculation<480> Finished [0]\n", - " ├── PwBaseWorkChain<488> Finished [0] [3:results]\n", - " │ ├── create_kpoints_from_distance<489> Finished [0]\n", - " │ └── PwCalculation<493> Finished [0]\n", - " ├── HpWorkChain<499> Finished [0] [3:results]\n", - " │ ├── create_kpoints_from_distance<501> Finished [0]\n", - " │ └── HpBaseWorkChain<505> Finished [0] [3:results]\n", - " │ └── HpCalculation<507> Finished [0]\n", - " ├── PwRelaxWorkChain<516> Finished [0] [3:results]\n", - " │ ├── PwBaseWorkChain<519> Finished [0] [3:results]\n", - " │ │ ├── create_kpoints_from_distance<520> Finished [0]\n", - " │ │ └── PwCalculation<524> Finished [0]\n", - " │ └── PwBaseWorkChain<533> 
Finished [0] [3:results]\n", - " │ ├── create_kpoints_from_distance<534> Finished [0]\n", - " │ └── PwCalculation<538> Finished [0]\n", - " ├── PwBaseWorkChain<547> Finished [0] [3:results]\n", - " │ ├── create_kpoints_from_distance<548> Finished [0]\n", - " │ └── PwCalculation<552> Finished [0]\n", - " ├── PwBaseWorkChain<560> Finished [0] [3:results]\n", - " │ ├── create_kpoints_from_distance<561> Finished [0]\n", - " │ └── PwCalculation<565> Finished [0]\n", - " └── HpWorkChain<571> Finished [0] [3:results]\n", - " ├── create_kpoints_from_distance<573> Finished [0]\n", - " └── HpBaseWorkChain<577> Finished [0] [3:results]\n", - " └── HpCalculation<579> Finished [0]\u001b[0m\n" - ] - } - ], + "outputs": [], "source": [ "%verdi process status {node.pk}" ] @@ -285,19 +175,9 @@ }, { "cell_type": "code", - "execution_count": 30, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "HUBBARD\tortho-atomic\n", - " U\tCo-3d\t7.8264\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "from aiida_quantumespresso.utils.hubbard import HubbardUtils\n", "print(HubbardUtils(results['hubbard_structure']).get_hubbard_card())" diff --git a/docs/source/citeus.md b/docs/source/citeus.md new file mode 100644 index 0000000..8998546 --- /dev/null +++ b/docs/source/citeus.md @@ -0,0 +1,23 @@ +(citeus)= + +# Cite + +If you use this plugin for your research, please cite the following works: + +> Lorenzo Bastonero, Cristiano Malica, Eric Macke, Marnik Bercx, Sebastiaan Huber, Iurii Timrov, and Nicola Marzari, *Hubbard from first-principles made easy from automated and reproducible workflows* (2025) + +> Sebastiaan. P. Huber _et al._, [*AiiDA 1.0, a scalable computational infrastructure for automated reproducible workflows and data provenance*](https://doi.org/10.1038/s41597-020-00638-4), Scientific Data **7**, 300 (2020) + +> Martin Uhrin, Sebastiaan. P. 
Huber, Jusong Yu, Nicola Marzari, and Giovanni Pizzi, [*Workflows in AiiDA: Engineering a high-throughput, event-based engine for robust and modular computational workflows*](https://doi.org/10.1016/j.commatsci.2020.110086), Computational Materials Science **187**, 110086 (2021) + +Please also cite the relevant _Quantum ESPRESSO_ and _HP_ references. + +> Iurii Timrov, Nicola Marzari, and Matteo Cococcioni, [*HP – A code for the calculation of Hubbard parameters using density-functional perturbation theory*](https://www.sciencedirect.com/science/article/pii/S0010465522001746), Computer Physics Communications **279**, 108455 (2022) + +> Paolo Giannozzi _et al._, [*Advanced capabilities for materials modelling with Quantum ESPRESSO*](https://iopscience.iop.org/article/10.1088/1361-648X/aa8f79) J.Phys.:Condens.Matter **29**, 465901 (2017) + +> Paolo Giannozzi _et al._, [*QUANTUM ESPRESSO: a modular and open-source software project for quantum simulations of materials*](https://iopscience.iop.org/article/10.1088/0953-8984/21/39/395502) J. Phys. Condens. Matter **21**, 395502 (2009) + +For the GPU-enabled version of _Quantum ESPRESSO_: + +> Paolo Giannozzi _et al._, [*Quantum ESPRESSO toward the exascale*](https://pubs.aip.org/aip/jcp/article/152/15/154105/1058748/Quantum-ESPRESSO-toward-the-exascale), J. Chem. Phys. **152**, 154105 (2020) diff --git a/docs/source/howto/analyze.md b/docs/source/howto/analyze.md new file mode 100644 index 0000000..5472423 --- /dev/null +++ b/docs/source/howto/analyze.md @@ -0,0 +1,45 @@ +(howto-analyze)= + +# How to analyze the results + +When a `SelfConsistentHubbardWorkChain` is completed, there are quite a few possible analyses to perform. + +## How to inspect the final Hubbard parameters + +A _complete_ `SelfConsistentHubbardWorkChain` will produce a {{ hubbard_structure }} containing the parsed Hubbard parameters.
+The parameters are stored under the `hubbard` namespace: + +```shell +In [1]: node = load_node(HP_CALCULATION_IDENTIFIER) + +In [2]: node.outputs.hubbard_structure.hubbard +Out[2]: +Hubbard(parameters=(HubbardParameters([...]), ...), projectors='ortho-atomic', formulation='dudarev') +``` + +This corresponds to a `pydantic` class, so you can access the stored values (`parameters`, `projectors`, `formulation`) simply by: +```shell +In [3]: node.outputs.hubbard_structure.hubbard.parameters +Out[3]: [HubbardParameters(atom_index=0, atom_manifold='3d', neighbour_index=0, neighbour_manifold='3d', translation=(0, 0, 0), value=5.11, hubbard_type='Ueff'), ...] +``` + +To access a specific value: +```shell +In [4]: hubbard_structure.hubbard.parameters[0].value +Out[4]: 5.11 +``` + +To visualize them as Quantum ESPRESSO HUBBARD card: + +```shell +In [5]: from aiida_quantumespresso.utils.hubbard import HubbardUtils + +In [6]: hubbard_card = HubbardUtils(node.outputs.hubbard_structure.hubbard).get_hubbard_card() + +In [7]: print(hubbard_card) +Out[7]: +HUBBARD ortho-atomic +V Co-3d Co-3d 1 1 5.11 +V Co-3d O-2p 1 2 1.65 +... +``` diff --git a/docs/source/howto/calculations/hp.md b/docs/source/howto/calculations/hp.md index 50be421..a209489 100644 --- a/docs/source/howto/calculations/hp.md +++ b/docs/source/howto/calculations/hp.md @@ -112,13 +112,13 @@ builder = load_code('hp').get_builder() builder.parent_scf = parent_scf ``` -## How to run a calculation without symlinking +## How to run a calculation with symlinking Specify `PARENT_FOLDER_SYMLINK: False` in the `settings` input: ```python builder = load_code('hp').get_builder() -builder.settings = Dict({'PARENT_FOLDER_SYMLINK': False}) +builder.settings = Dict({'PARENT_FOLDER_SYMLINK': True}) ``` If this setting is specified, the plugin will NOT symlink the SCF folder.
@@ -146,7 +146,7 @@ To visualize them as Quantum ESPRESSO HUBBARD card: ```python In [3]: from aiida_quantumespresso.utils.hubbard import HubbardUtils -In [4]: hubbard_card = HubbardUtils(node.outputs.hubbard_structure.hubbard).get_hubbard_card +In [4]: hubbard_card = HubbardUtils(node.outputs.hubbard_structure.hubbard).get_hubbard_card() In [5]: print(hubbard_card) Out[5]: diff --git a/docs/source/howto/index.md b/docs/source/howto/index.md index b215b60..3f8dd4e 100644 --- a/docs/source/howto/index.md +++ b/docs/source/howto/index.md @@ -10,6 +10,7 @@ At the very least, make sure you have followed and understand the tutorial on [r ```{toctree} :maxdepth: 2 +analyze understand calculations/index workflows/index diff --git a/docs/source/local_module/temp_profile.py b/docs/source/local_module/temp_profile.py index 881d8d8..60358d3 100644 --- a/docs/source/local_module/temp_profile.py +++ b/docs/source/local_module/temp_profile.py @@ -180,7 +180,7 @@ def create_licoo_hubbard_structure(): return hubbard_structure -def load_sssp_pseudos(version='1.2', functional='PBEsol', protocol='efficiency'): +def load_sssp_pseudos(version='1.3', functional='PBEsol', protocol='efficiency'): """Load the SSSP pseudopotentials.""" config = SsspConfiguration(version, functional, protocol) label = SsspFamily.format_configuration_label(config) diff --git a/docs/source/topics/index.md b/docs/source/topics/index.md index 7b69f59..e9d8bfe 100644 --- a/docs/source/topics/index.md +++ b/docs/source/topics/index.md @@ -1,3 +1,5 @@ +(topics)= + # Topic guides ```{toctree} diff --git a/src/aiida_hubbard/workflows/hubbard.py b/src/aiida_hubbard/workflows/hubbard.py index 369a434..8b0beda 100644 --- a/src/aiida_hubbard/workflows/hubbard.py +++ b/src/aiida_hubbard/workflows/hubbard.py @@ -75,21 +75,18 @@ class SelfConsistentHubbardWorkChain(WorkChain, ProtocolMixin): The procedure in each step of the convergence cycle is slightly different depending on the electronic and magnetic properties of the 
system. Each cycle will roughly consist of three steps: - * Relaxing the structure at the current Hubbard values (optional). - * One or two SCF calculations depending whether the system is metallic or insulating, respectively. - * A self-consistent calculation of the Hubbard parameters, restarted from the last SCF run. - - The possible options for the set of SCF calculations that have to be run in the second step look are: - - * Metals: - - - SCF with smearing. - - * Insulators - - - SCF with smearing. - - SCF with fixed occupations; if magnetic, total magnetization and number of bands - are fixed to the values found from the previous SCF calculation. + * Relaxing the structure at the current Hubbard values (optional). + * One or two DFT calculations depending on whether the system is metallic or insulating, respectively. + * A DFPT calculation of the Hubbard parameters, perturbing the ground-state of the last DFT run. + + The possible options for the set of DFT SCF calculations that have to be run in the second step are: + + * Metals: + - SCF with smearing. + * Insulators + - SCF with smearing. + - SCF with fixed occupations; if magnetic, total magnetization and number of bands + are fixed to the values found from the previous SCF calculation. When convergence is achieved a node will be returned containing the final converged :class:`~aiida_quantumespresso.data.hubbard_structure.HubbardStructureData`.