From c1a610c259620509d47ca60db11a4ad32d085994 Mon Sep 17 00:00:00 2001 From: Janosh Riebesell Date: Tue, 30 Apr 2024 20:27:52 -0400 Subject: [PATCH] Officially support Python 3.12 and test in CI (#3685) * add python 3.12 to officially supported versions and test it in CI * down pin chgnet>=0.3.0 * fix weird typo nrafo_ew_tstructs * don't depend on tblite above 3.11 since unsupported https://github.com/tblite/tblite/issues/175 * improve TestVasprun.test_standard * drop Lobsterin inheritance from UserDict, use simple dict instead and modify __getitem__ to get the salient __getitem__ behavior from UserDict * try DotDict as super class for Lobsterin * override Lobsterin.__contains__ to fix on py312 --------- Co-authored-by: JaGeo --- .github/workflows/release.yml | 8 +- .github/workflows/test.yml | 6 +- .../get_plane_permutations_optimized.py | 4 +- dev_scripts/chemenv/plane_multiplicity.py | 6 +- pymatgen/alchemy/materials.py | 2 +- pymatgen/alchemy/transmuters.py | 44 +++---- .../structure_prediction/substitutor.py | 16 +-- pymatgen/io/lobster/inputs.py | 69 ++++++----- pymatgen/io/vasp/outputs.py | 18 ++- setup.py | 11 +- tests/alchemy/test_materials.py | 62 +++++----- tests/io/lobster/test_inputs.py | 117 +++++++++--------- tests/io/test_shengbte.py | 2 +- tests/io/vasp/test_outputs.py | 26 ++-- 14 files changed, 203 insertions(+), 188 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 6bae8b67627..755437a1d72 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -29,7 +29,7 @@ jobs: - uses: actions/setup-python@v5 name: Install Python with: - python-version: "3.11" + python-version: "3.12" - name: Build sdist run: | @@ -45,7 +45,7 @@ jobs: strategy: matrix: os: [ubuntu-latest, macos-14, windows-latest] - python-version: ["39", "310", "311"] + python-version: ["39", "310", "311", "312"] runs-on: ${{ matrix.os }} steps: - name: Check out repo @@ -68,10 +68,10 @@ jobs: # For pypi trusted 
publishing id-token: write steps: - - name: Set up Python 3.11 + - name: Set up Python uses: actions/setup-python@v5 with: - python-version: 3.11 + python-version: "3.12" - name: Get build artifacts uses: actions/download-artifact@v3 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index dd7568b2365..2d1c7eb96b3 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -27,16 +27,16 @@ jobs: matrix: # pytest-split automatically distributes work load so parallel jobs finish in similar time os: [ubuntu-latest, windows-latest] - python-version: ["3.9", "3.11"] + python-version: ["3.9", "3.12"] split: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] # include/exclude is meant to maximize CI coverage of different platforms and python # versions while minimizing the total number of jobs. We run all pytest splits with the # oldest supported python version (currently 3.9) on windows (seems most likely to surface - # errors) and with newest version (currently 3.11) on ubuntu (to get complete and speedy + # errors) and with newest version (currently 3.12) on ubuntu (to get complete and speedy # coverage on unix). We ignore mac-os, which is assumed to be similar to ubuntu. 
exclude: - os: windows-latest - python-version: "3.11" + python-version: "3.12" - os: ubuntu-latest python-version: "3.9" diff --git a/dev_scripts/chemenv/get_plane_permutations_optimized.py b/dev_scripts/chemenv/get_plane_permutations_optimized.py index 47ed4f13b93..ef92a27e63e 100644 --- a/dev_scripts/chemenv/get_plane_permutations_optimized.py +++ b/dev_scripts/chemenv/get_plane_permutations_optimized.py @@ -209,7 +209,7 @@ def random_permutations_iterator(initial_permutation, n_permutations): # Definition of the facets all_planes_point_indices = [algo.plane_points] if algo.other_plane_points is not None: - all_planes_point_indices.extend(algo.other_plane_points) + all_planes_point_indices += algo.other_plane_points # Loop on the facets explicit_permutations_per_plane = [] @@ -305,7 +305,7 @@ def random_permutations_iterator(initial_permutation, n_permutations): # Definition of the facets all_planes_point_indices = [algo.plane_points] if algo.other_plane_points is not None: - all_planes_point_indices.extend(algo.other_plane_points) + all_planes_point_indices += algo.other_plane_points # Setup of the permutations to be used for this algorithm diff --git a/dev_scripts/chemenv/plane_multiplicity.py b/dev_scripts/chemenv/plane_multiplicity.py index b439165ee0c..efaf37b90b3 100644 --- a/dev_scripts/chemenv/plane_multiplicity.py +++ b/dev_scripts/chemenv/plane_multiplicity.py @@ -12,11 +12,11 @@ __date__ = "Feb 20, 2016" if __name__ == "__main__": - allcg = AllCoordinationGeometries() + all_cg = AllCoordinationGeometries() cg_symbol = "I:12" all_plane_points = [] - cg = allcg[cg_symbol] + cg = all_cg[cg_symbol] # I:12 if cg_symbol == "I:12": @@ -25,7 +25,7 @@ for edge in edges: opposite_edge = [opposite_points[edge[0]], opposite_points[edge[1]]] equiv_plane = list(edge) - equiv_plane.extend(opposite_edge) + equiv_plane += opposite_edge equiv_plane.sort() all_plane_points.append(tuple(equiv_plane)) all_plane_points = [tuple(equiv_plane) for equiv_plane in 
set(all_plane_points)] diff --git a/pymatgen/alchemy/materials.py b/pymatgen/alchemy/materials.py index 9600b52541d..ef3e40a1902 100644 --- a/pymatgen/alchemy/materials.py +++ b/pymatgen/alchemy/materials.py @@ -228,7 +228,7 @@ def __str__(self) -> str: for hist in self.history: hist.pop("input_structure", None) output.append(str(hist)) - output.extend(("\nOther parameters", "------------", str(self.other_parameters))) + output += ("\nOther parameters", "------------", str(self.other_parameters)) return "\n".join(output) def set_parameter(self, key: str, value: Any) -> TransformedStructure: diff --git a/pymatgen/alchemy/transmuters.py b/pymatgen/alchemy/transmuters.py index 457187624ae..606e749411d 100644 --- a/pymatgen/alchemy/transmuters.py +++ b/pymatgen/alchemy/transmuters.py @@ -22,6 +22,8 @@ from typing_extensions import Self + from pymatgen.alchemy.filters import AbstractStructureFilter + __author__ = "Shyue Ping Ong, Will Richards" __copyright__ = "Copyright 2012, The Materials Project" __version__ = "0.1" @@ -40,7 +42,7 @@ class StandardTransmuter: def __init__( self, - transformed_structures, + transformed_structures: list[TransformedStructure], transformations=None, extend_collection: int = 0, ncores: int | None = None, @@ -130,8 +132,8 @@ def append_transformation(self, transformation, extend_collection=False, clear_r for x in self.transformed_structures: new = x.append_transformation(transformation, extend_collection, clear_redo=clear_redo) if new is not None: - new_structures.extend(new) - self.transformed_structures.extend(new_structures) + new_structures += new + self.transformed_structures += new_structures def extend_transformations(self, transformations): """Extend a sequence of transformations to the TransformedStructure. 
@@ -142,18 +144,16 @@ def extend_transformations(self, transformations): for trafo in transformations: self.append_transformation(trafo) - def apply_filter(self, structure_filter): + def apply_filter(self, structure_filter: AbstractStructureFilter): """Apply a structure_filter to the list of TransformedStructures in the transmuter. Args: structure_filter: StructureFilter to apply. """ - - def test_transformed_structure(ts): - return structure_filter.test(ts.final_structure) - - self.transformed_structures = list(filter(test_transformed_structure, self.transformed_structures)) + self.transformed_structures = list( + filter(lambda ts: structure_filter.test(ts.final_structure), self.transformed_structures) + ) for ts in self.transformed_structures: ts.append_filter(structure_filter) @@ -174,8 +174,8 @@ def set_parameter(self, key, value): key: The key for the parameter. value: The value for the parameter. """ - for x in self.transformed_structures: - x.other_parameters[key] = value + for struct in self.transformed_structures: + struct.other_parameters[key] = value def add_tags(self, tags): """Add tags for the structures generated by the transmuter. @@ -196,11 +196,11 @@ def append_transformed_structures(self, trafo_structs_or_transmuter): transmuter. 
""" if isinstance(trafo_structs_or_transmuter, self.__class__): - self.transformed_structures.extend(trafo_structs_or_transmuter.transformed_structures) + self.transformed_structures += trafo_structs_or_transmuter.transformed_structures else: for ts in trafo_structs_or_transmuter: assert isinstance(ts, TransformedStructure) - self.transformed_structures.extend(trafo_structs_or_transmuter) + self.transformed_structures += trafo_structs_or_transmuter @classmethod def from_structures(cls, structures, transformations=None, extend_collection=0) -> Self: @@ -219,8 +219,8 @@ def from_structures(cls, structures, transformations=None, extend_collection=0) Returns: StandardTransmuter """ - trafo_struct = [TransformedStructure(s, []) for s in structures] - return cls(trafo_struct, transformations, extend_collection) + t_struct = [TransformedStructure(s, []) for s in structures] + return cls(t_struct, transformations, extend_collection) class CifTransmuter(StandardTransmuter): @@ -253,8 +253,8 @@ def __init__(self, cif_string, transformations=None, primitive=True, extend_coll if read_data: structure_data[-1].append(line) for data in structure_data: - trafo_struct = TransformedStructure.from_cif_str("\n".join(data), [], primitive) - transformed_structures.append(trafo_struct) + t_struct = TransformedStructure.from_cif_str("\n".join(data), [], primitive) + transformed_structures.append(t_struct) super().__init__(transformed_structures, transformations, extend_collection) @classmethod @@ -293,8 +293,8 @@ def __init__(self, poscar_string, transformations=None, extend_collection=False) extend_collection: Whether to use more than one output structure from one-to-many transformations. 
""" - trafo_struct = TransformedStructure.from_poscar_str(poscar_string, []) - super().__init__([trafo_struct], transformations, extend_collection=extend_collection) + t_struct = TransformedStructure.from_poscar_str(poscar_string, []) + super().__init__([t_struct], transformations, extend_collection=extend_collection) @classmethod def from_filenames(cls, poscar_filenames, transformations=None, extend_collection=False) -> StandardTransmuter: @@ -373,7 +373,7 @@ def _apply_transformation(inputs): """ ts, transformation, extend_collection, clear_redo = inputs new = ts.append_transformation(transformation, extend_collection, clear_redo=clear_redo) - o = [ts] + out = [ts] if new: - o.extend(new) - return o + out += new + return out diff --git a/pymatgen/analysis/structure_prediction/substitutor.py b/pymatgen/analysis/structure_prediction/substitutor.py index 36efa6b1b6d..45f847d1a0b 100644 --- a/pymatgen/analysis/structure_prediction/substitutor.py +++ b/pymatgen/analysis/structure_prediction/substitutor.py @@ -119,10 +119,10 @@ def pred_from_structures( raise ValueError("the species in target_species are not allowed for the probability model you are using") for permutation in itertools.permutations(target_species): - for s in structures_list: + for dct in structures_list: # check if: species are in the domain, # and the probability of subst. 
is above the threshold - els = s["structure"].elements + els = dct["structure"].elements if ( len(els) == len(permutation) and len(set(els) & set(self.get_allowed_species())) == len(els) @@ -135,18 +135,18 @@ def pred_from_structures( transf = SubstitutionTransformation(clean_subst) - if Substitutor._is_charge_balanced(transf.apply_transformation(s["structure"])): - ts = TransformedStructure( - s["structure"], + if Substitutor._is_charge_balanced(transf.apply_transformation(dct["structure"])): + t_struct = TransformedStructure( + dct["structure"], [transf], - history=[{"source": s["id"]}], + history=[{"source": dct["id"]}], other_parameters={ "type": "structure_prediction", "proba": self._sp.cond_prob_list(permutation, els), }, ) - result.append(ts) - transmuter.append_transformed_structures([ts]) + result.append(t_struct) + transmuter.append_transformed_structures([t_struct]) if remove_duplicates: transmuter.apply_filter(RemoveDuplicatesFilter(symprec=self._symprec)) diff --git a/pymatgen/io/lobster/inputs.py b/pymatgen/io/lobster/inputs.py index 500d7fd83e4..7d4a75917ea 100644 --- a/pymatgen/io/lobster/inputs.py +++ b/pymatgen/io/lobster/inputs.py @@ -132,7 +132,7 @@ def __init__(self, settingsdict: dict): raise KeyError("There are duplicates for the keywords!") self.update(settingsdict) - def __setitem__(self, key, val): + def __setitem__(self, key, val) -> None: """ Add parameter-val pair to Lobsterin. Warns if parameter is not in list of valid lobsterin tags. 
Also cleans the parameter and val by stripping @@ -146,14 +146,25 @@ def __setitem__(self, key, val): super().__setitem__(new_key, val.strip() if isinstance(val, str) else val) - def __getitem__(self, item): + def __getitem__(self, key) -> Any: """Implements getitem from dict to avoid problems with cases.""" - new_item = next((key_here for key_here in self if item.strip().lower() == key_here.lower()), item) + normalized_key = next((k for k in self if key.strip().lower() == k.lower()), key) - if new_item.lower() not in [element.lower() for element in Lobsterin.AVAILABLE_KEYWORDS]: - raise KeyError("Key is currently not available") + key_is_unknown = normalized_key.lower() not in map(str.lower, Lobsterin.AVAILABLE_KEYWORDS) + if key_is_unknown or normalized_key not in self.data: + raise KeyError(f"{key=} is not available") + + return self.data[normalized_key] + + def __contains__(self, key) -> bool: + """Implements contains from dict to avoid problems with cases.""" + normalized_key = next((k for k in self if key.strip().lower() == k.lower()), key) + + key_is_unknown = normalized_key.lower() not in map(str.lower, Lobsterin.AVAILABLE_KEYWORDS) + if key_is_unknown or normalized_key not in self.data: + return False - return super().__getitem__(new_item) + return True def __delitem__(self, key): new_key = next((key_here for key_here in self if key.strip().lower() == key_here.lower()), key) @@ -564,30 +575,30 @@ def from_file(cls, lobsterin: str) -> Self: lobsterin_dict: dict[str, Any] = {} for datum in data: - # Remove all comments - if not datum.startswith(("!", "#", "//")): - pattern = r"\b[^!#//]+" # exclude comments after commands - if matched_pattern := re.findall(pattern, datum): - raw_datum = matched_pattern[0].replace("\t", " ") # handle tab in between and end of command - key_word = raw_datum.strip().split(" ") # extract keyword - if len(key_word) > 1: - # check which type of keyword this is, handle accordingly - if key_word[0].lower() not in [datum2.lower() for 
datum2 in Lobsterin.LISTKEYWORDS]: - if key_word[0].lower() not in [datum2.lower() for datum2 in Lobsterin.FLOAT_KEYWORDS]: - if key_word[0].lower() not in lobsterin_dict: - lobsterin_dict[key_word[0].lower()] = " ".join(key_word[1:]) - else: - raise ValueError(f"Same keyword {key_word[0].lower()} twice!") - elif key_word[0].lower() not in lobsterin_dict: - lobsterin_dict[key_word[0].lower()] = float(key_word[1]) - else: - raise ValueError(f"Same keyword {key_word[0].lower()} twice!") - elif key_word[0].lower() not in lobsterin_dict: - lobsterin_dict[key_word[0].lower()] = [" ".join(key_word[1:])] + if datum.startswith(("!", "#", "//")): + continue # ignore comments + pattern = r"\b[^!#//]+" # exclude comments after commands + if matched_pattern := re.findall(pattern, datum): + raw_datum = matched_pattern[0].replace("\t", " ") # handle tab in between and end of command + key_word = raw_datum.strip().split(" ") # extract keyword + key = key_word[0].lower() + if len(key_word) > 1: + # check which type of keyword this is, handle accordingly + if key not in [datum2.lower() for datum2 in Lobsterin.LISTKEYWORDS]: + if key not in [datum2.lower() for datum2 in Lobsterin.FLOAT_KEYWORDS]: + if key in lobsterin_dict: + raise ValueError(f"Same keyword {key} twice!") + lobsterin_dict[key] = " ".join(key_word[1:]) + elif key in lobsterin_dict: + raise ValueError(f"Same keyword {key} twice!") else: - lobsterin_dict[key_word[0].lower()].append(" ".join(key_word[1:])) - elif len(key_word) > 0: - lobsterin_dict[key_word[0].lower()] = True + lobsterin_dict[key] = float("nan" if key_word[1].strip() == "None" else key_word[1]) + elif key not in lobsterin_dict: + lobsterin_dict[key] = [" ".join(key_word[1:])] + else: + lobsterin_dict[key].append(" ".join(key_word[1:])) + elif len(key_word) > 0: + lobsterin_dict[key] = True return cls(lobsterin_dict) diff --git a/pymatgen/io/vasp/outputs.py b/pymatgen/io/vasp/outputs.py index 295d9c2b461..51a9eb570d1 100644 --- 
a/pymatgen/io/vasp/outputs.py +++ b/pymatgen/io/vasp/outputs.py @@ -1471,7 +1471,7 @@ def _parse_calculation(self, elem): return istep @staticmethod - def _parse_dos(elem): + def _parse_dos(elem) -> tuple[Dos, Dos, list[dict]]: efermi = float(elem.find("i").text) energies = None tdensities = {} @@ -1491,22 +1491,18 @@ def _parse_dos(elem): orbs.pop(0) lm = any("x" in s for s in orbs) for s in partial.find("array").find("set").findall("set"): - pdos = defaultdict(dict) + pdos: dict[Orbital | OrbitalType, dict[Spin, np.ndarray]] = defaultdict(dict) for ss in s.findall("set"): spin = Spin.up if ss.attrib["comment"] == "spin 1" else Spin.down data = np.array(_parse_vasp_array(ss)) - _nrow, ncol = data.shape - for j in range(1, ncol): - orb = Orbital(j - 1) if lm else OrbitalType(j - 1) - pdos[orb][spin] = data[:, j] + _n_row, n_col = data.shape + for col_idx in range(1, n_col): + orb = Orbital(col_idx - 1) if lm else OrbitalType(col_idx - 1) + pdos[orb][spin] = data[:, col_idx] # type: ignore[index] pdoss.append(pdos) elem.clear() - return ( - Dos(efermi, energies, tdensities), - Dos(efermi, energies, idensities), - pdoss, - ) + return Dos(efermi, energies, tdensities), Dos(efermi, energies, idensities), pdoss @staticmethod def _parse_eigen(elem): diff --git a/setup.py b/setup.py index b04103451b8..f71fc5a4462 100644 --- a/setup.py +++ b/setup.py @@ -48,10 +48,11 @@ ], extras_require={ "ase": ["ase>=3.3"], - "tblite": ["tblite[ase]>=0.3.0"], + # don't depend on tblite above 3.11 since unsupported https://github.com/tblite/tblite/issues/175 + "tblite": ["tblite[ase]>=0.3.0; python_version<'3.12'"], "vis": ["vtk>=6.0.0"], "abinit": ["netcdf4"], - "relaxation": ["matgl", "chgnet"], + "relaxation": ["matgl", "chgnet>=0.3.0"], "electronic_structure": ["fdint>=2.0.2"], "dev": [ "mypy", @@ -74,7 +75,7 @@ # caused CI failure due to ModuleNotFoundError: No module named 'packaging' # "BoltzTraP2>=22.3.2; platform_system!='Windows'", "chemview>=0.6", - "chgnet", + 
"chgnet>=0.3.0", "f90nml>=1.1.2", "galore>=0.6.1", "h5py>=3.8.0", @@ -83,7 +84,8 @@ "netCDF4>=1.5.8", "phonopy>=2.4.2", "seekpath>=1.9.4", - "tblite[ase]>=0.3.0; platform_system=='Linux'", + # don't depend on tblite above 3.11 since unsupported https://github.com/tblite/tblite/issues/175 + "tblite[ase]>=0.3.0; platform_system=='Linux' and python_version<'3.12'", # "hiphive>=0.6", # "openbabel>=3.1.1; platform_system=='Linux'", ], @@ -158,6 +160,7 @@ "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3", "Topic :: Scientific/Engineering :: Chemistry", "Topic :: Scientific/Engineering :: Information Analysis", diff --git a/tests/alchemy/test_materials.py b/tests/alchemy/test_materials.py index 2ecd37c91fd..c667b27277e 100644 --- a/tests/alchemy/test_materials.py +++ b/tests/alchemy/test_materials.py @@ -40,9 +40,9 @@ def test_append_transformation(self): [0, -2.2171384943, 3.1355090603], ] struct = Structure(lattice, ["Si4+", "Si4+"], coords) - ts = TransformedStructure(struct, []) - ts.append_transformation(SupercellTransformation.from_scaling_factors(2, 1, 1)) - alt = ts.append_transformation( + t_struct = TransformedStructure(struct, []) + t_struct.append_transformation(SupercellTransformation.from_scaling_factors(2, 1, 1)) + alt = t_struct.append_transformation( PartialRemoveSpecieTransformation("Si4+", 0.5, algo=PartialRemoveSpecieTransformation.ALGO_COMPLETE), 5 ) assert len(alt) == 2 @@ -66,36 +66,36 @@ def test_from_dict(self): with open(f"{TEST_DIR}/transformations.json") as file: dct = json.load(file) dct["other_parameters"] = {"tags": ["test"]} - ts = TransformedStructure.from_dict(dct) - ts.other_parameters["author"] = "Will" - ts.append_transformation(SubstitutionTransformation({"Fe": "Mn"})) - assert ts.final_structure.reduced_formula == "MnPO4" - assert ts.other_parameters == {"author": "Will", "tags": 
["test"]} + t_struct = TransformedStructure.from_dict(dct) + t_struct.other_parameters["author"] = "Will" + t_struct.append_transformation(SubstitutionTransformation({"Fe": "Mn"})) + assert t_struct.final_structure.reduced_formula == "MnPO4" + assert t_struct.other_parameters == {"author": "Will", "tags": ["test"]} def test_undo_and_redo_last_change(self): trafos = [ SubstitutionTransformation({"Li": "Na"}), SubstitutionTransformation({"Fe": "Mn"}), ] - ts = TransformedStructure(self.structure, trafos) - assert ts.final_structure.reduced_formula == "NaMnPO4" - ts.undo_last_change() - assert ts.final_structure.reduced_formula == "NaFePO4" - ts.undo_last_change() - assert ts.final_structure.reduced_formula == "LiFePO4" + t_struct = TransformedStructure(self.structure, trafos) + assert t_struct.final_structure.reduced_formula == "NaMnPO4" + t_struct.undo_last_change() + assert t_struct.final_structure.reduced_formula == "NaFePO4" + t_struct.undo_last_change() + assert t_struct.final_structure.reduced_formula == "LiFePO4" with pytest.raises(IndexError, match="No more changes to undo"): - ts.undo_last_change() - ts.redo_next_change() - assert ts.final_structure.reduced_formula == "NaFePO4" - ts.redo_next_change() - assert ts.final_structure.reduced_formula == "NaMnPO4" + t_struct.undo_last_change() + t_struct.redo_next_change() + assert t_struct.final_structure.reduced_formula == "NaFePO4" + t_struct.redo_next_change() + assert t_struct.final_structure.reduced_formula == "NaMnPO4" with pytest.raises(IndexError, match="No more changes to redo"): - ts.redo_next_change() + t_struct.redo_next_change() # Make sure that this works with filters. 
f3 = ContainsSpecieFilter(["O2-"], strict_compare=True, AND=False) - ts.append_filter(f3) - ts.undo_last_change() - ts.redo_next_change() + t_struct.append_filter(f3) + t_struct.undo_last_change() + t_struct.redo_next_change() def test_set_parameter(self): trans = self.trans.set_parameter("author", "will") @@ -113,19 +113,19 @@ def test_as_dict(self): def test_snl(self): self.trans.set_parameter("author", "will") with pytest.warns(UserWarning) as warns: - snl = self.trans.to_snl([("will", "will@test.com")]) + struct_nl = self.trans.to_snl([("will", "will@test.com")]) - assert len(warns) == 1, "Warning not raised on type conversion with other_parameters" + assert len(warns) >= 1, f"Warning not raised on type conversion with other_parameters {len(warns)=}" assert ( str(warns[0].message) == "Data in TransformedStructure.other_parameters discarded during type conversion to SNL" ) - ts = TransformedStructure.from_snl(snl) - assert ts.history[-1]["@class"] == "SubstitutionTransformation" + t_struct = TransformedStructure.from_snl(struct_nl) + assert t_struct.history[-1]["@class"] == "SubstitutionTransformation" hist = ("testname", "testURL", {"test": "testing"}) - snl = StructureNL(ts.final_structure, [("will", "will@test.com")], history=[hist]) - snl = TransformedStructure.from_snl(snl).to_snl([("notwill", "notwill@test.com")]) - assert snl.history == [hist] - assert snl.authors == [("notwill", "notwill@test.com")] + struct_nl = StructureNL(t_struct.final_structure, [("will", "will@test.com")], history=[hist]) + t_struct = TransformedStructure.from_snl(struct_nl).to_snl([("notwill", "notwill@test.com")]) + assert t_struct.history == [hist] + assert t_struct.authors == [("notwill", "notwill@test.com")] diff --git a/tests/io/lobster/test_inputs.py b/tests/io/lobster/test_inputs.py index 9dc9d75e575..2f790eb2da3 100644 --- a/tests/io/lobster/test_inputs.py +++ b/tests/io/lobster/test_inputs.py @@ -1556,36 +1556,36 @@ def test_get_bandstructure(self): class 
TestLobsterin(TestCase): def setUp(self): - self.Lobsterinfromfile = Lobsterin.from_file(f"{TEST_DIR}/lobsterin.1") - self.Lobsterinfromfile2 = Lobsterin.from_file(f"{TEST_DIR}/lobsterin.2") - self.Lobsterinfromfile3 = Lobsterin.from_file(f"{TEST_DIR}/lobsterin.3") - self.Lobsterinfromfile4 = Lobsterin.from_file(f"{TEST_DIR}/lobsterin.4.gz") + self.Lobsterin = Lobsterin.from_file(f"{TEST_DIR}/lobsterin.1") + self.Lobsterin2 = Lobsterin.from_file(f"{TEST_DIR}/lobsterin.2") + self.Lobsterin3 = Lobsterin.from_file(f"{TEST_DIR}/lobsterin.3") + self.Lobsterin4 = Lobsterin.from_file(f"{TEST_DIR}/lobsterin.4.gz") def test_from_file(self): # test read from file - assert self.Lobsterinfromfile["cohpstartenergy"] == approx(-15.0) - assert self.Lobsterinfromfile["cohpendenergy"] == approx(5.0) - assert self.Lobsterinfromfile["basisset"] == "pbeVaspFit2015" - assert self.Lobsterinfromfile["gaussiansmearingwidth"] == approx(0.1) - assert self.Lobsterinfromfile["basisfunctions"][0] == "Fe 3d 4p 4s" - assert self.Lobsterinfromfile["basisfunctions"][1] == "Co 3d 4p 4s" - assert self.Lobsterinfromfile["skipdos"] - assert self.Lobsterinfromfile["skipcohp"] - assert self.Lobsterinfromfile["skipcoop"] - assert self.Lobsterinfromfile["skippopulationanalysis"] - assert self.Lobsterinfromfile["skipgrosspopulation"] + assert self.Lobsterin["cohpstartenergy"] == approx(-15.0) + assert self.Lobsterin["cohpendenergy"] == approx(5.0) + assert self.Lobsterin["basisset"] == "pbeVaspFit2015" + assert self.Lobsterin["gaussiansmearingwidth"] == approx(0.1) + assert self.Lobsterin["basisfunctions"][0] == "Fe 3d 4p 4s" + assert self.Lobsterin["basisfunctions"][1] == "Co 3d 4p 4s" + assert self.Lobsterin["skipdos"] + assert self.Lobsterin["skipcohp"] + assert self.Lobsterin["skipcoop"] + assert self.Lobsterin["skippopulationanalysis"] + assert self.Lobsterin["skipgrosspopulation"] # test if comments are correctly removed - assert self.Lobsterinfromfile == self.Lobsterinfromfile2 + assert 
self.Lobsterin == self.Lobsterin2 def test_getitem(self): # tests implementation of getitem, should be case independent - assert self.Lobsterinfromfile["COHPSTARTENERGY"] == approx(-15.0) + assert self.Lobsterin["COHPSTARTENERGY"] == approx(-15.0) def test_setitem(self): # test implementation of setitem - self.Lobsterinfromfile["skipCOHP"] = False - assert self.Lobsterinfromfile["skipcohp"] is False + self.Lobsterin["skipCOHP"] = False + assert self.Lobsterin["skipcohp"] is False def test_initialize_from_dict(self): # initialize from dict @@ -1614,7 +1614,7 @@ def test_initialize_from_dict(self): lobsterin2 = Lobsterin({"cohpstartenergy": -15.0, "cohpstartEnergy": -20.0}) lobsterin2 = Lobsterin({"cohpstartenergy": -15.0}) # can only calculate nbands if basis functions are provided - with pytest.raises(ValueError, match="No basis functions are provided. The program cannot calculate nbands"): + with pytest.raises(ValueError, match="No basis functions are provided. The program cannot calculate nbands."): lobsterin2._get_nbands(structure=Structure.from_file(f"{VASP_IN_DIR}/POSCAR_Fe3O4")) def test_standard_settings(self): @@ -1749,40 +1749,41 @@ def test_standard_with_energy_range_from_vasprun(self): def test_diff(self): # test diff - assert self.Lobsterinfromfile.diff(self.Lobsterinfromfile2)["Different"] == {} - assert self.Lobsterinfromfile.diff(self.Lobsterinfromfile2)["Same"]["cohpstartenergy"] == approx(-15.0) + assert self.Lobsterin.diff(self.Lobsterin2)["Different"] == {} + assert self.Lobsterin.diff(self.Lobsterin2)["Same"]["cohpstartenergy"] == approx(-15.0) # test diff in both directions - for entry in self.Lobsterinfromfile.diff(self.Lobsterinfromfile3)["Same"]: - assert entry in self.Lobsterinfromfile3.diff(self.Lobsterinfromfile)["Same"] - for entry in self.Lobsterinfromfile3.diff(self.Lobsterinfromfile)["Same"]: - assert entry in self.Lobsterinfromfile.diff(self.Lobsterinfromfile3)["Same"] - for entry in 
self.Lobsterinfromfile.diff(self.Lobsterinfromfile3)["Different"]: - assert entry in self.Lobsterinfromfile3.diff(self.Lobsterinfromfile)["Different"] - for entry in self.Lobsterinfromfile3.diff(self.Lobsterinfromfile)["Different"]: - assert entry in self.Lobsterinfromfile.diff(self.Lobsterinfromfile3)["Different"] + for entry in self.Lobsterin.diff(self.Lobsterin3)["Same"]: + assert entry in self.Lobsterin3.diff(self.Lobsterin)["Same"] + for entry in self.Lobsterin3.diff(self.Lobsterin)["Same"]: + assert entry in self.Lobsterin.diff(self.Lobsterin3)["Same"] + for entry in self.Lobsterin.diff(self.Lobsterin3)["Different"]: + assert entry in self.Lobsterin3.diff(self.Lobsterin)["Different"] + for entry in self.Lobsterin3.diff(self.Lobsterin)["Different"]: + assert entry in self.Lobsterin.diff(self.Lobsterin3)["Different"] assert ( - self.Lobsterinfromfile.diff(self.Lobsterinfromfile3)["Different"]["skipcohp"]["lobsterin1"] - == self.Lobsterinfromfile3.diff(self.Lobsterinfromfile)["Different"]["skipcohp"]["lobsterin2"] + self.Lobsterin.diff(self.Lobsterin3)["Different"]["skipcohp"]["lobsterin1"] + == self.Lobsterin3.diff(self.Lobsterin)["Different"]["skipcohp"]["lobsterin2"] ) def test_dict_functionality(self): - assert self.Lobsterinfromfile.get("COHPstartEnergy") == -15.0 - assert self.Lobsterinfromfile.get("COHPstartEnergy") == -15.0 - assert self.Lobsterinfromfile.get("COhPstartenergy") == -15.0 - lobsterincopy = self.Lobsterinfromfile.copy() - lobsterincopy.update({"cohpstarteNergy": -10.00}) - assert lobsterincopy["cohpstartenergy"] == -10.0 - lobsterincopy.pop("cohpstarteNergy") - assert "cohpstartenergy" not in lobsterincopy - lobsterincopy.pop("cohpendenergY") - lobsterincopy["cohpsteps"] = 100 - assert lobsterincopy["cohpsteps"] == 100 - before = len(lobsterincopy.items()) - lobsterincopy.popitem() - after = len(lobsterincopy.items()) - assert before != after + for key in ("COHPstartEnergy", "COHPstartEnergy", "COhPstartenergy"): + start_energy = 
self.Lobsterin.get(key) + assert start_energy == -15.0, f"{start_energy=}, {key=}" + lobsterin_copy = self.Lobsterin.copy() + lobsterin_copy.update({"cohpstarteNergy": -10.00}) + assert lobsterin_copy["cohpstartenergy"] == -10.0 + lobsterin_copy.pop("cohpstarteNergy") + assert "cohpstartenergy" not in lobsterin_copy + lobsterin_copy.pop("cohpendenergY") + lobsterin_copy["cohpsteps"] = 100 + assert lobsterin_copy["cohpsteps"] == 100 + len_before = len(lobsterin_copy.items()) + assert len_before == 9, f"{len_before=}" + lobsterin_copy.popitem() + len_after = len(lobsterin_copy.items()) + assert len_after == len_before - 1 def test_read_write_lobsterin(self): outfile_path = tempfile.mkstemp()[1] @@ -2019,10 +2020,10 @@ def is_kpoint_in_list(self, kpoint, kpointlist, weight, weightlist) -> bool: found += 1 return found == 1 - def test_msonable_implementation(self): + def test_as_from_dict(self): # tests as dict and from dict methods - new_lobsterin = Lobsterin.from_dict(self.Lobsterinfromfile.as_dict()) - assert new_lobsterin == self.Lobsterinfromfile + new_lobsterin = Lobsterin.from_dict(self.Lobsterin.as_dict()) + assert new_lobsterin == self.Lobsterin new_lobsterin.to_json() @@ -2420,13 +2421,17 @@ def test_write_file(self): filename=f"{TEST_DIR}/LCAOWaveFunctionAfterLSO1PlotOfSpin1Kpoint1band1.gz", structure=Structure.from_file(f"{TEST_DIR}/POSCAR_O.gz"), ) - wave1.write_file(filename=f"{self.tmp_path}/wavecar_test.vasp", part="real") - assert os.path.isfile("wavecar_test.vasp") + real_wavecar_path = f"{self.tmp_path}/real-wavecar.vasp" + wave1.write_file(filename=real_wavecar_path, part="real") + assert os.path.isfile(real_wavecar_path) - wave1.write_file(filename=f"{self.tmp_path}/wavecar_test.vasp", part="imaginary") - assert os.path.isfile("wavecar_test.vasp") - wave1.write_file(filename=f"{self.tmp_path}/density.vasp", part="density") - assert os.path.isfile("density.vasp") + imag_wavecar_path = f"{self.tmp_path}/imaginary-wavecar.vasp" + 
wave1.write_file(filename=imag_wavecar_path, part="imaginary") + assert os.path.isfile(imag_wavecar_path) + + density_wavecar_path = f"{self.tmp_path}/density-wavecar.vasp" + wave1.write_file(filename=density_wavecar_path, part="density") + assert os.path.isfile(density_wavecar_path) class TestSitePotentials(PymatgenTest): diff --git a/tests/io/test_shengbte.py b/tests/io/test_shengbte.py index a1bfcca6ba5..4f0424bced6 100644 --- a/tests/io/test_shengbte.py +++ b/tests/io/test_shengbte.py @@ -84,7 +84,7 @@ def test_from_dict(self): reference_string = reference_file.read() assert test_string == reference_string - def test_msonable_implementation(self): + def test_as_from_dict(self): # tests as dict and from dict methods ctrl_from_file = Control.from_file(self.filename) control_from_dict = Control.from_dict(ctrl_from_file.as_dict()) diff --git a/tests/io/vasp/test_outputs.py b/tests/io/vasp/test_outputs.py index aa8077cbcea..15df25bdb10 100644 --- a/tests/io/vasp/test_outputs.py +++ b/tests/io/vasp/test_outputs.py @@ -21,7 +21,7 @@ from pymatgen.electronic_structure.bandstructure import BandStructureSymmLine from pymatgen.electronic_structure.core import Magmom, Orbital, OrbitalType, Spin from pymatgen.entries.compatibility import MaterialsProjectCompatibility -from pymatgen.io.vasp.inputs import Kpoints, Poscar, Potcar +from pymatgen.io.vasp.inputs import Incar, Kpoints, Poscar, Potcar from pymatgen.io.vasp.outputs import ( WSWQ, BSVasprun, @@ -251,8 +251,8 @@ def test_standard(self): assert len(trajectory) == len(vasp_run.ionic_steps) assert "forces" in trajectory[0].site_properties - for i, step in enumerate(vasp_run.ionic_steps): - assert vasp_run.structures[i] == step["structure"] + for idx, step in enumerate(vasp_run.ionic_steps): + assert vasp_run.structures[idx] == step["structure"] assert all( vasp_run.structures[idx] == vasp_run.ionic_steps[idx]["structure"] @@ -263,14 +263,14 @@ def test_standard(self): assert vasp_run.atomic_symbols == ["Li"] + 4 * ["Fe"] 
+ 4 * ["P"] + 16 * ["O"] assert vasp_run.final_structure.reduced_formula == "LiFe4(PO4)4" - assert vasp_run.incar is not None, "Incar cannot be read" - assert vasp_run.kpoints is not None, "Kpoints cannot be read" - assert vasp_run.eigenvalues is not None, "Eigenvalues cannot be read" + assert isinstance(vasp_run.incar, Incar), f"{vasp_run.incar=}" + assert isinstance(vasp_run.kpoints, Kpoints), f"{vasp_run.kpoints=}" + assert isinstance(vasp_run.eigenvalues, dict), f"{vasp_run.eigenvalues=}" assert vasp_run.final_energy == approx(-269.38319884, abs=1e-7) assert vasp_run.tdos.get_gap() == approx(2.0589, abs=1e-4) expected = (2.539, 4.0906, 1.5516, False) assert vasp_run.eigenvalue_band_properties == approx(expected) - assert not vasp_run.is_hubbard + assert vasp_run.is_hubbard is False assert vasp_run.potcar_symbols == [ "PAW_PBE Li 17Jan2003", "PAW_PBE Fe 06Sep2000", @@ -278,12 +278,12 @@ def test_standard(self): "PAW_PBE P 17Jan2003", "PAW_PBE O 08Apr2002", ] - assert vasp_run.kpoints is not None, "Kpoints cannot be read" - assert vasp_run.actual_kpoints is not None, "Actual kpoints cannot be read" - assert vasp_run.actual_kpoints_weights is not None, "Actual kpoints weights cannot be read" + assert isinstance(vasp_run.kpoints, Kpoints), f"{vasp_run.kpoints=}" + assert isinstance(vasp_run.actual_kpoints, list), f"{vasp_run.actual_kpoints=}" + assert isinstance(vasp_run.actual_kpoints_weights, list), f"{vasp_run.actual_kpoints_weights=}" for atom_doses in vasp_run.pdos: for orbital_dos in atom_doses: - assert orbital_dos is not None, "Partial Dos cannot be read" + assert isinstance(orbital_dos, Orbital), f"{orbital_dos=}" # test skipping ionic steps. 
vasprun_skip = Vasprun(filepath, 3, parse_potcar_file=False) @@ -318,7 +318,7 @@ def test_unconverged(self): UnconvergedVASPWarning, match="vasprun.unconverged.xml.gz is an unconverged VASP run" ) as warns: vasprun_unconverged = Vasprun(filepath, parse_potcar_file=False) - assert len(warns) == 1 + assert len(warns) >= 1 assert vasprun_unconverged.converged_ionic assert not vasprun_unconverged.converged_electronic @@ -662,7 +662,7 @@ def test_potcar_not_found(self): # Ensure no potcar is found and nothing is updated with pytest.warns(UserWarning, match="No POTCAR file with matching TITEL fields was found in") as warns: vasp_run = Vasprun(filepath, parse_potcar_file=".") - assert len(warns) == 2 + assert len(warns) >= 2, f"{len(warns)=}" assert vasp_run.potcar_spec == [ {"titel": "PAW_PBE Li 17Jan2003", "hash": None, "summary_stats": {}}, {"titel": "PAW_PBE Fe 06Sep2000", "hash": None, "summary_stats": {}},