Skip to content

Commit

Permalink
Officially support Python 3.12 and test in CI (#3685)
Browse files Browse the repository at this point in the history
* add python 3.12 to officially supported versions and test it in CI

* down pin chgnet>=0.3.0

* fix weird typo nrafo_ew_tstructs

* don't depend on tblite above 3.11 since unsupported

tblite/tblite#175

* improve TestVasprun.test_standard

* drop Lobsterin inheritance from UserDict, use simple dict instead and modify __getitem__ to get the salient __getitem__ behavior from UserDict

* try DotDict as super class for Lobsterin

* override Lobsterin.__contains__ to fix on py312

---------

Co-authored-by: JaGeo <janine.george@bam.de>
  • Loading branch information
janosh and JaGeo authored May 1, 2024
1 parent 5c8b51c commit c1a610c
Show file tree
Hide file tree
Showing 14 changed files with 203 additions and 188 deletions.
8 changes: 4 additions & 4 deletions .github/workflows/release.yml
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ jobs:
- uses: actions/setup-python@v5
name: Install Python
with:
python-version: "3.11"
python-version: "3.12"

- name: Build sdist
run: |
Expand All @@ -45,7 +45,7 @@ jobs:
strategy:
matrix:
os: [ubuntu-latest, macos-14, windows-latest]
python-version: ["39", "310", "311"]
python-version: ["39", "310", "311", "312"]
runs-on: ${{ matrix.os }}
steps:
- name: Check out repo
Expand All @@ -68,10 +68,10 @@ jobs:
# For pypi trusted publishing
id-token: write
steps:
- name: Set up Python 3.11
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: 3.11
python-version: "3.12"

- name: Get build artifacts
uses: actions/download-artifact@v3
Expand Down
6 changes: 3 additions & 3 deletions .github/workflows/test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -27,16 +27,16 @@ jobs:
matrix:
# pytest-split automatically distributes work load so parallel jobs finish in similar time
os: [ubuntu-latest, windows-latest]
python-version: ["3.9", "3.11"]
python-version: ["3.9", "3.12"]
split: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# include/exclude is meant to maximize CI coverage of different platforms and python
# versions while minimizing the total number of jobs. We run all pytest splits with the
# oldest supported python version (currently 3.9) on windows (seems most likely to surface
# errors) and with newest version (currently 3.11) on ubuntu (to get complete and speedy
# errors) and with newest version (currently 3.12) on ubuntu (to get complete and speedy
# coverage on unix). We ignore mac-os, which is assumed to be similar to ubuntu.
exclude:
- os: windows-latest
python-version: "3.11"
python-version: "3.12"
- os: ubuntu-latest
python-version: "3.9"

Expand Down
4 changes: 2 additions & 2 deletions dev_scripts/chemenv/get_plane_permutations_optimized.py
Original file line number Diff line number Diff line change
Expand Up @@ -209,7 +209,7 @@ def random_permutations_iterator(initial_permutation, n_permutations):
# Definition of the facets
all_planes_point_indices = [algo.plane_points]
if algo.other_plane_points is not None:
all_planes_point_indices.extend(algo.other_plane_points)
all_planes_point_indices += algo.other_plane_points

# Loop on the facets
explicit_permutations_per_plane = []
Expand Down Expand Up @@ -305,7 +305,7 @@ def random_permutations_iterator(initial_permutation, n_permutations):
# Definition of the facets
all_planes_point_indices = [algo.plane_points]
if algo.other_plane_points is not None:
all_planes_point_indices.extend(algo.other_plane_points)
all_planes_point_indices += algo.other_plane_points

# Setup of the permutations to be used for this algorithm

Expand Down
6 changes: 3 additions & 3 deletions dev_scripts/chemenv/plane_multiplicity.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,11 +12,11 @@
__date__ = "Feb 20, 2016"

if __name__ == "__main__":
allcg = AllCoordinationGeometries()
all_cg = AllCoordinationGeometries()

cg_symbol = "I:12"
all_plane_points = []
cg = allcg[cg_symbol]
cg = all_cg[cg_symbol]

# I:12
if cg_symbol == "I:12":
Expand All @@ -25,7 +25,7 @@
for edge in edges:
opposite_edge = [opposite_points[edge[0]], opposite_points[edge[1]]]
equiv_plane = list(edge)
equiv_plane.extend(opposite_edge)
equiv_plane += opposite_edge
equiv_plane.sort()
all_plane_points.append(tuple(equiv_plane))
all_plane_points = [tuple(equiv_plane) for equiv_plane in set(all_plane_points)]
Expand Down
2 changes: 1 addition & 1 deletion pymatgen/alchemy/materials.py
Original file line number Diff line number Diff line change
Expand Up @@ -228,7 +228,7 @@ def __str__(self) -> str:
for hist in self.history:
hist.pop("input_structure", None)
output.append(str(hist))
output.extend(("\nOther parameters", "------------", str(self.other_parameters)))
output += ("\nOther parameters", "------------", str(self.other_parameters))
return "\n".join(output)

def set_parameter(self, key: str, value: Any) -> TransformedStructure:
Expand Down
44 changes: 22 additions & 22 deletions pymatgen/alchemy/transmuters.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,8 @@

from typing_extensions import Self

from pymatgen.alchemy.filters import AbstractStructureFilter

__author__ = "Shyue Ping Ong, Will Richards"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
Expand All @@ -40,7 +42,7 @@ class StandardTransmuter:

def __init__(
self,
transformed_structures,
transformed_structures: list[TransformedStructure],
transformations=None,
extend_collection: int = 0,
ncores: int | None = None,
Expand Down Expand Up @@ -130,8 +132,8 @@ def append_transformation(self, transformation, extend_collection=False, clear_r
for x in self.transformed_structures:
new = x.append_transformation(transformation, extend_collection, clear_redo=clear_redo)
if new is not None:
new_structures.extend(new)
self.transformed_structures.extend(new_structures)
new_structures += new
self.transformed_structures += new_structures

def extend_transformations(self, transformations):
"""Extend a sequence of transformations to the TransformedStructure.
Expand All @@ -142,18 +144,16 @@ def extend_transformations(self, transformations):
for trafo in transformations:
self.append_transformation(trafo)

def apply_filter(self, structure_filter):
def apply_filter(self, structure_filter: AbstractStructureFilter):
"""Apply a structure_filter to the list of TransformedStructures
in the transmuter.
Args:
structure_filter: StructureFilter to apply.
"""

def test_transformed_structure(ts):
return structure_filter.test(ts.final_structure)

self.transformed_structures = list(filter(test_transformed_structure, self.transformed_structures))
self.transformed_structures = list(
filter(lambda ts: structure_filter.test(ts.final_structure), self.transformed_structures)
)
for ts in self.transformed_structures:
ts.append_filter(structure_filter)

Expand All @@ -174,8 +174,8 @@ def set_parameter(self, key, value):
key: The key for the parameter.
value: The value for the parameter.
"""
for x in self.transformed_structures:
x.other_parameters[key] = value
for struct in self.transformed_structures:
struct.other_parameters[key] = value

def add_tags(self, tags):
"""Add tags for the structures generated by the transmuter.
Expand All @@ -196,11 +196,11 @@ def append_transformed_structures(self, trafo_structs_or_transmuter):
transmuter.
"""
if isinstance(trafo_structs_or_transmuter, self.__class__):
self.transformed_structures.extend(trafo_structs_or_transmuter.transformed_structures)
self.transformed_structures += trafo_structs_or_transmuter.transformed_structures
else:
for ts in trafo_structs_or_transmuter:
assert isinstance(ts, TransformedStructure)
self.transformed_structures.extend(trafo_structs_or_transmuter)
self.transformed_structures += trafo_structs_or_transmuter

@classmethod
def from_structures(cls, structures, transformations=None, extend_collection=0) -> Self:
Expand All @@ -219,8 +219,8 @@ def from_structures(cls, structures, transformations=None, extend_collection=0)
Returns:
StandardTransmuter
"""
trafo_struct = [TransformedStructure(s, []) for s in structures]
return cls(trafo_struct, transformations, extend_collection)
t_struct = [TransformedStructure(s, []) for s in structures]
return cls(t_struct, transformations, extend_collection)


class CifTransmuter(StandardTransmuter):
Expand Down Expand Up @@ -253,8 +253,8 @@ def __init__(self, cif_string, transformations=None, primitive=True, extend_coll
if read_data:
structure_data[-1].append(line)
for data in structure_data:
trafo_struct = TransformedStructure.from_cif_str("\n".join(data), [], primitive)
transformed_structures.append(trafo_struct)
t_struct = TransformedStructure.from_cif_str("\n".join(data), [], primitive)
transformed_structures.append(t_struct)
super().__init__(transformed_structures, transformations, extend_collection)

@classmethod
Expand Down Expand Up @@ -293,8 +293,8 @@ def __init__(self, poscar_string, transformations=None, extend_collection=False)
extend_collection: Whether to use more than one output structure
from one-to-many transformations.
"""
trafo_struct = TransformedStructure.from_poscar_str(poscar_string, [])
super().__init__([trafo_struct], transformations, extend_collection=extend_collection)
t_struct = TransformedStructure.from_poscar_str(poscar_string, [])
super().__init__([t_struct], transformations, extend_collection=extend_collection)

@classmethod
def from_filenames(cls, poscar_filenames, transformations=None, extend_collection=False) -> StandardTransmuter:
Expand Down Expand Up @@ -373,7 +373,7 @@ def _apply_transformation(inputs):
"""
ts, transformation, extend_collection, clear_redo = inputs
new = ts.append_transformation(transformation, extend_collection, clear_redo=clear_redo)
o = [ts]
out = [ts]
if new:
o.extend(new)
return o
out += new
return out
16 changes: 8 additions & 8 deletions pymatgen/analysis/structure_prediction/substitutor.py
Original file line number Diff line number Diff line change
Expand Up @@ -119,10 +119,10 @@ def pred_from_structures(
raise ValueError("the species in target_species are not allowed for the probability model you are using")

for permutation in itertools.permutations(target_species):
for s in structures_list:
for dct in structures_list:
# check if: species are in the domain,
# and the probability of subst. is above the threshold
els = s["structure"].elements
els = dct["structure"].elements
if (
len(els) == len(permutation)
and len(set(els) & set(self.get_allowed_species())) == len(els)
Expand All @@ -135,18 +135,18 @@ def pred_from_structures(

transf = SubstitutionTransformation(clean_subst)

if Substitutor._is_charge_balanced(transf.apply_transformation(s["structure"])):
ts = TransformedStructure(
s["structure"],
if Substitutor._is_charge_balanced(transf.apply_transformation(dct["structure"])):
t_struct = TransformedStructure(
dct["structure"],
[transf],
history=[{"source": s["id"]}],
history=[{"source": dct["id"]}],
other_parameters={
"type": "structure_prediction",
"proba": self._sp.cond_prob_list(permutation, els),
},
)
result.append(ts)
transmuter.append_transformed_structures([ts])
result.append(t_struct)
transmuter.append_transformed_structures([t_struct])

if remove_duplicates:
transmuter.apply_filter(RemoveDuplicatesFilter(symprec=self._symprec))
Expand Down
69 changes: 40 additions & 29 deletions pymatgen/io/lobster/inputs.py
Original file line number Diff line number Diff line change
Expand Up @@ -132,7 +132,7 @@ def __init__(self, settingsdict: dict):
raise KeyError("There are duplicates for the keywords!")
self.update(settingsdict)

def __setitem__(self, key, val):
def __setitem__(self, key, val) -> None:
"""
Add parameter-val pair to Lobsterin. Warns if parameter is not in list of
valid lobsterin tags. Also cleans the parameter and val by stripping
Expand All @@ -146,14 +146,25 @@ def __setitem__(self, key, val):

super().__setitem__(new_key, val.strip() if isinstance(val, str) else val)

def __getitem__(self, item):
def __getitem__(self, key) -> Any:
"""Implements getitem from dict to avoid problems with cases."""
new_item = next((key_here for key_here in self if item.strip().lower() == key_here.lower()), item)
normalized_key = next((k for k in self if key.strip().lower() == k.lower()), key)

if new_item.lower() not in [element.lower() for element in Lobsterin.AVAILABLE_KEYWORDS]:
raise KeyError("Key is currently not available")
key_is_unknown = normalized_key.lower() not in map(str.lower, Lobsterin.AVAILABLE_KEYWORDS)
if key_is_unknown or normalized_key not in self.data:
raise KeyError(f"{key=} is not available")

return self.data[normalized_key]

def __contains__(self, key) -> bool:
"""Implements getitem from dict to avoid problems with cases."""
normalized_key = next((k for k in self if key.strip().lower() == k.lower()), key)

key_is_unknown = normalized_key.lower() not in map(str.lower, Lobsterin.AVAILABLE_KEYWORDS)
if key_is_unknown or normalized_key not in self.data:
return False

return super().__getitem__(new_item)
return True

def __delitem__(self, key):
new_key = next((key_here for key_here in self if key.strip().lower() == key_here.lower()), key)
Expand Down Expand Up @@ -564,30 +575,30 @@ def from_file(cls, lobsterin: str) -> Self:
lobsterin_dict: dict[str, Any] = {}

for datum in data:
# Remove all comments
if not datum.startswith(("!", "#", "//")):
pattern = r"\b[^!#//]+" # exclude comments after commands
if matched_pattern := re.findall(pattern, datum):
raw_datum = matched_pattern[0].replace("\t", " ") # handle tab in between and end of command
key_word = raw_datum.strip().split(" ") # extract keyword
if len(key_word) > 1:
# check which type of keyword this is, handle accordingly
if key_word[0].lower() not in [datum2.lower() for datum2 in Lobsterin.LISTKEYWORDS]:
if key_word[0].lower() not in [datum2.lower() for datum2 in Lobsterin.FLOAT_KEYWORDS]:
if key_word[0].lower() not in lobsterin_dict:
lobsterin_dict[key_word[0].lower()] = " ".join(key_word[1:])
else:
raise ValueError(f"Same keyword {key_word[0].lower()} twice!")
elif key_word[0].lower() not in lobsterin_dict:
lobsterin_dict[key_word[0].lower()] = float(key_word[1])
else:
raise ValueError(f"Same keyword {key_word[0].lower()} twice!")
elif key_word[0].lower() not in lobsterin_dict:
lobsterin_dict[key_word[0].lower()] = [" ".join(key_word[1:])]
if datum.startswith(("!", "#", "//")):
continue # ignore comments
pattern = r"\b[^!#//]+" # exclude comments after commands
if matched_pattern := re.findall(pattern, datum):
raw_datum = matched_pattern[0].replace("\t", " ") # handle tab in between and end of command
key_word = raw_datum.strip().split(" ") # extract keyword
key = key_word[0].lower()
if len(key_word) > 1:
# check which type of keyword this is, handle accordingly
if key not in [datum2.lower() for datum2 in Lobsterin.LISTKEYWORDS]:
if key not in [datum2.lower() for datum2 in Lobsterin.FLOAT_KEYWORDS]:
if key in lobsterin_dict:
raise ValueError(f"Same keyword {key} twice!")
lobsterin_dict[key] = " ".join(key_word[1:])
elif key in lobsterin_dict:
raise ValueError(f"Same keyword {key} twice!")
else:
lobsterin_dict[key_word[0].lower()].append(" ".join(key_word[1:]))
elif len(key_word) > 0:
lobsterin_dict[key_word[0].lower()] = True
lobsterin_dict[key] = float("nan" if key_word[1].strip() == "None" else key_word[1])
elif key not in lobsterin_dict:
lobsterin_dict[key] = [" ".join(key_word[1:])]
else:
lobsterin_dict[key].append(" ".join(key_word[1:]))
elif len(key_word) > 0:
lobsterin_dict[key] = True

return cls(lobsterin_dict)

Expand Down
18 changes: 7 additions & 11 deletions pymatgen/io/vasp/outputs.py
Original file line number Diff line number Diff line change
Expand Up @@ -1471,7 +1471,7 @@ def _parse_calculation(self, elem):
return istep

@staticmethod
def _parse_dos(elem):
def _parse_dos(elem) -> tuple[Dos, Dos, list[dict]]:
efermi = float(elem.find("i").text)
energies = None
tdensities = {}
Expand All @@ -1491,22 +1491,18 @@ def _parse_dos(elem):
orbs.pop(0)
lm = any("x" in s for s in orbs)
for s in partial.find("array").find("set").findall("set"):
pdos = defaultdict(dict)
pdos: dict[Orbital | OrbitalType, dict[Spin, np.ndarray]] = defaultdict(dict)

for ss in s.findall("set"):
spin = Spin.up if ss.attrib["comment"] == "spin 1" else Spin.down
data = np.array(_parse_vasp_array(ss))
_nrow, ncol = data.shape
for j in range(1, ncol):
orb = Orbital(j - 1) if lm else OrbitalType(j - 1)
pdos[orb][spin] = data[:, j]
_n_row, n_col = data.shape
for col_idx in range(1, n_col):
orb = Orbital(col_idx - 1) if lm else OrbitalType(col_idx - 1)
pdos[orb][spin] = data[:, col_idx] # type: ignore[index]
pdoss.append(pdos)
elem.clear()
return (
Dos(efermi, energies, tdensities),
Dos(efermi, energies, idensities),
pdoss,
)
return Dos(efermi, energies, tdensities), Dos(efermi, energies, idensities), pdoss

@staticmethod
def _parse_eigen(elem):
Expand Down
Loading

0 comments on commit c1a610c

Please sign in to comment.