From 2b47486ab5e26079ec3e9457a6de80c860aa326d Mon Sep 17 00:00:00 2001 From: Janosh Riebesell Date: Wed, 17 May 2023 15:03:13 -0700 Subject: [PATCH] Re-export `SiteCollection` + `DummySpecies` from `pymatgen.core` (#2995) * rename dict vars d -> dct * re-export DummySpecie + SiteCollection from pymatgen.core --- dev_scripts/regen_libxcfunc.py | 14 +++---- pymatgen/alchemy/filters.py | 12 +++--- pymatgen/analysis/energy_models.py | 11 ++++-- pymatgen/analysis/local_env.py | 8 ++-- pymatgen/analysis/magnetism/heisenberg.py | 38 +++++++++---------- .../substitution_probability.py | 6 +-- pymatgen/apps/borg/hive.py | 18 ++++----- pymatgen/cli/pmg_config.py | 8 ++-- pymatgen/core/__init__.py | 2 + pymatgen/core/tensors.py | 6 +-- pymatgen/core/tests/test_units.py | 6 +-- pymatgen/ext/matproj.py | 28 +++++++------- pymatgen/io/abinit/abiobjects.py | 22 +++++------ pymatgen/io/abinit/abitimer.py | 6 +-- pymatgen/io/abinit/inputs.py | 6 +-- pymatgen/io/abinit/netcdf.py | 22 +++++------ pymatgen/io/abinit/pseudos.py | 18 ++++----- pymatgen/io/abinit/tests/test_netcdf.py | 8 ++-- pymatgen/io/cif.py | 20 +++++----- pymatgen/io/cp2k/inputs.py | 24 ++++++------ pymatgen/io/fiesta.py | 18 ++++----- pymatgen/io/lammps/outputs.py | 16 ++++---- pymatgen/io/qchem/outputs.py | 10 ++--- pymatgen/phonon/bandstructure.py | 34 ++++++++--------- .../advanced_transformations.py | 6 +-- 25 files changed, 185 insertions(+), 182 deletions(-) diff --git a/dev_scripts/regen_libxcfunc.py b/dev_scripts/regen_libxcfunc.py index b85dd06de21..a9f3a0325bb 100755 --- a/dev_scripts/regen_libxcfunc.py +++ b/dev_scripts/regen_libxcfunc.py @@ -21,14 +21,14 @@ def parse_libxc_docs(path): """ def parse_section(section): - d = {} + dct = {} for line in section: key, value = line.split(":") - d[key.strip()] = value.strip() + dct[key.strip()] = value.strip() - return int(d["Number"]), d + return int(dct["Number"]), dct - d = {} + dct = {} with open(path) as fh: section = [] for line in fh: @@ -36,12 +36,12 @@ def parse_section(section): section.append(line) else: num, entry = parse_section(section) - assert num not in d - d[num] = entry + assert num not in dct + dct[num] = entry section = [] assert not section - return d + return dct def write_libxc_docs_json(xcfuncs, jpath): diff --git a/pymatgen/alchemy/filters.py b/pymatgen/alchemy/filters.py index 111d61c4599..39c9f010c1f 100644 --- a/pymatgen/alchemy/filters.py +++ b/pymatgen/alchemy/filters.py @@ -113,15 +113,15 @@ def as_dict(self): } @classmethod - def from_dict(cls, d): + def from_dict(cls, dct): """ Args: - d (dict): Dict representation + dct (dict): Dict representation Returns: Filter """ - return cls(**d["init_args"]) + return cls(**dct["init_args"]) class SpecieProximityFilter(AbstractStructureFilter): @@ -176,15 +176,15 @@ def as_dict(self): } @classmethod - def from_dict(cls, d): + def from_dict(cls, dct): """ Args: - d (dict): Dict representation + dct (dict): Dict representation Returns: Filter """ - return cls(**d["init_args"]) + return cls(**dct["init_args"]) class RemoveDuplicatesFilter(AbstractStructureFilter): diff --git a/pymatgen/analysis/energy_models.py b/pymatgen/analysis/energy_models.py index 34d0b2f235c..54c7ae8cf27 100644 --- a/pymatgen/analysis/energy_models.py +++ b/pymatgen/analysis/energy_models.py @@ -30,12 +30,15 @@ def get_energy(self, structure) -> float: return 0.0 @classmethod - def from_dict(cls, d): + def from_dict(cls, dct): """ - :param d: Dict representation - :return: EnergyModel + Args: + dct (dict): Dict representation + + Returns: + 
EnergyModel """ - return cls(**d["init_args"]) + return cls(**dct["init_args"]) class EwaldElectrostaticModel(EnergyModel): diff --git a/pymatgen/analysis/local_env.py b/pymatgen/analysis/local_env.py index 00a3ab253d0..45289bde122 100644 --- a/pymatgen/analysis/local_env.py +++ b/pymatgen/analysis/local_env.py @@ -666,10 +666,10 @@ def get_local_order_parameters(self, structure: Structure, n: int): lostops = LocalStructOrderParams(types, parameters=params) sites = [structure[n], *self.get_nn(structure, n)] lostop_vals = lostops.get_order_parameters(sites, 0, indices_neighs=list(range(1, cn + 1))) # type: ignore - d = {} + dct = {} for i, lostop in enumerate(lostop_vals): - d[names[i]] = lostop - return d + dct[names[i]] = lostop + return dct return None @@ -4223,7 +4223,7 @@ def extend_structure_molecules(self): @staticmethod def from_preset(preset): """ - Initialise a CutOffDictNN according to a preset set of cut-offs. + Initialize a CutOffDictNN according to a preset set of cut-offs. Args: preset (str): A preset name. The list of supported presets are: diff --git a/pymatgen/analysis/magnetism/heisenberg.py b/pymatgen/analysis/magnetism/heisenberg.py index 460d136dcf6..95b7aff12b9 100644 --- a/pymatgen/analysis/magnetism/heisenberg.py +++ b/pymatgen/analysis/magnetism/heisenberg.py @@ -881,28 +881,28 @@ def as_dict(self): """ Because some dicts have tuple keys, some sanitization is required for json compatibility. """ - d = {} - d["@module"] = type(self).__module__ - d["@class"] = type(self).__name__ - d["@version"] = __version__ - d["formula"] = self.formula - d["structures"] = [s.as_dict() for s in self.structures] - d["energies"] = self.energies - d["cutoff"] = self.cutoff - d["tol"] = self.tol - d["sgraphs"] = [sgraph.as_dict() for sgraph in self.sgraphs] - d["dists"] = self.dists - d["ex_params"] = self.ex_params - d["javg"] = self.javg - d["igraph"] = self.igraph.as_dict() + dct = {} + dct["@module"] = type(self).__module__ + dct["@class"] = type(self).__name__ + dct["@version"] = __version__ + dct["formula"] = self.formula + dct["structures"] = [s.as_dict() for s in self.structures] + dct["energies"] = self.energies + dct["cutoff"] = self.cutoff + dct["tol"] = self.tol + dct["sgraphs"] = [sgraph.as_dict() for sgraph in self.sgraphs] + dct["dists"] = self.dists + dct["ex_params"] = self.ex_params + dct["javg"] = self.javg + dct["igraph"] = self.igraph.as_dict() # Sanitize tuple & int keys - d["ex_mat"] = jsanitize(self.ex_mat) - d["nn_interactions"] = jsanitize(self.nn_interactions) - d["unique_site_ids"] = jsanitize(self.unique_site_ids) - d["wyckoff_ids"] = jsanitize(self.wyckoff_ids) + dct["ex_mat"] = jsanitize(self.ex_mat) + dct["nn_interactions"] = jsanitize(self.nn_interactions) + dct["unique_site_ids"] = jsanitize(self.unique_site_ids) + dct["wyckoff_ids"] = jsanitize(self.wyckoff_ids) - return d + return dct @classmethod def from_dict(cls, d): diff --git a/pymatgen/analysis/structure_prediction/substitution_probability.py b/pymatgen/analysis/structure_prediction/substitution_probability.py index 323b7375381..b03aed4aec7 100644 --- a/pymatgen/analysis/structure_prediction/substitution_probability.py +++ b/pymatgen/analysis/structure_prediction/substitution_probability.py @@ -165,15 +165,15 @@ def as_dict(self): } @classmethod - def from_dict(cls, d): + def from_dict(cls, dct): """ Args: - d(dict): Dict representation + dct (dict): Dict representation Returns: Class """ - return cls(**d["init_args"]) + return cls(**dct["init_args"]) class SubstitutionPredictor: diff --git 
a/pymatgen/apps/borg/hive.py b/pymatgen/apps/borg/hive.py index 2f9669578b8..cb5bf78fc50 100644 --- a/pymatgen/apps/borg/hive.py +++ b/pymatgen/apps/borg/hive.py @@ -184,15 +184,15 @@ def as_dict(self): } @classmethod - def from_dict(cls, d): + def from_dict(cls, dct): """ Args: - d (dict): Dict Representation + dct (dict): Dict Representation Returns: VaspToComputedEntryDrone """ - return cls(**d["init_args"]) + return cls(**dct["init_args"]) class SimpleVaspToComputedEntryDrone(VaspToComputedEntryDrone): @@ -297,15 +297,15 @@ def as_dict(self): } @classmethod - def from_dict(cls, d): + def from_dict(cls, dct): """ Args: - d (dict): Dict Representation + dct (dict): Dict Representation Returns: SimpleVaspToComputedEntryDrone """ - return cls(**d["init_args"]) + return cls(**dct["init_args"]) class GaussianToComputedEntryDrone(AbstractDrone): @@ -422,15 +422,15 @@ def as_dict(self): } @classmethod - def from_dict(cls, d): + def from_dict(cls, dct): """ Args: - d (dict): Dict Representation + dct (dict): Dict Representation Returns: GaussianToComputedEntryDrone """ - return cls(**d["init_args"]) + return cls(**dct["init_args"]) def _get_transformation_history(path): diff --git a/pymatgen/cli/pmg_config.py b/pymatgen/cli/pmg_config.py index ede367a233d..25232d5c32f 100644 --- a/pymatgen/cli/pmg_config.py +++ b/pymatgen/cli/pmg_config.py @@ -272,17 +272,17 @@ def add_config_var(tokens: list[str], backup_suffix: str) -> None: else: # if neither exists, create new config file fpath = SETTINGS_FILE - d = {} + dct = {} if os.path.exists(fpath): if backup_suffix: shutil.copy(fpath, fpath + backup_suffix) print(f"Existing {fpath} backed up to {fpath}{backup_suffix}") - d = loadfn(fpath) + dct = loadfn(fpath) if len(tokens) % 2 != 0: raise ValueError(f"Uneven number {len(tokens)} of tokens passed to pmg config. 
Needs a value for every key.") for key, val in zip(tokens[0::2], tokens[1::2]): - d[key] = val - dumpfn(d, fpath) + dct[key] = val + dumpfn(dct, fpath) print(f"New {fpath} written!") diff --git a/pymatgen/core/__init__.py b/pymatgen/core/__init__.py index 9d21f11f9ab..5d2d1c21ca2 100644 --- a/pymatgen/core/__init__.py +++ b/pymatgen/core/__init__.py @@ -13,6 +13,7 @@ from pymatgen.core.composition import Composition as Composition from pymatgen.core.lattice import Lattice as Lattice from pymatgen.core.operations import SymmOp as SymmOp +from pymatgen.core.periodic_table import DummySpecie as DummySpecie from pymatgen.core.periodic_table import DummySpecies as DummySpecies from pymatgen.core.periodic_table import Element as Element from pymatgen.core.periodic_table import Species as Species @@ -21,6 +22,7 @@ from pymatgen.core.structure import IMolecule as IMolecule from pymatgen.core.structure import IStructure as IStructure from pymatgen.core.structure import Molecule as Molecule +from pymatgen.core.structure import SiteCollection as SiteCollection from pymatgen.core.structure import Structure as Structure from pymatgen.core.units import ArrayWithUnit as ArrayWithUnit from pymatgen.core.units import FloatWithUnit as FloatWithUnit diff --git a/pymatgen/core/tensors.py b/pymatgen/core/tensors.py index 9fed894051b..77c519c3b85 100644 --- a/pymatgen/core/tensors.py +++ b/pymatgen/core/tensors.py @@ -249,7 +249,7 @@ def get_symbol_dict(self, voigt=True, zero_index=False, **kwargs): list of index groups where tensor values are equivalent to within tolerances """ - d = {} + dct = {} array = self.voigt if voigt else self grouped = self.get_grouped_indices(voigt=voigt, **kwargs) p = 0 if zero_index else 1 @@ -258,8 +258,8 @@ def get_symbol_dict(self, voigt=True, zero_index=False, **kwargs): sym_string += "".join(str(i + p) for i in indices[0]) value = array[indices[0]] if not np.isclose(value, 0): - d[sym_string] = array[indices[0]] - return d + dct[sym_string] = array[indices[0]] + return dct def round(self, decimals=0): """ diff --git a/pymatgen/core/tests/test_units.py b/pymatgen/core/tests/test_units.py index 23748c78cc8..b3444f7e132 100644 --- a/pymatgen/core/tests/test_units.py +++ b/pymatgen/core/tests/test_units.py @@ -106,10 +106,10 @@ def g(): @unitized("pm") def h(): - d = {} + dct = {} for i in range(3): - d[i] = i * 20 - return d + dct[i] = i * 20 + return dct assert str(h()[1]) == "20.0 pm" assert isinstance(h(), dict) diff --git a/pymatgen/ext/matproj.py b/pymatgen/ext/matproj.py index 9e1631ea98d..1ad67235ad1 100644 --- a/pymatgen/ext/matproj.py +++ b/pymatgen/ext/matproj.py @@ -215,44 +215,44 @@ def __init__( try: with open(MP_LOG_FILE) as f: - d = dict(yaml.load(f)) + dct = dict(yaml.load(f)) except (OSError, TypeError): # TypeError: 'NoneType' object is not iterable occurs if MP_LOG_FILE exists but is empty - d = {} + dct = {} - d = d or {} + dct = dct or {} - if "MAPI_DB_VERSION" not in d: - d["MAPI_DB_VERSION"] = {"LOG": {}, "LAST_ACCESSED": None} + if "MAPI_DB_VERSION" not in dct: + dct["MAPI_DB_VERSION"] = {"LOG": {}, "LAST_ACCESSED": None} else: # ensure data is parsed as dict, rather than ordered dict, # due to change in YAML parsing behavior - d["MAPI_DB_VERSION"] = dict(d["MAPI_DB_VERSION"]) + dct["MAPI_DB_VERSION"] = dict(dct["MAPI_DB_VERSION"]) - if "LOG" in d["MAPI_DB_VERSION"]: - d["MAPI_DB_VERSION"]["LOG"] = dict(d["MAPI_DB_VERSION"]["LOG"]) + if "LOG" in dct["MAPI_DB_VERSION"]: + dct["MAPI_DB_VERSION"]["LOG"] = dict(dct["MAPI_DB_VERSION"]["LOG"]) # store a log of 
what database versions are being connected to - if db_version not in d["MAPI_DB_VERSION"]["LOG"]: - d["MAPI_DB_VERSION"]["LOG"][db_version] = 1 + if db_version not in dct["MAPI_DB_VERSION"]["LOG"]: + dct["MAPI_DB_VERSION"]["LOG"][db_version] = 1 else: - d["MAPI_DB_VERSION"]["LOG"][db_version] += 1 + dct["MAPI_DB_VERSION"]["LOG"][db_version] += 1 # alert user if db version changed - last_accessed = d["MAPI_DB_VERSION"]["LAST_ACCESSED"] + last_accessed = dct["MAPI_DB_VERSION"]["LAST_ACCESSED"] if last_accessed and last_accessed != db_version: print( f"This database version has changed from the database last accessed ({last_accessed}).\n" f"Please see release notes on materialsproject.org for information about what has changed." ) - d["MAPI_DB_VERSION"]["LAST_ACCESSED"] = db_version + dct["MAPI_DB_VERSION"]["LAST_ACCESSED"] = db_version # write out new database log if possible # base Exception is not ideal (perhaps a PermissionError, etc.) but this is not critical # and should be allowed to fail regardless of reason try: with open(MP_LOG_FILE, "w") as f: - yaml.dump(d, f) + yaml.dump(dct, f) except Exception: pass diff --git a/pymatgen/io/abinit/abiobjects.py b/pymatgen/io/abinit/abiobjects.py index 81e4f49a1a4..7dec47d0486 100644 --- a/pymatgen/io/abinit/abiobjects.py +++ b/pymatgen/io/abinit/abiobjects.py @@ -600,17 +600,17 @@ def nspden(self): def as_dict(self): """Json friendly dict representation""" - d = {} - d["@module"] = type(self).__module__ - d["@class"] = type(self).__name__ - d["spin_mode"] = self.spin_mode.as_dict() - d["smearing"] = self.smearing.as_dict() - d["algorithm"] = self.algorithm.as_dict() if self.algorithm else None - d["nband"] = self.nband - d["fband"] = self.fband - d["charge"] = self.charge - d["comment"] = self.comment - return d + dct = {} + dct["@module"] = type(self).__module__ + dct["@class"] = type(self).__name__ + dct["spin_mode"] = self.spin_mode.as_dict() + dct["smearing"] = self.smearing.as_dict() + dct["algorithm"] = self.algorithm.as_dict() if self.algorithm else None + dct["nband"] = self.nband + dct["fband"] = self.fband + dct["charge"] = self.charge + dct["comment"] = self.comment + return dct @classmethod def from_dict(cls, d): diff --git a/pymatgen/io/abinit/abitimer.py b/pymatgen/io/abinit/abitimer.py index b94a1d2b11f..5e78a071241 100644 --- a/pymatgen/io/abinit/abitimer.py +++ b/pymatgen/io/abinit/abitimer.py @@ -176,11 +176,11 @@ def parse_line(line): line = line[1:].strip() if inside == 2: - d = {} + dct = {} for tok in line.split(","): key, val = (s.strip() for s in tok.split("=")) - d[key] = float(val) - cpu_time, wall_time = d["cpu_time"], d["wall_time"] + dct[key] = float(val) + cpu_time, wall_time = dct["cpu_time"], dct["wall_time"] elif inside > 5: sections.append(parse_line(line)) diff --git a/pymatgen/io/abinit/inputs.py b/pymatgen/io/abinit/inputs.py index 0643f66456a..0f1f09953cc 100644 --- a/pymatgen/io/abinit/inputs.py +++ b/pymatgen/io/abinit/inputs.py @@ -803,12 +803,12 @@ def add_abiobjects(self, *abi_objects): This function receive a list of ``AbiVarable`` objects and add the corresponding variables to the input. 
""" - d = {} + dct = {} for obj in abi_objects: if not hasattr(obj, "to_abivars"): raise TypeError(f"type {type(obj)}: {obj!r} does not have `to_abivars` method") - d.update(self.set_vars(obj.to_abivars())) - return d + dct.update(self.set_vars(obj.to_abivars())) + return dct def __setitem__(self, key, value): if key in _TOLVARS_SCF and hasattr(self, "_vars") and any(t in self._vars and t != key for t in _TOLVARS_SCF): diff --git a/pymatgen/io/abinit/netcdf.py b/pymatgen/io/abinit/netcdf.py index fefe69f974f..46a2ec07cda 100644 --- a/pymatgen/io/abinit/netcdf.py +++ b/pymatgen/io/abinit/netcdf.py @@ -293,26 +293,26 @@ def read_abinit_hdr(self): Return :class:`AbinitHeader` """ - d = {} + dct = {} for hvar in _HDR_VARIABLES.values(): ncname = hvar.etsf_name if hvar.etsf_name is not None else hvar.name if ncname in self.rootgrp.variables: - d[hvar.name] = self.read_value(ncname) + dct[hvar.name] = self.read_value(ncname) elif ncname in self.rootgrp.dimensions: - d[hvar.name] = self.read_dimvalue(ncname) + dct[hvar.name] = self.read_dimvalue(ncname) else: raise ValueError(f"Cannot find `{ncname}` in `{self.path}`") # Convert scalars to (well) scalars. - if hasattr(d[hvar.name], "shape") and not d[hvar.name].shape: - d[hvar.name] = np.asarray(d[hvar.name]).item() + if hasattr(dct[hvar.name], "shape") and not dct[hvar.name].shape: + dct[hvar.name] = np.asarray(dct[hvar.name]).item() if hvar.name in ("title", "md5_pseudos", "codvsn"): # Convert array of numpy bytes to list of strings if hvar.name == "codvsn": - d[hvar.name] = "".join(bs.decode("utf-8").strip() for bs in d[hvar.name]) + dct[hvar.name] = "".join(bs.decode("utf-8").strip() for bs in dct[hvar.name]) else: - d[hvar.name] = ["".join(bs.decode("utf-8") for bs in astr).strip() for astr in d[hvar.name]] + dct[hvar.name] = ["".join(bs.decode("utf-8") for bs in astr).strip() for astr in dct[hvar.name]] - return AbinitHeader(d) + return AbinitHeader(dct) def structure_from_ncdata(ncdata, site_properties=None, cls=Structure): @@ -344,12 +344,12 @@ def structure_from_ncdata(ncdata, site_properties=None, cls=Structure): type_idx = type_atom[atom] - 1 species[atom] = int(znucl_type[type_idx]) - d = {} + dct = {} if site_properties is not None: for prop in site_properties: - d[prop] = ncdata.read_value(prop) + dct[prop] = ncdata.read_value(prop) - structure = cls(lattice, species, red_coords, site_properties=d) + structure = cls(lattice, species, red_coords, site_properties=dct) # Quick and dirty hack. # I need an abipy structure since I need to_abivars and other methods. diff --git a/pymatgen/io/abinit/pseudos.py b/pymatgen/io/abinit/pseudos.py index f269515bdf5..0f561f7b28b 100644 --- a/pymatgen/io/abinit/pseudos.py +++ b/pymatgen/io/abinit/pseudos.py @@ -1705,18 +1705,18 @@ def zlist(self): def as_dict(self, **kwargs): """Return dictionary for MSONable protocol.""" - d = {} + dct = {} for p in self: k, count = p.element.name, 1 # k, count = p.element, 1 # Handle multiple-pseudos with the same name! 
- while k in d: + while k in dct: k += k.split("#")[0] + "#" + str(count) count += 1 - d.update({k: p.as_dict()}) - d["@module"] = type(self).__module__ - d["@class"] = type(self).__name__ - return d + dct.update({k: p.as_dict()}) + dct["@module"] = type(self).__module__ + dct["@class"] = type(self).__name__ + return dct @classmethod def from_dict(cls, d): @@ -1744,13 +1744,13 @@ def all_combinations_for_elements(self, element_symbols): table.all_combinations_for_elements(["Li", "F"]) """ - d = {} + dct = {} for symbol in element_symbols: - d[symbol] = self.select_symbols(symbol, ret_list=True) + dct[symbol] = self.select_symbols(symbol, ret_list=True) from itertools import product - return list(product(*d.values())) + return list(product(*dct.values())) def pseudo_with_symbol(self, symbol, allow_multi=False): """ diff --git a/pymatgen/io/abinit/tests/test_netcdf.py b/pymatgen/io/abinit/tests/test_netcdf.py index d8cdcf4e65d..a3a9ef08c08 100644 --- a/pymatgen/io/abinit/tests/test_netcdf.py +++ b/pymatgen/io/abinit/tests/test_netcdf.py @@ -24,12 +24,10 @@ def ref_file(filename): class ETSF_Reader_TestCase(PymatgenTest): def setUp(self): - formulas = [ - "Si2", - ] - self.GSR_paths = d = {} + formulas = ["Si2"] + self.GSR_paths = dct = {} for formula in formulas: - d[formula] = ref_file(formula + "_GSR.nc") + dct[formula] = ref_file(formula + "_GSR.nc") @unittest.skipIf(netCDF4 is None, "Requires Netcdf4") def test_read_Si2(self): diff --git a/pymatgen/io/cif.py b/pymatgen/io/cif.py index 79a01b9f3c0..814a69a260a 100644 --- a/pymatgen/io/cif.py +++ b/pymatgen/io/cif.py @@ -253,7 +253,7 @@ def from_string(cls, string): :param string: String representation. :return: CifFile """ - d = {} + dct = {} for x in re.split(r"^\s*data_", "x\n" + string, flags=re.MULTILINE | re.DOTALL)[1:]: # Skip over Cif block that contains powder diffraction data. 
# Some elements in this block were missing from CIF files in @@ -263,8 +263,8 @@ def from_string(cls, string): if "powder_pattern" in re.split(r"\n", x, 1)[0]: continue c = CifBlock.from_string("data_" + x) - d[c.header] = c - return cls(d, string) + dct[c.header] = c + return cls(dct, string) @classmethod def from_file(cls, filename): @@ -1221,12 +1221,12 @@ def as_dict(self): """ :return: MSONable dict """ - d = {} + dct = {} for k, v in self._cif.data.items(): - d[k] = {} + dct[k] = {} for k2, v2 in v.data.items(): - d[k][k2] = v2 - return d + dct[k][k2] = v2 + return dct @property def has_errors(self): @@ -1417,9 +1417,9 @@ def __init__( "_atom_site_moment_crystalaxis_z", ] ) - d = {} - d[comp.reduced_formula] = CifBlock(block, loops, comp.reduced_formula) - self._cf = CifFile(d) + dct = {} + dct[comp.reduced_formula] = CifBlock(block, loops, comp.reduced_formula) + self._cf = CifFile(dct) @property def ciffile(self): diff --git a/pymatgen/io/cp2k/inputs.py b/pymatgen/io/cp2k/inputs.py index 011792c5ba6..685df04cd09 100644 --- a/pymatgen/io/cp2k/inputs.py +++ b/pymatgen/io/cp2k/inputs.py @@ -126,16 +126,16 @@ def as_dict(self): """ Get a dictionary representation of the Keyword """ - d = {} - d["@module"] = type(self).__module__ - d["@class"] = type(self).__name__ - d["name"] = self.name - d["values"] = self.values - d["description"] = self.description - d["repeats"] = self.repeats - d["units"] = self.units - d["verbose"] = self.verbose - return d + dct = {} + dct["@module"] = type(self).__module__ + dct["@class"] = type(self).__name__ + dct["name"] = self.name + dct["values"] = self.values + dct["description"] = self.description + dct["repeats"] = self.repeats + dct["units"] = self.units + dct["verbose"] = self.verbose + return dct def get_string(self): """ @@ -146,7 +146,7 @@ def get_string(self): @classmethod def from_dict(cls, d): """ - Initialise from dictionary + Initialize from dictionary """ return Keyword( d["name"], @@ -160,7 +160,7 @@ def from_dict(cls, d): @staticmethod def from_string(s): """ - Initialise from a string. + Initialize from a string. Keywords must be labeled with strings. If the postprocessor finds that the keywords is a number, then None is return (used by diff --git a/pymatgen/io/fiesta.py b/pymatgen/io/fiesta.py index d2bdc9629af..98082c554ef 100644 --- a/pymatgen/io/fiesta.py +++ b/pymatgen/io/fiesta.py @@ -800,8 +800,8 @@ def _parse_job(output): m = GW_BANDS_results_patt.search(line) if m: - d = {} - d.update( + dct = {} + dct.update( band=m.group(1).strip(), eKS=m.group(2), eXX=m.group(3), @@ -812,17 +812,17 @@ def _parse_job(output): sigma_c_SCF=m.group(8), eQP_SCF=m.group(9), ) - GW_results[m.group(1).strip()] = d + GW_results[m.group(1).strip()] = dct n = GW_GAPS_results_patt.search(line) if n: - d = {} - d.update( + dct = {} + dct.update( Egap_KS=n.group(1), Egap_QP_Linear=n.group(2), Egap_QP_SCF=n.group(3), ) - GW_results["Gaps"] = d + GW_results["Gaps"] = dct if line.find("GW Results") != -1: parse_gw_results = True @@ -883,9 +883,9 @@ def _parse_job(output): m = BSE_exitons_patt.search(line) if m: - d = {} - d.update(bse_eig=m.group(2), osc_strength=m.group(3)) - BSE_results[str(m.group(1).strip())] = d + dct = {} + dct.update(bse_eig=m.group(2), osc_strength=m.group(3)) + BSE_results[str(m.group(1).strip())] = dct if line.find("FULL BSE eig.(eV), osc. 
strength and dipoles:") != -1: parse_BSE_results = True diff --git a/pymatgen/io/lammps/outputs.py b/pymatgen/io/lammps/outputs.py index 56260c9ea04..4f9fd561038 100644 --- a/pymatgen/io/lammps/outputs.py +++ b/pymatgen/io/lammps/outputs.py @@ -86,14 +86,14 @@ def as_dict(self): """ Returns: MSONable dict """ - d = {} - d["@module"] = type(self).__module__ - d["@class"] = type(self).__name__ - d["timestep"] = self.timestep - d["natoms"] = self.natoms - d["box"] = self.box.as_dict() - d["data"] = self.data.to_json(orient="split") - return d + dct = {} + dct["@module"] = type(self).__module__ + dct["@class"] = type(self).__name__ + dct["timestep"] = self.timestep + dct["natoms"] = self.natoms + dct["box"] = self.box.as_dict() + dct["data"] = self.data.to_json(orient="split") + return dct def parse_lammps_dumps(file_pattern): diff --git a/pymatgen/io/qchem/outputs.py b/pymatgen/io/qchem/outputs.py index 22c690844c9..43e72a11aae 100644 --- a/pymatgen/io/qchem/outputs.py +++ b/pymatgen/io/qchem/outputs.py @@ -2138,11 +2138,11 @@ def as_dict(self): Returns: MSONable dict. """ - d = {} - d["data"] = self.data - d["text"] = self.text - d["filename"] = self.filename - return jsanitize(d, strict=True) + dct = {} + dct["data"] = self.data + dct["text"] = self.text + dct["filename"] = self.filename + return jsanitize(dct, strict=True) def check_for_structure_changes(mol1: Molecule, mol2: Molecule) -> str: diff --git a/pymatgen/phonon/bandstructure.py b/pymatgen/phonon/bandstructure.py index 5e91e4ef854..811353d9af8 100644 --- a/pymatgen/phonon/bandstructure.py +++ b/pymatgen/phonon/bandstructure.py @@ -484,10 +484,10 @@ def as_phononwebsite(self): Return a dictionary with the phononwebsite format: http://henriquemiranda.github.io/phononwebsite """ - d = {} + dct = {} # define the lattice - d["lattice"] = self.structure.lattice._matrix.tolist() + dct["lattice"] = self.structure.lattice._matrix.tolist() # define atoms atom_pos_car = [] @@ -499,21 +499,21 @@ def as_phononwebsite(self): atom_types.append(site.species_string) # default for now - d["repetitions"] = get_reasonable_repetitions(len(atom_pos_car)) + dct["repetitions"] = get_reasonable_repetitions(len(atom_pos_car)) - d["natoms"] = len(atom_pos_car) - d["atom_pos_car"] = atom_pos_car - d["atom_pos_red"] = atom_pos_red - d["atom_types"] = atom_types - d["atom_numbers"] = self.structure.atomic_numbers - d["formula"] = self.structure.formula - d["name"] = self.structure.formula + dct["natoms"] = len(atom_pos_car) + dct["atom_pos_car"] = atom_pos_car + dct["atom_pos_red"] = atom_pos_red + dct["atom_types"] = atom_types + dct["atom_numbers"] = self.structure.atomic_numbers + dct["formula"] = self.structure.formula + dct["name"] = self.structure.formula # get qpoints qpoints = [] for q in self.qpoints: qpoints.append(list(q.frac_coords)) - d["qpoints"] = qpoints + dct["qpoints"] = qpoints # get labels hsq_dict = {} @@ -540,14 +540,14 @@ def as_phononwebsite(self): dist += np.linalg.norm(q1 - q2) distances.append(dist) line_breaks.append((nqstart, len(qpoints))) - d["distances"] = distances - d["line_breaks"] = line_breaks - d["highsym_qpts"] = list(hsq_dict.items()) + dct["distances"] = distances + dct["line_breaks"] = line_breaks + dct["highsym_qpts"] = list(hsq_dict.items()) # eigenvalues thz2cm1 = 33.35641 bands = self.bands.copy() * thz2cm1 - d["eigenvalues"] = bands.T.tolist() + dct["eigenvalues"] = bands.T.tolist() # eigenvectors eigenvectors = self.eigendisplacements.copy() @@ -555,9 +555,9 @@ def as_phononwebsite(self): eigenvectors = 
eigenvectors.swapaxes(0, 1) eigenvectors = np.array([eigenvectors.real, eigenvectors.imag]) eigenvectors = np.rollaxis(eigenvectors, 0, 5) - d["vectors"] = eigenvectors.tolist() + dct["vectors"] = eigenvectors.tolist() - return d + return dct def band_reorder(self): """ diff --git a/pymatgen/transformations/advanced_transformations.py b/pymatgen/transformations/advanced_transformations.py index d2eb2591653..9b7631a16c7 100644 --- a/pymatgen/transformations/advanced_transformations.py +++ b/pymatgen/transformations/advanced_transformations.py @@ -1400,14 +1400,14 @@ def _get_disorder_mappings(composition, partitions): """ def _get_replacement_dict_from_partition(partition): - d = {} # to be passed to Structure.replace_species() + dct = {} # to be passed to Structure.replace_species() for sp_list in partition: if len(sp_list) > 1: total_occ = sum(composition[sp] for sp in sp_list) merged_comp = {sp: composition[sp] / total_occ for sp in sp_list} for sp in sp_list: - d[sp] = merged_comp - return d + dct[sp] = merged_comp + return dct disorder_mapping = [_get_replacement_dict_from_partition(p) for p in partitions]
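
Note on the re-export: after this patch, SiteCollection and DummySpecies (plus the legacy DummySpecie alias) can be imported directly from the top-level pymatgen.core namespace instead of only from pymatgen.core.structure / pymatgen.core.periodic_table. A minimal usage sketch, assuming a pymatgen build that includes this change; the NaCl structure below is purely illustrative:

    from pymatgen.core import DummySpecies, Lattice, SiteCollection, Structure

    # SiteCollection is the abstract base shared by Structure and Molecule, so it
    # is convenient for isinstance checks and type hints that accept either kind.
    nacl = Structure(Lattice.cubic(5.69), ["Na", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
    assert isinstance(nacl, SiteCollection)

    # DummySpecies is a placeholder species tied to no real element, e.g. for
    # marking vacancies or generic sublattice sites.
    vacancy = DummySpecies("X")
    print(vacancy.symbol)  # -> "X"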
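
Note on the d -> dct rename: it only changes local variable and parameter names inside as_dict/from_dict implementations; the serialized keys and values are untouched, so stored JSON round-trips exactly as before. Keyword calls such as from_dict(d=...) would need updating, but the usual positional / MontyDecoder path is unaffected. A small round-trip sketch (hypothetical species value, and assuming the ContainsSpecieFilter constructor accepts a list of species strings as its first argument):

    from pymatgen.alchemy.filters import ContainsSpecieFilter

    filt = ContainsSpecieFilter(["O2-"])
    dct = filt.as_dict()                        # {"@module": ..., "@class": ..., "init_args": {...}}
    same = ContainsSpecieFilter.from_dict(dct)  # positional call, unaffected by the rename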