From 5be23749579620fa803a2ff4ad3460494323ca7d Mon Sep 17 00:00:00 2001 From: Jimmy Charnley Kromann Date: Thu, 15 Feb 2024 20:54:56 +0100 Subject: [PATCH] Iteration --- Makefile | 46 +++---- src/qmllib/kernels/wrappers.py | 186 ---------------------------- src/qmllib/representations/slatm.py | 26 ++-- tests/conftest.py | 51 ++++++++ tests/test_fchl_acsf.py | 41 +++--- tests/test_kernels.py | 23 +--- tests/test_representations.py | 137 ++++++++++---------- 7 files changed, 173 insertions(+), 337 deletions(-) delete mode 100644 src/qmllib/kernels/wrappers.py diff --git a/Makefile b/Makefile index e3791ab5..278beaa6 100644 --- a/Makefile +++ b/Makefile @@ -22,29 +22,29 @@ format: test: ${python} -m pytest -rs \ - tests/test_acsf_linear_angles.py \ - tests/test_acsf.py \ - tests/test_arad.py \ - tests/test_armp.py \ - tests/test_compound.py \ - tests/test_distance.py \ - tests/test_energy_krr_atomic_cmat.py \ - tests/test_energy_krr_bob.py \ - tests/test_energy_krr_cmat.py \ - tests/test_fchl_acsf_energy.py \ - tests/test_fchl_acsf_forces.py \ - tests/test_fchl_acsf.py \ - tests/test_fchl_electric_field.py \ - tests/test_fchl_force.py \ - tests/test_fchl_scalar.py \ - tests/test_kernel_derivatives.py \ - tests/test_kernels.py \ - tests/test_mrmp.py \ - tests/test_neural_network.py \ - tests/test_representations.py \ - tests/test_slatm.py \ - tests/test_solvers.py \ - tests/test_symm_funct.py + tests/test_distance.py \ + tests/test_kernels.py \ + tests/test_representations.py \ + tests/test_slatm.py \ + tests/test_solvers.py + # tests/test_fchl_acsf.py + # tests/test_fchl_acsf_energy.py + # tests/test_fchl_acsf_forces.py \ + # tests/test_fchl_electric_field.py \ + # tests/test_fchl_force.py \ + # tests/test_fchl_scalar.py + # REMOVE tests/test_acsf_linear_angles.py \ + # REMOVE tests/test_acsf.py \ + # tests/test_arad.py \ + # REMOVE tests/test_armp.py \ + # REMOVE tests/test_compound.py \ + # integration tests/test_energy_krr_atomic_cmat.py \ + # integration tests/test_energy_krr_bob.py \ + # integration tests/test_energy_krr_cmat.py \ + # tests/test_kernel_derivatives.py \ + # REMOVE tests/test_mrmp.py \ + # REMOVE tests/test_neural_network.py \ + # REMOVE tests/test_symm_funct.py types: ${python} -m monkeytype run $(which pytest) ./tests/ diff --git a/src/qmllib/kernels/wrappers.py b/src/qmllib/kernels/wrappers.py deleted file mode 100644 index f0132894..00000000 --- a/src/qmllib/kernels/wrappers.py +++ /dev/null @@ -1,186 +0,0 @@ -import numpy as np - -from ..arad import get_local_kernels_arad, get_local_symmetric_kernels_arad -from .fkernels import fget_vector_kernels_gaussian, fget_vector_kernels_laplacian - -# TODO Duplicate function definition -# def get_atomic_kernels_laplacian(mols1, mols2, sigmas): - -# n1 = np.array([mol.natoms for mol in mols1], dtype=np.int32) -# n2 = np.array([mol.natoms for mol in mols2], dtype=np.int32) - -# max1 = np.max(n1) -# max2 = np.max(n2) - -# nm1 = n1.size -# nm2 = n2.size - -# cmat_size = mols1[0].representation.shape[1] - -# x1 = np.zeros((nm1, max1, cmat_size), dtype=np.float64, order="F") -# x2 = np.zeros((nm2, max2, cmat_size), dtype=np.float64, order="F") - -# for imol in range(nm1): -# x1[imol, : n1[imol], :cmat_size] = mols1[imol].representation - -# for imol in range(nm2): -# x2[imol, : n2[imol], :cmat_size] = mols2[imol].representation - -# # Reorder for Fortran speed -# x1 = np.swapaxes(x1, 0, 2) -# x2 = np.swapaxes(x2, 0, 2) - -# sigmas = np.asarray(sigmas, dtype=np.float64) -# nsigmas = sigmas.size - -# return 
fget_vector_kernels_laplacian(x1, x2, n1, n2, sigmas, nm1, nm2, nsigmas) - - -def get_atomic_kernels_laplacian_symmetric(mols, sigmas): - - raise NotImplementedError("x1 is missing definition") - - # n = np.array([mol.natoms for mol in mols], dtype=np.int32) - - # max_atoms = np.max(n) - - # nm = n.size - - # cmat_size = mols[0].representation.shape[1] - - # x = np.zeros((nm, max_atoms, cmat_size), dtype=np.float64, order="F") - - # for imol in range(nm): - # x[imol, : n[imol], :cmat_size] = mols[imol].representation - - # # Reorder for Fortran speed - # x = np.swapaxes(x, 0, 2) - - # sigmas = np.asarray(sigmas, dtype=np.float64) - # nsigmas = sigmas.size - - # return fget_vector_kernels_laplacian(x1, n, sigmas, nm, nsigmas) - - -def arad_local_kernels( - mols1, mols2, sigmas, width=0.2, cut_distance=5.0, r_width=1.0, c_width=0.5 -): - - amax = mols1[0].representation.shape[0] - - nm1 = len(mols1) - nm2 = len(mols2) - - X1 = np.array([mol.representation for mol in mols1]).reshape((nm1, amax, 5, amax)) - X2 = np.array([mol.representation for mol in mols2]).reshape((nm2, amax, 5, amax)) - - K = get_local_kernels_arad( - X1, X2, sigmas, width=width, cut_distance=cut_distance, r_width=r_width, c_width=c_width - ) - - return K - - -def arad_local_symmetric_kernels( - mols1, sigmas, width=0.2, cut_distance=5.0, r_width=1.0, c_width=0.5 -): - - amax = mols1[0].representation.shape[0] - nm1 = len(mols1) - - X1 = np.array([mol.representation for mol in mols1]).reshape((nm1, amax, 5, amax)) - - K = get_local_symmetric_kernels_arad( - X1, sigmas, width=width, cut_distance=cut_distance, r_width=r_width, c_width=c_width - ) - - return K - - -def get_atomic_kernels_laplacian(mols1, mols2, sigmas): - - n1 = np.array([mol.natoms for mol in mols1], dtype=np.int32) - n2 = np.array([mol.natoms for mol in mols2], dtype=np.int32) - - max1 = np.max(n1) - max2 = np.max(n2) - - nm1 = n1.size - nm2 = n2.size - - cmat_size = mols1[0].representation.shape[1] - - x1 = np.zeros((nm1, max1, cmat_size), dtype=np.float64, order="F") - x2 = np.zeros((nm2, max2, cmat_size), dtype=np.float64, order="F") - - for imol in range(nm1): - x1[imol, : n1[imol], :cmat_size] = mols1[imol].representation - - for imol in range(nm2): - x2[imol, : n2[imol], :cmat_size] = mols2[imol].representation - - # Reorder for Fortran speed - x1 = np.swapaxes(x1, 0, 2) - x2 = np.swapaxes(x2, 0, 2) - - sigmas = np.asarray(sigmas, dtype=np.float64) - nsigmas = sigmas.size - - return fget_vector_kernels_laplacian(x1, x2, n1, n2, sigmas, nm1, nm2, nsigmas) - - -def get_atomic_kernels_gaussian(mols1, mols2, sigmas): - - n1 = np.array([mol.natoms for mol in mols1], dtype=np.int32) - n2 = np.array([mol.natoms for mol in mols2], dtype=np.int32) - - max1 = np.max(n1) - max2 = np.max(n2) - - nm1 = n1.size - nm2 = n2.size - - cmat_size = mols1[0].representation.shape[1] - - x1 = np.zeros((nm1, max1, cmat_size), dtype=np.float64, order="F") - x2 = np.zeros((nm2, max2, cmat_size), dtype=np.float64, order="F") - - for imol in range(nm1): - x1[imol, : n1[imol], :cmat_size] = mols1[imol].representation - - for imol in range(nm2): - x2[imol, : n2[imol], :cmat_size] = mols2[imol].representation - - # Reorder for Fortran speed - x1 = np.swapaxes(x1, 0, 2) - x2 = np.swapaxes(x2, 0, 2) - - sigmas = np.array(sigmas, dtype=np.float64) - nsigmas = sigmas.size - - return fget_vector_kernels_gaussian(x1, x2, n1, n2, sigmas, nm1, nm2, nsigmas) - - -def get_atomic_kernels_gaussian_symmetric(mols, sigmas): - - raise NotImplementedError("nm1 not defined and x1 not used") - # n = 
np.array([mol.natoms for mol in mols], dtype=np.int32)
-
-    # max_atoms = np.max(n)
-
-    # nm = n.size
-
-    # cmat_size = mols[0].representation.shape[1]
-
-    # x1 = np.zeros((nm, max_atoms, cmat_size), dtype=np.float64, order="F")
-
-    # for imol in range(nm1):
-    #     x[imol, : n[imol], :cmat_size] = mols[imol].representation
-
-    # # Reorder for Fortran speed
-    # x = np.swapaxes(x, 0, 2)
-
-    # sigmas = np.array(sigmas, dtype=np.float64)
-    # nsigmas = sigmas.size
-
-    # return fget_vector_kernels_gaussian_symmetric(x, n, sigmas, nm, nsigmas)
diff --git a/src/qmllib/representations/slatm.py b/src/qmllib/representations/slatm.py
index 269aaef9..fd31eb26 100644
--- a/src/qmllib/representations/slatm.py
+++ b/src/qmllib/representations/slatm.py
@@ -1,4 +1,4 @@
-import itertools as itl
+import itertools
 
 import numpy as np
 import scipy.spatial.distance as spatial_distance
@@ -50,7 +50,7 @@ def update_m(obj, ia, rcut=9.0, pbc=None):
 
     n1s, n2s, n3s = nns
 
-    n123s_ = np.array(list(itl.product(n1s, n2s, n3s)))
+    n123s_ = np.array(list(itertools.product(n1s, n2s, n3s)))
     n123s = []
     for n123 in n123s_:
         n123u = list(n123)
@@ -58,7 +58,7 @@
             n123s.append(n123u)
 
     nau = len(n123s)
-    n123s = np.array(n123s, np.float)
+    n123s = np.array(n123s, np.float64)
 
     na = len(zs)
     cia = coords[ia]
@@ -134,13 +134,14 @@ def get_sbop(
         assert ia is not None, "#ERROR: plz specify `za and `ia "
 
     if pbc != "000":
-        if rcut < 9.0:
-            raise ValueError()
-        assert iloc, "#ERROR: for periodic system, plz use atomic rpst"
-        zs, coords = update_m(obj, ia, rcut=rcut, pbc=pbc)
+        raise NotImplementedError("Periodic boundary conditions not implemented")
+        # if rcut < 9.0:
+        #     raise ValueError()
+        # assert iloc, "#ERROR: for periodic system, plz use atomic rpst"
+        # zs, coords = update_m(obj, ia, rcut=rcut, pbc=pbc)
 
         # after update of `m, the query atom `ia will become the first atom
-        ia = 0
+        # ia = 0
 
     # bop potential distribution
     r0 = 0.1
@@ -174,11 +175,12 @@ def get_sbot(
         assert ia is not None, "#ERROR: plz specify `za and `ia "
 
     if pbc != "000":
-        assert iloc, "#ERROR: for periodic system, plz use atomic rpst"
-        zs, coords = update_m(obj, ia, rcut=rcut, pbc=pbc)
+        raise NotImplementedError("Periodic boundary conditions not implemented")
+        # assert iloc, "#ERROR: for periodic system, plz use atomic rpst"
+        # zs, coords = update_m(obj, ia, rcut=rcut, pbc=pbc)
 
-        # after update of `m, the query atom `ia will become the first atom
-        ia = 0
+        # # after update of `m, the query atom `ia will become the first atom
+        # ia = 0
 
     # for a normalized gaussian distribution, u should multiply this coeff
     coeff = 1 / np.sqrt(2 * sigma**2 * np.pi) if normalize else 1.0
diff --git a/tests/conftest.py b/tests/conftest.py
index 9cbe412b..abc63c98 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,3 +1,54 @@
 from pathlib import Path
 
+import numpy as np
+
 ASSETS = Path("./tests/assets")
+
+
+def get_asize(list_of_atoms, pad):
+    """Return the maximum number of atoms per element across all molecules, plus `pad`."""
+
+    asize: dict[int, int] = dict()
+
+    # Track the largest per-element count seen in any molecule
+
+    for atoms in list_of_atoms:
+
+        unique_atoms, unique_counts = np.unique(atoms, return_counts=True)
+
+        for atom, count in zip(unique_atoms, unique_counts):
+
+            prev = asize.get(atom, None)
+
+            if prev is None:
+                asize[atom] = count + pad
+                continue
+
+            asize[atom] = max(asize[atom], count + pad)
+
+    # for key, value in mol.natypes.items():
+    #     try:
+    #         asize[key] = max(asize[key], value + pad)
+    #     except KeyError:
+    #         asize[key] = value + pad
+
+    return asize
+
+
+def get_energies(filename: Path):
+    """Returns a 
dictionary with heats of formation for each xyz-file.""" + + with open(filename, "r") as f: + lines = f.readlines() + + energies = dict() + + for line in lines: + tokens = line.split() + + xyz_name = tokens[0] + hof = float(tokens[1]) + + energies[xyz_name] = hof + + return energies diff --git a/tests/test_fchl_acsf.py b/tests/test_fchl_acsf.py index 42bd11b3..92908e2e 100644 --- a/tests/test_fchl_acsf.py +++ b/tests/test_fchl_acsf.py @@ -1,30 +1,28 @@ """ This file contains tests for the atom centred symmetry function module. """ -from __future__ import print_function -import os from copy import deepcopy import numpy as np -np.set_printoptions(linewidth=666, edgeitems=10) -from qmllib import Compound from qmllib.representations import generate_fchl_acsf +from qmllib.utils.xyz_format import read_xyz +from tests.conftest import ASSETS +np.set_printoptions(linewidth=666, edgeitems=10) REP_PARAMS = dict() REP_PARAMS["elements"] = [1, 6, 7] -def get_acsf_numgrad(mol, dx=1e-5): +def get_acsf_numgrad(coordinates, nuclear_charges, dx=1e-5): - true_coords = deepcopy(mol.coordinates) + natoms = len(coordinates) + true_coords = deepcopy(coordinates) - true_rep = generate_fchl_acsf( - mol.nuclear_charges, mol.coordinates, gradients=False, **REP_PARAMS - ) + true_rep = generate_fchl_acsf(nuclear_charges, coordinates, gradients=False, **REP_PARAMS) - gradient = np.zeros((3, mol.natoms, true_rep.shape[0], true_rep.shape[1])) + gradient = np.zeros((3, natoms, true_rep.shape[0], true_rep.shape[1])) for n, coord in enumerate(true_coords): for xyz, x in enumerate(coord): @@ -33,25 +31,25 @@ def get_acsf_numgrad(mol, dx=1e-5): temp_coords[n, xyz] = x + 2.0 * dx (rep, grad) = generate_fchl_acsf( - mol.nuclear_charges, temp_coords, gradients=True, **REP_PARAMS + nuclear_charges, temp_coords, gradients=True, **REP_PARAMS ) gradient[xyz, n] -= rep temp_coords[n, xyz] = x + dx (rep, grad) = generate_fchl_acsf( - mol.nuclear_charges, temp_coords, gradients=True, **REP_PARAMS + nuclear_charges, temp_coords, gradients=True, **REP_PARAMS ) gradient[xyz, n] += 8.0 * rep temp_coords[n, xyz] = x - dx (rep, grad) = generate_fchl_acsf( - mol.nuclear_charges, temp_coords, gradients=True, **REP_PARAMS + nuclear_charges, temp_coords, gradients=True, **REP_PARAMS ) gradient[xyz, n] -= 8.0 * rep temp_coords[n, xyz] = x - 2.0 * dx (rep, grad) = generate_fchl_acsf( - mol.nuclear_charges, temp_coords, gradients=True, **REP_PARAMS + nuclear_charges, temp_coords, gradients=True, **REP_PARAMS ) gradient[xyz, n] += rep @@ -66,23 +64,16 @@ def get_acsf_numgrad(mol, dx=1e-5): def test_fchl_acsf(): - test_dir = os.path.dirname(os.path.realpath(__file__)) - - mol = Compound(xyz=test_dir + "/qm7/0101.xyz") + coordinates, nuclear_charges = read_xyz(ASSETS / "qm7/0101.xyz") (repa, anal_grad) = generate_fchl_acsf( - mol.nuclear_charges, mol.coordinates, gradients=True, **REP_PARAMS + nuclear_charges, coordinates, gradients=True, **REP_PARAMS ) - repb = generate_fchl_acsf(mol.nuclear_charges, mol.coordinates, gradients=False, **REP_PARAMS) + repb = generate_fchl_acsf(nuclear_charges, coordinates, gradients=False, **REP_PARAMS) assert np.allclose(repa, repb), "Error in FCHL-ACSF representation implementation" - num_grad = get_acsf_numgrad(mol) + num_grad = get_acsf_numgrad(coordinates, nuclear_charges) assert np.allclose(anal_grad, num_grad), "Error in FCHL-ACSF gradient implementation" - - -if __name__ == "__main__": - - test_fchl_acsf() diff --git a/tests/test_kernels.py b/tests/test_kernels.py index 03ef2998..eec1c451 100644 --- 
a/tests/test_kernels.py
+++ b/tests/test_kernels.py
@@ -1,7 +1,5 @@
-from pathlib import Path
-
 import numpy as np
-from conftest import ASSETS
+from conftest import ASSETS, get_energies
 from scipy.stats import wasserstein_distance
 from sklearn.decomposition import KernelPCA
 
@@ -20,25 +18,6 @@
 from qmllib.utils.xyz_format import read_xyz
 
 
-def get_energies(filename: Path):
-    """Returns a dictionary with heats of formation for each xyz-file."""
-
-    with open(filename, "r") as f:
-        lines = f.readlines()
-
-    energies = dict()
-
-    for line in lines:
-        tokens = line.split()
-
-        xyz_name = tokens[0]
-        hof = float(tokens[1])
-
-        energies[xyz_name] = hof
-
-    return energies
-
-
 def test_laplacian_kernel():
 
     np.random.seed(666)
diff --git a/tests/test_representations.py b/tests/test_representations.py
index cb8ca792..88e92c58 100644
--- a/tests/test_representations.py
+++ b/tests/test_representations.py
@@ -1,86 +1,85 @@
-import os
-from collections import defaultdict
-
 import numpy as np
+from conftest import ASSETS
 
-import qmllib
-
-
-def get_asize(mols, pad):
-
-    asize = defaultdict()
+from qmllib.representations.representations import (
+    generate_coulomb_matrix,
+    generate_eigenvalue_coulomb_matrix,
+)
+from qmllib.utils.xyz_format import read_xyz
 
-    for mol in mols:
-        for key, value in mol.natypes.items():
-            try:
-                asize[key] = max(asize[key], value + pad)
-            except KeyError:
-                asize[key] = value + pad
 
-    return asize
-
-def test_representations():
+def _get_representations():
 
     files = [
-        "qm7/0101.xyz",
-        "qm7/0102.xyz",
-        "qm7/0103.xyz",
-        "qm7/0104.xyz",
-        "qm7/0105.xyz",
-        "qm7/0106.xyz",
-        "qm7/0107.xyz",
-        "qm7/0108.xyz",
-        "qm7/0109.xyz",
-        "qm7/0110.xyz",
+        ASSETS / "qm7/0101.xyz",
+        ASSETS / "qm7/0102.xyz",
+        ASSETS / "qm7/0103.xyz",
+        ASSETS / "qm7/0104.xyz",
+        ASSETS / "qm7/0105.xyz",
+        ASSETS / "qm7/0106.xyz",
+        ASSETS / "qm7/0107.xyz",
+        ASSETS / "qm7/0108.xyz",
+        ASSETS / "qm7/0109.xyz",
+        ASSETS / "qm7/0110.xyz",
     ]
 
-    path = os.path.dirname(os.path.realpath(__file__))
-
     mols = []
-    for xyz_file in files:
-        mol = qmllib.Compound(xyz=path + "/" + xyz_file)
-        mols.append(mol)
+    for filename in files:
+        coordinates, atoms = read_xyz(filename)
+        mols.append((coordinates, atoms))
+
+    return mols
 
-    size = max(mol.nuclear_charges.size for mol in mols) + 1
+    # size = max(atoms.size for _, atoms in mols) + 1
 
-    asize = get_asize(mols, 1)
+    # asize = get_asize([atoms for atoms in mols], 1)
 
-    coulomb_matrix(mols, size, path)
-    atomic_coulomb_matrix(mols, size, path)
-    eigenvalue_coulomb_matrix(mols, size, path)
-    bob(mols, size, asize, path)
+    # coulomb_matrix(mols, size, path)
+    # atomic_coulomb_matrix(mols, size, path)
+    # eigenvalue_coulomb_matrix(mols, size, path)
+    # bob(mols, size, asize, path)
 
 
-def coulomb_matrix(mols, size, path):
+def test_coulomb_matrix():
+
+    mols = _get_representations()
+    size = max(atoms.size for _, atoms in mols) + 1
 
     # Generate coulomb matrix representation, sorted by row-norm
-    for i, mol in enumerate(mols):
-        mol.generate_coulomb_matrix(size=size, sorting="row-norm")
+    representations = []
+    for coordinates, nuclear_charges in mols:
+        representation = generate_coulomb_matrix(
+            nuclear_charges, coordinates, size=size, sorting="row-norm"
+        )
+        representations.append(representation)
 
-    X_test = np.asarray([mol.representation for mol in mols])
-    X_ref = np.loadtxt(path + "/data/coulomb_matrix_representation_row-norm_sorted.txt")
+    X_test = np.asarray(representations)
+    X_ref = np.loadtxt(ASSETS / "coulomb_matrix_representation_row-norm_sorted.txt")
     assert np.allclose(X_test, X_ref), "Error in coulomb matrix representation"
 
-    # Generate coulomb matrix representation, unsorted, using the Compound class
-    for i, mol in enumerate(mols):
-        mol.generate_coulomb_matrix(size=size, sorting="unsorted")
+    # Generate coulomb matrix representation, unsorted
+    representations = []
+    for coordinates, nuclear_charges in mols:
+        representation = generate_coulomb_matrix(
+            nuclear_charges, coordinates, size=size, sorting="unsorted"
+        )
+        representations.append(representation)
 
-    X_test = np.asarray([mol.representation for mol in mols])
-    X_ref = np.loadtxt(path + "/data/coulomb_matrix_representation_unsorted.txt")
+    X_test = np.asarray(representations)
+    X_ref = np.loadtxt(ASSETS / "data/coulomb_matrix_representation_unsorted.txt")
     assert np.allclose(X_test, X_ref), "Error in coulomb matrix representation"
 
 
-def atomic_coulomb_matrix(mols, size, path):
+def test_atomic_coulomb_matrix():
+
+    mols = _get_representations()
+    size = max(atoms.size for _, atoms in mols) + 1
 
     # Generate coulomb matrix representation, sorted by distance
     for i, mol in enumerate(mols):
         mol.generate_atomic_coulomb_matrix(size=size, sorting="distance")
 
     X_test = np.concatenate([mol.representation for mol in mols])
-    X_ref = np.loadtxt(path + "/data/atomic_coulomb_matrix_representation_distance_sorted.txt")
+    X_ref = np.loadtxt(ASSETS / "atomic_coulomb_matrix_representation_distance_sorted.txt")
     assert np.allclose(X_test, X_ref), "Error in atomic coulomb matrix representation"
 
     # Compare to old implementation (before 'indices' keyword)
     X_ref = np.loadtxt(
-        path + "/data/atomic_coulomb_matrix_representation_distance_sorted_no_indices.txt"
+        ASSETS / "atomic_coulomb_matrix_representation_distance_sorted_no_indices.txt"
    )
     assert np.allclose(X_test, X_ref), "Error in atomic coulomb matrix representation"
 
@@ -89,7 +88,7 @@ def atomic_coulomb_matrix(mols, size, path):
         mol.generate_atomic_coulomb_matrix(size=size, sorting="row-norm")
 
     X_test = np.concatenate([mol.representation for mol in mols])
-    X_ref = np.loadtxt(path + "/data/atomic_coulomb_matrix_representation_row-norm_sorted.txt")
+    X_ref = np.loadtxt(ASSETS / "atomic_coulomb_matrix_representation_row-norm_sorted.txt")
     assert np.allclose(X_test, X_ref), "Error in atomic coulomb matrix representation"
 
     # Generate coulomb matrix representation, sorted by distance, with soft cutoffs
@@ -105,7 +104,7 @@ def atomic_coulomb_matrix(mols, size, path):
 
     X_test = np.concatenate([mol.representation for mol in mols])
     X_ref = np.loadtxt(
-        path + "/data/atomic_coulomb_matrix_representation_distance_sorted_with_cutoff.txt"
+        ASSETS / "data/atomic_coulomb_matrix_representation_distance_sorted_with_cutoff.txt"
     )
     assert np.allclose(X_test, X_ref), "Error in atomic coulomb matrix representation"
 
@@ -122,7 +121,7 @@ def atomic_coulomb_matrix(mols, size, path):
 
     X_test = np.concatenate([mol.representation for mol in mols])
     X_ref = np.loadtxt(
-        path + "/data/atomic_coulomb_matrix_representation_row-norm_sorted_with_cutoff.txt"
+        ASSETS / "data/atomic_coulomb_matrix_representation_row-norm_sorted_with_cutoff.txt"
     )
     assert np.allclose(X_test, X_ref), "Error in atomic coulomb matrix representation"
 
@@ -155,39 +154,39 @@ def atomic_coulomb_matrix(mols, size, path):
         ), "Error in atomic coulomb matrix representation"
 
 
-def eigenvalue_coulomb_matrix(mols, size, path):
+def test_eigenvalue_coulomb_matrix():
+
+    mols = _get_representations()
+    size = max(atoms.size for _, atoms in mols) + 1
 
-    # Generate coulomb matrix representation, sorted by row-norm
-    for i, mol in enumerate(mols):
-        mol.generate_eigenvalue_coulomb_matrix(size=size)
+    # Generate eigenvalue coulomb matrix representation
+    # NOTE: assumes generate_eigenvalue_coulomb_matrix is available alongside
+    # generate_coulomb_matrix in qmllib.representations.representations
+    representations = []
+    for coordinates, nuclear_charges in mols:
+        representation = generate_eigenvalue_coulomb_matrix(
+            nuclear_charges, coordinates, size=size
+        )
+        representations.append(representation)
 
-    X_test = np.asarray([mol.representation for mol in mols])
-    X_ref = np.loadtxt(path + "/data/eigenvalue_coulomb_matrix_representation.txt")
+    X_test = np.asarray(representations)
+    X_ref = np.loadtxt(ASSETS / "eigenvalue_coulomb_matrix_representation.txt")
     assert 
np.allclose(X_test, X_ref), "Error in eigenvalue coulomb matrix representation" -def bob(mols, size, asize, path): +def test_bob(mols, size, asize, path): for i, mol in enumerate(mols): mol.generate_bob(size=size, asize=asize) X_test = np.asarray([mol.representation for mol in mols]) - X_ref = np.loadtxt(path + "/data/bob_representation.txt") + X_ref = np.loadtxt(ASSETS / "bob_representation.txt") assert np.allclose(X_test, X_ref), "Error in bag of bonds representation" -def print_mol(mol): - n = len(mol.representation.shape) - if n == 1: - for item in mol.representation: - print("{:.9e}".format(item), end=" ") - print() - elif n == 2: - for atom in mol.representation: - for item in atom: - print("{:.9e}".format(item), end=" ") - print() +# def print_mol(mol): +# n = len(mol.representation.shape) +# if n == 1: +# for item in mol.representation: +# print("{:.9e}".format(item), end=" ") +# print() +# elif n == 2: +# for atom in mol.representation: +# for item in atom: +# print("{:.9e}".format(item), end=" ") +# print() -if __name__ == "__main__": - test_representations() +# if __name__ == "__main__": +# test_representations()