diff --git a/src/qiboml/models/encoding.py b/src/qiboml/models/encoding.py index ff9f08d..b32aefa 100644 --- a/src/qiboml/models/encoding.py +++ b/src/qiboml/models/encoding.py @@ -62,7 +62,6 @@ def __call__(self, x: ndarray) -> Circuit: f"Invalid input dimension {x.shape[-1]}, but the allocated qubits are {self.qubits}.", ) circuit = self.circuit.copy() - ones = np.flatnonzero(x.ravel() == 1) - for bit in ones: - circuit.add(gates.X(self.qubits[bit])) + for qubit, bit in zip(self.qubits, x.ravel()): + circuit.add(gates.RX(qubit, theta=bit * np.pi, trainable=False)) return circuit diff --git a/tests/.#test_models_interfaces.py b/tests/.#test_models_interfaces.py deleted file mode 120000 index 8d70c74..0000000 --- a/tests/.#test_models_interfaces.py +++ /dev/null @@ -1 +0,0 @@ -andrea@ubuntu-desktop.6447:1732264529 \ No newline at end of file diff --git a/tests/test_models_encoding.py b/tests/test_models_encoding.py index a2324de..d2b3dff 100644 --- a/tests/test_models_encoding.py +++ b/tests/test_models_encoding.py @@ -10,8 +10,8 @@ def test_binary_encoding_layer(backend): layer = ed.BinaryEncoding(nqubits, qubits=qubits) data = backend.cast(np.random.choice([0, 1], size=(len(qubits),))) c = layer(data) - indices = [gate.qubits[0] for gate in c.queue if gate.name == "x"] - assert [qubits[i] for i in np.flatnonzero(data == 1)] == indices + for bit, gate in zip(data, c.queue): + assert bit == gate.init_kwargs["theta"] / np.pi # test shape error with pytest.raises(RuntimeError): layer(backend.cast(np.random.choice([0, 1], size=(len(qubits) - 1,)))) diff --git a/tests/test_models_interfaces.py b/tests/test_models_interfaces.py index b55629a..1618193 100644 --- a/tests/test_models_interfaces.py +++ b/tests/test_models_interfaces.py @@ -94,7 +94,7 @@ def random_tensor(frontend, shape, binary=False): def train_model(frontend, model, data, target): - max_epochs = 30 + max_epochs = 10 if frontend.__name__ == "qiboml.models.pytorch": optimizer = 
frontend.torch.optim.Adam(model.parameters()) @@ -168,7 +168,10 @@ def random_parameters(frontend, model): if frontend.__name__ == "qiboml.models.pytorch": new_params = {} for k, v in model.state_dict().items(): - new_params.update({k: v + frontend.torch.randn(v.shape) / 2}) + new_params.update( + {k: v + frontend.torch.randn(v.shape) / 5} + ) # gaussian perturbation with std 0.2 + # of the original parameters elif frontend.__name__ == "qiboml.models.keras": new_params = [frontend.tf.random.uniform(model.get_weights()[0].shape)] return new_params @@ -230,11 +233,11 @@ def test_encoding(backend, frontend, layer, seed): target = prepare_targets(frontend, q_model, data) backprop_test(frontend, q_model, data, target) - data = random_tensor(frontend, (100, 32)) + data = random_tensor(frontend, (100, 4)) model = build_sequential_model( frontend, [ - build_linear_layer(frontend, 32, dim), + build_linear_layer(frontend, 4, dim), q_model, build_linear_layer(frontend, 2**nqubits, 1), ], @@ -290,12 +293,12 @@ def test_decoding(backend, frontend, layer, seed, analytic): model = build_sequential_model( frontend, [ - build_linear_layer(frontend, 32, dim), + build_linear_layer(frontend, 4, dim), q_model, build_linear_layer(frontend, q_model.output_shape[-1], 1), ], ) - data = random_tensor(frontend, (100, 32)) + data = random_tensor(frontend, (100, 4)) target = prepare_targets(frontend, model, data) backprop_test(frontend, model, data, target)