
Commit

fix: finally the parameters are updating with pytorch
BrunoLiegiBastonLiegi committed Sep 27, 2024
1 parent 0857d1e commit 1b6ee4a
Showing 5 changed files with 59 additions and 69 deletions.
70 changes: 34 additions & 36 deletions poetry.lock

(Generated lock file; diff not rendered.)

13 changes: 4 additions & 9 deletions src/qiboml/models/abstract.py
@@ -5,7 +5,7 @@
 from typing import Generator
 
 from qibo import Circuit
-from qibo.backends import Backend
+from qibo.backends import Backend, GlobalBackend
 from qibo.config import raise_error
 from qibo.gates import abstract
 
@@ -19,7 +19,7 @@ class QuantumCircuitLayer(ABC):
     nqubits: int
     qubits: list[int] = None
     _circuit: Circuit = None
-    backend: Backend = JaxBackend()
+    backend: Backend = GlobalBackend()
 
     def __post_init__(self) -> None:
         if self.qubits is None:
@@ -40,18 +40,13 @@ def has_parameters(self):
         return False
 
     @property
-    def parameters(self) -> Generator[ndarray, ndarray, ndarray]:
-        # return self.backend.cast(self.circuit.get_parameters(), self.backend.precision)
+    def parameters(self) -> Generator[ndarray, None, None]:
         return (gate.parameters for gate in self.circuit.trainable_gates)
 
     @parameters.setter
     def parameters(self, params: list[ndarray]):
-        # self._circuit.set_parameters(
-        #     params.ravel()
-        #     self.backend.cast(params.ravel(), self.backend.np.float64)
-        # )
         for param, gate in zip(params, self.circuit.trainable_gates):
-            gate.parameters = param
+            gate.parameters = [param]
 
     @property
     def circuit(self) -> Circuit:
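
The notable fix here is the setter: each array is now wrapped in a single-element list before being assigned to `gate.parameters`, so every trainable gate receives one parameter sequence rather than a bare value. A minimal sketch of the round trip (illustrative values, not part of the commit):

    import numpy as np
    from qibo import Circuit, gates

    circuit = Circuit(1)
    circuit.add(gates.RY(0, theta=0.1))

    # Getter: one entry per trainable gate, as in the new generator.
    current = [gate.parameters for gate in circuit.trainable_gates]

    # Setter: wrap each new value in a list, mirroring the diff above.
    for param, gate in zip([np.float64(0.5)], circuit.trainable_gates):
        gate.parameters = [param]
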
1 change: 0 additions & 1 deletion src/qiboml/models/ansatze.py
@@ -14,7 +14,6 @@ def __post_init__(self):
         params = self.backend.cast(
             [[random.random() - 0.5 for _ in range(2)] for _ in range(self.nqubits)],
             dtype=self.backend.np.float64,
-            requires_grad=True,
         )
         for q, param in zip(self.qubits, params):
             self.circuit.add(gates.RY(q, theta=param[0] * self.backend.np.pi))
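
Dropping `requires_grad=True` from the backend cast pairs with the `pytorch.py` changes below: gradient tracking is now attached when the Torch interface re-wraps each layer's parameters. A rough sketch of that hand-off (not the commit's code):

    import torch

    # The ansatz now produces plain, grad-free values...
    raw = torch.tensor([[0.1, -0.2]], dtype=torch.float64)

    # ...and the Torch interface opts them into autograd by registering
    # them as nn.Parameters (requires_grad=True is the default there).
    leaf = torch.nn.Parameter(torch.as_tensor(raw).squeeze())
    print(leaf.requires_grad)  # True
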
40 changes: 19 additions & 21 deletions src/qiboml/models/pytorch.py
@@ -1,6 +1,7 @@
 """Torch interface to qiboml layers"""
 
 from dataclasses import dataclass
+from typing import Generator
 
 import numpy as np
 import torch
@@ -38,18 +39,19 @@ def __post_init__(self):
                 f"The last layer has to be a `QuantumDecodinglayer`, but is {self.layers[-1]}",
             )
 
-        for layer in self.layers:
+        for j, layer in enumerate(self._trainable_layers):
             for i, params in enumerate(layer.parameters):
-                # if self.backend.name != "pytorch":
-                #     params = torch.as_tensor(
-                #         params, dtype=self.backend.precision
-                #     )
+                params = torch.as_tensor(params)
+                params.requires_grad = True
                 setattr(
                     self,
-                    f"{layer.__class__.__name__}_{i}",
-                    torch.nn.Parameter(torch.as_tensor(params)),
+                    f"{layer.__class__.__name__}-{j}_{i}",
+                    torch.nn.Parameter(params.squeeze()),
                 )
 
+        for i, layer in enumerate(self._trainable_layers):
+            layer.parameters = self._get_parameters_for_layer(i)
+
         self.differentiation = getattr(Diff, self.differentiation)()
 
     def forward(self, x: torch.Tensor):
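
Registering each tensor through `setattr` is what exposes it to `model.parameters()` and hence to the optimizer; the new `-{j}` layer index keeps two trainable layers of the same class from silently overwriting each other's attribute. A toy illustration of the mechanism (hypothetical layer name, not the commit's class):

    import torch

    class Toy(torch.nn.Module):
        def __init__(self):
            super().__init__()
            # Same naming scheme as the diff: <LayerClass>-<layer idx>_<param idx>.
            for j, values in enumerate([[0.1], [0.2]]):
                for i, v in enumerate(values):
                    setattr(self, f"ReuploadingLayer-{j}_{i}",
                            torch.nn.Parameter(torch.tensor(v)))

    print([name for name, _ in Toy().named_parameters()])
    # ['ReuploadingLayer-0_0', 'ReuploadingLayer-1_0']
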
@@ -58,20 +60,6 @@ def forward(self, x: torch.Tensor):
                 x, self.layers, self.backend, self.differentiation, *self.parameters()
             )
         else:
-            # breakpoint()
-            """
-            index = 0
-            circ = self.layers[0](x)
-            parameters = list(self.parameters())
-            for layer in self.layers[1:-1]:
-                if layer.has_parameters and not issubclass(layer.__class__, ed.QuantumEncodingLayer):
-                    layer.parameters = parameters[index]
-                    index += 1
-                circ = layer.forward(circ)
-            #circ.set_parameters([p for param in self.parameters() for p in param])
-            return self.layers[-1](circ)
-            #x = _run_layers(x, self.layers, list(self.parameters()))
-            """
             for layer in self.layers:
                 x = layer(x)
 
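The dead commented-out branch is gone because it is no longer needed: once `__post_init__` rebinds `layer.parameters` to the registered `nn.Parameter` objects, the layers see optimizer updates by reference, so `forward` can simply chain them. A toy illustration of that sharing (not the commit's code):

    import torch

    p = torch.nn.Parameter(torch.tensor(0.1))
    layer_params = [p]                    # what a layer would hold
    optimizer = torch.optim.SGD([p], lr=1.0)

    p.grad = torch.tensor(1.0)            # pretend loss.backward() ran
    optimizer.step()
    print(layer_params[0].item())         # ~ -0.9: the layer sees the update
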
@@ -89,6 +77,16 @@ def backend(self) -> Backend:
     def output_shape(self):
         return self.layers[-1].output_shape
 
+    @property
+    def _trainable_layers(
+        self,
+    ) -> Generator[QuantumCircuitLayer, None, None]:
+        return (layer for layer in self.layers if layer.has_parameters)
+
+    def _get_parameters_for_layer(self, i: int) -> list[torch.nn.Parameter]:
+        layer_name = list(self._trainable_layers)[i].__class__.__name__
+        return [v for k, v in self.named_parameters() if f"{layer_name}-{i}_" in k]
+
 
 class QuantumModelAutoGrad(torch.autograd.Function):
 
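`_get_parameters_for_layer` then recovers a layer's tensors by filtering `named_parameters()` on that naming scheme. Roughly (standalone sketch, hypothetical layer name):

    import torch

    model = torch.nn.Module()
    setattr(model, "EncodingLayer-0_0", torch.nn.Parameter(torch.tensor(0.3)))
    setattr(model, "EncodingLayer-0_1", torch.nn.Parameter(torch.tensor(0.7)))

    i, layer_name = 0, "EncodingLayer"
    params = [v for k, v in model.named_parameters() if f"{layer_name}-{i}_" in k]
    print(len(params))  # 2
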
4 changes: 2 additions & 2 deletions tests/test_backprop.py
@@ -46,11 +46,11 @@ def test_backpropagation(backend, differentiation):
     optimizer.zero_grad()
     output = model(input)
     loss = (target - output) ** 2
-    print(list(model.parameters()))
+    print(list(model.named_parameters()))
     print(f"> loss: {loss}")
     loss.backward()
     optimizer.step()
-    print(list(model.parameters()))
+    print(list(model.named_parameters()))
 
     # print(
     #     f"> Parameters delta: {torch.cat(tuple(p.ravel() for p in model.parameters())) - params_bkp}"
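
Switching the debug prints to `named_parameters()` ties each printed value to the layer-scoped names set up in `pytorch.py`, making it easy to verify that every parameter actually moves after `optimizer.step()`. The surrounding loop is the standard Torch pattern; a self-contained sketch with stand-in fixtures (the real test builds a quantum model):

    import torch

    # Hypothetical stand-ins for the test's model/input/target fixtures.
    model = torch.nn.Linear(2, 1, dtype=torch.float64)
    input = torch.tensor([[0.1, 0.2]], dtype=torch.float64)
    target = torch.tensor([[0.5]], dtype=torch.float64)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

    optimizer.zero_grad()
    loss = ((target - model(input)) ** 2).sum()
    print(list(model.named_parameters()))  # before the update
    loss.backward()
    optimizer.step()
    print(list(model.named_parameters()))  # after: values have moved
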
