feat: various updates to autograd for torch
BrunoLiegiBastonLiegi committed Sep 5, 2024
1 parent a09f9b2 commit ac117d3
Showing 4 changed files with 86 additions and 21 deletions.
6 changes: 4 additions & 2 deletions src/qiboml/models/abstract.py
@@ -34,11 +34,13 @@ def __call__(self, x):

     @property
     def parameters(self) -> ndarray:
-        return self.backend.cast(self.circuit.get_parameters())
+        return self.backend.cast(self.circuit.get_parameters(), self.backend.np.float64)

     @parameters.setter
     def parameters(self, params: ndarray):
-        self._circuit.set_parameters(self.backend.cast(params.ravel()))
+        self._circuit.set_parameters(
+            self.backend.cast(params.ravel(), self.backend.np.float64)
+        )

     @property
     def circuit(self) -> Circuit:
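
The float64 cast above keeps the parameter vector round-trippable between qibo's per-gate parameter tuples and a flat array. A minimal standalone sketch of that round trip, using plain qibo and numpy rather than the qiboml backend API:

import numpy as np
from qibo import Circuit, gates

c = Circuit(2)
c.add(gates.RY(0, theta=0.1))
c.add(gates.RZ(1, theta=0.2))

# get_parameters() returns one tuple per parametrized gate; casting gives a
# flat float64 array that ravel() can feed back into set_parameters().
flat = np.array(c.get_parameters(), dtype=np.float64)
c.set_parameters(flat.ravel())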
39 changes: 28 additions & 11 deletions src/qiboml/models/pytorch.py
@@ -9,13 +9,14 @@

 import qiboml.models.encoding_decoding as ed
 from qiboml.models.abstract import QuantumCircuitLayer, _run_layers
+from qiboml.operations import differentiation as Diff


-@dataclass
+@dataclass(eq=False)
 class QuantumModel(torch.nn.Module):

     layers: list[QuantumCircuitLayer]
-    differentiation: str = "psr"
+    differentiation: str = "PSR"

     def __post_init__(self):
         super().__init__()
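
A likely reason for eq=False (an inference, not stated in the commit): a default dataclass generates __eq__ and therefore sets __hash__ to None, while torch keeps modules and parameters in hash-based containers. A tiny illustration:

from dataclasses import dataclass

@dataclass
class Plain:
    x: int = 0

try:
    hash(Plain())
except TypeError as e:
    print(e)  # unhashable type: 'Plain' -- eq=False keeps object.__hash__ instead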
@@ -46,17 +47,20 @@ def __post_init__(self):
                 RuntimeError,
                 f"The last layer has to be a `QuantumDecodinglayer`, but is {self.layers[-1]}",
             )
+        self.differentiation = getattr(Diff, self.differentiation)(self.backend)

     def forward(self, x: torch.Tensor):
         if self.backend.name != "pytorch":
             x = x.detach().numpy()
             x = self.backend.cast(x, dtype=x.dtype)
         if torch.is_grad_enabled():
-            x = QuantumModelAutoGrad.apply(x, self.layers)
-            breakpoint()
+            x = QuantumModelAutoGrad.apply(
+                x,
+                *list(self.parameters()),
+                self.layers,
+                self.backend,
+                self.differentiation,
+            )
         else:
             x = _run_layers(x, self.layers)
         if self.backend.name != "pytorch":
             x = torch.as_tensor(np.array(x))
         return x

     @property
@@ -75,12 +79,25 @@ def output_shape(self):
 class QuantumModelAutoGrad(torch.autograd.Function):

     @staticmethod
-    def forward(ctx, x: torch.Tensor, layers: list[QuantumCircuitLayer]):
+    def forward(
+        ctx,
+        x: torch.Tensor,
+        *parameters,
+        layers: list[QuantumCircuitLayer],
+        backend,
+        differentiation,
+    ):
         ctx.save_for_backward(x)
         ctx.layers = layers
-        return _run_layers(x, layers)
+        ctx.differentiation = differentiation
+        x_clone = x.clone().detach().numpy()
+        x_clone = backend.cast(x_clone, dtype=x_clone.dtype)
+        x_clone = torch.as_tensor(np.array(_run_layers(x_clone, layers)))
+        x_clone.requires_grad = True
+        return x_clone

     @staticmethod
     def backward(ctx, grad_output: torch.Tensor):
         (x,) = ctx.saved_tensors
-        return grad_output * self.differentiation.evaluate(x, ctx.layers)
+        gradients = ctx.differentiation.evaluate(x, ctx.layers)
+        return *gradients, None, None, None
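
For context, a minimal generic torch.autograd.Function (not qiboml code) showing the contract QuantumModelAutoGrad follows: backward must return one entry per forward input, with None for the non-differentiable arguments such as the layers, backend and differentiation objects.

import torch

class Scale(torch.autograd.Function):

    @staticmethod
    def forward(ctx, x, factor, note):
        # stash what backward will need
        ctx.factor = factor
        return x * factor

    @staticmethod
    def backward(ctx, grad_output):
        # one gradient per forward input (x, factor, note);
        # only x is a differentiable tensor here
        return grad_output * ctx.factor, None, None

x = torch.ones(3, requires_grad=True)
y = Scale.apply(x, 2.0, "unused")
y.sum().backward()
print(x.grad)  # tensor([2., 2., 2.])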
21 changes: 13 additions & 8 deletions src/qiboml/operations/differentiation.py
@@ -10,10 +10,10 @@

 class PSR:

-    def __init__(
-        self,
-    ):
+    def __init__(self, backend):
+        self.backend = backend
         self.scale_factor = 1.0
+        self.epsilon = 1e-2

     def evaluate(self, x: ndarray, layers: list[QuantumCircuitLayer]):
         gradients = []
@@ -22,14 +22,19 @@ def evaluate(self, x: ndarray, layers: list[QuantumCircuitLayer]):
                 continue
             parameters_bkup = layer.parameters.copy()
             gradients.append(
-                [
-                    self._evaluate_parameter(x, layers, layer, i, parameters_bkup)
-                    for i in range(len(layer.parameters))
-                ]
+                self.backend.cast(
+                    [
+                        self._evaluate_for_parameter(
+                            x, layers, layer, i, parameters_bkup
+                        )
+                        for i in range(len(layer.parameters))
+                    ],
+                    self.backend.np.float64,
+                )
             )
         return gradients

-    def _evaluate_parameter(self, x, layers, layer, index, parameters_bkup):
+    def _evaluate_for_parameter(self, x, layers, layer, index, parameters_bkup):
         outputs = []
         for shift in self._shift_parameters(layer.parameters, index, self.epsilon):
             layer.parameters = shift
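
The shift logic itself (_shift_parameters and the rest of _evaluate_for_parameter) lies outside the shown hunks. As a rough standalone sketch of the parameter-shift idea only, with a hypothetical expectation(params) callable and the textbook pi/2 shift rather than whatever shift qiboml actually applies:

import numpy as np

def shift_rule_derivative(expectation, params, index, shift=np.pi / 2):
    # shift one parameter symmetrically and take the scaled difference
    shifted_plus = params.copy()
    shifted_minus = params.copy()
    shifted_plus[index] += shift
    shifted_minus[index] -= shift
    return (expectation(shifted_plus) - expectation(shifted_minus)) / 2.0

# e.g. for f(theta) = cos(theta), the rule gives exactly -sin(0.3) at theta=0.3
grad = shift_rule_derivative(lambda p: np.cos(p[0]), np.array([0.3]), 0)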
41 changes: 41 additions & 0 deletions tests/test_backprop.py
@@ -0,0 +1,41 @@
import torch
from qibo import hamiltonians
from qibo.backends import NumpyBackend, PyTorchBackend
from qibo.symbols import Z

from qiboml import pytorch as pt
from qiboml.models import ansatze as ans
from qiboml.models import encoding_decoding as ed

# backend = PyTorchBackend()
backend = NumpyBackend()

nqubits = 5
dim = 4
training_layer = ans.ReuploadingLayer(nqubits, backend=backend)
encoding_layer = ed.PhaseEncodingLayer(nqubits, backend=backend)
kwargs = {"backend": backend}
decoding_qubits = range(nqubits)
observable = hamiltonians.SymbolicHamiltonian(
    sum([Z(int(i)) for i in decoding_qubits]),
    nqubits=nqubits,
    backend=backend,
)
kwargs["observable"] = observable
kwargs["analytic"] = True
decoding_layer = ed.ExpectationLayer(nqubits, decoding_qubits, **kwargs)
q_model = pt.QuantumModel(
    layers=[
        encoding_layer,
        training_layer,
        decoding_layer,
    ]
)
print(list(q_model.parameters()))
data = torch.randn(1, 5)
data.requires_grad = True
out = q_model(data)
print(out.requires_grad)
loss = (out - 1.0) ** 2
print(loss.requires_grad)
loss.backward()
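
The script only checks that gradients propagate; a natural next step, not part of this commit, would be a single optimizer update over the model's parameters:

optimizer = torch.optim.SGD(q_model.parameters(), lr=0.05)
optimizer.zero_grad()
loss = (q_model(data) - 1.0) ** 2
loss.backward()
optimizer.step()  # circuit parameters should move along the PSR gradients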
