Skip to content

Commit

Permalink
fix: finally made the keras interface work
Browse files Browse the repository at this point in the history
  • Loading branch information
BrunoLiegiBastonLiegi committed Jul 7, 2024
1 parent 76a2b2b commit 86969bd
Show file tree
Hide file tree
Showing 5 changed files with 54 additions and 50 deletions.
42 changes: 24 additions & 18 deletions poetry.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ numba = "^0.59.0"
tensorflow = { version = "^2.16.1", markers = "sys_platform == 'linux' or sys_platform == 'darwin'" }
# TODO: the marker is a temporary solution due to the lack of the tensorflow-io 0.32.0's wheels for Windows, this package is one of
# the tensorflow requirements
qibo = "^0.2.6"
qibo = {git="https://github.com/qiboteam/qibo", branch="qiboml_models_updates"}

[tool.poetry.group.dev]
optional = true
Expand Down
9 changes: 7 additions & 2 deletions src/qiboml/models/encoding_decoding.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ def __post_init__(self):
self.circuit.add(gates.RZ(q, theta=0.0))

def forward(self, x: "ndarray") -> Circuit:
self.circuit.set_parameters(x)
self.circuit.set_parameters(x.ravel())
return self.circuit


Expand Down Expand Up @@ -92,9 +92,14 @@ def __post_init__(self):

def forward(self, x: Circuit) -> "ndarray":
return self.observable.expectation_from_samples(
super().forward(x).frequencies()
super().forward(x).samples(),
input_samples=True,
)

@property
def output_shape(self):
return (1,)


"""
.
Expand Down
8 changes: 7 additions & 1 deletion src/qiboml/models/keras.py
Original file line number Diff line number Diff line change
Expand Up @@ -79,10 +79,16 @@ def __init__(self, layers: list[QuantumCircuitLayer]):
)

def call(self, x: "ndarray"):
for layer in self.layers[:-1]:
for layer in self.layers:
x = layer.forward(x)
return x

def compute_output_shape(self):
return self.layers[-1].output_shape

@property
def nqubits(self):
return self.layers[0].circuit.nqubits

def __hash__(self):
return super().__hash__()
43 changes: 15 additions & 28 deletions tutorials/model_example.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -56,16 +56,16 @@
"name": "stderr",
"output_type": "stream",
"text": [
"[Qibo 0.2.6|INFO|2024-06-20 12:12:39]: Using qibojit (numba) backend on /CPU:0\n"
"[Qibo 0.2.10|INFO|2024-07-07 13:00:30]: Using qibojit (numba) backend on /CPU:0\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"tf.Tensor(1.0, shape=(), dtype=float64)\n",
"tf.Tensor(1.0, shape=(), dtype=float64)\n",
"tf.Tensor(1.0, shape=(), dtype=float64)\n"
"0.486\n",
"0.528\n",
"0.526\n"
]
}
],
Expand Down Expand Up @@ -93,9 +93,9 @@
"> Model: Sequential(\n",
" (0): Linear(in_features=128, out_features=5, bias=True)\n",
" (1): Sigmoid()\n",
" (2): BinaryEncodingLayer(nqubits=5, qubits=[0, 1, 2, 3, 4], circuit=<qibo.models.circuit.Circuit object at 0x35734a490>, initial_state=None, backend=tensorflow)\n",
" (3): ReuploadingLayer(nqubits=5, qubits=(0, 2, 4), circuit=<qibo.models.circuit.Circuit object at 0x354a67990>, initial_state=None, backend=tensorflow)\n",
" (4): QuantumDecodingLayer(nqubits=5, qubits=<range_iterator object at 0x357448d20>, circuit=<qibo.models.circuit.Circuit object at 0x35131c990>, initial_state=None, backend=tensorflow, nshots=1000)\n",
" (2): BinaryEncodingLayer(nqubits=5, qubits=[0, 1, 2, 3, 4], circuit=<qibo.models.circuit.Circuit object at 0x36d7e9f10>, initial_state=None, backend=tensorflow)\n",
" (3): ReuploadingLayer(nqubits=5, qubits=(0, 2, 4), circuit=<qibo.models.circuit.Circuit object at 0x35afcc650>, initial_state=None, backend=tensorflow)\n",
" (4): QuantumDecodingLayer(nqubits=5, qubits=<range_iterator object at 0x36de9f120>, circuit=<qibo.models.circuit.Circuit object at 0x35fa6e5d0>, initial_state=None, backend=tensorflow, nshots=1000)\n",
")\n",
"> Parameters\n",
"0.weight torch.Size([5, 128])\n",
Expand Down Expand Up @@ -150,21 +150,10 @@
"output_type": "stream",
"text": [
"> Model: <Sequential name=sequential, built=False>\n",
"> Outputs\n"
]
},
{
"ename": "OperatorNotAllowedInGraphError",
"evalue": "Exception encountered when calling Model.call().\n\n\u001b[1mIterating over a symbolic `tf.Tensor` is not allowed. You can attempt the following resolutions to the problem: If you are running in Graph mode, use Eager execution mode or decorate this function with @tf.function. If you are using AutoGraph, you can try decorating this function with @tf.function. If that does not work, then you may be using an unsupported feature or your source code may not be visible to AutoGraph. See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/limitations.md#access-to-source-code for more information.\u001b[0m\n\nArguments received by Model.call():\n • args=('<KerasTensor shape=(1, 5), dtype=float32, sparse=False, name=keras_tensor_1>',)\n • kwargs=<class 'inspect._empty'>",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mOperatorNotAllowedInGraphError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[5], line 31\u001b[0m\n\u001b[1;32m 29\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m x \u001b[38;5;129;01min\u001b[39;00m data:\n\u001b[1;32m 30\u001b[0m x \u001b[38;5;241m=\u001b[39m tf\u001b[38;5;241m.\u001b[39mexpand_dims(x, axis\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0\u001b[39m)\n\u001b[0;32m---> 31\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[43mmodel\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241m.\u001b[39mprobabilities())\n",
"File \u001b[0;32m~/python_envs/qibo/lib/python3.11/site-packages/keras/src/utils/traceback_utils.py:122\u001b[0m, in \u001b[0;36mfilter_traceback.<locals>.error_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 119\u001b[0m filtered_tb \u001b[38;5;241m=\u001b[39m _process_traceback_frames(e\u001b[38;5;241m.\u001b[39m__traceback__)\n\u001b[1;32m 120\u001b[0m \u001b[38;5;66;03m# To get the full stack trace, call:\u001b[39;00m\n\u001b[1;32m 121\u001b[0m \u001b[38;5;66;03m# `keras.config.disable_traceback_filtering()`\u001b[39;00m\n\u001b[0;32m--> 122\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\u001b[38;5;241m.\u001b[39mwith_traceback(filtered_tb) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m 123\u001b[0m \u001b[38;5;28;01mfinally\u001b[39;00m:\n\u001b[1;32m 124\u001b[0m \u001b[38;5;28;01mdel\u001b[39;00m filtered_tb\n",
"File \u001b[0;32m~/git/qiboml/src/qiboml/models/keras.py:77\u001b[0m, in \u001b[0;36mModel.call\u001b[0;34m(self, x)\u001b[0m\n\u001b[1;32m 75\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mcall\u001b[39m(\u001b[38;5;28mself\u001b[39m, x: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mndarray\u001b[39m\u001b[38;5;124m\"\u001b[39m):\n\u001b[1;32m 76\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m layer \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlayers[:\u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m]:\n\u001b[0;32m---> 77\u001b[0m x \u001b[38;5;241m=\u001b[39m \u001b[43mlayer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mforward\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 78\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m x\n",
"File \u001b[0;32m~/git/qiboml/src/qiboml/models/encoding_decoding.py:30\u001b[0m, in \u001b[0;36mBinaryEncodingLayer.forward\u001b[0;34m(self, x)\u001b[0m\n\u001b[1;32m 28\u001b[0m circuit \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcircuit\u001b[38;5;241m.\u001b[39mcopy()\n\u001b[1;32m 29\u001b[0m ones \u001b[38;5;241m=\u001b[39m x\u001b[38;5;241m.\u001b[39mravel() \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[0;32m---> 30\u001b[0m \u001b[43m\u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mbit\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mones\u001b[49m\u001b[43m:\u001b[49m\n\u001b[1;32m 31\u001b[0m \u001b[43m \u001b[49m\u001b[43mcircuit\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43madd\u001b[49m\u001b[43m(\u001b[49m\u001b[43mgates\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mX\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mqubits\u001b[49m\u001b[43m[\u001b[49m\u001b[43mbit\u001b[49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 32\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m circuit\n",
"\u001b[0;31mOperatorNotAllowedInGraphError\u001b[0m: Exception encountered when calling Model.call().\n\n\u001b[1mIterating over a symbolic `tf.Tensor` is not allowed. You can attempt the following resolutions to the problem: If you are running in Graph mode, use Eager execution mode or decorate this function with @tf.function. If you are using AutoGraph, you can try decorating this function with @tf.function. If that does not work, then you may be using an unsupported feature or your source code may not be visible to AutoGraph. See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/limitations.md#access-to-source-code for more information.\u001b[0m\n\nArguments received by Model.call():\n • args=('<KerasTensor shape=(1, 5), dtype=float32, sparse=False, name=keras_tensor_1>',)\n • kwargs=<class 'inspect._empty'>"
"> Outputs\n",
"0.40480000000000005\n",
"0.4228\n",
"0.40959999999999996\n"
]
}
],
Expand All @@ -181,7 +170,7 @@
"\n",
"q_model = ks.Model(\n",
" layers = [\n",
" ed.BinaryEncodingLayer(5),\n",
" ed.PhaseEncodingLayer(5),\n",
" ans.ReuploadingLayer(nqubits=5, qubits=(0,2,4)),\n",
" ed.ExpectationLayer(nqubits=5, qubits=reversed(range(5)), observable=observable)\n",
" ]\n",
Expand All @@ -199,16 +188,14 @@
"print(\"> Outputs\")\n",
"for x in data:\n",
" x = tf.expand_dims(x, axis=0)\n",
" print(model(x).probabilities())\n",
" print(model(x))\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "71bc58a6-bddd-4397-b402-895574989ebc",
"cell_type": "markdown",
"id": "b166d1b6-77d2-4858-843d-9cb756184c29",
"metadata": {},
"outputs": [],
"source": []
}
],
Expand Down

0 comments on commit 86969bd

Please sign in to comment.