diff --git a/community/paper_implementation_project/Quantum Vision Transformer.ipynb b/community/paper_implementation_project/Quantum Vision Transformer.ipynb new file mode 100644 index 000000000..c501e30c6 --- /dev/null +++ b/community/paper_implementation_project/Quantum Vision Transformer.ipynb @@ -0,0 +1,824 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "fb8f88e1-d493-4473-80cd-00042b61f868", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/_internals/authentication/token_manager.py:101: UserWarning: Device is already registered.\n", + "Generating a new refresh token should only be done if the current refresh token is compromised.\n", + "To do so, set the overwrite parameter to true\n", + " warnings.warn(\n" + ] + } + ], + "source": [ + "import classiq\n", + "classiq.authenticate()" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "20d0b3d8-8de2-460b-93d2-bc805561c0c8", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/opt/anaconda3/envs/baler/lib/python3.11/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'dlopen(/opt/anaconda3/envs/baler/lib/python3.11/site-packages/torchvision/image.so, 0x0006): Symbol not found: __ZN3c1017RegisterOperatorsD1Ev\n", + " Referenced from: /opt/anaconda3/envs/baler/lib/python3.11/site-packages/torchvision/image.so\n", + " Expected in: /opt/anaconda3/envs/baler/lib/python3.11/site-packages/torch/lib/libtorch_cpu.dylib'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. 
def execute(
    quantum_program: "SerializedQuantumProgram", arguments: "MultipleArguments"
) -> "ResultsCollection":
    """Execute the QNN program with the notebook-wide shot count applied.

    Wraps ``execute_qnn`` so that ``ExecutionPreferences(num_shots=num_shots)``
    is attached to every execution (the bare ``execute_qnn`` would ignore it).
    """
    quantum_program = set_quantum_program_execution_preferences(
        quantum_program, preferences=ExecutionPreferences(num_shots=num_shots)
    )
    return execute_qnn(quantum_program, arguments)


def post_process(result: "SavedResult", n_qubits=None, shots=None) -> "torch.Tensor":
    """Turn one execution result into a per-qubit |1> probability vector.

    Args:
        result: a SavedResult whose ``.value`` exposes ``counts_of_qubits(k)``
            (a counts dict keyed by bit-string, e.g. {"0": 750, "1": 250}).
        n_qubits: number of qubits to read out; defaults to the module-level
            ``N_QUBITS`` (kept as a fallback for backward compatibility).
        shots: total shot count used for normalization; defaults to the
            module-level ``num_shots``.

    Returns:
        A 1-D float tensor of length ``n_qubits`` where entry k is the
        empirical probability that qubit k was measured as "1".
    """
    if n_qubits is None:
        n_qubits = N_QUBITS
    if shots is None:
        shots = num_shots
    res = result.value
    # dict.get replaces the original membership-test ternary — one lookup, same result.
    yvec = [res.counts_of_qubits(k).get("1", 0) / shots for k in range(n_qubits)]
    return torch.tensor(yvec)
+ "source": [ + "from IPython.display import Image \n", + " \n", + "# get the image \n", + "Image(url=\"axioms-13-00323-g004-550.jpg\", width=800, height=400) \n" + ] + }, + { + "cell_type": "markdown", + "id": "e8638544", + "metadata": {}, + "source": [ + "![title](\"axioms-13-00323-g004-550.jpg\")" + ] + }, + { + "cell_type": "code", + "execution_count": 61, + "id": "5f609226-9ad5-44d3-a62e-0d1760f030fa", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 61, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "Image(url=\"axioms-13-00323-g005-550.jpg\", width=800, height=300) \n" + ] + }, + { + "cell_type": "code", + "execution_count": 62, + "id": "00f92a77-0b94-4fe6-8f9b-2331d4f77939", + "metadata": {}, + "outputs": [], + "source": [ + "class Patchify(torch.nn.Module):\n", + " \"\"\"\n", + " Patchify layer implemented using the Conv2d layer\n", + " \"\"\"\n", + " def __init__(self, in_channels:int, patch_size:int, hidden_size:int):\n", + " super(Patchify, self).__init__()\n", + " self.patch_size = patch_size\n", + " self.conv = torch.nn.Conv2d(in_channels=in_channels, out_channels=hidden_size, kernel_size=self.patch_size, stride=self.patch_size)\n", + " self.hidden_size = hidden_size\n", + " \n", + " def forward(self, x:torch.Tensor):\n", + " bs, c, h, w = x.size()\n", + " self.num_patches = (h // self.patch_size) ** 2\n", + "\n", + " x = self.conv(x)\n", + " x = x.view(bs, self.num_patches, self.hidden_size)\n", + " return x" + ] + }, + { + "cell_type": "markdown", + "id": "fabc06a4", + "metadata": {}, + "source": [ + "##### Quantum Vision Transformer:\n", + "$$\n", + " y = f_{3} \\circ f_{2} \\circ f_{1} \\circ\t f_{0} \\circ(X)\n", + "$$\n", + ", where X - input tensor, y - result, $$f_{i}$$ is the Neural Network Layer\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2849e3a5-3d01-42bf-ae4b-4c79375e8443", + "metadata": {}, + 
"outputs": [], + "source": [ + "class RotaryPositionalEmbedding(torch.nn.Module):\n", + " \"\"\"\n", + " Rotary Positional Embedding\n", + " \"\"\"\n", + " def __init__(self, d_model, max_seq_len):\n", + " super(RotaryPositionalEmbedding, self).__init__()\n", + "\n", + " # Create a rotation matrix.\n", + " self.rotation_matrix = torch.zeros(d_model, d_model)\n", + " for i in range(d_model):\n", + " for j in range(d_model):\n", + " self.rotation_matrix[i, j] = math.cos(i * j * 0.01)\n", + "\n", + " # Create a positional embedding matrix.\n", + " self.positional_embedding = torch.zeros(max_seq_len, d_model)\n", + " for i in range(max_seq_len):\n", + " for j in range(d_model):\n", + " self.positional_embedding[i, j] = math.cos(i * j * 0.01)\n", + "\n", + " def forward(self, x):\n", + " \"\"\"\n", + " Args:\n", + " x: A tensor of shape (batch_size, seq_len, d_model).\n", + "\n", + " Returns:\n", + " A tensor of shape (batch_size, seq_len, d_model).\n", + " \"\"\"\n", + "\n", + " # Add the positional embedding to the input tensor.\n", + " x += self.positional_embedding\n", + "\n", + " # Apply the rotation matrix to the input tensor.\n", + " x = torch.matmul(x, self.rotation_matrix)\n", + "\n", + " return x" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3f0c44e1-a638-4237-abf3-dd5b937abed1", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Quantum program link: https://platform.classiq.io/circuit/2we1LvLdzd9RQ9vSPLE1T3WcIxn?login=True&version=0.77.0\n" + ] + } + ], + "source": [ + "quantum_program = get_circuit()\n", + "show(quantum_program)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19042677-6d63-426c-b12d-9d107b47eecb", + "metadata": {}, + "outputs": [], + "source": [ + "class QuantumLayer(torch.nn.Module):\n", + " \"\"\"\n", + " Quantum Layer\n", + " \"\"\"\n", + " def __init__(self, in_dim, out_dim):\n", + " super(QuantumLayer, self).__init__()\n", + " 
class QuantumLayer(torch.nn.Module):
    """Thin torch wrapper around a Classiq QLayer built from the notebook's VQC."""

    def __init__(self, in_dim, out_dim):
        super().__init__()
        # BUG FIX: the original passed the *global* `quantum_program` (created
        # in an earlier cell) to QLayer instead of the freshly synthesized
        # `self.quantum_program`, and used bare `execute_qnn` instead of the
        # notebook's `execute` wrapper — so ExecutionPreferences(num_shots=...)
        # was never applied.
        self.quantum_program = get_circuit()
        self.quantum_layer = QLayer(self.quantum_program, execute, post_process)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.quantum_layer(x)


class FFN(torch.nn.Module):
    """Feed-forward block: Linear -> per-token quantum layer -> dropout -> GELU -> Linear."""

    def __init__(self, in_dim, hidden_size):
        super().__init__()
        self.linear_1 = torch.nn.Linear(in_dim, hidden_size)
        self.qlinear = QuantumLayer(hidden_size, hidden_size)
        self.dropout = torch.nn.Dropout(p=0.4)
        self.linear_2 = torch.nn.Linear(hidden_size, in_dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        seq_len = x.size(1)
        x = self.linear_1(x)
        # Apply the quantum layer token by token (QLayer consumes 2-D batches).
        tokens = [self.qlinear(x[:, t, :]) for t in range(seq_len)]
        # BUG FIX: `torch.Tensor(pad_sequence(tokens))` both detached the result
        # from the autograd graph and produced a (seq, batch, dim) layout.
        # torch.stack keeps gradients flowing and restores (batch, seq, dim).
        x = torch.stack(tokens, dim=1)
        x = self.dropout(x)
        x = torch.nn.functional.gelu(x)
        return self.linear_2(x)
class qMHA(torch.nn.Module):
    """Quantum multi-head attention: Q/K/V projections are quantum layers.

    NOTE(review): `num_heads` is stored but heads are never actually split,
    and `final_l` is built but unused in forward — preserved for interface
    compatibility; TODO confirm intended design against the paper.
    """

    def __init__(self, in_dim: int, num_heads: int) -> None:
        super().__init__()
        self.k_linear = QuantumLayer(in_dim, in_dim)
        self.q_linear = QuantumLayer(in_dim, in_dim)
        self.v_linear = QuantumLayer(in_dim, in_dim)
        self.dropout = torch.nn.Dropout(p=0.1)
        self.final_l = QuantumLayer(in_dim, in_dim)  # unused in forward (see class note)
        self.num_heads = num_heads
        self.in_dim = in_dim

    def forward(self, X: torch.Tensor) -> torch.Tensor:
        seq_len = X.size(1)
        # Project every token through the quantum Q/K/V layers (2-D batches).
        Q = [self.q_linear(X[:, t, :]) for t in range(seq_len)]
        K = [self.k_linear(X[:, t, :]) for t in range(seq_len)]
        V = [self.v_linear(X[:, t, :]) for t in range(seq_len)]
        # BUG FIX: `torch.Tensor(pad_sequence(...))` detached the projections
        # from autograd and yielded (seq, batch, dim); stack(dim=1) keeps
        # gradients and the expected (batch, seq, dim) layout.
        q = torch.stack(Q, dim=1)
        k = torch.stack(K, dim=1)
        v = torch.stack(V, dim=1)
        # Scaled dot-product attention.
        attention = (q @ k.transpose(-2, -1)) / math.sqrt(k.size(-1))
        attention = torch.nn.functional.softmax(attention, dim=-1)
        attention = self.dropout(attention)
        return attention @ v
" \n", + " y = self.qFFN(x)\n", + " y = self.layer_norm_2(y)+x\n", + " return y" + ] + }, + { + "cell_type": "markdown", + "id": "b125354b", + "metadata": {}, + "source": [ + "##### Feed Forward Neural Network:\n", + "$$\n", + " y = f_{3} \\circ f_{2} \\circ f_{1} \\circ\t f_{0} \\circ(X)\n", + "$$\n", + ", where X - input tensor, y - result, $$f_{i}$$ is the Neural Network Layer\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e6a37ec6-d82c-4784-aa97-94bf80be78c1", + "metadata": {}, + "outputs": [], + "source": [ + "class QVT(torch.nn.Module):\n", + " \"\"\"\n", + " Quantum Vision Transformer;\n", + " \"\"\"\n", + " def __init__(self, in_channels, patch_size, in_dim, hidden_size, num_heads, n_classes, n_layers) -> None:\n", + " super().__init__()\n", + " \n", + " self.d_model = (in_dim//patch_size)**2\n", + " self.n_classes = n_classes\n", + "\n", + " self.patch_formation = Patchify(in_channels=in_channels, patch_size=patch_size, hidden_size=hidden_size)\n", + "\n", + " self.pos_encoding = RotaryPositionalEmbedding(hidden_size, self.d_model)\n", + " self.transformer_blocks = torch.nn.ModuleList([qTransformerEncoder(hidden_size, num_heads) for i in range(n_layers)])\n", + " \n", + " self.final_normalization = torch.nn.LayerNorm(hidden_size)\n", + " self.final_layer = torch.nn.Linear(hidden_size, self.n_classes)\n", + "\n", + " def forward(self, x: torch.Tensor) -> torch.Tensor: \n", + " \n", + " x = self.patch_formation(x)\n", + " x += self.pos_encoding(x)\n", + " \n", + " for trans_block in self.transformer_blocks:\n", + " x = trans_block(x)\n", + " \n", + " x = self.final_normalization(x)\n", + " x = x.mean(axis=1)\n", + " x = self.final_layer(x)\n", + " \n", + " return x" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "79282b7f-cb02-4b20-a29a-51de390c2d17", + "metadata": {}, + "outputs": [], + "source": [ + "#### Example with the MNIST Dataset:\n", + "transform=transforms.Compose([\n", + " transforms.ToTensor(), 
# first, convert image to PyTorch tensor\n", + " transforms.Normalize((0.1307,), (0.3081,)) # normalize inputs\n", + " ])\n", + "dataset1 = datasets.MNIST('../data', train=True, download=True,transform=transform)\n", + "dataset2 = datasets.MNIST('../data', train=False,transform=transform)\n", + "\n", + "train_loader = torch.utils.data.DataLoader(dataset1,batch_size=16)\n", + "test_loader = torch.utils.data.DataLoader(dataset2,batch_size=1)" + ] + }, + { + "cell_type": "code", + "execution_count": 68, + "id": "8ed4f759", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "clf = QVT(in_channels=1, patch_size=7, in_dim=28, hidden_size=4, num_heads=1, n_classes=10, n_layers=2)\n", + "\n", + "opt = optim.SGD(clf.parameters(), lr=0.01, momentum=0.5)\n", + "\n", + "loss_history = []\n", + "acc_history = []" + ] + }, + { + "cell_type": "code", + "execution_count": 69, + "id": "d044b9af", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + " 0%| | 0/3750 [00:00 exception=RuntimeError('Cannot send a request, as the client has been closed.')>\n", + "Traceback (most recent call last):\n", + " File \"/opt/anaconda3/envs/baler/lib/python3.11/asyncio/tasks.py\", line 267, in __step\n", + " result = coro.send(None)\n", + " ^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/execution/jobs.py\", line 128, in result_async\n", + " await self.poll_async(timeout_sec=timeout_sec, _http_client=_http_client)\n", + " File \"/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/execution/jobs.py\", line 253, in poll_async\n", + " await self._poll_job(timeout_sec=timeout_sec, _http_client=_http_client)\n", + " File \"/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/execution/jobs.py\", line 271, in _poll_job\n", + " await poller.poll(\n", + " File \"/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/_internals/jobs.py\", line 141, in poll\n", + " return await 
self._poll(\n", + " ^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/_internals/jobs.py\", line 124, in _poll\n", + " async for json_response in poll_for(\n", + " File \"/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/_internals/async_utils.py\", line 79, in poll_for\n", + " yield await poller()\n", + " ^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/_internals/jobs.py\", line 119, in poller\n", + " raw_response = await self._request(\n", + " ^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/_internals/jobs.py\", line 85, in _request\n", + " return await client().request(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/_internals/client.py\", line 115, in wrapper\n", + " return await func(*args, **kwargs)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/_internals/client.py\", line 240, in request\n", + " response = await http_client.request(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/baler/lib/python3.11/site-packages/httpx/_client.py\", line 1585, in request\n", + " return await self.send(request, auth=auth, follow_redirects=follow_redirects)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/baler/lib/python3.11/site-packages/httpx/_client.py\", line 1661, in send\n", + " raise RuntimeError(\"Cannot send a request, as the client has been closed.\")\n", + "RuntimeError: Cannot send a request, as the client has been closed.\n", + " 0%| | 0/3750 [39:53 8\u001b[0m \u001b[43mloss\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 9\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 10\u001b[0m 
loss_history\u001b[38;5;241m.\u001b[39mappend(loss)\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/torch/_tensor.py:581\u001b[0m, in \u001b[0;36mTensor.backward\u001b[0;34m(self, gradient, retain_graph, create_graph, inputs)\u001b[0m\n\u001b[1;32m 571\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m has_torch_function_unary(\u001b[38;5;28mself\u001b[39m):\n\u001b[1;32m 572\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m handle_torch_function(\n\u001b[1;32m 573\u001b[0m Tensor\u001b[38;5;241m.\u001b[39mbackward,\n\u001b[1;32m 574\u001b[0m (\u001b[38;5;28mself\u001b[39m,),\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 579\u001b[0m inputs\u001b[38;5;241m=\u001b[39minputs,\n\u001b[1;32m 580\u001b[0m )\n\u001b[0;32m--> 581\u001b[0m \u001b[43mtorch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mautograd\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 582\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mgradient\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mretain_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcreate_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minputs\u001b[49m\n\u001b[1;32m 583\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/torch/autograd/__init__.py:347\u001b[0m, in \u001b[0;36mbackward\u001b[0;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)\u001b[0m\n\u001b[1;32m 342\u001b[0m retain_graph \u001b[38;5;241m=\u001b[39m create_graph\n\u001b[1;32m 344\u001b[0m \u001b[38;5;66;03m# The reason we repeat the same comment below is that\u001b[39;00m\n\u001b[1;32m 345\u001b[0m \u001b[38;5;66;03m# some Python versions print out the first line of a multi-line 
function\u001b[39;00m\n\u001b[1;32m 346\u001b[0m \u001b[38;5;66;03m# calls in the traceback and some print out the last line\u001b[39;00m\n\u001b[0;32m--> 347\u001b[0m \u001b[43m_engine_run_backward\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 348\u001b[0m \u001b[43m \u001b[49m\u001b[43mtensors\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 349\u001b[0m \u001b[43m \u001b[49m\u001b[43mgrad_tensors_\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 350\u001b[0m \u001b[43m \u001b[49m\u001b[43mretain_graph\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 351\u001b[0m \u001b[43m \u001b[49m\u001b[43mcreate_graph\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 352\u001b[0m \u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 353\u001b[0m \u001b[43m \u001b[49m\u001b[43mallow_unreachable\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 354\u001b[0m \u001b[43m \u001b[49m\u001b[43maccumulate_grad\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 355\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/torch/autograd/graph.py:825\u001b[0m, in \u001b[0;36m_engine_run_backward\u001b[0;34m(t_outputs, *args, **kwargs)\u001b[0m\n\u001b[1;32m 823\u001b[0m unregister_hooks \u001b[38;5;241m=\u001b[39m _register_logging_hooks_on_whole_graph(t_outputs)\n\u001b[1;32m 824\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 825\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mVariable\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_execution_engine\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun_backward\u001b[49m\u001b[43m(\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# Calls into the C++ engine to run the backward pass\u001b[39;49;00m\n\u001b[1;32m 826\u001b[0m \u001b[43m 
\u001b[49m\u001b[43mt_outputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\n\u001b[1;32m 827\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;66;03m# Calls into the C++ engine to run the backward pass\u001b[39;00m\n\u001b[1;32m 828\u001b[0m \u001b[38;5;28;01mfinally\u001b[39;00m:\n\u001b[1;32m 829\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m attach_logging_hooks:\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/torch/autograd/function.py:307\u001b[0m, in \u001b[0;36mBackwardCFunction.apply\u001b[0;34m(self, *args)\u001b[0m\n\u001b[1;32m 301\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mRuntimeError\u001b[39;00m(\n\u001b[1;32m 302\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mImplementing both \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mbackward\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m and \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mvjp\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m for a custom \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 303\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mFunction is not allowed. 
You should only implement one \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 304\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mof them.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 305\u001b[0m )\n\u001b[1;32m 306\u001b[0m user_fn \u001b[38;5;241m=\u001b[39m vjp_fn \u001b[38;5;28;01mif\u001b[39;00m vjp_fn \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m Function\u001b[38;5;241m.\u001b[39mvjp \u001b[38;5;28;01melse\u001b[39;00m backward_fn\n\u001b[0;32m--> 307\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43muser_fn\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/applications/qnn/qlayer.py:126\u001b[0m, in \u001b[0;36mQLayerFunction.backward\u001b[0;34m(ctx, grad_output)\u001b[0m\n\u001b[1;32m 123\u001b[0m grad_weights \u001b[38;5;241m=\u001b[39m einsum_weigths(grad_output, grad_weights, is_single_layer)\n\u001b[1;32m 125\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m ctx\u001b[38;5;241m.\u001b[39mneeds_input_grad[\u001b[38;5;241m0\u001b[39m]:\n\u001b[0;32m--> 126\u001b[0m grad_inputs \u001b[38;5;241m=\u001b[39m \u001b[43mctx\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mquantum_gradient\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgradient_inputs\u001b[49m\u001b[43m(\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mweights\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 127\u001b[0m grad_inputs \u001b[38;5;241m=\u001b[39m einsum_inputs(grad_output, grad_inputs, is_single_layer)\n\u001b[1;32m 129\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28many\u001b[39m(ctx\u001b[38;5;241m.\u001b[39mneeds_input_grad[i] \u001b[38;5;28;01mfor\u001b[39;00m i \u001b[38;5;129;01min\u001b[39;00m (\u001b[38;5;241m2\u001b[39m, \u001b[38;5;241m3\u001b[39m, 
\u001b[38;5;241m4\u001b[39m)):\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/applications/qnn/gradients/simple_quantum_gradient.py:156\u001b[0m, in \u001b[0;36mSimpleQuantumGradient.gradient_inputs\u001b[0;34m(self, inputs, weights, *args, **kwargs)\u001b[0m\n\u001b[1;32m 153\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mgradient_inputs\u001b[39m(\n\u001b[1;32m 154\u001b[0m \u001b[38;5;28mself\u001b[39m, inputs: Tensor, weights: Tensor, \u001b[38;5;241m*\u001b[39margs: Any, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any\n\u001b[1;32m 155\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Tensor:\n\u001b[0;32m--> 156\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_gradient\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 157\u001b[0m \u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 158\u001b[0m \u001b[43m \u001b[49m\u001b[43mweights\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 159\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mconvert_inputs_tensors_to_arguments\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 160\u001b[0m \u001b[43m \u001b[49m\u001b[43mexpected_shape\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m(\u001b[49m\n\u001b[1;32m 161\u001b[0m \u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mshape\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 162\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m2\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 163\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 164\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n", + "File 
\u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/applications/qnn/gradients/simple_quantum_gradient.py:125\u001b[0m, in \u001b[0;36mSimpleQuantumGradient._gradient\u001b[0;34m(self, inputs, weights, convert_tensors_to_arguments, expected_shape)\u001b[0m\n\u001b[1;32m 118\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_gradient\u001b[39m(\n\u001b[1;32m 119\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 120\u001b[0m inputs: Tensor,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 123\u001b[0m expected_shape: Shape,\n\u001b[1;32m 124\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Tensor:\n\u001b[0;32m--> 125\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43miter_inputs_weights\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 126\u001b[0m \u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 127\u001b[0m \u001b[43m \u001b[49m\u001b[43mweights\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 128\u001b[0m \u001b[43m \u001b[49m\u001b[43mconvert_tensors_to_arguments\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 129\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mexecute\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 130\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_post_process\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 131\u001b[0m \u001b[43m \u001b[49m\u001b[43mexpected_shape\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mexpected_shape\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 132\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 134\u001b[0m axis_to_squeeze \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m2\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m is_single_layer_circuit(weights) \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;241m3\u001b[39m\n\u001b[1;32m 135\u001b[0m result \u001b[38;5;241m=\u001b[39m 
\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_differentiate_results(result, axis_to_squeeze)\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/applications/qnn/torch_utils.py:114\u001b[0m, in \u001b[0;36miter_inputs_weights\u001b[0;34m(inputs, weights, convert_tensors_to_arguments, execute, post_process, expected_shape, requires_grad)\u001b[0m\n\u001b[1;32m 104\u001b[0m inputs_weights_shape \u001b[38;5;241m=\u001b[39m (inputs\u001b[38;5;241m.\u001b[39mshape[\u001b[38;5;241m0\u001b[39m], weights\u001b[38;5;241m.\u001b[39mshape[\u001b[38;5;241m0\u001b[39m])\n\u001b[1;32m 105\u001b[0m all_arguments \u001b[38;5;241m=\u001b[39m \u001b[38;5;28msum\u001b[39m(\n\u001b[1;32m 106\u001b[0m (\n\u001b[1;32m 107\u001b[0m convert_tensors_to_arguments(batch_item, out_weight)\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 111\u001b[0m (),\n\u001b[1;32m 112\u001b[0m )\n\u001b[0;32m--> 114\u001b[0m execution_results \u001b[38;5;241m=\u001b[39m \u001b[43mexecute\u001b[49m\u001b[43m(\u001b[49m\u001b[43mall_arguments\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 116\u001b[0m all_results \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mlist\u001b[39m(\u001b[38;5;28mmap\u001b[39m(post_process, execution_results))\n\u001b[1;32m 118\u001b[0m expected_shape \u001b[38;5;241m=\u001b[39m inputs_weights_shape \u001b[38;5;241m+\u001b[39m expected_shape \u001b[38;5;241m+\u001b[39m all_results[\u001b[38;5;241m0\u001b[39m]\u001b[38;5;241m.\u001b[39mshape\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/execution/qnn.py:77\u001b[0m, in \u001b[0;36mexecute_qnn\u001b[0;34m(quantum_program, arguments, observable)\u001b[0m\n\u001b[1;32m 75\u001b[0m result: ResultsCollection \u001b[38;5;241m=\u001b[39m []\n\u001b[1;32m 76\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m chunk \u001b[38;5;129;01min\u001b[39;00m more_itertools\u001b[38;5;241m.\u001b[39mchunked(arguments, _MAX_ARGUMENTS_SIZE):\n\u001b[0;32m---> 77\u001b[0m chunk_result 
\u001b[38;5;241m=\u001b[39m \u001b[43mexecute_function\u001b[49m\u001b[43m(\u001b[49m\u001b[43marguments\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mchunk\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 78\u001b[0m result\u001b[38;5;241m.\u001b[39mextend(chunk_result)\n\u001b[1;32m 79\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m result\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/execution/qnn.py:53\u001b[0m, in \u001b[0;36m_execute_qnn_sample\u001b[0;34m(session, arguments)\u001b[0m\n\u001b[1;32m 43\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_execute_qnn_sample\u001b[39m(\n\u001b[1;32m 44\u001b[0m session: ExecutionSession,\n\u001b[1;32m 45\u001b[0m arguments: \u001b[38;5;28mlist\u001b[39m[Arguments],\n\u001b[1;32m 46\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m ResultsCollection:\n\u001b[1;32m 47\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m [\n\u001b[1;32m 48\u001b[0m TaggedExecutionDetails(\n\u001b[1;32m 49\u001b[0m name\u001b[38;5;241m=\u001b[39mDEFAULT_RESULT_NAME,\n\u001b[1;32m 50\u001b[0m value\u001b[38;5;241m=\u001b[39mresult,\n\u001b[1;32m 51\u001b[0m value_type\u001b[38;5;241m=\u001b[39mSavedResultValueType\u001b[38;5;241m.\u001b[39mExecutionDetails,\n\u001b[1;32m 52\u001b[0m )\n\u001b[0;32m---> 53\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m result \u001b[38;5;129;01min\u001b[39;00m \u001b[43msession\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbatch_sample\u001b[49m\u001b[43m(\u001b[49m\u001b[43marguments\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 54\u001b[0m ]\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/execution/execution_session.py:212\u001b[0m, in \u001b[0;36mExecutionSession.batch_sample\u001b[0;34m(self, parameters)\u001b[0m\n\u001b[1;32m 202\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mbatch_sample\u001b[39m(\u001b[38;5;28mself\u001b[39m, parameters: 
\u001b[38;5;28mlist\u001b[39m[ExecutionParams]) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m \u001b[38;5;28mlist\u001b[39m[ExecutionDetails]:\n\u001b[1;32m 203\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 204\u001b[0m \u001b[38;5;124;03m Samples the quantum program multiple times with the given parameters for each iteration. The number of samples is determined by the length of the parameters list.\u001b[39;00m\n\u001b[1;32m 205\u001b[0m \n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 210\u001b[0m \u001b[38;5;124;03m List[ExecutionDetails]: The results of all the sampling iterations.\u001b[39;00m\n\u001b[1;32m 211\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m--> 212\u001b[0m job \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msubmit_batch_sample\u001b[49m\u001b[43m(\u001b[49m\u001b[43mparameters\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mparameters\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 213\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m job\u001b[38;5;241m.\u001b[39mget_batch_sample_result(_http_client\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_async_client)\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/execution/execution_session.py:231\u001b[0m, in \u001b[0;36mExecutionSession.submit_batch_sample\u001b[0;34m(self, parameters)\u001b[0m\n\u001b[1;32m 216\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 217\u001b[0m \u001b[38;5;124;03mInitiates an execution job with the `batch_sample` primitive.\u001b[39;00m\n\u001b[1;32m 218\u001b[0m \n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 226\u001b[0m \u001b[38;5;124;03m The execution job.\u001b[39;00m\n\u001b[1;32m 227\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 228\u001b[0m execution_primitives_input \u001b[38;5;241m=\u001b[39m 
PrimitivesInput(\n\u001b[1;32m 229\u001b[0m sample\u001b[38;5;241m=\u001b[39m[parse_params(params) \u001b[38;5;28;01mfor\u001b[39;00m params \u001b[38;5;129;01min\u001b[39;00m parameters]\n\u001b[1;32m 230\u001b[0m )\n\u001b[0;32m--> 231\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_execute\u001b[49m\u001b[43m(\u001b[49m\u001b[43mexecution_primitives_input\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/execution/execution_session.py:147\u001b[0m, in \u001b[0;36mExecutionSession._execute\u001b[0;34m(self, primitives_input)\u001b[0m\n\u001b[1;32m 145\u001b[0m primitives_input\u001b[38;5;241m.\u001b[39mrandom_seed \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_random_seed\n\u001b[1;32m 146\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_random_seed \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_rng\u001b[38;5;241m.\u001b[39mrandint(\u001b[38;5;241m0\u001b[39m, \u001b[38;5;241m2\u001b[39m\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m32\u001b[39m \u001b[38;5;241m-\u001b[39m \u001b[38;5;241m1\u001b[39m)\n\u001b[0;32m--> 147\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43masync_utils\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 148\u001b[0m \u001b[43m \u001b[49m\u001b[43mApiWrapper\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcall_create_session_job\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 149\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_session_id\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mprimitives_input\u001b[49m\u001b[43m,\u001b[49m\u001b[43m 
\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_async_client\u001b[49m\n\u001b[1;32m 150\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 151\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 152\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m ExecutionJob(details\u001b[38;5;241m=\u001b[39mresult)\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/_internals/async_utils.py:37\u001b[0m, in \u001b[0;36mrun\u001b[0;34m(coro)\u001b[0m\n\u001b[1;32m 32\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mrun\u001b[39m(coro: Awaitable[T]) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m T:\n\u001b[1;32m 33\u001b[0m \u001b[38;5;66;03m# Use this function instead of asyncio.run, since it ALWAYS\u001b[39;00m\n\u001b[1;32m 34\u001b[0m \u001b[38;5;66;03m# creates a new event loop and clears the thread event loop.\u001b[39;00m\n\u001b[1;32m 35\u001b[0m \u001b[38;5;66;03m# Never use asyncio.run in library code.\u001b[39;00m\n\u001b[1;32m 36\u001b[0m loop \u001b[38;5;241m=\u001b[39m get_event_loop()\n\u001b[0;32m---> 37\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mloop\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun_until_complete\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcoro\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/nest_asyncio.py:98\u001b[0m, in \u001b[0;36m_patch_loop..run_until_complete\u001b[0;34m(self, future)\u001b[0m\n\u001b[1;32m 95\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m f\u001b[38;5;241m.\u001b[39mdone():\n\u001b[1;32m 96\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mRuntimeError\u001b[39;00m(\n\u001b[1;32m 97\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mEvent loop stopped before Future completed.\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[0;32m---> 98\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m 
\u001b[43mf\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mresult\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/asyncio/futures.py:203\u001b[0m, in \u001b[0;36mFuture.result\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 201\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m__log_traceback \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mFalse\u001b[39;00m\n\u001b[1;32m 202\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_exception \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m--> 203\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_exception\u001b[38;5;241m.\u001b[39mwith_traceback(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_exception_tb)\n\u001b[1;32m 204\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_result\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/asyncio/tasks.py:267\u001b[0m, in \u001b[0;36mTask.__step\u001b[0;34m(***failed resolving arguments***)\u001b[0m\n\u001b[1;32m 263\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 264\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m exc \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 265\u001b[0m \u001b[38;5;66;03m# We use the `send` method directly, because coroutines\u001b[39;00m\n\u001b[1;32m 266\u001b[0m \u001b[38;5;66;03m# don't have `__iter__` and `__next__` methods.\u001b[39;00m\n\u001b[0;32m--> 267\u001b[0m result \u001b[38;5;241m=\u001b[39m coro\u001b[38;5;241m.\u001b[39msend(\u001b[38;5;28;01mNone\u001b[39;00m)\n\u001b[1;32m 268\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 269\u001b[0m result \u001b[38;5;241m=\u001b[39m coro\u001b[38;5;241m.\u001b[39mthrow(exc)\n", + "File 
\u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/_internals/api_wrapper.py:147\u001b[0m, in \u001b[0;36mApiWrapper.call_create_session_job\u001b[0;34m(cls, session_id, primitives_input, http_client)\u001b[0m\n\u001b[1;32m 140\u001b[0m \u001b[38;5;129m@classmethod\u001b[39m\n\u001b[1;32m 141\u001b[0m \u001b[38;5;28;01masync\u001b[39;00m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mcall_create_session_job\u001b[39m(\n\u001b[1;32m 142\u001b[0m \u001b[38;5;28mcls\u001b[39m,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 145\u001b[0m http_client: Optional[httpx\u001b[38;5;241m.\u001b[39mAsyncClient] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 146\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m execution_request\u001b[38;5;241m.\u001b[39mExecutionJobDetails:\n\u001b[0;32m--> 147\u001b[0m data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mawait\u001b[39;00m \u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m_call_task_pydantic(\n\u001b[1;32m 148\u001b[0m http_method\u001b[38;5;241m=\u001b[39mHTTPMethod\u001b[38;5;241m.\u001b[39mPOST,\n\u001b[1;32m 149\u001b[0m url\u001b[38;5;241m=\u001b[39mroutes\u001b[38;5;241m.\u001b[39mEXECUTION_SESSIONS_PREFIX \u001b[38;5;241m+\u001b[39m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m/\u001b[39m\u001b[38;5;132;01m{\u001b[39;00msession_id\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 150\u001b[0m model\u001b[38;5;241m=\u001b[39mprimitives_input,\n\u001b[1;32m 151\u001b[0m http_client\u001b[38;5;241m=\u001b[39mhttp_client,\n\u001b[1;32m 152\u001b[0m )\n\u001b[1;32m 153\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m execution_request\u001b[38;5;241m.\u001b[39mExecutionJobDetails\u001b[38;5;241m.\u001b[39mmodel_validate(data)\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/_internals/api_wrapper.py:78\u001b[0m, in 
\u001b[0;36mApiWrapper._call_task_pydantic\u001b[0;34m(cls, http_method, url, model, use_versioned_url, http_client, exclude)\u001b[0m\n\u001b[1;32m 64\u001b[0m \u001b[38;5;129m@classmethod\u001b[39m\n\u001b[1;32m 65\u001b[0m \u001b[38;5;28;01masync\u001b[39;00m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_call_task_pydantic\u001b[39m(\n\u001b[1;32m 66\u001b[0m \u001b[38;5;28mcls\u001b[39m,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 75\u001b[0m \u001b[38;5;66;03m# This was added because JSON serializer doesn't serialize complex type, and pydantic does.\u001b[39;00m\n\u001b[1;32m 76\u001b[0m \u001b[38;5;66;03m# We should add support for smarter json serialization.\u001b[39;00m\n\u001b[1;32m 77\u001b[0m body \u001b[38;5;241m=\u001b[39m json\u001b[38;5;241m.\u001b[39mloads(model\u001b[38;5;241m.\u001b[39mmodel_dump_json(exclude\u001b[38;5;241m=\u001b[39mexclude))\n\u001b[0;32m---> 78\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;01mawait\u001b[39;00m \u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m_call_task(\n\u001b[1;32m 79\u001b[0m http_method,\n\u001b[1;32m 80\u001b[0m url,\n\u001b[1;32m 81\u001b[0m body,\n\u001b[1;32m 82\u001b[0m use_versioned_url\u001b[38;5;241m=\u001b[39muse_versioned_url,\n\u001b[1;32m 83\u001b[0m http_client\u001b[38;5;241m=\u001b[39mhttp_client,\n\u001b[1;32m 84\u001b[0m )\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/_internals/api_wrapper.py:98\u001b[0m, in \u001b[0;36mApiWrapper._call_task\u001b[0;34m(cls, http_method, url, body, params, use_versioned_url, headers, allow_none, http_client)\u001b[0m\n\u001b[1;32m 86\u001b[0m \u001b[38;5;129m@classmethod\u001b[39m\n\u001b[1;32m 87\u001b[0m \u001b[38;5;28;01masync\u001b[39;00m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_call_task\u001b[39m(\n\u001b[1;32m 88\u001b[0m \u001b[38;5;28mcls\u001b[39m,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 96\u001b[0m http_client: Optional[httpx\u001b[38;5;241m.\u001b[39mAsyncClient] 
\u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 97\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m \u001b[38;5;28mdict\u001b[39m:\n\u001b[0;32m---> 98\u001b[0m res: Any \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mawait\u001b[39;00m client()\u001b[38;5;241m.\u001b[39mcall_api(\n\u001b[1;32m 99\u001b[0m http_method\u001b[38;5;241m=\u001b[39mhttp_method,\n\u001b[1;32m 100\u001b[0m url\u001b[38;5;241m=\u001b[39murl,\n\u001b[1;32m 101\u001b[0m body\u001b[38;5;241m=\u001b[39mbody,\n\u001b[1;32m 102\u001b[0m headers\u001b[38;5;241m=\u001b[39mheaders,\n\u001b[1;32m 103\u001b[0m params\u001b[38;5;241m=\u001b[39mparams,\n\u001b[1;32m 104\u001b[0m use_versioned_url\u001b[38;5;241m=\u001b[39muse_versioned_url,\n\u001b[1;32m 105\u001b[0m http_client\u001b[38;5;241m=\u001b[39mhttp_client,\n\u001b[1;32m 106\u001b[0m )\n\u001b[1;32m 107\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m allow_none \u001b[38;5;129;01mand\u001b[39;00m res \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 108\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m {}\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/_internals/client.py:277\u001b[0m, in \u001b[0;36mClient.call_api\u001b[0;34m(self, http_method, url, body, params, use_versioned_url, headers, http_client)\u001b[0m\n\u001b[1;32m 275\u001b[0m url \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmake_versioned_url(url)\n\u001b[1;32m 276\u001b[0m \u001b[38;5;28;01masync\u001b[39;00m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39muse_client_or_create(http_client) \u001b[38;5;28;01mas\u001b[39;00m async_client:\n\u001b[0;32m--> 277\u001b[0m response \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mawait\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mrequest(\n\u001b[1;32m 278\u001b[0m 
http_client\u001b[38;5;241m=\u001b[39masync_client,\n\u001b[1;32m 279\u001b[0m method\u001b[38;5;241m=\u001b[39mhttp_method,\n\u001b[1;32m 280\u001b[0m url\u001b[38;5;241m=\u001b[39murl,\n\u001b[1;32m 281\u001b[0m json\u001b[38;5;241m=\u001b[39mbody,\n\u001b[1;32m 282\u001b[0m params\u001b[38;5;241m=\u001b[39mparams,\n\u001b[1;32m 283\u001b[0m headers\u001b[38;5;241m=\u001b[39mheaders,\n\u001b[1;32m 284\u001b[0m )\n\u001b[1;32m 285\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m response\u001b[38;5;241m.\u001b[39mjson()\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/_internals/client.py:115\u001b[0m, in \u001b[0;36mtry_again_on_failure..wrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 113\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(_RETRY_COUNT):\n\u001b[1;32m 114\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 115\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;01mawait\u001b[39;00m func(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[1;32m 116\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m ClassiqExpiredTokenError:\n\u001b[1;32m 117\u001b[0m _logger\u001b[38;5;241m.\u001b[39minfo(\n\u001b[1;32m 118\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mToken expired when trying to \u001b[39m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[38;5;124m with args \u001b[39m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[38;5;124m \u001b[39m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 119\u001b[0m func,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 122\u001b[0m exc_info\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m,\n\u001b[1;32m 123\u001b[0m )\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/_internals/client.py:254\u001b[0m, in \u001b[0;36mClient.request\u001b[0;34m(self, http_client, method, url, json, 
params, headers)\u001b[0m\n\u001b[1;32m 240\u001b[0m response \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mawait\u001b[39;00m http_client\u001b[38;5;241m.\u001b[39mrequest(\n\u001b[1;32m 241\u001b[0m method\u001b[38;5;241m=\u001b[39mmethod,\n\u001b[1;32m 242\u001b[0m url\u001b[38;5;241m=\u001b[39murl,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 245\u001b[0m headers\u001b[38;5;241m=\u001b[39mheaders,\n\u001b[1;32m 246\u001b[0m )\n\u001b[1;32m 247\u001b[0m _logger\u001b[38;5;241m.\u001b[39mdebug(\n\u001b[1;32m 248\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mHTTP response: \u001b[39m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[38;5;124m \u001b[39m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[38;5;124m \u001b[39m\u001b[38;5;132;01m%d\u001b[39;00m\u001b[38;5;124m (\u001b[39m\u001b[38;5;132;01m%.0f\u001b[39;00m\u001b[38;5;124mms)\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 249\u001b[0m method\u001b[38;5;241m.\u001b[39mupper(),\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 252\u001b[0m (time\u001b[38;5;241m.\u001b[39mmonotonic() \u001b[38;5;241m-\u001b[39m start_time) \u001b[38;5;241m*\u001b[39m \u001b[38;5;241m1000\u001b[39m,\n\u001b[1;32m 253\u001b[0m )\n\u001b[0;32m--> 254\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mhandle_response\u001b[49m\u001b[43m(\u001b[49m\u001b[43mresponse\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 255\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m response\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/_internals/client.py:205\u001b[0m, in \u001b[0;36mClient.handle_response\u001b[0;34m(self, response)\u001b[0m\n\u001b[1;32m 203\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_handle_warnings(response)\n\u001b[1;32m 204\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m response\u001b[38;5;241m.\u001b[39mis_error:\n\u001b[0;32m--> 205\u001b[0m 
\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_handle_error\u001b[49m\u001b[43m(\u001b[49m\u001b[43mresponse\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 206\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_handle_success(response)\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/_internals/client.py:224\u001b[0m, in \u001b[0;36mClient._handle_error\u001b[0;34m(response)\u001b[0m\n\u001b[1;32m 222\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m: \u001b[38;5;66;03m# noqa: S110\u001b[39;00m\n\u001b[1;32m 223\u001b[0m \u001b[38;5;28;01mpass\u001b[39;00m\n\u001b[0;32m--> 224\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m ClassiqAPIError(message, response\u001b[38;5;241m.\u001b[39mstatus_code)\n", + "\u001b[0;31mClassiqAPIError\u001b[0m: Call to API failed with code 500: Internal error occurred. Please contact Classiq support.\n\nError identifier: E19D7F0AC-1EBC-4508-89F6-8D13FEABC06E\nIf you need further assistance, please reach out on our Community Slack channel at: https://short.classiq.io/join-slack or open a support ticket at: https://classiq-community.freshdesk.com/support/tickets/new" + ] + } + ], + "source": [ + "for data, label in tqdm.tqdm(train_loader):\n", + " # forward pass, calculate loss and backprop!\n", + " opt.zero_grad()\n", + "\n", + " preds = clf(data)\n", + " \n", + " loss = torch.nn.functional.nll_loss(preds, label)\n", + " loss.backward()\n", + " try:\n", + " loss_history.append(loss)\n", + " print(loss)\n", + " except Exception as e:\n", + " print(e)\n", + " opt.step()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dc61ea1e-0cfa-4498-b5ec-eab29b102750", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "\n", + "def train():\n", + " clf.train() # set model in training mode (need this because of dropout)\n", + " \n", + " # dataset API gives us pythonic batching \n", + " for data, label in 
tqdm.tqdm(train_loader):\n", + " # forward pass, calculate loss and backprop!\n", + " opt.zero_grad()\n", + "\n", + " preds = clf(data)\n", + " \n", + " loss = torch.nn.functional.nll_loss(preds, label)\n", + " loss.backward()\n", + " try:\n", + " loss_history.append(loss)\n", + " print(loss)\n", + " except Exception as e:\n", + " print(e)\n", + " opt.step()\n", + " return loss_history\n", + "\n", + "def test():\n", + " clf.eval() # set model in inference mode (need this because of dropout)\n", + " test_loss = 0\n", + " correct = 0\n", + " \n", + " for data, target in tqdm.tqdm(test_loader):\n", + " \n", + " output = clf(data)\n", + " test_loss += torch.nn.functional.nll_loss(output, target).item()\n", + " pred = output.argmax() # get the index of the max log-probability\n", + " correct += pred.eq(target).cpu().sum()\n", + "\n", + " test_loss = test_loss\n", + " test_loss /= len(test_loader) # loss function already averages over batch size\n", + " accuracy = 100. * correct / len(test_loader.dataset)\n", + " acc_history.append(accuracy)\n", + " print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n", + " test_loss, correct, len(test_loader.dataset),\n", + " accuracy))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c6c0281a-c8e9-46c8-adbf-dfdce58ddc61", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + " 0%| | 0/3750 [05:44 2\u001b[0m \u001b[43mtrain\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n", + "Cell \u001b[0;32mIn[51], line 19\u001b[0m, in \u001b[0;36mtrain\u001b[0;34m()\u001b[0m\n\u001b[1;32m 16\u001b[0m preds \u001b[38;5;241m=\u001b[39m clf(data)\n\u001b[1;32m 18\u001b[0m loss \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mnn\u001b[38;5;241m.\u001b[39mfunctional\u001b[38;5;241m.\u001b[39mnll_loss(preds, label)\n\u001b[0;32m---> 19\u001b[0m 
\u001b[43mloss\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 20\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 21\u001b[0m loss_history\u001b[38;5;241m.\u001b[39mappend(loss)\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/torch/_tensor.py:581\u001b[0m, in \u001b[0;36mTensor.backward\u001b[0;34m(self, gradient, retain_graph, create_graph, inputs)\u001b[0m\n\u001b[1;32m 571\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m has_torch_function_unary(\u001b[38;5;28mself\u001b[39m):\n\u001b[1;32m 572\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m handle_torch_function(\n\u001b[1;32m 573\u001b[0m Tensor\u001b[38;5;241m.\u001b[39mbackward,\n\u001b[1;32m 574\u001b[0m (\u001b[38;5;28mself\u001b[39m,),\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 579\u001b[0m inputs\u001b[38;5;241m=\u001b[39minputs,\n\u001b[1;32m 580\u001b[0m )\n\u001b[0;32m--> 581\u001b[0m \u001b[43mtorch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mautograd\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 582\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mgradient\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mretain_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcreate_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minputs\u001b[49m\n\u001b[1;32m 583\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/torch/autograd/__init__.py:347\u001b[0m, in \u001b[0;36mbackward\u001b[0;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)\u001b[0m\n\u001b[1;32m 342\u001b[0m retain_graph \u001b[38;5;241m=\u001b[39m create_graph\n\u001b[1;32m 344\u001b[0m 
\u001b[38;5;66;03m# The reason we repeat the same comment below is that\u001b[39;00m\n\u001b[1;32m 345\u001b[0m \u001b[38;5;66;03m# some Python versions print out the first line of a multi-line function\u001b[39;00m\n\u001b[1;32m 346\u001b[0m \u001b[38;5;66;03m# calls in the traceback and some print out the last line\u001b[39;00m\n\u001b[0;32m--> 347\u001b[0m \u001b[43m_engine_run_backward\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 348\u001b[0m \u001b[43m \u001b[49m\u001b[43mtensors\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 349\u001b[0m \u001b[43m \u001b[49m\u001b[43mgrad_tensors_\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 350\u001b[0m \u001b[43m \u001b[49m\u001b[43mretain_graph\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 351\u001b[0m \u001b[43m \u001b[49m\u001b[43mcreate_graph\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 352\u001b[0m \u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 353\u001b[0m \u001b[43m \u001b[49m\u001b[43mallow_unreachable\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 354\u001b[0m \u001b[43m \u001b[49m\u001b[43maccumulate_grad\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 355\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/torch/autograd/graph.py:825\u001b[0m, in \u001b[0;36m_engine_run_backward\u001b[0;34m(t_outputs, *args, **kwargs)\u001b[0m\n\u001b[1;32m 823\u001b[0m unregister_hooks \u001b[38;5;241m=\u001b[39m _register_logging_hooks_on_whole_graph(t_outputs)\n\u001b[1;32m 824\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 825\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m 
\u001b[43mVariable\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_execution_engine\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun_backward\u001b[49m\u001b[43m(\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# Calls into the C++ engine to run the backward pass\u001b[39;49;00m\n\u001b[1;32m 826\u001b[0m \u001b[43m \u001b[49m\u001b[43mt_outputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\n\u001b[1;32m 827\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;66;03m# Calls into the C++ engine to run the backward pass\u001b[39;00m\n\u001b[1;32m 828\u001b[0m \u001b[38;5;28;01mfinally\u001b[39;00m:\n\u001b[1;32m 829\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m attach_logging_hooks:\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/torch/autograd/function.py:307\u001b[0m, in \u001b[0;36mBackwardCFunction.apply\u001b[0;34m(self, *args)\u001b[0m\n\u001b[1;32m 301\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mRuntimeError\u001b[39;00m(\n\u001b[1;32m 302\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mImplementing both \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mbackward\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m and \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mvjp\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m for a custom \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 303\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mFunction is not allowed. 
You should only implement one \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 304\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mof them.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 305\u001b[0m )\n\u001b[1;32m 306\u001b[0m user_fn \u001b[38;5;241m=\u001b[39m vjp_fn \u001b[38;5;28;01mif\u001b[39;00m vjp_fn \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m Function\u001b[38;5;241m.\u001b[39mvjp \u001b[38;5;28;01melse\u001b[39;00m backward_fn\n\u001b[0;32m--> 307\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43muser_fn\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/applications/qnn/qlayer.py:122\u001b[0m, in \u001b[0;36mQLayerFunction.backward\u001b[0;34m(ctx, grad_output)\u001b[0m\n\u001b[1;32m 119\u001b[0m is_single_layer \u001b[38;5;241m=\u001b[39m is_single_layer_circuit(weights)\n\u001b[1;32m 121\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m ctx\u001b[38;5;241m.\u001b[39mneeds_input_grad[\u001b[38;5;241m1\u001b[39m]:\n\u001b[0;32m--> 122\u001b[0m grad_weights \u001b[38;5;241m=\u001b[39m \u001b[43mctx\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mquantum_gradient\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgradient_weights\u001b[49m\u001b[43m(\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mweights\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 123\u001b[0m grad_weights \u001b[38;5;241m=\u001b[39m einsum_weigths(grad_output, grad_weights, is_single_layer)\n\u001b[1;32m 125\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m ctx\u001b[38;5;241m.\u001b[39mneeds_input_grad[\u001b[38;5;241m0\u001b[39m]:\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/applications/qnn/gradients/simple_quantum_gradient.py:143\u001b[0m, in 
\u001b[0;36mSimpleQuantumGradient.gradient_weights\u001b[0;34m(self, inputs, weights, *args, **kwargs)\u001b[0m\n\u001b[1;32m 140\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mgradient_weights\u001b[39m(\n\u001b[1;32m 141\u001b[0m \u001b[38;5;28mself\u001b[39m, inputs: Tensor, weights: Tensor, \u001b[38;5;241m*\u001b[39margs: Any, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any\n\u001b[1;32m 142\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Tensor:\n\u001b[0;32m--> 143\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_gradient\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 144\u001b[0m \u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 145\u001b[0m \u001b[43m \u001b[49m\u001b[43mweights\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 146\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mconvert_weights_tensors_to_arguments\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 147\u001b[0m \u001b[43m \u001b[49m\u001b[43mexpected_shape\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m(\u001b[49m\n\u001b[1;32m 148\u001b[0m \u001b[43m \u001b[49m\u001b[43mweights\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mshape\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m-\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 149\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m2\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 150\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 151\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/applications/qnn/gradients/simple_quantum_gradient.py:125\u001b[0m, in \u001b[0;36mSimpleQuantumGradient._gradient\u001b[0;34m(self, inputs, weights, 
convert_tensors_to_arguments, expected_shape)\u001b[0m\n\u001b[1;32m 118\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_gradient\u001b[39m(\n\u001b[1;32m 119\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 120\u001b[0m inputs: Tensor,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 123\u001b[0m expected_shape: Shape,\n\u001b[1;32m 124\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Tensor:\n\u001b[0;32m--> 125\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43miter_inputs_weights\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 126\u001b[0m \u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 127\u001b[0m \u001b[43m \u001b[49m\u001b[43mweights\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 128\u001b[0m \u001b[43m \u001b[49m\u001b[43mconvert_tensors_to_arguments\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 129\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mexecute\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 130\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_post_process\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 131\u001b[0m \u001b[43m \u001b[49m\u001b[43mexpected_shape\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mexpected_shape\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 132\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 134\u001b[0m axis_to_squeeze \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m2\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m is_single_layer_circuit(weights) \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;241m3\u001b[39m\n\u001b[1;32m 135\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_differentiate_results(result, axis_to_squeeze)\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/applications/qnn/torch_utils.py:114\u001b[0m, in 
\u001b[0;36miter_inputs_weights\u001b[0;34m(inputs, weights, convert_tensors_to_arguments, execute, post_process, expected_shape, requires_grad)\u001b[0m\n\u001b[1;32m 104\u001b[0m inputs_weights_shape \u001b[38;5;241m=\u001b[39m (inputs\u001b[38;5;241m.\u001b[39mshape[\u001b[38;5;241m0\u001b[39m], weights\u001b[38;5;241m.\u001b[39mshape[\u001b[38;5;241m0\u001b[39m])\n\u001b[1;32m 105\u001b[0m all_arguments \u001b[38;5;241m=\u001b[39m \u001b[38;5;28msum\u001b[39m(\n\u001b[1;32m 106\u001b[0m (\n\u001b[1;32m 107\u001b[0m convert_tensors_to_arguments(batch_item, out_weight)\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 111\u001b[0m (),\n\u001b[1;32m 112\u001b[0m )\n\u001b[0;32m--> 114\u001b[0m execution_results \u001b[38;5;241m=\u001b[39m \u001b[43mexecute\u001b[49m\u001b[43m(\u001b[49m\u001b[43mall_arguments\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 116\u001b[0m all_results \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mlist\u001b[39m(\u001b[38;5;28mmap\u001b[39m(post_process, execution_results))\n\u001b[1;32m 118\u001b[0m expected_shape \u001b[38;5;241m=\u001b[39m inputs_weights_shape \u001b[38;5;241m+\u001b[39m expected_shape \u001b[38;5;241m+\u001b[39m all_results[\u001b[38;5;241m0\u001b[39m]\u001b[38;5;241m.\u001b[39mshape\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/execution/qnn.py:77\u001b[0m, in \u001b[0;36mexecute_qnn\u001b[0;34m(quantum_program, arguments, observable)\u001b[0m\n\u001b[1;32m 75\u001b[0m result: ResultsCollection \u001b[38;5;241m=\u001b[39m []\n\u001b[1;32m 76\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m chunk \u001b[38;5;129;01min\u001b[39;00m more_itertools\u001b[38;5;241m.\u001b[39mchunked(arguments, _MAX_ARGUMENTS_SIZE):\n\u001b[0;32m---> 77\u001b[0m chunk_result \u001b[38;5;241m=\u001b[39m \u001b[43mexecute_function\u001b[49m\u001b[43m(\u001b[49m\u001b[43marguments\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mchunk\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 78\u001b[0m 
result\u001b[38;5;241m.\u001b[39mextend(chunk_result)\n\u001b[1;32m 79\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m result\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/execution/qnn.py:53\u001b[0m, in \u001b[0;36m_execute_qnn_sample\u001b[0;34m(session, arguments)\u001b[0m\n\u001b[1;32m 43\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_execute_qnn_sample\u001b[39m(\n\u001b[1;32m 44\u001b[0m session: ExecutionSession,\n\u001b[1;32m 45\u001b[0m arguments: \u001b[38;5;28mlist\u001b[39m[Arguments],\n\u001b[1;32m 46\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m ResultsCollection:\n\u001b[1;32m 47\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m [\n\u001b[1;32m 48\u001b[0m TaggedExecutionDetails(\n\u001b[1;32m 49\u001b[0m name\u001b[38;5;241m=\u001b[39mDEFAULT_RESULT_NAME,\n\u001b[1;32m 50\u001b[0m value\u001b[38;5;241m=\u001b[39mresult,\n\u001b[1;32m 51\u001b[0m value_type\u001b[38;5;241m=\u001b[39mSavedResultValueType\u001b[38;5;241m.\u001b[39mExecutionDetails,\n\u001b[1;32m 52\u001b[0m )\n\u001b[0;32m---> 53\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m result \u001b[38;5;129;01min\u001b[39;00m \u001b[43msession\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbatch_sample\u001b[49m\u001b[43m(\u001b[49m\u001b[43marguments\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 54\u001b[0m ]\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/execution/execution_session.py:213\u001b[0m, in \u001b[0;36mExecutionSession.batch_sample\u001b[0;34m(self, parameters)\u001b[0m\n\u001b[1;32m 203\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 204\u001b[0m \u001b[38;5;124;03mSamples the quantum program multiple times with the given parameters for each iteration. 
The number of samples is determined by the length of the parameters list.\u001b[39;00m\n\u001b[1;32m 205\u001b[0m \n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 210\u001b[0m \u001b[38;5;124;03m List[ExecutionDetails]: The results of all the sampling iterations.\u001b[39;00m\n\u001b[1;32m 211\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 212\u001b[0m job \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msubmit_batch_sample(parameters\u001b[38;5;241m=\u001b[39mparameters)\n\u001b[0;32m--> 213\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mjob\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_batch_sample_result\u001b[49m\u001b[43m(\u001b[49m\u001b[43m_http_client\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_async_client\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/execution/jobs.py:186\u001b[0m, in \u001b[0;36mExecutionJob.get_batch_sample_result\u001b[0;34m(self, _http_client)\u001b[0m\n\u001b[1;32m 173\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mget_batch_sample_result\u001b[39m(\n\u001b[1;32m 174\u001b[0m \u001b[38;5;28mself\u001b[39m, _http_client: Optional[httpx\u001b[38;5;241m.\u001b[39mAsyncClient] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m 175\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m \u001b[38;5;28mlist\u001b[39m[ExecutionDetails]:\n\u001b[1;32m 176\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 177\u001b[0m \u001b[38;5;124;03m Returns the job's result as a single batch_sample result after validation. 
If the result is not yet available, waits for it.\u001b[39;00m\n\u001b[1;32m 178\u001b[0m \n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 184\u001b[0m \u001b[38;5;124;03m ClassiqAPIError: In case the job has failed.\u001b[39;00m\n\u001b[1;32m 185\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m--> 186\u001b[0m results \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mresult\u001b[49m\u001b[43m(\u001b[49m\u001b[43m_http_client\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_http_client\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 187\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(results) \u001b[38;5;241m!=\u001b[39m \u001b[38;5;241m1\u001b[39m:\n\u001b[1;32m 188\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m ClassiqExecutionResultError(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mbatch_sample\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/_internals/async_utils.py:43\u001b[0m, in \u001b[0;36msyncify_function..async_wrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 41\u001b[0m \u001b[38;5;129m@functools\u001b[39m\u001b[38;5;241m.\u001b[39mwraps(async_func)\n\u001b[1;32m 42\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21masync_wrapper\u001b[39m(\u001b[38;5;241m*\u001b[39margs: Any, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m T:\n\u001b[0;32m---> 43\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\u001b[43masync_func\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n", + "File 
\u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/_internals/async_utils.py:37\u001b[0m, in \u001b[0;36mrun\u001b[0;34m(coro)\u001b[0m\n\u001b[1;32m 32\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mrun\u001b[39m(coro: Awaitable[T]) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m T:\n\u001b[1;32m 33\u001b[0m \u001b[38;5;66;03m# Use this function instead of asyncio.run, since it ALWAYS\u001b[39;00m\n\u001b[1;32m 34\u001b[0m \u001b[38;5;66;03m# creates a new event loop and clears the thread event loop.\u001b[39;00m\n\u001b[1;32m 35\u001b[0m \u001b[38;5;66;03m# Never use asyncio.run in library code.\u001b[39;00m\n\u001b[1;32m 36\u001b[0m loop \u001b[38;5;241m=\u001b[39m get_event_loop()\n\u001b[0;32m---> 37\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mloop\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun_until_complete\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcoro\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/nest_asyncio.py:92\u001b[0m, in \u001b[0;36m_patch_loop..run_until_complete\u001b[0;34m(self, future)\u001b[0m\n\u001b[1;32m 90\u001b[0m f\u001b[38;5;241m.\u001b[39m_log_destroy_pending \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mFalse\u001b[39;00m\n\u001b[1;32m 91\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m f\u001b[38;5;241m.\u001b[39mdone():\n\u001b[0;32m---> 92\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_run_once\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 93\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_stopping:\n\u001b[1;32m 94\u001b[0m \u001b[38;5;28;01mbreak\u001b[39;00m\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/site-packages/nest_asyncio.py:115\u001b[0m, in \u001b[0;36m_patch_loop.._run_once\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 108\u001b[0m 
heappop(scheduled)\n\u001b[1;32m 110\u001b[0m timeout \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m 111\u001b[0m \u001b[38;5;241m0\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m ready \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_stopping\n\u001b[1;32m 112\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mmin\u001b[39m(\u001b[38;5;28mmax\u001b[39m(\n\u001b[1;32m 113\u001b[0m scheduled[\u001b[38;5;241m0\u001b[39m]\u001b[38;5;241m.\u001b[39m_when \u001b[38;5;241m-\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtime(), \u001b[38;5;241m0\u001b[39m), \u001b[38;5;241m86400\u001b[39m) \u001b[38;5;28;01mif\u001b[39;00m scheduled\n\u001b[1;32m 114\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m)\n\u001b[0;32m--> 115\u001b[0m event_list \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_selector\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mselect\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtimeout\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 116\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_process_events(event_list)\n\u001b[1;32m 118\u001b[0m end_time \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtime() \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_clock_resolution\n", + "File \u001b[0;32m/opt/anaconda3/envs/baler/lib/python3.11/selectors.py:561\u001b[0m, in \u001b[0;36mKqueueSelector.select\u001b[0;34m(self, timeout)\u001b[0m\n\u001b[1;32m 559\u001b[0m ready \u001b[38;5;241m=\u001b[39m []\n\u001b[1;32m 560\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 561\u001b[0m kev_list \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_selector\u001b[38;5;241m.\u001b[39mcontrol(\u001b[38;5;28;01mNone\u001b[39;00m, max_ev, timeout)\n\u001b[1;32m 562\u001b[0m 
\u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mInterruptedError\u001b[39;00m:\n\u001b[1;32m 563\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m ready\n", + "\u001b[0;31mKeyboardInterrupt\u001b[0m: " + ] + } + ], + "source": [ + "#for epoch in range(0, 3):\n", + "train()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ce4a83d3-909c-49f5-a2e4-e1ae9bfcc5b2", + "metadata": {}, + "outputs": [], + "source": [ + "plt.style.use('fivethirtyeight')\n", + "plt.title('Model Loss')\n", + "plt.plot(range(1, epochs+1), history.history['loss'], label=\"training\")\n", + "plt.plot(range(1, epochs+1), history.history['val_loss'], label=\"validation\")\n", + "plt.xlabel('Epochs')\n", + "plt.ylabel('Loss')\n", + "plt.legend()\n", + "plt.show()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/community/paper_implementation_project/qu_vit/Quantum Vision Transformer.ipynb b/community/paper_implementation_project/qu_vit/Quantum Vision Transformer.ipynb new file mode 100644 index 000000000..c5c00aef1 --- /dev/null +++ b/community/paper_implementation_project/qu_vit/Quantum Vision Transformer.ipynb @@ -0,0 +1,786 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "b144e47f-089a-49d4-a033-dbe6dcc7a55e", + "metadata": {}, + "source": [ + "##### \n", + "Quantum Vision Transformer Tutorial:\n", + "##### 1. Description of Quantum Vision Transformer Architecture:\n", + "

Quantum Vision Transformer is a state-of-the-art (SOTA) neural network architecture that works on image data. \n", + " It was shown in numerous works that the quantum vision transformer can outperform its classical counterpart.\n", + "This tutorial demonstrates the implementation of the hybrid architecture Quantum Vision Transformer [1][2], where some of the operations are quantum (like a Linear layer or an Attention layer) and some are classical.\n", + 

\n", + "\n", + "\n", + "##### 2. Quantum operations:\n", + "###### 2.1 Angle Encoding.\n", + "###### To feed the input data into the quantum circuit, we need to encode it using the angle encoding procedure; all input tensors are expanded and used as angles for the rotation operations.\n", + "###### 2.2 Quantum Layer:\n", + "###### The quantum layer is constructed using the Rotation Operator acting on each wire, followed by the CNOT gate.\n", + "\n", + "###### 2.3 Attention mechanism:\n", + "\n", + 

It works with the sequence representation of patched images and utilizes the Attention mechanism that is the backbone of the Transformer architecture family. The multihead attention block is traditional for the transformer architecture; to introduce quantumness, the classical block is replaced with a VQC. Another way to compute the attention is to use the quantum orthogonal layer and calculate the dot product of the i-th and j-th vectors.\n", + 

\n", + "\n", + "##### 3. Classical operations:\n", + "###### 3.1 Positional Encoder - the operation for the positional information of image patches incorporation.\n", + "###### 3.3 FFN - Fully Connected Block, consists of MLP and LayerNormalization, followed by the residual connection.\n", + "###### 3.4. Transformer Block\n", + "##### 4. Training Procedure" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "fb8f88e1-d493-4473-80cd-00042b61f868", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/opt/anaconda3/envs/baler/lib/python3.11/site-packages/classiq/_internals/authentication/token_manager.py:101: UserWarning: Device is already registered.\n", + "Generating a new refresh token should only be done if the current refresh token is compromised.\n", + "To do so, set the overwrite parameter to true\n", + " warnings.warn(\n" + ] + } + ], + "source": [ + "import classiq\n", + "classiq.authenticate()" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "20d0b3d8-8de2-460b-93d2-bc805561c0c8", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/opt/anaconda3/envs/baler/lib/python3.11/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'dlopen(/opt/anaconda3/envs/baler/lib/python3.11/site-packages/torchvision/image.so, 0x0006): Symbol not found: __ZN3c1017RegisterOperatorsD1Ev\n", + " Referenced from: /opt/anaconda3/envs/baler/lib/python3.11/site-packages/torchvision/image.so\n", + " Expected in: /opt/anaconda3/envs/baler/lib/python3.11/site-packages/torch/lib/libtorch_cpu.dylib'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. 
Did you have `libjpeg` or `libpng` installed before building `torchvision` from source?\n", + " warn(\n" + ] + } + ], + "source": [ + "import torch\n", + "import torch.optim as optim\n", + "import tqdm\n", + "import math\n", + "from classiq import *\n", + "from classiq import (\n", + " synthesize,\n", + " qfunc,\n", + " QArray,\n", + " QBit,\n", + " RX,\n", + " CArray,\n", + " Output,\n", + " CReal,\n", + " repeat,\n", + " create_model,\n", + " show\n", + ")\n", + "from classiq.execution import execute_qnn\n", + "from classiq.applications.qnn import QLayer\n", + "from classiq.qmod.symbolic import pi\n", + "from torch.nn.utils.rnn import pad_sequence\n", + "import torchvision.transforms as transforms\n", + "from torchvision import datasets\n", + "from classiq.execution import (\n", + " ExecutionPreferences,\n", + " execute_qnn,\n", + " set_quantum_program_execution_preferences,\n", + ")\n", + "from classiq.synthesis import SerializedQuantumProgram\n", + "from classiq.applications.qnn.types import (\n", + " MultipleArguments,\n", + " ResultsCollection,\n", + " SavedResult,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "91d168c1-d9da-4c10-a728-a179f3abc9db", + "metadata": {}, + "outputs": [], + "source": [ + "N_QUBITS = 4\n", + "num_shots = 1000" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "5c9b4ed5-6da6-491c-87eb-5fe7e0029304", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from IPython.display import Image \n", + " \n", + "# get the image \n", + "Image(url=\"axioms-13-00323-g004-550.jpg\", width=800, height=400) " + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "407ef1c0-0a59-4821-911d-d6cb5827181d", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 5, + 
"metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "Image(url=\"axioms-13-00323-g005-550.jpg\", width=800, height=300) \n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "5d172b4f-c317-4759-ad63-74b2eb4fa87d", + "metadata": {}, + "outputs": [], + "source": [ + "def execute(\n", + " quantum_program: SerializedQuantumProgram, arguments: MultipleArguments\n", + ") -> ResultsCollection:\n", + " quantum_program = set_quantum_program_execution_preferences(\n", + " quantum_program, preferences=ExecutionPreferences(num_shots=num_shots)\n", + " )\n", + " return execute_qnn(quantum_program, arguments)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "b98e5452-3bc8-4aa0-beee-cc6d0c3cb994", + "metadata": {}, + "outputs": [], + "source": [ + "def post_process(result: SavedResult) -> torch.Tensor:\n", + " res = result.value\n", + " yvec = [\n", + " (res.counts_of_qubits(k)[\"1\"] if \"1\" in res.counts_of_qubits(k) else 0)\n", + " / num_shots\n", + " for k in range(N_QUBITS)\n", + " ]\n", + "\n", + " return torch.tensor(yvec)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "2bb9e435-d050-468a-a61f-0d14ce16eb9c", + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "slide" + }, + "tags": [] + }, + "outputs": [], + "source": [ + "def get_circuit():\n", + "\n", + " #This function produces the quantum circuit:\n", + " @qfunc\n", + " def vqc(weight_: CArray[CArray[CReal, N_QUBITS], N_QUBITS], res:QArray) -> None:\n", + " \n", + " num_qubits = N_QUBITS\n", + " num_qlayers = N_QUBITS\n", + " \n", + " repeat(\n", + " count=num_qlayers,\n", + " iteration=lambda i: repeat(count=num_qubits, iteration=lambda j: RX(pi * weight_[i][j], res[j]))\n", + " )\n", + " \n", + " repeat(\n", + " count=num_qubits - 1,\n", + " iteration=lambda index: CX(ctrl=res[index], target=res[index + 1]),\n", + " )\n", + " \n", + " CX(ctrl=res[num_qubits-1], target=res[0])\n", + "\n", + " \n", + " \n", + " 
@qfunc\n", + " def main(input_: CArray[CReal, N_QUBITS], weight_: CArray[CArray[CReal, N_QUBITS], N_QUBITS], res: Output[QArray[QBit, N_QUBITS]]) -> None:\n", + " \n", + "\n", + " encode_in_angle(input_, res)\n", + " vqc(weight_, res)\n", + "\n", + "\n", + " qmod = create_model(main)\n", + " quantum_program = synthesize(qmod)\n", + " return quantum_program\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "00f92a77-0b94-4fe6-8f9b-2331d4f77939", + "metadata": {}, + "outputs": [], + "source": [ + "class Patchify(torch.nn.Module):\n", + " \"\"\"\n", + " Patchify layer implemented using the Conv2d layer\n", + " \"\"\"\n", + " def __init__(self, in_channels:int, patch_size:int, hidden_size:int):\n", + " super(Patchify, self).__init__()\n", + " self.patch_size = patch_size\n", + " self.conv = torch.nn.Conv2d(in_channels=in_channels, out_channels=hidden_size, kernel_size=self.patch_size, stride=self.patch_size)\n", + " self.hidden_size = hidden_size\n", + " \n", + " def forward(self, x:torch.Tensor):\n", + " bs, c, h, w = x.size()\n", + " self.num_patches = (h // self.patch_size) ** 2\n", + "\n", + " x = self.conv(x)\n", + " x = x.view(bs, self.num_patches, self.hidden_size)\n", + " return x" + ] + }, + { + "cell_type": "markdown", + "id": "6280490e-86b2-4023-bd59-8ded8ca43907", + "metadata": {}, + "source": [ + "#### Rotary Positional Embedding:\n", + "#### $$f_{q, k}(x,m) = R^{d}_{\\theta, m} W_{q,x}x_{m}$$\n" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "2849e3a5-3d01-42bf-ae4b-4c79375e8443", + "metadata": {}, + "outputs": [], + "source": [ + "class RotaryPositionalEmbedding(torch.nn.Module):\n", + " \"\"\"\n", + " Rotary Positional Embedding\n", + " \"\"\"\n", + " def __init__(self, d_model, max_seq_len):\n", + " super(RotaryPositionalEmbedding, self).__init__()\n", + "\n", + " # Create a rotation matrix.\n", + " self.rotation_matrix = torch.zeros(d_model, d_model)\n", + " for i in range(d_model):\n", + " for j in 
range(d_model):\n", + " self.rotation_matrix[i, j] = math.cos(i * j * 0.01)\n", + "\n", + " # Create a positional embedding matrix.\n", + " self.positional_embedding = torch.zeros(max_seq_len, d_model)\n", + " for i in range(max_seq_len):\n", + " for j in range(d_model):\n", + " self.positional_embedding[i, j] = math.cos(i * j * 0.01)\n", + "\n", + " def forward(self, x):\n", + " \"\"\"\n", + " Args:\n", + " x: A tensor of shape (batch_size, seq_len, d_model).\n", + "\n", + " Returns:\n", + " A tensor of shape (batch_size, seq_len, d_model).\n", + " \"\"\"\n", + "\n", + " # Add the positional embedding to the input tensor.\n", + " x += self.positional_embedding\n", + "\n", + " # Apply the rotation matrix to the input tensor.\n", + " x = torch.matmul(x, self.rotation_matrix)\n", + "\n", + " return x" + ] + }, + { + "cell_type": "markdown", + "id": "e5d99e78-0e5b-463c-a5eb-891c8fc35c1d", + "metadata": {}, + "source": [ + "##### Quantum Layer:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "9711b490-835e-4e29-9567-72e4a8f2fe09", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "Image(url=\"classiq_circuit.png\", width=800, height=300) " + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "19042677-6d63-426c-b12d-9d107b47eecb", + "metadata": {}, + "outputs": [], + "source": [ + "class QuantumLayer(torch.nn.Module):\n", + " \"\"\"\n", + " Quantum Layer\n", + " \"\"\"\n", + " def __init__(self, in_dim, out_dim):\n", + " super(QuantumLayer, self).__init__()\n", + " self.quantum_program = get_circuit()\n", + " self.quantum_layer = QLayer(self.quantum_program, execute_qnn, post_process)\n", + "\n", + " def forward(self, x:torch.Tensor):\n", + " x = self.quantum_layer(x)\n", + " return x" + ] + }, + { + "cell_type": "markdown", + "id": "da820af8", + "metadata": {}, + "source": 
[ + "##### Feed Forward Neural Network:\n", + " $$f_{i}(X) = GELU \\circ Dropout \\circ QuantumLayer(X)$$" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "376d717d-76bf-45bc-a90f-67d15a92f33a", + "metadata": {}, + "outputs": [], + "source": [ + "class FFN(torch.nn.Module):\n", + " \"\"\"\n", + " Feed Forward Network\n", + " \"\"\"\n", + " def __init__(self, in_dim, hidden_size):\n", + " super().__init__()\n", + " self.qlinear = QuantumLayer(hidden_size, hidden_size)\n", + " self.dropout = torch.nn.Dropout(p=0.4)\n", + " return\n", + " \n", + " def forward(self, x:torch.Tensor):\n", + " seq_len = x.size()[1]\n", + " #x = self.linear_1(x)\n", + " x = [self.qlinear(x[:, t, :]) for t in range(seq_len)]\n", + " x = torch.Tensor(pad_sequence(x))\n", + " x = self.dropout(x)\n", + " x = torch.nn.functional.gelu(x)\n", + " return x" + ] + }, + { + "cell_type": "markdown", + "id": "abf5bc46", + "metadata": {}, + "source": [ + "#### Multihead Attention:\n", + "#### $$Attention = softmax(\\frac{K(X)*Q(X)^T}{\\sqrt{dim}})*V(X)$$, where K, Q, V is the quantum Linear Projection of the input data;" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "f2ffa889-38a6-425b-bf76-c1032406e6e3", + "metadata": {}, + "outputs": [], + "source": [ + "class qMHA(torch.nn.Module):\n", + " \"\"\"\n", + " Quantum Multihead Attention\n", + " \"\"\"\n", + " def __init__(self, in_dim:int, num_heads:int) -> None:\n", + " super().__init__()\n", + "\n", + " self.k_linear = QuantumLayer(in_dim, in_dim);\n", + " self.q_linear = QuantumLayer(in_dim, in_dim);\n", + " self.v_linear = QuantumLayer(in_dim, in_dim);\n", + " self.dropout = torch.nn.Dropout(p=0.1)\n", + " \n", + " self.num_heads = num_heads\n", + " self.in_dim = in_dim\n", + " \n", + " return\n", + "\n", + " def forward(self, X:torch.Tensor):\n", + "\n", + " seq_len = X.size()[1]\n", + " K = [self.k_linear(X[:, t, :]) for t in range(seq_len)]\n", + " Q = [self.q_linear(X[:, t, :]) for t in 
range(seq_len)]\n", + " V = [self.v_linear(X[:, t, :]) for t in range(seq_len)]\n", + " \n", + " k = torch.Tensor(pad_sequence(K))\n", + " q = torch.Tensor(pad_sequence(Q))\n", + " v = torch.Tensor(pad_sequence(V))\n", + " \n", + " attention = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))\n", + " attention = torch.nn.functional.softmax(attention, dim=-1)\n", + "\n", + " attention = self.dropout(attention)\n", + " attention = attention @ v \n", + " #x = self.final_l(attention)\n", + " return attention" + ] + }, + { + "cell_type": "markdown", + "id": "fb7e14e4", + "metadata": {}, + "source": [ + "#### Transformer Encoder Block:\n", + "#### " + ] + }, + { + "cell_type": "markdown", + "id": "86af5bc7-f121-4487-b5dc-55dfaa5508e9", + "metadata": {}, + "source": [ + "$$\n", + " \\begin{equation}\n", + " \\begin{cases}\n", + " f_{i-1}(x) = X + GELU\\circ Linear \\circ Dropout \\circ QuantumLinear \\circ Linear \\circ X\\\\\n", + " f_{i}(x) = f_{i-1}(X) + GELU \\circ Linear \\circ Dropout \\circ QuantumAttention \\circ Linear \\circ f_{i-1}(X)\n", + " \\end{cases}\n", + " \\end{equation}\n", + " $$\n", + "GELU is an activation function. Linear is the linear projection of the input tensor." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "426e1bed-2197-4d62-ba13-6215c466c97b", + "metadata": {}, + "outputs": [], + "source": [ + "class qTransformerEncoder(torch.nn.Module):\n", + " \"\"\"\n", + " Quantum Transformer Encoder Layer\n", + " \"\"\"\n", + " def __init__(self, in_dim:int, num_heads:int) -> None:\n", + " super().__init__()\n", + " \n", + " self.layer_norm_1 = torch.nn.LayerNorm(normalized_shape=in_dim)\n", + " self.layer_norm_2 = torch.nn.LayerNorm(normalized_shape=in_dim)\n", + " \n", + " self.qMHA = qMHA(in_dim, num_heads)\n", + " self.qFFN = FFN(in_dim, hidden_size=in_dim)\n", + " self.dropout = torch.nn.Dropout(p=0.1)\n", + " \n", + "\n", + " def forward(self, X:torch.Tensor):\n", + " x = self.qMHA(X)\n", + " \n", + " x = (self.layer_norm_1(x) + X)\n", + " x = self.dropout(x)\n", + " \n", + " y = self.qFFN(x)\n", + " y = self.layer_norm_2(y)+x\n", + " return y" + ] + }, + { + "cell_type": "markdown", + "id": "b125354b", + "metadata": {}, + "source": [ + "#### Quantum Vision Transformer:" + ] + }, + { + "cell_type": "markdown", + "id": "f757f693-5d23-4ae7-835f-055c9bc06f6c", + "metadata": {}, + "source": [ + "$$\n", + " \\begin{equation}\n", + " \\begin{cases}\n", + " X = Patrchify(X)\\\\\n", + " X = PositionalEncoding (X)\\\\\n", + " X = TransformerEncoder(X)\\\\\n", + " X = Mean(X)\\\\\n", + " X = Softmax(X)\n", + " \\end{cases}\n", + " \\end{equation}\n", + " $$\n" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "e6a37ec6-d82c-4784-aa97-94bf80be78c1", + "metadata": {}, + "outputs": [], + "source": [ + "class QVT(torch.nn.Module):\n", + " \"\"\"\n", + " Quantum Vision Transformer;\n", + " \"\"\"\n", + " def __init__(self, in_channels, patch_size, in_dim, hidden_size, num_heads, n_classes, n_layers) -> None:\n", + " super().__init__()\n", + " \n", + " self.d_model = (in_dim//patch_size)**2\n", + " self.n_classes = n_classes\n", + "\n", + " self.patch_formation = Patchify(in_channels=in_channels, 
patch_size=patch_size, hidden_size=hidden_size)\n", + "\n", + " self.pos_encoding = RotaryPositionalEmbedding(hidden_size, self.d_model)\n", + " self.transformer_blocks = torch.nn.ModuleList([qTransformerEncoder(hidden_size, num_heads) for i in range(n_layers)])\n", + " \n", + " self.final_normalization = torch.nn.LayerNorm(hidden_size)\n", + " self.final_layer = torch.nn.Linear(hidden_size, self.n_classes)\n", + "\n", + " def forward(self, x: torch.Tensor) -> torch.Tensor: \n", + " \n", + " x = self.patch_formation(x)\n", + " x += self.pos_encoding(x)\n", + " \n", + " for trans_block in self.transformer_blocks:\n", + " x = trans_block(x)\n", + " \n", + " x = self.final_normalization(x)\n", + " x = x.mean(axis=1)\n", + " x = self.final_layer(x)\n", + " \n", + " return x" + ] + }, + { + "cell_type": "markdown", + "id": "ef9038db", + "metadata": {}, + "source": [ + "#### Definition of MNIST dataset and dataloader:" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "79282b7f-cb02-4b20-a29a-51de390c2d17", + "metadata": {}, + "outputs": [], + "source": [ + "#### Example with the MNIST Dataset:\n", + "transform=transforms.Compose([\n", + " transforms.ToTensor(), # first, convert image to PyTorch tensor\n", + " transforms.Normalize((0.1307,), (0.3081,)) # normalize inputs\n", + " ])\n", + "dataset1 = datasets.MNIST('../data', train=True, download=True,transform=transform)\n", + "dataset2 = datasets.MNIST('../data', train=False,transform=transform)\n", + "\n", + "train_loader = torch.utils.data.DataLoader(dataset1,batch_size=256)\n", + "test_loader = torch.utils.data.DataLoader(dataset2,batch_size=256)" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "f94e4047-abcd-4d48-ab16-e1442474f6d0", + "metadata": {}, + "outputs": [], + "source": [ + "#### Classifier and optimizer definition:" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "8ed4f759", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "clf = 
QVT(in_channels=1, patch_size=7, in_dim=28, hidden_size=4, num_heads=1, n_classes=10, n_layers=1)\n", + "\n", + "opt = optim.SGD(clf.parameters(), lr=0.001, momentum=0.5)\n", + "\n", + "loss_history = []\n", + "acc_history = []" + ] + }, + { + "cell_type": "markdown", + "id": "12f70853", + "metadata": {}, + "source": [ + "#### Training Procedure:" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "dc61ea1e-0cfa-4498-b5ec-eab29b102750", + "metadata": {}, + "outputs": [], + "source": [ + "def train():\n", + " clf.train() # set model in training mode (need this because of dropout)\n", + " \n", + " # dataset API gives us pythonic batching \n", + " for data, label in tqdm.tqdm(train_loader):\n", + " opt.zero_grad()\n", + " preds = clf(data)\n", + " loss = torch.nn.functional.nll_loss(preds, label)\n", + " loss.backward()\n", + " loss_history.append(loss)\n", + " opt.step()\n", + " return loss_history\n", + "\n", + "def test():\n", + " clf.eval() # set model in inference mode (need this because of dropout)\n", + " test_loss = 0\n", + " correct = 0\n", + " \n", + " for data, target in tqdm.tqdm(test_loader):\n", + " \n", + " output = clf(data)\n", + " test_loss += torch.nn.functional.nll_loss(output, target).item()\n", + " pred = output.argmax() # get the index of the max log-probability\n", + " correct += pred.eq(target).cpu().sum()\n", + "\n", + " test_loss = test_loss\n", + " test_loss /= len(test_loader) # loss function already averages over batch size\n", + " accuracy = 100. * correct / len(test_loader.dataset)\n", + " acc_history.append(accuracy)\n", + " print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n", + " test_loss, correct, len(test_loader.dataset),\n", + " accuracy))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c6c0281a-c8e9-46c8-adbf-dfdce58ddc61", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + " 0%| | 0/235 [00:00