In [1]:
#qiskit 1.4.1
#qiskit-machine-learning 0.8.4
#scipy 1.16.0
import numpy as np
import matplotlib.pyplot as plt
from qiskit import QuantumCircuit
from qiskit.circuit import Parameter, ParameterVector
from qiskit_machine_learning.neural_networks import EstimatorQNN
from qiskit_machine_learning.algorithms import NeuralNetworkRegressor, VQR
from qiskit_algorithms.optimizers import COBYLA, L_BFGS_B, SPSA, ADAM
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from qiskit.circuit.library import ZZFeatureMap
from qiskit.quantum_info import SparsePauliOp
from scipy.interpolate import interp1d
max_iterations = 300
use_list = True # Set to True for your list
my_list = [7.3, 6.7, 5,8, 5.6, 6.2, 4.5, 4.8, 3.2, 2.0, 0.8, -0.1, -0.7, -0.8, -0.6, 0.2, 1.0, 1.9, 2.8,
3.6, 4.3, 5.0, 5.4, 5.8, 6.1, 5.3, 6.3, 5.3, 6.4, 6.5, 6.0, 6.6, 6.2, 6.7, 6.8, 6.9, 7.0, 7.1]
np.random.seed(42)
# Definition of the true function (general, can be modified)
def my_function_or_list(x, use_list=False, y_list=None, X_ref=None):
"""
The function returns values based on the input:
- If use_list=True, it returns interpolated values from y_list according to X_ref.
- If use_list=False, it returns 2 * x**2 - 1 (for y_true_dense without noise, with noise for training data).
"""
if use_list and y_list is not None and X_ref is not None:
# Interpolation from y_list according to X_ref
interp_func = interp1d(X_ref.ravel(), y_list, kind='cubic', fill_value="extrapolate")
return interp_func(x.ravel()).reshape(x.shape)
else:
return 2 * x**2 - 1 + 0.5 * np.random.normal(0, 0.3, size=x.shape) # Current function: y = 10x^2 - 1
# Příklady jiných funkcí (odkomentujte pro změnu):
# return 2 * x**2 - 1 # Původní kvadratická pro deep
# return x**3 # Kubická funkce
# return np.sin(2 * np.pi * x) # Sinusová
# return 10 * x**2 #pro polynomiální funkci
In [2]:
if use_list:
n_samples = len(my_list)
else:
n_samples = 30
X = np.linspace(-2, 2, n_samples).reshape(-1, 1)
y_true = my_function_or_list(X)
#y = y_true + 0.1 * np.random.normal(0, 0.1, size=X.shape)
#y = y.ravel()
if use_list:
y = np.array(my_list)
if len(y) != n_samples:
raise ValueError(f"list y must have lenght {n_samples}, now is {len(y)}")
else:
noise_level = 0.1
y_true = my_function_or_list(X, use_list=False)
y = y_true + 0.1 * np.random.normal(0, 0.1, size=X.shape)
y = y.ravel()
try:
from qiskit.primitives import StatevectorEstimator
estimator = StatevectorEstimator()
print("I used StatevectorEstimator")
except ImportError:
try:
from qiskit_aer.primitives import Estimator
estimator = Estimator()
print("I used Aer Estimator")
except ImportError:
from qiskit.primitives import Estimator
estimator = Estimator()
print("I used primitives Estimator")
I used StatevectorEstimator
In [3]:
def simple_quantum_circuit(num_qubits=2, feature_map_reps=2, ansatz_reps=3):
"""
Parameterized quantum circuit for regression
"""
# Parameters for the feature map (encoding input data)
input_params = ParameterVector('x', 1)
# Parameters for the variational part (trainable weights)
weight_params = ParameterVector('θ', num_qubits * ansatz_reps * 2)
qc = QuantumCircuit(num_qubits)
# Feature map - encoding input data using RY rotations
for rep in range(feature_map_reps):
for i in range(num_qubits):
qc.ry(input_params[0] * (i + 1) * np.pi, i)
if rep < feature_map_reps - 1:
for i in range(num_qubits - 1):
qc.cx(i, i + 1)
# Ansatz - variational part with trainable parameters
param_idx = 0
for rep in range(ansatz_reps):
# Rotations
for i in range(num_qubits):
qc.ry(weight_params[param_idx], i)
param_idx += 1
qc.rz(weight_params[param_idx], i)
param_idx += 1
# Entanglement
if rep < ansatz_reps - 1:
for i in range(num_qubits - 1):
qc.cx(i, i + 1)
if num_qubits > 1:
qc.cx(num_qubits - 1, 0) # Cyclic connection
return qc, input_params, weight_params
def quadratic_circuit(num_qubits=3):
"""
Circuit specifically designed for quadratic functions
"""
input_params = ParameterVector('x', 1)
weight_params = ParameterVector('θ', num_qubits * 4)
qc = QuantumCircuit(num_qubits)
# Feature map inspired by Pauli feature map
for i in range(num_qubits):
qc.h(i)
# Encoding x and x^2
qc.rz(2 * input_params[0], i)
qc.rx(2 * input_params[0]**2, i)
# Entanglement layer
for i in range(num_qubits - 1):
qc.cx(i, i + 1)
# Variational ansatz
for idx, param in enumerate(weight_params):
qc.ry(param, idx % num_qubits)
if idx < len(weight_params) - 1:
qc.rz(param, idx % num_qubits)
# Final entanglement
for i in range(num_qubits - 1):
qc.cx(i, i + 1)
return qc, input_params, weight_params
def polynomial_circuit(num_qubits=3):
"""
Circuit for polynomial functions
"""
input_param = Parameter('x')
weight_params = ParameterVector('θ', num_qubits * 3)
qc = QuantumCircuit(num_qubits)
# Pauli feature map for x and x^2
for i in range(num_qubits):
qc.h(i)
# Encode x
for i in range(num_qubits):
qc.rz(2 * input_param, i)
# Entanglement
for i in range(num_qubits - 1):
qc.cx(i, i + 1)
# Encode x^2
for i in range(num_qubits):
qc.rx(2 * input_param * input_param, i)
# Variational ansatz
for i in range(num_qubits):
qc.ry(weight_params[3*i], i)
qc.rz(weight_params[3*i + 1], i)
qc.rx(weight_params[3*i + 2], i)
return qc, [input_param], weight_params
def deep_circuit(num_qubits=3, depth=2):
"""
Deep circuit
"""
input_param = Parameter('x')
weight_params = ParameterVector('θ', num_qubits * 2 * depth)
qc = QuantumCircuit(num_qubits)
for i in range(num_qubits):
qc.ry(input_param * np.pi, i)
# Variational layers
param_idx = 0
for layer in range(depth):
for i in range(num_qubits):
qc.ry(weight_params[param_idx], i)
qc.rz(weight_params[param_idx + 1], i)
param_idx += 2
if num_qubits > 1:
for i in range(num_qubits - 1):
qc.cx(i, i + 1)
if layer < depth - 1:
qc.cx(num_qubits - 1, 0)
return qc, [input_param], weight_params
def deep_circuit_zfeaturemap(num_qubits=3, depth=3):
"""
Alternative create_deep_circuit with ZFeatureMap on a subset of qubits
"""
input_param = Parameter('x')
weight_params = ParameterVector('θ', num_qubits * 2 * depth)
qc = QuantumCircuit(num_qubits)
# Use ZFeatureMap on the first qubit
feature_map = ZFeatureMap(feature_dimension=1, reps=1)
qc.compose(feature_map.assign_parameters([input_param]), qubits=[0], inplace=True)
# Use RY on other qubits to maintain expressivity
for i in range(1, num_qubits):
qc.ry(input_param * np.pi, i)
# Variational layers
param_idx = 0
for layer in range(depth):
for i in range(num_qubits):
qc.ry(weight_params[param_idx], i)
qc.rz(weight_params[param_idx + 1], i)
param_idx += 2
if num_qubits > 1:
for i in range(num_qubits - 1):
qc.cx(i, i + 1)
if layer < depth - 1:
qc.cx(num_qubits - 1, 0)
return qc, [input_param], weight_params
# CIRCUIT SELECTION AND SETTINGS
print("Select the type of circuit:")
print("1 - Simple circuit (fast, less accurate)")
print("2 - Quadratic circuit (slower, more accurate for x^2)")
print("3 - Deep (2 qubits, 8 parameters) - more layers")
print("4 - Polynomial (3 qubits, 9 parameters) - for x^2")
print("5 - Simple deep (3 qubits, 9 parameters) - for x^2")
choice = input("Your choice (1-5): ").strip() or "5"
Select the type of circuit: 1 - Simple circuit (fast, less accurate) 2 - Quadratic circuit (slower, more accurate for x^2) 3 - Deep (2 qubits, 8 parameters) - more layers 4 - Polynomial (3 qubits, 9 parameters) - for x^2 5 - Simple deep (3 qubits, 9 parameters) - for x^2
In [4]:
if choice == "1":
num_qubits = 2
qc, input_params, weight_params = simple_quantum_circuit(num_qubits)
circuit_name = "Simple circuit"
elif choice == "2":
num_qubits = 3
qc, input_params, weight_params = quadratic_circuit(num_qubits)
circuit_name = "Quadratic circuit"
elif choice == "3":
num_qubits = 3
depth = 3
qc, input_params, weight_params = deep_circuit_zfeaturemap(num_qubits, depth)
circuit_name = "Deep with ZFeatureMap"
elif choice == "4":
num_qubits = 3
qc, input_params, weight_params = polynomial_circuit(num_qubits)
circuit_name = "Polynomial"
else:
num_qubits = 3
depth = 3
qc, input_params, weight_params = deep_circuit(num_qubits, depth)
circuit_name = "Deep simple"
print(f"\nThe quantum circuit has {num_qubits} qubits")
print(f"Number of trainable parameters: {len(weight_params)}")
# Use StandardScaler instead of MinMaxScaler for better numerical stability
if choice == "1":
scaler_X = MinMaxScaler()
scaler_y = MinMaxScaler()
elif choice == "3":
scaler_X = MinMaxScaler()
scaler_y = MinMaxScaler()
elif choice == "5":
scaler_X = MinMaxScaler()
scaler_y = MinMaxScaler()
else:
scaler_X = StandardScaler()
scaler_y = StandardScaler()
X_scaled = scaler_X.fit_transform(X)
y_scaled = scaler_y.fit_transform(y.reshape(-1, 1)).ravel()
# Splitting into training and test data
X_train, X_test, y_train, y_test = train_test_split(
X_scaled, y_scaled, test_size=0.3, random_state=42
)
The quantum circuit has 2 qubits Number of trainable parameters: 12
In [5]:
# CREATING QNN WITH MULTIPLE OBSERVABLES
# Using a combination of measurements for better expressivity
observables = [
SparsePauliOp.from_list([("Z" + "I" * (num_qubits - 1), 1)]),
SparsePauliOp.from_list([("I" + "Z" + "I" * (num_qubits - 2), 0.5)]) if num_qubits > 1 else None
]
observables = [o for o in observables if o is not None]
qnn = EstimatorQNN(
circuit=qc,
input_params=input_params,
weight_params=weight_params,
observables=observables[0], # Use only the first observable
estimator=estimator
)
# OPTIMIZATION WITH CALLBACK FOR LOG
loss_values = []
class LoggingOptimizerWrapper:
"""
Wrapper for the optimizer that manually logs the loss after each iteration.
Works even for optimizers without a built-in callback (like SPSA).
"""
def __init__(self, optimizer):
self.optimizer = optimizer
self.iteration = 0
self._loss_fn = None # Will be set later from the regressor
def minimize(self, fun, x0, jac=None, bounds=None):
"""
Wraps the minimize method and logs the loss.
"""
def wrapped_fun(x):
loss = fun(x)
self.iteration += 1
loss_values.append(loss)
print(f"Iteration {self.iteration}: Loss = {loss:.6f}")
if self.iteration % 20 == 0:
print(f"Iteration {self.iteration}: Loss = {loss:.6f}")
return loss
# Runs the optimizer with the wrapped function
result = self.optimizer.minimize(wrapped_fun, x0, jac=jac, bounds=bounds)
return result
# Callback for compatible optimizers (e.g., COBYLA) – kept as a fallback
def original_callback(weights, loss):
loss_values.append(loss)
print(f"Iteration {len(loss_values)}: Loss = {loss:.6f}")
if len(loss_values) % 20 == 0:
print(f"Iteration {len(loss_values)}: Loss = {loss:.6f}")
# Optimizer selection
print("\nSelect an optimizer:")
print("1 - COBYLA (robust)")
print("2 - L-BFGS-B (fast convergence)")
print("3 - ADAM (adaptive)")
print("4 - SPSA (suitable for quantum systems)")
opt_choice = input("Your choice (1, 2, 3, or 4): ") or "2"
No gradient function provided, creating a gradient function. If your Estimator requires transpilation, please provide a pass manager.
Select an optimizer: 1 - COBYLA (robust) 2 - L-BFGS-B (fast convergence) 3 - ADAM (adaptive) 4 - SPSA (suitable for quantum systems)
In [6]:
if opt_choice == "1":
base_optimizer = COBYLA(maxiter=max_iterations)
use_original_callback = False # COBYLA supports callback natively
elif opt_choice == "3":
base_optimizer = ADAM(maxiter=max_iterations, lr=0.005, amsgrad=True)
use_original_callback = False
elif opt_choice == "4":
base_optimizer = SPSA(maxiter=max_iterations, learning_rate=0.05, perturbation=0.01)
use_original_callback = False
else:
base_optimizer = L_BFGS_B(maxfun=max_iterations)
use_original_callback = True # L-BFGS-B supports callback
# TRAINING
print(f"\nTraining with {base_optimizer.__class__.__name__} optimizer...")
print(f"Maximum number of iterations: {max_iterations}")
initial_weights = np.random.uniform(-0.1, 0.1, len(weight_params))
# Creating the regressor – need access to the internal loss function for the wrapper
regressor = NeuralNetworkRegressor(
neural_network=qnn,
loss='squared_error',
optimizer=None, # Temporarily None, will set wrapper later
warm_start=False,
initial_point=initial_weights
)
# If wrapper is needed (for SPSA/ADAM), wrap the optimizer
if not use_original_callback:
optimizer = LoggingOptimizerWrapper(base_optimizer)
# Set the internal loss function for the wrapper (from the regressor)
regressor.optimizer = optimizer
optimizer._loss_fn = regressor._loss # Access to the internal loss function (private, but works)
else:
optimizer = base_optimizer
regressor.optimizer = optimizer
regressor.callback = original_callback # Use the original callback for COBYLA/L-BFGS-B
# Start training
regressor.fit(X_train, y_train)
print(f"Training completed after {len(loss_values)} iterations!")
# PREDICTION AND EVALUATION
y_train_pred = regressor.predict(X_train)
y_test_pred = regressor.predict(X_test)
# Inverse transformation
y_train_pred_original = scaler_y.inverse_transform(y_train_pred.reshape(-1, 1)).ravel()
y_test_pred_original = scaler_y.inverse_transform(y_test_pred.reshape(-1, 1)).ravel()
y_train_original = scaler_y.inverse_transform(y_train.reshape(-1, 1)).ravel()
y_test_original = scaler_y.inverse_transform(y_test.reshape(-1, 1)).ravel()
X_train_original = scaler_X.inverse_transform(X_train)
X_test_original = scaler_X.inverse_transform(X_test)
# METRICS
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
train_mse = mean_squared_error(y_train_original, y_train_pred_original)
test_mse = mean_squared_error(y_test_original, y_test_pred_original)
train_mae = mean_absolute_error(y_train_original, y_train_pred_original)
test_mae = mean_absolute_error(y_test_original, y_test_pred_original)
train_r2 = r2_score(y_train_original, y_train_pred_original)
test_r2 = r2_score(y_test_original, y_test_pred_original)
print(f"\nResults:")
print(f"Training MSE: {train_mse:.4f} | MAE: {train_mae:.4f} | R²: {train_r2:.4f}")
print(f"Test MSE: {test_mse:.4f} | MAE: {test_mae:.4f} | R²: {test_r2:.4f}")
# DETAILED PREDICTION ON A DENSE GRID
X_dense = np.linspace(-2, 2, 100).reshape(-1, 1)
X_dense_scaled = scaler_X.transform(X_dense)
y_dense_pred = regressor.predict(X_dense_scaled)
y_dense_pred_original = scaler_y.inverse_transform(y_dense_pred.reshape(-1, 1)).ravel()
# my_function_or_list
# y_true_dense = my_function_or_list(X_dense)
y_true_dense = my_function_or_list(X_dense, use_list=False, y_list=y, X_ref=X)
Training with SPSA optimizer... Maximum number of iterations: 300 Iteration 1: Loss = 0.064480 Iteration 2: Loss = 0.070300 Iteration 3: Loss = 0.062419 Iteration 4: Loss = 0.063853 Iteration 5: Loss = 0.064858 Iteration 6: Loss = 0.059810 Iteration 7: Loss = 0.064413 Iteration 8: Loss = 0.061680 Iteration 9: Loss = 0.056014 Iteration 10: Loss = 0.060868 Iteration 11: Loss = 0.058582 Iteration 12: Loss = 0.059188 Iteration 13: Loss = 0.058281 Iteration 14: Loss = 0.057772 Iteration 15: Loss = 0.061507 Iteration 16: Loss = 0.053184 Iteration 17: Loss = 0.049247 Iteration 18: Loss = 0.047999 Iteration 19: Loss = 0.051569 Iteration 20: Loss = 0.052359 Iteration 20: Loss = 0.052359 Iteration 21: Loss = 0.051170 Iteration 22: Loss = 0.046914 Iteration 23: Loss = 0.054018 Iteration 24: Loss = 0.045395 Iteration 25: Loss = 0.047177 Iteration 26: Loss = 0.047153 Iteration 27: Loss = 0.047861 Iteration 28: Loss = 0.044615 Iteration 29: Loss = 0.052128 Iteration 30: Loss = 0.044317 Iteration 31: Loss = 0.041912 Iteration 32: Loss = 0.043253 Iteration 33: Loss = 0.048244 Iteration 34: Loss = 0.038295 Iteration 35: Loss = 0.033491 Iteration 36: Loss = 0.037233 Iteration 37: Loss = 0.037519 Iteration 38: Loss = 0.032119 Iteration 39: Loss = 0.028399 Iteration 40: Loss = 0.032718 Iteration 40: Loss = 0.032718 Iteration 41: Loss = 0.029872 Iteration 42: Loss = 0.026261 Iteration 43: Loss = 0.027097 Iteration 44: Loss = 0.026377 Iteration 45: Loss = 0.027970 Iteration 46: Loss = 0.027294 Iteration 47: Loss = 0.027881 Iteration 48: Loss = 0.027970 Iteration 49: Loss = 0.027600 Iteration 50: Loss = 0.026184 Iteration 51: Loss = 0.026767 Iteration 52: Loss = 0.024972 Iteration 53: Loss = 0.025398 Iteration 54: Loss = 0.025728 Iteration 55: Loss = 0.027401 Iteration 56: Loss = 0.024537 Iteration 57: Loss = 0.027001 Iteration 58: Loss = 0.022219 Iteration 59: Loss = 0.023830 Iteration 60: Loss = 0.023231 Iteration 60: Loss = 0.023231 Iteration 61: Loss = 0.024458 Iteration 62: Loss = 0.020266 Iteration 63: Loss = 0.019589 Iteration 64: Loss = 0.023191 Iteration 65: Loss = 0.023720 Iteration 66: Loss = 0.018543 Iteration 67: Loss = 0.019075 Iteration 68: Loss = 0.018004 Iteration 69: Loss = 0.019835 Iteration 70: Loss = 0.017761 Iteration 71: Loss = 0.019245 Iteration 72: Loss = 0.016858 Iteration 73: Loss = 0.017329 Iteration 74: Loss = 0.018683 Iteration 75: Loss = 0.017937 Iteration 76: Loss = 0.017055 Iteration 77: Loss = 0.019182 Iteration 78: Loss = 0.016458 Iteration 79: Loss = 0.017858 Iteration 80: Loss = 0.014404 Iteration 80: Loss = 0.014404 Iteration 81: Loss = 0.017237 Iteration 82: Loss = 0.014900 Iteration 83: Loss = 0.015712 Iteration 84: Loss = 0.013385 Iteration 85: Loss = 0.012031 Iteration 86: Loss = 0.016166 Iteration 87: Loss = 0.011671 Iteration 88: Loss = 0.013288 Iteration 89: Loss = 0.011796 Iteration 90: Loss = 0.011612 Iteration 91: Loss = 0.012977 Iteration 92: Loss = 0.012127 Iteration 93: Loss = 0.012206 Iteration 94: Loss = 0.011528 Iteration 95: Loss = 0.012169 Iteration 96: Loss = 0.012166 Iteration 97: Loss = 0.011469 Iteration 98: Loss = 0.012726 Iteration 99: Loss = 0.011953 Iteration 100: Loss = 0.011596 Iteration 100: Loss = 0.011596 Iteration 101: Loss = 0.010308 Iteration 102: Loss = 0.013232 Iteration 103: Loss = 0.013118 Iteration 104: Loss = 0.010024 Iteration 105: Loss = 0.013184 Iteration 106: Loss = 0.011449 Iteration 107: Loss = 0.010374 Iteration 108: Loss = 0.011246 Iteration 109: Loss = 0.009281 Iteration 110: Loss = 0.010298 Iteration 111: Loss = 0.011199 Iteration 112: Loss = 0.009314 Iteration 113: Loss = 0.009678 Iteration 114: Loss = 0.010614 Iteration 115: Loss = 0.009819 Iteration 116: Loss = 0.009802 Iteration 117: Loss = 0.009554 Iteration 118: Loss = 0.010882 Iteration 119: Loss = 0.010257 Iteration 120: Loss = 0.009649 Iteration 120: Loss = 0.009649 Iteration 121: Loss = 0.009372 Iteration 122: Loss = 0.010701 Iteration 123: Loss = 0.009149 Iteration 124: Loss = 0.009421 Iteration 125: Loss = 0.010023 Iteration 126: Loss = 0.008530 Iteration 127: Loss = 0.009407 Iteration 128: Loss = 0.009177 Iteration 129: Loss = 0.010426 Iteration 130: Loss = 0.008456 Iteration 131: Loss = 0.009776 Iteration 132: Loss = 0.009147 Iteration 133: Loss = 0.009562 Iteration 134: Loss = 0.007984 Iteration 135: Loss = 0.009714 Iteration 136: Loss = 0.009713 Iteration 137: Loss = 0.009848 Iteration 138: Loss = 0.009675 Iteration 139: Loss = 0.009459 Iteration 140: Loss = 0.010010 Iteration 140: Loss = 0.010010 Iteration 141: Loss = 0.009442 Iteration 142: Loss = 0.009866 Iteration 143: Loss = 0.008993 Iteration 144: Loss = 0.009841 Iteration 145: Loss = 0.008445 Iteration 146: Loss = 0.009017 Iteration 147: Loss = 0.009580 Iteration 148: Loss = 0.008714 Iteration 149: Loss = 0.009985 Iteration 150: Loss = 0.009011 Iteration 151: Loss = 0.007808 Iteration 152: Loss = 0.009698 Iteration 153: Loss = 0.009432 Iteration 154: Loss = 0.008895 Iteration 155: Loss = 0.008622 Iteration 156: Loss = 0.008771 Iteration 157: Loss = 0.008986 Iteration 158: Loss = 0.008274 Iteration 159: Loss = 0.009037 Iteration 160: Loss = 0.008226 Iteration 160: Loss = 0.008226 Iteration 161: Loss = 0.007470 Iteration 162: Loss = 0.009980 Iteration 163: Loss = 0.010130 Iteration 164: Loss = 0.009482 Iteration 165: Loss = 0.007914 Iteration 166: Loss = 0.008602 Iteration 167: Loss = 0.009376 Iteration 168: Loss = 0.009415 Iteration 169: Loss = 0.008822 Iteration 170: Loss = 0.008663 Iteration 171: Loss = 0.010034 Iteration 172: Loss = 0.009591 Iteration 173: Loss = 0.009547 Iteration 174: Loss = 0.008920 Iteration 175: Loss = 0.008602 Iteration 176: Loss = 0.008610 Iteration 177: Loss = 0.009878 Iteration 178: Loss = 0.008472 Iteration 179: Loss = 0.009866 Iteration 180: Loss = 0.008253 Iteration 180: Loss = 0.008253 Iteration 181: Loss = 0.009112 Iteration 182: Loss = 0.008411 Iteration 183: Loss = 0.008522 Iteration 184: Loss = 0.007971 Iteration 185: Loss = 0.008513 Iteration 186: Loss = 0.009517 Iteration 187: Loss = 0.009586 Iteration 188: Loss = 0.009528 Iteration 189: Loss = 0.009488 Iteration 190: Loss = 0.008990 Iteration 191: Loss = 0.008174 Iteration 192: Loss = 0.009226 Iteration 193: Loss = 0.009053 Iteration 194: Loss = 0.009311 Iteration 195: Loss = 0.009730 Iteration 196: Loss = 0.008396 Iteration 197: Loss = 0.009165 Iteration 198: Loss = 0.009146 Iteration 199: Loss = 0.009918 Iteration 200: Loss = 0.009715 Iteration 200: Loss = 0.009715 Iteration 201: Loss = 0.008555 Iteration 202: Loss = 0.009279 Iteration 203: Loss = 0.010165 Iteration 204: Loss = 0.009108 Iteration 205: Loss = 0.009029 Iteration 206: Loss = 0.009552 Iteration 207: Loss = 0.008853 Iteration 208: Loss = 0.009859 Iteration 209: Loss = 0.009372 Iteration 210: Loss = 0.008256 Iteration 211: Loss = 0.009266 Iteration 212: Loss = 0.008504 Iteration 213: Loss = 0.009678 Iteration 214: Loss = 0.008809 Iteration 215: Loss = 0.008223 Iteration 216: Loss = 0.009936 Iteration 217: Loss = 0.008407 Iteration 218: Loss = 0.008697 Iteration 219: Loss = 0.008769 Iteration 220: Loss = 0.008622 Iteration 220: Loss = 0.008622 Iteration 221: Loss = 0.010124 Iteration 222: Loss = 0.009672 Iteration 223: Loss = 0.008242 Iteration 224: Loss = 0.009530 Iteration 225: Loss = 0.008822 Iteration 226: Loss = 0.008923 Iteration 227: Loss = 0.009123 Iteration 228: Loss = 0.007609 Iteration 229: Loss = 0.009285 Iteration 230: Loss = 0.009823 Iteration 231: Loss = 0.008759 Iteration 232: Loss = 0.008728 Iteration 233: Loss = 0.009787 Iteration 234: Loss = 0.007975 Iteration 235: Loss = 0.009686 Iteration 236: Loss = 0.009865 Iteration 237: Loss = 0.008500 Iteration 238: Loss = 0.008347 Iteration 239: Loss = 0.009617 Iteration 240: Loss = 0.008702 Iteration 240: Loss = 0.008702 Iteration 241: Loss = 0.009277 Iteration 242: Loss = 0.010151 Iteration 243: Loss = 0.010248 Iteration 244: Loss = 0.009802 Iteration 245: Loss = 0.008385 Iteration 246: Loss = 0.008603 Iteration 247: Loss = 0.009880 Iteration 248: Loss = 0.008312 Iteration 249: Loss = 0.008568 Iteration 250: Loss = 0.008986 Iteration 251: Loss = 0.009312 Iteration 252: Loss = 0.009105 Iteration 253: Loss = 0.008359 Iteration 254: Loss = 0.008315 Iteration 255: Loss = 0.008082 Iteration 256: Loss = 0.008902 Iteration 257: Loss = 0.009519 Iteration 258: Loss = 0.008479 Iteration 259: Loss = 0.008863 Iteration 260: Loss = 0.008243 Iteration 260: Loss = 0.008243 Iteration 261: Loss = 0.009647 Iteration 262: Loss = 0.008805 Iteration 263: Loss = 0.009302 Iteration 264: Loss = 0.009593 Iteration 265: Loss = 0.009233 Iteration 266: Loss = 0.009661 Iteration 267: Loss = 0.009060 Iteration 268: Loss = 0.008846 Iteration 269: Loss = 0.009633 Iteration 270: Loss = 0.008848 Iteration 271: Loss = 0.009098 Iteration 272: Loss = 0.008866 Iteration 273: Loss = 0.008002 Iteration 274: Loss = 0.008816 Iteration 275: Loss = 0.009184 Iteration 276: Loss = 0.008100 Iteration 277: Loss = 0.008514 Iteration 278: Loss = 0.008834 Iteration 279: Loss = 0.009014 Iteration 280: Loss = 0.008396 Iteration 280: Loss = 0.008396 Iteration 281: Loss = 0.009710 Iteration 282: Loss = 0.008033 Iteration 283: Loss = 0.008490 Iteration 284: Loss = 0.008421 Iteration 285: Loss = 0.008764 Iteration 286: Loss = 0.008690 Iteration 287: Loss = 0.008264 Iteration 288: Loss = 0.008343 Iteration 289: Loss = 0.009128 Iteration 290: Loss = 0.008775 Iteration 291: Loss = 0.009136 Iteration 292: Loss = 0.009762 Iteration 293: Loss = 0.009804 Iteration 294: Loss = 0.008614 Iteration 295: Loss = 0.010595 Iteration 296: Loss = 0.008659 Iteration 297: Loss = 0.008020 Iteration 298: Loss = 0.010242 Iteration 299: Loss = 0.009572 Iteration 300: Loss = 0.010580 Iteration 300: Loss = 0.010580 Iteration 301: Loss = 0.008363 Iteration 302: Loss = 0.009583 Iteration 303: Loss = 0.009012 Iteration 304: Loss = 0.009806 Iteration 305: Loss = 0.007574 Iteration 306: Loss = 0.009821 Iteration 307: Loss = 0.008609 Iteration 308: Loss = 0.009418 Iteration 309: Loss = 0.008751 Iteration 310: Loss = 0.009919 Iteration 311: Loss = 0.008019 Iteration 312: Loss = 0.008039 Iteration 313: Loss = 0.008064 Iteration 314: Loss = 0.008642 Iteration 315: Loss = 0.008194 Iteration 316: Loss = 0.008690 Iteration 317: Loss = 0.008873 Iteration 318: Loss = 0.008234 Iteration 319: Loss = 0.009292 Iteration 320: Loss = 0.009011 Iteration 320: Loss = 0.009011 Iteration 321: Loss = 0.008155 Iteration 322: Loss = 0.009239 Iteration 323: Loss = 0.009258 Iteration 324: Loss = 0.008973 Iteration 325: Loss = 0.008766 Iteration 326: Loss = 0.008866 Iteration 327: Loss = 0.009062 Iteration 328: Loss = 0.009164 Iteration 329: Loss = 0.008720 Iteration 330: Loss = 0.007944 Iteration 331: Loss = 0.008924 Iteration 332: Loss = 0.008680 Iteration 333: Loss = 0.010358 Iteration 334: Loss = 0.008843 Iteration 335: Loss = 0.009874 Iteration 336: Loss = 0.008572 Iteration 337: Loss = 0.008908 Iteration 338: Loss = 0.008969 Iteration 339: Loss = 0.009204 Iteration 340: Loss = 0.008418 Iteration 340: Loss = 0.008418 Iteration 341: Loss = 0.008440 Iteration 342: Loss = 0.008607 Iteration 343: Loss = 0.008136 Iteration 344: Loss = 0.008738 Iteration 345: Loss = 0.008562 Iteration 346: Loss = 0.009227 Iteration 347: Loss = 0.009050 Iteration 348: Loss = 0.008906 Iteration 349: Loss = 0.008480 Iteration 350: Loss = 0.009205 Iteration 351: Loss = 0.008598 Iteration 352: Loss = 0.009150 Iteration 353: Loss = 0.009115 Iteration 354: Loss = 0.008330 Iteration 355: Loss = 0.009075 Iteration 356: Loss = 0.009398 Iteration 357: Loss = 0.008353 Iteration 358: Loss = 0.008965 Iteration 359: Loss = 0.008796 Iteration 360: Loss = 0.009062 Iteration 360: Loss = 0.009062 Iteration 361: Loss = 0.009005 Iteration 362: Loss = 0.009610 Iteration 363: Loss = 0.008710 Iteration 364: Loss = 0.008792 Iteration 365: Loss = 0.008529 Iteration 366: Loss = 0.008359 Iteration 367: Loss = 0.008892 Iteration 368: Loss = 0.008907 Iteration 369: Loss = 0.009144 Iteration 370: Loss = 0.009073 Iteration 371: Loss = 0.008942 Iteration 372: Loss = 0.009136 Iteration 373: Loss = 0.008930 Iteration 374: Loss = 0.009887 Iteration 375: Loss = 0.008825 Iteration 376: Loss = 0.008220 Iteration 377: Loss = 0.009124 Iteration 378: Loss = 0.008594 Iteration 379: Loss = 0.008457 Iteration 380: Loss = 0.008274 Iteration 380: Loss = 0.008274 Iteration 381: Loss = 0.008986 Iteration 382: Loss = 0.009962 Iteration 383: Loss = 0.009407 Iteration 384: Loss = 0.008363 Iteration 385: Loss = 0.008310 Iteration 386: Loss = 0.008686 Iteration 387: Loss = 0.009556 Iteration 388: Loss = 0.009693 Iteration 389: Loss = 0.009357 Iteration 390: Loss = 0.008865 Iteration 391: Loss = 0.009065 Iteration 392: Loss = 0.009210 Iteration 393: Loss = 0.010034 Iteration 394: Loss = 0.009824 Iteration 395: Loss = 0.008195 Iteration 396: Loss = 0.009224 Iteration 397: Loss = 0.008494 Iteration 398: Loss = 0.008390 Iteration 399: Loss = 0.008784 Iteration 400: Loss = 0.010085 Iteration 400: Loss = 0.010085 Iteration 401: Loss = 0.010155 Iteration 402: Loss = 0.008302 Iteration 403: Loss = 0.008153 Iteration 404: Loss = 0.008667 Iteration 405: Loss = 0.008995 Iteration 406: Loss = 0.008845 Iteration 407: Loss = 0.009833 Iteration 408: Loss = 0.008809 Iteration 409: Loss = 0.008488 Iteration 410: Loss = 0.009015 Iteration 411: Loss = 0.008672 Iteration 412: Loss = 0.009656 Iteration 413: Loss = 0.009718 Iteration 414: Loss = 0.008822 Iteration 415: Loss = 0.008008 Iteration 416: Loss = 0.008912 Iteration 417: Loss = 0.008791 Iteration 418: Loss = 0.009111 Iteration 419: Loss = 0.009349 Iteration 420: Loss = 0.009089 Iteration 420: Loss = 0.009089 Iteration 421: Loss = 0.008816 Iteration 422: Loss = 0.009136 Iteration 423: Loss = 0.008885 Iteration 424: Loss = 0.008914 Iteration 425: Loss = 0.007976 Iteration 426: Loss = 0.009345 Iteration 427: Loss = 0.008978 Iteration 428: Loss = 0.007855 Iteration 429: Loss = 0.008737 Iteration 430: Loss = 0.008202 Iteration 431: Loss = 0.008986 Iteration 432: Loss = 0.008840 Iteration 433: Loss = 0.009754 Iteration 434: Loss = 0.008497 Iteration 435: Loss = 0.008172 Iteration 436: Loss = 0.009108 Iteration 437: Loss = 0.008636 Iteration 438: Loss = 0.007848 Iteration 439: Loss = 0.007789 Iteration 440: Loss = 0.009245 Iteration 440: Loss = 0.009245 Iteration 441: Loss = 0.008389 Iteration 442: Loss = 0.008264 Iteration 443: Loss = 0.008872 Iteration 444: Loss = 0.007931 Iteration 445: Loss = 0.009275 Iteration 446: Loss = 0.007961 Iteration 447: Loss = 0.008660 Iteration 448: Loss = 0.008180 Iteration 449: Loss = 0.008591 Iteration 450: Loss = 0.008239 Iteration 451: Loss = 0.008640 Iteration 452: Loss = 0.008422 Iteration 453: Loss = 0.008440 Iteration 454: Loss = 0.008197 Iteration 455: Loss = 0.007893 Iteration 456: Loss = 0.008185 Iteration 457: Loss = 0.008383 Iteration 458: Loss = 0.009399 Iteration 459: Loss = 0.009454 Iteration 460: Loss = 0.009100 Iteration 460: Loss = 0.009100 Iteration 461: Loss = 0.008491 Iteration 462: Loss = 0.008716 Iteration 463: Loss = 0.009294 Iteration 464: Loss = 0.009048 Iteration 465: Loss = 0.008462 Iteration 466: Loss = 0.007675 Iteration 467: Loss = 0.008541 Iteration 468: Loss = 0.007858 Iteration 469: Loss = 0.008478 Iteration 470: Loss = 0.008746 Iteration 471: Loss = 0.008113 Iteration 472: Loss = 0.008952 Iteration 473: Loss = 0.009029 Iteration 474: Loss = 0.009456 Iteration 475: Loss = 0.008952 Iteration 476: Loss = 0.007537 Iteration 477: Loss = 0.008623 Iteration 478: Loss = 0.009222 Iteration 479: Loss = 0.008948 Iteration 480: Loss = 0.008756 Iteration 480: Loss = 0.008756 Iteration 481: Loss = 0.008232 Iteration 482: Loss = 0.009070 Iteration 483: Loss = 0.008309 Iteration 484: Loss = 0.010681 Iteration 485: Loss = 0.009810 Iteration 486: Loss = 0.009414 Iteration 487: Loss = 0.008173 Iteration 488: Loss = 0.008286 Iteration 489: Loss = 0.009727 Iteration 490: Loss = 0.008749 Iteration 491: Loss = 0.009441 Iteration 492: Loss = 0.009315 Iteration 493: Loss = 0.008926 Iteration 494: Loss = 0.007478 Iteration 495: Loss = 0.008223 Iteration 496: Loss = 0.007810 Iteration 497: Loss = 0.008624 Iteration 498: Loss = 0.008942 Iteration 499: Loss = 0.008165 Iteration 500: Loss = 0.008924 Iteration 500: Loss = 0.008924 Iteration 501: Loss = 0.008655 Iteration 502: Loss = 0.010378 Iteration 503: Loss = 0.009370 Iteration 504: Loss = 0.008746 Iteration 505: Loss = 0.008432 Iteration 506: Loss = 0.008674 Iteration 507: Loss = 0.009532 Iteration 508: Loss = 0.008920 Iteration 509: Loss = 0.009282 Iteration 510: Loss = 0.008126 Iteration 511: Loss = 0.008053 Iteration 512: Loss = 0.008142 Iteration 513: Loss = 0.009362 Iteration 514: Loss = 0.009979 Iteration 515: Loss = 0.008981 Iteration 516: Loss = 0.008849 Iteration 517: Loss = 0.009103 Iteration 518: Loss = 0.008990 Iteration 519: Loss = 0.008941 Iteration 520: Loss = 0.007586 Iteration 520: Loss = 0.007586 Iteration 521: Loss = 0.008946 Iteration 522: Loss = 0.007265 Iteration 523: Loss = 0.009597 Iteration 524: Loss = 0.009718 Iteration 525: Loss = 0.008283 Iteration 526: Loss = 0.009738 Iteration 527: Loss = 0.008344 Iteration 528: Loss = 0.008055 Iteration 529: Loss = 0.008061 Iteration 530: Loss = 0.009060 Iteration 531: Loss = 0.008339 Iteration 532: Loss = 0.009622 Iteration 533: Loss = 0.009322 Iteration 534: Loss = 0.009024 Iteration 535: Loss = 0.008536 Iteration 536: Loss = 0.009248 Iteration 537: Loss = 0.008396 Iteration 538: Loss = 0.008641 Iteration 539: Loss = 0.008492 Iteration 540: Loss = 0.008226 Iteration 540: Loss = 0.008226 Iteration 541: Loss = 0.008563 Iteration 542: Loss = 0.007700 Iteration 543: Loss = 0.008015 Iteration 544: Loss = 0.008694 Iteration 545: Loss = 0.008920 Iteration 546: Loss = 0.008555 Iteration 547: Loss = 0.008829 Iteration 548: Loss = 0.008403 Iteration 549: Loss = 0.009179 Iteration 550: Loss = 0.007957 Iteration 551: Loss = 0.008330 Iteration 552: Loss = 0.008469 Iteration 553: Loss = 0.008518 Iteration 554: Loss = 0.008526 Iteration 555: Loss = 0.009003 Iteration 556: Loss = 0.008353 Iteration 557: Loss = 0.008312 Iteration 558: Loss = 0.007636 Iteration 559: Loss = 0.009708 Iteration 560: Loss = 0.009777 Iteration 560: Loss = 0.009777 Iteration 561: Loss = 0.009097 Iteration 562: Loss = 0.008599 Iteration 563: Loss = 0.007512 Iteration 564: Loss = 0.007658 Iteration 565: Loss = 0.008039 Iteration 566: Loss = 0.009143 Iteration 567: Loss = 0.009755 Iteration 568: Loss = 0.008717 Iteration 569: Loss = 0.008017 Iteration 570: Loss = 0.008209 Iteration 571: Loss = 0.008335 Iteration 572: Loss = 0.007922 Iteration 573: Loss = 0.007902 Iteration 574: Loss = 0.008508 Iteration 575: Loss = 0.009337 Iteration 576: Loss = 0.009408 Iteration 577: Loss = 0.007753 Iteration 578: Loss = 0.007548 Iteration 579: Loss = 0.008562 Iteration 580: Loss = 0.008378 Iteration 580: Loss = 0.008378 Iteration 581: Loss = 0.009594 Iteration 582: Loss = 0.008349 Iteration 583: Loss = 0.009354 Iteration 584: Loss = 0.007789 Iteration 585: Loss = 0.008877 Iteration 586: Loss = 0.007666 Iteration 587: Loss = 0.008615 Iteration 588: Loss = 0.009326 Iteration 589: Loss = 0.008860 Iteration 590: Loss = 0.007646 Iteration 591: Loss = 0.008616 Iteration 592: Loss = 0.008547 Iteration 593: Loss = 0.007998 Iteration 594: Loss = 0.007806 Iteration 595: Loss = 0.008146 Iteration 596: Loss = 0.009466 Iteration 597: Loss = 0.008481 Iteration 598: Loss = 0.008063 Iteration 599: Loss = 0.007682 Iteration 600: Loss = 0.008408 Iteration 600: Loss = 0.008408 Iteration 601: Loss = 0.008429 Training completed after 601 iterations! Results: Training MSE: 0.6206 | MAE: 0.6083 | R²: 0.9074 Test MSE: 0.7941 | MAE: 0.6847 | R²: 0.8741
In [7]:
# VISUALIZATION
fig = plt.figure(figsize=(15, 10))
# Graph 1: Main Results
ax1 = plt.subplot(2, 3, 1)
ax1.scatter(X_train_original, y_train_original, alpha=0.6, label='Training data', color='#317AC1', s=50)
ax1.scatter(X_test_original, y_test_original, alpha=0.6, label='Test data', color='#468F5E', s=50)
ax1.plot(X_dense, y_dense_pred_original, '#BE3F12', linewidth=2, label='Quantum prediction')
if use_list==False:
ax1.plot(X_dense, y_true_dense, '#C0B5AB', linewidth=1.5, alpha=0.5, label='True function')
ax1.set_xlabel('X', fontsize=11)
ax1.set_ylabel('y', fontsize=11)
ax1.set_title('Quantum regression of my list', fontsize=12)
ax1.legend()
ax1.grid(True, alpha=0.3)
# Graph 2: Loss Progress
ax2 = plt.subplot(2, 3, 2)
ax2.plot(loss_values, '#57818A', linewidth=2)
ax2.set_xlabel('Iteration', fontsize=11)
ax2.set_ylabel('Loss', fontsize=11)
ax2.set_title('Optimization Progress', fontsize=12)
ax2.grid(True, alpha=0.3)
ax2.set_yscale('log')
# Graph 3: Residuals
ax3 = plt.subplot(2, 3, 3)
train_residuals = y_train_original - y_train_pred_original
test_residuals = y_test_original - y_test_pred_original
ax3.scatter(X_train_original, train_residuals, alpha=0.6, label='Training', color='#317AC1')
ax3.scatter(X_test_original, test_residuals, alpha=0.6, label='Test', color='#468F5E')
ax3.axhline(y=0, color='#BE3F12', linestyle='--', alpha=0.5)
ax3.set_xlabel('X', fontsize=11)
ax3.set_ylabel('Residuals', fontsize=11)
ax3.set_title('Residual Analysis', fontsize=12)
ax3.legend()
ax3.grid(True, alpha=0.3)
# Graph 4: Prediction vs Actual
ax4 = plt.subplot(2, 3, 4)
all_true = np.concatenate([y_train_original, y_test_original])
all_pred = np.concatenate([y_train_pred_original, y_test_pred_original])
ax4.scatter(y_train_original, y_train_pred_original, alpha=0.6, label='Training', color='#317AC1')
ax4.scatter(y_test_original, y_test_pred_original, alpha=0.6, label='Test', color='#468F5E')
min_val, max_val = all_true.min(), all_true.max()
ax4.plot([min_val, max_val], [min_val, max_val], '#BE3F12', alpha=0.5)
ax4.set_xlabel('Actual values', fontsize=11)
ax4.set_ylabel('Predicted values', fontsize=11)
ax4.set_title('Prediction vs. Actual', fontsize=12)
ax4.legend()
ax4.grid(True, alpha=0.3)
# Graph 5: Error Along X
ax5 = plt.subplot(2, 3, 5)
error_dense = np.abs(y_dense_pred_original - y_true_dense.ravel())
ax5.plot(X_dense, error_dense, '#BE3F12', linewidth=2)
ax5.fill_between(X_dense.ravel(), 0, error_dense, alpha=0.3, color='#BE3F12')
ax5.set_xlabel('X', fontsize=11)
ax5.set_ylabel('Absolute error', fontsize=11)
ax5.set_title('Spatial distribution of error', fontsize=12)
ax5.grid(True, alpha=0.3)
# Graph 6: Error Histogram
ax6 = plt.subplot(2, 3, 6)
all_residuals = np.concatenate([train_residuals, test_residuals])
ax6.hist(all_residuals, bins=15, alpha=0.7, color='#BE3F12', edgecolor='black')
ax6.axvline(x=0, color='#BE3F12', linestyle='--', alpha=0.5)
ax6.set_xlabel('Residuals', fontsize=11)
ax6.set_ylabel('Frequency', fontsize=11)
ax6.set_title(f'Error distribution (μ={np.mean(all_residuals):.3f})', fontsize=12)
ax6.grid(True, alpha=0.3)
plt.suptitle(f'Quantum regression: {num_qubits} qubits, {len(weight_params)} parameters, {optimizer.__class__.__name__}',
fontsize=13, y=1.02)
plt.tight_layout()
plt.show()
In [8]:
# VISUALIZATION OF THE QUANTUM CIRCUIT
print("STRUCTURE OF THE QUANTUM CIRCUIT:")
print("-"*50)
# Creating an instance of the circuit with specific values for visualization
example_circuit = qc.assign_parameters({**{p: 0.5 for p in input_params},
**{p: 0.1 for p in weight_params}})
print(example_circuit.draw('text'))
# DIAGNOSTICS
print("DIAGNOSTICS:")
print("-"*50)
print(f"Final loss: {loss_values[-1] if loss_values else 'N/A':.6f}")
print(f"Average absolute prediction error: {np.mean(error_dense):.4f}")
print(f"Maximum absolute error: {np.max(error_dense):.4f}")
print(f"Standard deviation of residuals: {np.std(all_residuals):.4f}")
print("Quantum circuit:")
print("-"*50)
print(qc.decompose().draw('text'))
STRUCTURE OF THE QUANTUM CIRCUIT:
--------------------------------------------------
┌─────────┐ ┌─────────┐┌─────────┐┌─────────┐ ┌───┐┌─────────┐»
q_0: ┤ Ry(π/2) ├──■──┤ Ry(π/2) ├┤ Ry(0.1) ├┤ Rz(0.1) ├──■──┤ X ├┤ Ry(0.1) ├»
└┬───────┬┘┌─┴─┐└┬───────┬┘├─────────┤├─────────┤┌─┴─┐└─┬─┘├─────────┤»
q_1: ─┤ Ry(π) ├─┤ X ├─┤ Ry(π) ├─┤ Ry(0.1) ├┤ Rz(0.1) ├┤ X ├──■──┤ Ry(0.1) ├»
└───────┘ └───┘ └───────┘ └─────────┘└─────────┘└───┘ └─────────┘»
« ┌─────────┐ ┌───┐┌─────────┐┌─────────┐
«q_0: ┤ Rz(0.1) ├──■──┤ X ├┤ Ry(0.1) ├┤ Rz(0.1) ├
« ├─────────┤┌─┴─┐└─┬─┘├─────────┤├─────────┤
«q_1: ┤ Rz(0.1) ├┤ X ├──■──┤ Ry(0.1) ├┤ Rz(0.1) ├
« └─────────┘└───┘ └─────────┘└─────────┘
DIAGNOSTICS:
--------------------------------------------------
Final loss: 0.008429
Average absolute prediction error: 2.8978
Maximum absolute error: 7.8583
Standard deviation of residuals: 0.8121
Quantum circuit:
--------------------------------------------------
global phase: -θ[11]/2 - θ[1]/2 - θ[3]/2 - θ[5]/2 - θ[7]/2 - θ[9]/2
┌───────────────┐ ┌───────────────┐ ┌─────────────┐┌──────────┐ »
q_0: ┤ R(π*x[0],π/2) ├───■──┤ R(π*x[0],π/2) ├─┤ R(θ[0],π/2) ├┤ U1(θ[1]) ├──■──»
├───────────────┴┐┌─┴─┐├───────────────┴┐├─────────────┤├──────────┤┌─┴─┐»
q_1: ┤ R(2π*x[0],π/2) ├┤ X ├┤ R(2π*x[0],π/2) ├┤ R(θ[2],π/2) ├┤ U1(θ[3]) ├┤ X ├»
└────────────────┘└───┘└────────────────┘└─────────────┘└──────────┘└───┘»
« ┌───┐┌─────────────┐┌──────────┐ ┌───┐┌─────────────┐ ┌──────────┐
«q_0: ┤ X ├┤ R(θ[4],π/2) ├┤ U1(θ[5]) ├──■──┤ X ├┤ R(θ[8],π/2) ├──┤ U1(θ[9]) ├
« └─┬─┘├─────────────┤├──────────┤┌─┴─┐└─┬─┘├─────────────┴┐┌┴──────────┤
«q_1: ──■──┤ R(θ[6],π/2) ├┤ U1(θ[7]) ├┤ X ├──■──┤ R(θ[10],π/2) ├┤ U1(θ[11]) ├
« └─────────────┘└──────────┘└───┘ └──────────────┘└───────────┘
In [ ]: