import numpy as np
import mindspore as ms
from mindspore import nn, ops, Tensor
from mindquantum.core.circuit import Circuit
from mindquantum.core.operators import Hamiltonian, QubitOperator
from mindquantum.simulator import Simulator
from mindquantum.framework import MQLayer
from mindquantum.core.parameterresolver import PRGenerator
# Encoder: one data-driven RX rotation per qubit.
encoder = Circuit()
alpha = PRGenerator('alpha')
for i in range(4):
    encoder.rx(alpha.new(), i)

# Ansatz: trainable controlled rotations; Circuit.rz/rx take (param, obj_qubit, ctrl_qubit).
ansatz = Circuit()
beta = PRGenerator('beta')
ansatz.rz(beta.new(), 0, 1)
ansatz.rz(beta.new(), 2, 3)
ansatz.rx(beta.new(), 0, 1)
ansatz.rx(beta.new(), 2, 3)
# Note: two consecutive controlled-RZ gates on the same qubit pair merge into a
# single rotation, so one of these two parameters is redundant.
ansatz.rz(beta.new(), 0, 2)
ansatz.rz(beta.new(), 0, 2)
circ = encoder.as_encoder() + ansatz.as_ansatz()
ham = Hamiltonian(QubitOperator('Z0', -1))
sim = Simulator('mqvector', circ.n_qubits)
grad_ops = sim.get_expectation_with_grad(ham, circ)
quantum_layer = MQLayer(grad_ops)
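# Optional sanity check (a minimal sketch, not required for training): MQLayer
# should map a (N, n_encoder_params) batch to (N, 1) expectation values of ham.
_dummy = Tensor(np.random.rand(3, 4).astype(np.float32))  # 3 samples, 4 alpha params
print(quantum_layer(_dummy).shape)  # expected: (3, 1)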
class QuantumConvLayer(nn.Cell):
    def __init__(self, in_channels=1, out_channels=8, kernel_size=2, stride=1):
        super().__init__()
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        # One quantum filter per output channel. Note: `[quantum_layer] * out_channels`
        # would put the SAME MQLayer object (shared weights) in every slot, so all
        # channels would be identical; build an independent layer per channel instead.
        # This relies on the module-level `grad_ops` defined above.
        self.q_layers = nn.CellList([MQLayer(grad_ops) for _ in range(out_channels)])
        self.sigmoid = nn.Sigmoid()
        self.k = kernel_size
        self.s = stride
    def construct(self, x):
        # x shape: (B, C, H, W), e.g. (B, 1, 10, 10)
        b, c, h, w = x.shape
        k, s = self.k, self.s
        # Non-overlapping patches: this reshape scheme assumes kernel_size == stride.
        assert h % s == 0 and w % s == 0, f"Input size ({h}x{w}) must be divisible by stride {s}"
        # Step 1: split the image into a grid of patches.
        # reshape -> (B, C, H//s, s, W//s, s)
        x = x.view(b, c, h // s, s, w // s, s)
        # transpose -> (B, C, H//s, W//s, s, s)
        x = ops.transpose(x, (0, 1, 2, 4, 3, 5))
        # flatten each patch -> (B, C, num_patches_h, num_patches_w, k*k)
        x = x.view(b, c, h // s, w // s, k * k)
        # flatten the spatial grid -> (B, C, L, k*k), L = num_patches = (h//s)*(w//s)
        x = x.view(b, c, -1, k * k)
        # drop the channel dimension (assumes c == 1) -> (B, L, k*k)
        x = x.squeeze(1)
        # merge batch and patch dims -> (B*L, k*k), one row per patch
        patches = x.reshape(-1, k * k)
        # Step 2: run every patch through each quantum filter.
        outputs = []
        for q_layer in self.q_layers:
            out = q_layer(patches)  # (B*L, 1): one expectation value per patch
            outputs.append(out)
        # concatenate channels -> (B*L, out_channels)
        # (ops.stack(outputs, axis=-1) would give (B*L, 1, out_channels) instead)
        y = ops.concat(outputs, axis=1)
        # restore the spatial layout
        h_out = h // self.stride
        w_out = w // self.stride
        y = y.reshape(b, h_out, w_out, self.out_channels)  # (B, h_out, w_out, out_ch)
        # back to NCHW
        y = ops.transpose(y, (0, 3, 1, 2))  # (B, out_ch, h_out, w_out)
        return self.sigmoid(y)
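# Optional shape check (a minimal sketch): a 2x2 / stride-2 quantum convolution over
# a 1@10x10 input should give 8@5x5, i.e. 25 patches per image through 8 filters,
# with each k*k = 4 patch matching the 4 encoder parameters of the circuit.
_qc = QuantumConvLayer(in_channels=1, out_channels=8, kernel_size=2, stride=2)
_x = Tensor(np.random.rand(2, 1, 10, 10).astype(np.float32))
print(_qc(_x).shape)  # expected: (2, 8, 5, 5)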
class QCNN(nn.Cell):
    def __init__(self, num_classes=10):
        super().__init__()
        # Quantum convolution layer: 1@10x10 -> 8@5x5 (kernel=2, stride=2)
        self.qconv = QuantumConvLayer(in_channels=1, out_channels=8, kernel_size=2, stride=2)
        # Flatten before the fully connected head: 8*5*5 = 200
        self.flatten = nn.Flatten()
        self.classifier = nn.SequentialCell([
            nn.Dense(200, 64),
            nn.ReLU(),
            nn.Dense(64, num_classes)
        ])

    def construct(self, x):
        x = self.qconv(x)    # (B, 8, 5, 5)
        x = self.flatten(x)  # (B, 200)
        logits = self.classifier(x)
        return logits
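# Optional end-to-end check (a minimal sketch): logits for a tiny dummy batch.
_net = QCNN()
print(_net(Tensor(np.random.rand(2, 1, 10, 10).astype(np.float32))).shape)  # expected: (2, 10)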
# Build the dataset
from mindspore.dataset import vision, transforms
from mindspore.dataset import MnistDataset
load_img_ops = transforms.Compose([
    vision.Rescale(1.0 / 255.0, 0),
    vision.Resize((10, 10)),
    vision.HWC2CHW()
])
label_transform = transforms.TypeCast(ms.int32)
batch_size = 2500
def datapipe(path, batch_size):
    dataset = MnistDataset(path)
    dataset = dataset.map(load_img_ops, input_columns='image')
    dataset = dataset.map(label_transform, input_columns='label')
    dataset = dataset.batch(batch_size)
    return dataset
train_ds = datapipe('MNIST_Data/train', batch_size=batch_size)
test_ds = datapipe('MNIST_Data/test', batch_size=batch_size)
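# Optional data check (a minimal sketch): confirm one batch has the shapes the
# network expects, (B, 1, 10, 10) float32 images and (B,) int32 labels.
for _img, _lbl in test_ds.create_tuple_iterator():
    print(_img.shape, _img.dtype, _lbl.shape, _lbl.dtype)
    break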
model = QCNN()
epochs = 30
learning_rate = 1e-1
optimizer = nn.SGD(model.trainable_params(), learning_rate=learning_rate, momentum=0.9)
loss_fn = nn.CrossEntropyLoss()
# Define the forward function
def forward_fn(data, label):
    logits = model(data)
    loss = loss_fn(logits, label)
    return loss, logits

# Get the gradient function (gradients w.r.t. the trainable weights only)
grad_fn = ms.value_and_grad(forward_fn, None, optimizer.parameters, has_aux=True)

# Define one training step
def train_step(data, label):
    (loss, _), grads = grad_fn(data, label)
    optimizer(grads)
    return loss
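# Optional timing check (a minimal sketch): time a single step before the full loop.
# With batch_size = 2500, one forward pass simulates 2500 images * 25 patches
# * 8 filters = 500,000 parameterized circuits (plus their gradients), so a long
# silent pause here is expected cost rather than a deadlock.
import time
_data, _label = next(train_ds.create_tuple_iterator())
_t0 = time.time()
print(f"one step: loss={float(train_step(_data, _label).asnumpy()):.4f}, {time.time() - _t0:.1f}s")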
def train_loop(model, dataset):
    size = dataset.get_dataset_size()
    model.set_train()
    for batch, (data, label) in enumerate(dataset.create_tuple_iterator()):
        loss = train_step(data, label)
        # print per-batch progress so long epochs are visibly advancing
        print(f"loss: {float(loss.asnumpy()):.4f} [{batch + 1}/{size}]")
def test_loop(model, dataset, loss_fn):
    num_batches = dataset.get_dataset_size()
    model.set_train(False)
    total, test_loss, correct = 0, 0, 0
    for data, label in dataset.create_tuple_iterator():
        pred = model(data)
        total += len(data)
        test_loss += loss_fn(pred, label).asnumpy()
        correct += (pred.argmax(1) == label).asnumpy().sum()
    test_loss /= num_batches
    correct /= total
    return test_loss, correct
losses = []
accs = []
for epoch in range(epochs):
    train_loop(model, train_ds)
    loss, accuracy = test_loop(model, test_ds, loss_fn)
    print(f"epoch: {epoch + 1}, loss: {loss}, accuracy: {accuracy}")
    losses.append(loss)
    accs.append(accuracy)
print("Done!")
Are there any errors in the code shown above? Why does nothing seem to happen once training finally starts, as if it were stuck in an infinite loop?