PyTorch Backend Example: Matching Image Keypoints by Graph Matching Neural Networks
This example shows how to match image keypoints with a neural-network-based graph matching solver. These graph matching solvers are designed to match two individual graphs. The matched images can then be passed on to downstream tasks.
# Author: Runzhong Wang <runzhong.wang@sjtu.edu.cn>
#
# License: Mulan PSL v2 License
import torch # pytorch backend
import torchvision # CV models
import pygmtools as pygm
import matplotlib.pyplot as plt # for plotting
from matplotlib.patches import ConnectionPatch # for plotting matching result
import scipy.io as sio # for loading .mat file
import scipy.spatial as spa # for Delaunay triangulation
from sklearn.decomposition import PCA as PCAdimReduc
import itertools
import numpy as np
from PIL import Image
pygm.set_backend('pytorch') # set default backend for pygmtools
Predicting Matching by Graph Matching Neural Networks
In this section, we show how to do predictions (inference) with graph matching neural networks. Let's take PCA-GM (pca_gm()) as an example.
Load the images
Images are from the Willow Object Class dataset (this dataset is also available in the pygmtools benchmark; see WillowObject).
The images are resized to 256x256.
obj_resize = (256, 256)
img1 = Image.open('../data/willow_duck_0001.png')
img2 = Image.open('../data/willow_duck_0002.png')
kpts1 = torch.tensor(sio.loadmat('../data/willow_duck_0001.mat')['pts_coord'])
kpts2 = torch.tensor(sio.loadmat('../data/willow_duck_0002.mat')['pts_coord'])
kpts1[0] = kpts1[0] * obj_resize[0] / img1.size[0]
kpts1[1] = kpts1[1] * obj_resize[1] / img1.size[1]
kpts2[0] = kpts2[0] * obj_resize[0] / img2.size[0]
kpts2[1] = kpts2[1] * obj_resize[1] / img2.size[1]
img1 = img1.resize(obj_resize, resample=Image.BILINEAR)
img2 = img2.resize(obj_resize, resample=Image.BILINEAR)
torch_img1 = torch.from_numpy(np.array(img1, dtype=np.float32) / 256).permute(2, 0, 1).unsqueeze(0) # shape: BxCxHxW
torch_img2 = torch.from_numpy(np.array(img2, dtype=np.float32) / 256).permute(2, 0, 1).unsqueeze(0) # shape: BxCxHxW
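As a quick shape check (our addition, not part of the original example): the keypoint annotations are 2xN tensors with x coordinates in row 0 and y coordinates in row 1 (each Willow image is annotated with 10 keypoints), and the image tensors are BxCxHxW:
print(kpts1.shape)       # expected: torch.Size([2, 10]), i.e. 2xN
print(torch_img1.shape)  # expected: torch.Size([1, 3, 256, 256]), i.e. BxCxHxW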
Visualize the images and keypoints
def plot_image_with_graph(img, kpt, A=None):
    plt.imshow(img)
    plt.scatter(kpt[0], kpt[1], c='w', edgecolors='k')
    if A is not None:
        for idx in torch.nonzero(A, as_tuple=False):
            plt.plot((kpt[0, idx[0]], kpt[0, idx[1]]), (kpt[1, idx[0]], kpt[1, idx[1]]), 'k-')
plt.figure(figsize=(8, 4))
plt.subplot(1, 2, 1)
plt.title('Image 1')
plot_image_with_graph(img1, kpts1)
plt.subplot(1, 2, 2)
plt.title('Image 2')
plot_image_with_graph(img2, kpts2)

Build the Graphs
The graph structures are built based on the geometric structure of the keypoint sets. In this example, we use Delaunay triangulation.
def delaunay_triangulation(kpt):
    d = spa.Delaunay(kpt.numpy().transpose())
    A = torch.zeros(len(kpt[0]), len(kpt[0]))
    for simplex in d.simplices:
        for pair in itertools.permutations(simplex, 2):
            A[pair] = 1
    return A
A1 = delaunay_triangulation(kpts1)
A2 = delaunay_triangulation(kpts2)
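The adjacency matrix from Delaunay triangulation should be symmetric with an empty diagonal; a minimal sanity check (our addition) confirms this:
assert torch.equal(A1, A1.t()) and A1.diagonal().sum() == 0
print(f'graph 1 has {int(A1.sum().item()) // 2} undirected edges over {A1.shape[0]} nodes')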
Visualize the graphs
plt.figure(figsize=(8, 4))
plt.subplot(1, 2, 1)
plt.title('Image 1 with Graphs')
plot_image_with_graph(img1, kpts1, A1)
plt.subplot(1, 2, 2)
plt.title('Image 2 with Graphs')
plot_image_with_graph(img2, kpts2, A2)

Extract Node Features via CNN
Deep graph matching solvers can be fused with CNN feature extractors to build an end-to-end learning pipeline.
In this example, we adopt deep graph solvers based on matching two individual graphs.
The image features are extracted from two intermediate layers of the VGG16 CNN model, following existing deep graph matching papers (e.g. pca_gm()).
Let us first fetch and download the VGG16 model:
vgg16_cnn = torchvision.models.vgg16_bn(True) # pretrained on ImageNet; newer torchvision versions prefer the weights= argument
The list of layers in VGG16:
print(vgg16_cnn.features)
Sequential(
(0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(inplace=True)
(3): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(4): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(inplace=True)
(6): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(7): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(8): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(9): ReLU(inplace=True)
(10): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(11): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(12): ReLU(inplace=True)
(13): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(14): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(15): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(16): ReLU(inplace=True)
(17): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(18): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(19): ReLU(inplace=True)
(20): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(21): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(22): ReLU(inplace=True)
(23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(24): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(25): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(26): ReLU(inplace=True)
(27): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(28): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(29): ReLU(inplace=True)
(30): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(31): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(32): ReLU(inplace=True)
(33): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(34): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(35): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(36): ReLU(inplace=True)
(37): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(38): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(39): ReLU(inplace=True)
(40): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(41): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(42): ReLU(inplace=True)
(43): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
Let's define the CNN feature extractor, which outputs the features of layer (30) and layer (37):
class CNNNet(torch.nn.Module):
    def __init__(self, vgg16_module):
        super(CNNNet, self).__init__()
        # The naming of the layers follows the ThinkMatch convention to load pretrained models.
        self.node_layers = torch.nn.Sequential(*[_ for _ in vgg16_module.features[:31]])
        self.edge_layers = torch.nn.Sequential(*[_ for _ in vgg16_module.features[31:38]])

    def forward(self, inp_img):
        feat_local = self.node_layers(inp_img)
        feat_global = self.edge_layers(feat_local)
        return feat_local, feat_global
Download the pretrained CNN weights (from ThinkMatch), load the weights, and then extract the CNN features:
cnn = CNNNet(vgg16_cnn)
path = pygm.utils.download('vgg16_pca_voc_pytorch.pt', 'https://drive.google.com/u/0/uc?export=download&confirm=Z-AR&id=1JnX3cSPvRYBSrDKVwByzp7CADgVCJCO_')
if torch.cuda.is_available():
    map_location = torch.device('cuda:0')
else:
    map_location = torch.device('cpu')
cnn.load_state_dict(torch.load(path, map_location=map_location), strict=False)
with torch.set_grad_enabled(False):
    feat1_local, feat1_global = cnn(torch_img1)
    feat2_local, feat2_global = cnn(torch_img2)
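As a sanity check we add here (not in the original example): for a 256x256 input, features[:31] applies three pooling stages and features[31:38] one more, so the two feature maps should be 512x32x32 and 512x16x16 respectively:
print(feat1_local.shape)   # expected: torch.Size([1, 512, 32, 32])
print(feat1_global.shape)  # expected: torch.Size([1, 512, 16, 16])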
Normalize the features
def l2norm(node_feat):
    return torch.nn.functional.local_response_norm(
        node_feat, node_feat.shape[1] * 2, alpha=node_feat.shape[1] * 2, beta=0.5, k=0)
feat1_local = l2norm(feat1_local)
feat1_global = l2norm(feat1_global)
feat2_local = l2norm(feat2_local)
feat2_global = l2norm(feat2_global)
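A note on this trick: with size=2C, alpha=2C, beta=0.5 and k=0, local_response_norm covers all channels and reduces to dividing each pixel's feature vector by its L2 norm across channels. A minimal equivalence sketch (our addition, on a random tensor):
_x = torch.rand(1, 8, 4, 4)
_lrn = torch.nn.functional.local_response_norm(_x, _x.shape[1] * 2, alpha=_x.shape[1] * 2, beta=0.5, k=0)
_l2 = _x / torch.norm(_x, p=2, dim=1, keepdim=True)  # plain channel-wise L2 normalization
print(torch.allclose(_lrn, _l2, atol=1e-6))  # expected: True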
Up-sample the features to the original image size and concatenate them
feat1_local_upsample = torch.nn.functional.interpolate(feat1_local, (obj_resize[1], obj_resize[0]), mode='bilinear')
feat1_global_upsample = torch.nn.functional.interpolate(feat1_global, (obj_resize[1], obj_resize[0]), mode='bilinear')
feat2_local_upsample = torch.nn.functional.interpolate(feat2_local, (obj_resize[1], obj_resize[0]), mode='bilinear')
feat2_global_upsample = torch.nn.functional.interpolate(feat2_global, (obj_resize[1], obj_resize[0]), mode='bilinear')
feat1_upsample = torch.cat((feat1_local_upsample, feat1_global_upsample), dim=1)
feat2_upsample = torch.cat((feat2_local_upsample, feat2_global_upsample), dim=1)
num_features = feat1_upsample.shape[1]
Visualize the extracted CNN features (dimensionality reduced by principal component analysis)
pca_dim_reduc = PCAdimReduc(n_components=3, whiten=True)
feat_dim_reduc = pca_dim_reduc.fit_transform(
    np.concatenate((
        feat1_upsample.permute(0, 2, 3, 1).reshape(-1, num_features).numpy(),
        feat2_upsample.permute(0, 2, 3, 1).reshape(-1, num_features).numpy()
    ), axis=0)
)
feat_dim_reduc = feat_dim_reduc / np.max(np.abs(feat_dim_reduc), axis=0, keepdims=True) / 2 + 0.5
feat1_dim_reduc = feat_dim_reduc[:obj_resize[0] * obj_resize[1], :]
feat2_dim_reduc = feat_dim_reduc[obj_resize[0] * obj_resize[1]:, :]
plt.figure(figsize=(8, 4))
plt.subplot(1, 2, 1)
plt.title('Image 1 with CNN features')
plot_image_with_graph(img1, kpts1, A1)
plt.imshow(feat1_dim_reduc.reshape(obj_resize[1], obj_resize[0], 3), alpha=0.5)
plt.subplot(1, 2, 2)
plt.title('Image 2 with CNN features')
plot_image_with_graph(img2, kpts2, A2)
plt.imshow(feat2_dim_reduc.reshape(obj_resize[1], obj_resize[0], 3), alpha=0.5)

<matplotlib.image.AxesImage object at 0x7fe196ff8b50>
Extract node features by nearest-neighbor interpolation
rounded_kpts1 = torch.round(kpts1).to(dtype=torch.long)
rounded_kpts2 = torch.round(kpts2).to(dtype=torch.long)
node1 = feat1_upsample[0, :, rounded_kpts1[1], rounded_kpts1[0]].t() # shape: NxC
node2 = feat2_upsample[0, :, rounded_kpts2[1], rounded_kpts2[0]].t() # shape: NxC
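Since rounding keypoint coordinates is a nearest-neighbor lookup, a bilinear alternative (a sketch we add here; it is not used by the rest of the example) could sample the feature map with torch.nn.functional.grid_sample:
_grid = torch.stack((kpts1[0] / (obj_resize[0] - 1), kpts1[1] / (obj_resize[1] - 1)), dim=-1) * 2 - 1
_grid = _grid.reshape(1, 1, -1, 2).to(feat1_upsample.dtype)  # grid_sample expects coordinates in [-1, 1]
node1_bilinear = torch.nn.functional.grid_sample(feat1_upsample, _grid, align_corners=True)[0, :, 0, :].t()  # shape: NxC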
Call the PCA-GM matching model
See pca_gm() for the API reference.
X = pygm.pca_gm(node1, node2, A1, A2, pretrain='voc')
X = pygm.hungarian(X)
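Because the Willow keypoints are annotated in a consistent order, the ground truth matching here is the identity; a one-line accuracy summary (our addition):
acc = (torch.argmax(X, dim=1) == torch.arange(X.shape[0])).float().mean()
print(f'matching accuracy: {acc:.2%}')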
plt.figure(figsize=(8, 4))
plt.suptitle('Image Matching Result by PCA-GM')
ax1 = plt.subplot(1, 2, 1)
plot_image_with_graph(img1, kpts1, A1)
ax2 = plt.subplot(1, 2, 2)
plot_image_with_graph(img2, kpts2, A2)
for i in range(X.shape[0]):
    j = torch.argmax(X[i]).item()
    con = ConnectionPatch(xyA=kpts1[:, i], xyB=kpts2[:, j], coordsA="data", coordsB="data",
                          axesA=ax1, axesB=ax2, color="red" if i != j else "green")
    plt.gca().add_artist(con)

Matching Images with Other Neural Networks
The pipeline above also works for other deep graph matching networks. Here we give examples with ipca_gm() and cie().
Matching by the IPCA-GM model
See ipca_gm() for the API reference.
path = pygm.utils.download('vgg16_ipca_voc_pytorch.pt', 'https://drive.google.com/u/0/uc?export=download&confirm=Z-AR&id=1TGrbSQRmUkClH3Alz2OCwqjl8r8gf5yI')
cnn.load_state_dict(torch.load(path, map_location=map_location), strict=False)
with torch.set_grad_enabled(False):
    feat1_local, feat1_global = cnn(torch_img1)
    feat2_local, feat2_global = cnn(torch_img2)
Normalize the features
def l2norm(node_feat):
    return torch.nn.functional.local_response_norm(
        node_feat, node_feat.shape[1] * 2, alpha=node_feat.shape[1] * 2, beta=0.5, k=0)
feat1_local = l2norm(feat1_local)
feat1_global = l2norm(feat1_global)
feat2_local = l2norm(feat2_local)
feat2_global = l2norm(feat2_global)
Up-sample the features to the original image size and concatenate them
feat1_local_upsample = torch.nn.functional.interpolate(feat1_local, (obj_resize[1], obj_resize[0]), mode='bilinear')
feat1_global_upsample = torch.nn.functional.interpolate(feat1_global, (obj_resize[1], obj_resize[0]), mode='bilinear')
feat2_local_upsample = torch.nn.functional.interpolate(feat2_local, (obj_resize[1], obj_resize[0]), mode='bilinear')
feat2_global_upsample = torch.nn.functional.interpolate(feat2_global, (obj_resize[1], obj_resize[0]), mode='bilinear')
feat1_upsample = torch.cat((feat1_local_upsample, feat1_global_upsample), dim=1)
feat2_upsample = torch.cat((feat2_local_upsample, feat2_global_upsample), dim=1)
num_features = feat1_upsample.shape[1]
Extract node features by nearest-neighbor interpolation
rounded_kpts1 = torch.round(kpts1).to(dtype=torch.long)
rounded_kpts2 = torch.round(kpts2).to(dtype=torch.long)
node1 = feat1_upsample[0, :, rounded_kpts1[1], rounded_kpts1[0]].t() # shape: NxC
node2 = feat2_upsample[0, :, rounded_kpts2[1], rounded_kpts2[0]].t() # shape: NxC
Build edge features as edge lengths
# kpts are 2xN, so pairwise distances are taken between the point dimensions and
# the norm is reduced over the coordinate dimension (dim=0), as in the CIE section
# below. Note that Q1/Q2 are not consumed by the ipca_gm() call in this example,
# which only takes node features.
kpts1_dis = (kpts1.unsqueeze(1) - kpts1.unsqueeze(2))
kpts1_dis = torch.norm(kpts1_dis, p=2, dim=0).detach()
kpts2_dis = (kpts2.unsqueeze(1) - kpts2.unsqueeze(2))
kpts2_dis = torch.norm(kpts2_dis, p=2, dim=0).detach()
Q1 = torch.exp(-kpts1_dis / obj_resize[0])
Q2 = torch.exp(-kpts2_dis / obj_resize[0])
Call the IPCA-GM matching model
X = pygm.ipca_gm(node1, node2, A1, A2, pretrain='voc')
X = pygm.hungarian(X)
plt.figure(figsize=(8, 4))
plt.suptitle('Image Matching Result by IPCA-GM')
ax1 = plt.subplot(1, 2, 1)
plot_image_with_graph(img1, kpts1, A1)
ax2 = plt.subplot(1, 2, 2)
plot_image_with_graph(img2, kpts2, A2)
for i in range(X.shape[0]):
    j = torch.argmax(X[i]).item()
    con = ConnectionPatch(xyA=kpts1[:, i], xyB=kpts2[:, j], coordsA="data", coordsB="data",
                          axesA=ax1, axesB=ax2, color="red" if i != j else "green")
    plt.gca().add_artist(con)

Matching by the CIE model
See cie() for the API reference.
path = pygm.utils.download('vgg16_cie_voc_pytorch.pt', 'https://drive.google.com/u/0/uc?export=download&confirm=Z-AR&id=1oRwcnw06t1rCbrIN_7p8TJZY-XkBOFEp')
cnn.load_state_dict(torch.load(path, map_location=map_location), strict=False)
with torch.set_grad_enabled(False):
    feat1_local, feat1_global = cnn(torch_img1)
    feat2_local, feat2_global = cnn(torch_img2)
Normalize the features
def l2norm(node_feat):
    return torch.nn.functional.local_response_norm(
        node_feat, node_feat.shape[1] * 2, alpha=node_feat.shape[1] * 2, beta=0.5, k=0)
feat1_local = l2norm(feat1_local)
feat1_global = l2norm(feat1_global)
feat2_local = l2norm(feat2_local)
feat2_global = l2norm(feat2_global)
Up-sample the features to the original image size and concatenate them
feat1_local_upsample = torch.nn.functional.interpolate(feat1_local, (obj_resize[1], obj_resize[0]), mode='bilinear')
feat1_global_upsample = torch.nn.functional.interpolate(feat1_global, (obj_resize[1], obj_resize[0]), mode='bilinear')
feat2_local_upsample = torch.nn.functional.interpolate(feat2_local, (obj_resize[1], obj_resize[0]), mode='bilinear')
feat2_global_upsample = torch.nn.functional.interpolate(feat2_global, (obj_resize[1], obj_resize[0]), mode='bilinear')
feat1_upsample = torch.cat((feat1_local_upsample, feat1_global_upsample), dim=1)
feat2_upsample = torch.cat((feat2_local_upsample, feat2_global_upsample), dim=1)
num_features = feat1_upsample.shape[1]
Extract node features by nearest-neighbor interpolation
rounded_kpts1 = torch.round(kpts1).to(dtype=torch.long)
rounded_kpts2 = torch.round(kpts2).to(dtype=torch.long)
node1 = feat1_upsample[0, :, rounded_kpts1[1], rounded_kpts1[0]].t() # shape: NxC
node2 = feat2_upsample[0, :, rounded_kpts2[1], rounded_kpts2[0]].t() # shape: NxC
Build edge features as edge lengths
kpts1_dis = (kpts1.unsqueeze(1) - kpts1.unsqueeze(2))
kpts1_dis = torch.norm(kpts1_dis, p=2, dim=0).detach()
kpts2_dis = (kpts2.unsqueeze(1) - kpts2.unsqueeze(2))
kpts2_dis = torch.norm(kpts2_dis, p=2, dim=0).detach()
Q1 = torch.exp(-kpts1_dis / obj_resize[0]).unsqueeze(-1).to(dtype=torch.float32)
Q2 = torch.exp(-kpts2_dis / obj_resize[0]).unsqueeze(-1).to(dtype=torch.float32)
Call the CIE matching model
X = pygm.cie(node1, node2, A1, A2, Q1, Q2, pretrain='voc')
X = pygm.hungarian(X)
plt.figure(figsize=(8, 4))
plt.suptitle('Image Matching Result by CIE')
ax1 = plt.subplot(1, 2, 1)
plot_image_with_graph(img1, kpts1, A1)
ax2 = plt.subplot(1, 2, 2)
plot_image_with_graph(img2, kpts2, A2)
for i in range(X.shape[0]):
    j = torch.argmax(X[i]).item()
    con = ConnectionPatch(xyA=kpts1[:, i], xyB=kpts2[:, j], coordsA="data", coordsB="data",
                          axesA=ax1, axesB=ax2, color="red" if i != j else "green")
    plt.gca().add_artist(con)

Training a Deep Graph Matching Model
In this section, we show how to build a deep graph matching model that supports end-to-end training. For the image matching problem considered here, the model is composed of a CNN feature extractor and a learnable matching module. We take the PCA-GM model as an example.
Note
This simple example is intended to show the basic forward and backward passes when training an end-to-end deep graph matching neural network. A more complete deep learning pipeline should also include asynchronous data loaders, batched operations, CUDA support, and so on, all of which are omitted here for simplicity. You may refer to ThinkMatch, a research protocol that includes all of these advanced features.
Let's first define the neural network model. Calling get_network() simply returns the network object.
class GMNet(torch.nn.Module):
    def __init__(self):
        super(GMNet, self).__init__()
        self.gm_net = pygm.utils.get_network(pygm.pca_gm, pretrain=False) # fetch the network object
        self.cnn = CNNNet(vgg16_cnn)

    def forward(self, img1, img2, kpts1, kpts2, A1, A2):
        # CNN feature extractor layers
        feat1_local, feat1_global = self.cnn(img1)
        feat2_local, feat2_global = self.cnn(img2)
        feat1_local = l2norm(feat1_local)
        feat1_global = l2norm(feat1_global)
        feat2_local = l2norm(feat2_local)
        feat2_global = l2norm(feat2_global)
        # upsample feature map
        feat1_local_upsample = torch.nn.functional.interpolate(feat1_local, (obj_resize[1], obj_resize[0]), mode='bilinear')
        feat1_global_upsample = torch.nn.functional.interpolate(feat1_global, (obj_resize[1], obj_resize[0]), mode='bilinear')
        feat2_local_upsample = torch.nn.functional.interpolate(feat2_local, (obj_resize[1], obj_resize[0]), mode='bilinear')
        feat2_global_upsample = torch.nn.functional.interpolate(feat2_global, (obj_resize[1], obj_resize[0]), mode='bilinear')
        feat1_upsample = torch.cat((feat1_local_upsample, feat1_global_upsample), dim=1)
        feat2_upsample = torch.cat((feat2_local_upsample, feat2_global_upsample), dim=1)
        # assign node features
        rounded_kpts1 = torch.round(kpts1).to(dtype=torch.long)
        rounded_kpts2 = torch.round(kpts2).to(dtype=torch.long)
        node1 = feat1_upsample[0, :, rounded_kpts1[1], rounded_kpts1[0]].t() # shape: NxC
        node2 = feat2_upsample[0, :, rounded_kpts2[1], rounded_kpts2[0]].t() # shape: NxC
        # PCA-GM matching layers
        X = pygm.pca_gm(node1, node2, A1, A2, network=self.gm_net) # the network object is reused
        return X

model = GMNet()
Define the optimizer
optim = torch.optim.Adam(model.parameters(), lr=1e-3)
Forward pass
X = model(torch_img1, torch_img2, kpts1, kpts2, A1, A2)
Compute the loss
In this example, the ground truth matching matrix is a diagonal (identity) matrix. We compute the loss function via permutation_loss().
X_gt = torch.eye(X.shape[0])
loss = pygm.utils.permutation_loss(X, X_gt)
print(f'loss={loss:.4f}')
loss=2.9619
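For reference, permutation_loss() computes a binary cross-entropy between the doubly-stochastic prediction X and the 0/1 ground-truth matrix. A manual sketch (our addition, assuming normalization by the number of nodes; the exact normalization used by pygmtools may differ):
bce = torch.nn.functional.binary_cross_entropy(X.clamp(1e-7, 1 - 1e-7), X_gt, reduction='sum') / X.shape[0]
print(f'manual BCE={bce:.4f}')  # expected to be close to the loss above if the assumed normalization matches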
Backward pass
loss.backward()
Visualize the gradients
plt.figure(figsize=(4, 4))
plt.title('Gradient Sizes of PCA-GM and VGG16 layers')
plt.gca().set_xlabel('Layer Index')
plt.gca().set_ylabel('Average Gradient Size')
grad_size = []
for param in model.parameters():
    grad_size.append(torch.abs(param.grad).mean().item())
print(grad_size)
plt.stem(grad_size)

[0.00012030009384034202, 0.0029222508892416954, 0.00019840802997350693, 0.0035125715658068657, 0.00021948321955278516, 0.005211487412452698, 1.2157242963439785e-05, 4.538887151284143e-05, 9.093379776459187e-05, 0.003763300133869052, 0.00013752389349974692, 0.0029113302007317543, 0.0005891890614293516, 1.4391055458418123e-08, 0.00123009888920933, 0.0007960228249430656, 0.00019226272706873715, 7.602498186543016e-09, 0.0023636282421648502, 0.00142657570540905, 0.00027515203692018986, 2.3430166695703747e-09, 0.0014325324445962906, 0.001168975606560707, 0.000243743575992994, 3.208436183399499e-09, 0.002457045018672943, 0.0012053920654579997, 0.00024106756609398872, 9.623200014630129e-10, 0.0019094400340691209, 0.0013064424274489284, 0.00019915227312594652, 1.351938450078194e-09, 0.0020241807214915752, 0.0013774647377431393, 0.00020345798111520708, 1.4647631996211885e-09, 0.0024065959732979536, 0.0011330752167850733, 0.00017511722398921847, 4.4791811926181424e-10, 0.002062384970486164, 0.00124924979172647, 0.00013374598347581923, 5.668324698504534e-10, 0.002199235139414668, 0.0013085041427984834, 0.00012718267680611461, 0.0005019083619117737, 0.0018184399232268333, 0.0009283717372454703, 9.876605327008292e-05, 2.2953100531353243e-10, 0.0017888628644868731, 0.0011573054362088442, 8.91995950951241e-05, 0.0008793459855951369]
<StemContainer object of 3 artists>
Update the model parameters. A deep learning pipeline should iterate the forward and backward passes until convergence.
optim.step()
optim.zero_grad()
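Putting it together, a minimal training loop (a sketch we add here; a real pipeline would draw random image pairs from a data loader instead of reusing one fixed pair) would iterate these steps:
for epoch in range(10):  # illustrative epoch count
    optim.zero_grad()
    X = model(torch_img1, torch_img2, kpts1, kpts2, A1, A2)
    loss = pygm.utils.permutation_loss(X, X_gt)
    loss.backward()
    optim.step()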
Note
This example supports both GPU and CPU, and the online documentation is built on a CPU-only machine. Running this code on a GPU will be significantly more efficient.
Total running time of the script: (0 minutes 29.845 seconds)