Note
Go to the end to download the full example code.
Dual OT solvers for entropic and quadratic regularized OT with Pytorch
# Author: Remi Flamary <remi.flamary@polytechnique.edu>
#
# License: MIT License
# sphinx_gallery_thumbnail_number = 3
import numpy as np
import matplotlib.pyplot as pl
import torch
import ot
import ot.plot
Data generation
# Fix the RNG seed so the example is reproducible.
torch.manual_seed(1)

# Problem size and simulated-data parameters.
n_source_samples = 100
n_target_samples = 100
theta = 2 * np.pi / 20  # rotation angle between source and target
noise_level = 0.1

# Draw two 2-class Gaussian datasets; the target one is rotated by theta.
Xs, ys = ot.datasets.make_data_classif("gaussrot", n_source_samples, nz=noise_level)
Xt, yt = ot.datasets.make_data_classif(
    "gaussrot", n_target_samples, theta=theta, nz=noise_level
)

# one of the target mode changes its variance (no linear mapping)
Xt[yt == 2] *= 3
Xt = Xt + 4
Plot data
# Visualize the raw source and target point clouds side by side.
pl.figure(1, (10, 5))
pl.clf()
for pts, mark, lbl in ((Xs, "+", "Source samples"), (Xt, "o", "Target samples")):
    pl.scatter(pts[:, 0], pts[:, 1], marker=mark, label=lbl)
pl.legend(loc=0)
pl.title("Source and target distributions")

Text(0.5, 1.0, 'Source and target distributions')
Convert data to torch tensors
# Wrap the numpy sample arrays as torch tensors for the autograd solvers.
xs, xt = torch.tensor(Xs), torch.tensor(Xt)
Estimate dual variables for entropic OT
# Dual potentials of the entropic OT problem, optimized by gradient ascent.
# u lives on the source samples, v on the target samples.
u = torch.randn(n_source_samples, requires_grad=True)
# Fix: v must have n_target_samples entries (the original allocated
# n_source_samples, which only worked because both counts are 100 here).
v = torch.randn(n_target_samples, requires_grad=True)

reg = 0.5  # entropic regularization strength

optimizer = torch.optim.Adam([u, v], lr=1)

# number of iteration
n_iter = 200

losses = []
for i in range(n_iter):
    # minus because we maximize the dual loss
    loss = -ot.stochastic.loss_dual_entropic(u, v, xs, xt, reg=reg)
    losses.append(float(loss.detach()))

    if i % 10 == 0:
        print("Iter: {:3d}, loss={}".format(i, losses[-1]))

    loss.backward()
    optimizer.step()
    optimizer.zero_grad()

# Convergence curve of the (negated) dual objective.
pl.figure(2)
pl.plot(losses)
pl.grid()
pl.title("Dual objective (negative)")
pl.xlabel("Iterations")

# Recover the transport plan implied by the optimized dual potentials.
Ge = ot.stochastic.plan_dual_entropic(u, v, xs, xt, reg=reg)

Iter: 0, loss=0.20204949002247313
Iter: 10, loss=-19.52270325337508
Iter: 20, loss=-31.389588903066674
Iter: 30, loss=-35.17271642029748
Iter: 40, loss=-37.54013717758049
Iter: 50, loss=-38.818876139890186
Iter: 60, loss=-39.24604111414357
Iter: 70, loss=-39.4001188296349
Iter: 80, loss=-39.47392845162961
Iter: 90, loss=-39.49375454907627
Iter: 100, loss=-39.507535183315305
Iter: 110, loss=-39.5126151212976
Iter: 120, loss=-39.51579555753386
Iter: 130, loss=-39.51813355564716
Iter: 140, loss=-39.519658199962244
Iter: 150, loss=-39.52083895928685
Iter: 160, loss=-39.52174003356729
Iter: 170, loss=-39.52244590134577
Iter: 180, loss=-39.52300325765215
Iter: 190, loss=-39.5234469263669
Plot the estimated entropic OT plan
# Overlay the entropic transport plan (as sample-to-sample edges) on the data.
pl.figure(3, (10, 5))
pl.clf()
ot.plot.plot2D_samples_mat(Xs, Xt, Ge.detach().numpy(), alpha=0.1)
for pts, mark, lbl in ((Xs, "+", "Source samples"), (Xt, "o", "Target samples")):
    pl.scatter(pts[:, 0], pts[:, 1], marker=mark, label=lbl, zorder=2)
pl.legend(loc=0)
pl.title("Source and target distributions")

Text(0.5, 1.0, 'Source and target distributions')
Estimate dual variables for quadratic OT
# Dual potentials of the quadratically regularized OT problem.
# u lives on the source samples, v on the target samples.
u = torch.randn(n_source_samples, requires_grad=True)
# Fix: v must have n_target_samples entries (the original allocated
# n_source_samples, which only worked because both counts are 100 here).
v = torch.randn(n_target_samples, requires_grad=True)

reg = 0.01  # quadratic regularization strength

optimizer = torch.optim.Adam([u, v], lr=1)

# number of iteration
n_iter = 200

losses = []
for i in range(n_iter):
    # minus because we maximize the dual loss
    loss = -ot.stochastic.loss_dual_quadratic(u, v, xs, xt, reg=reg)
    losses.append(float(loss.detach()))

    if i % 10 == 0:
        print("Iter: {:3d}, loss={}".format(i, losses[-1]))

    loss.backward()
    optimizer.step()
    optimizer.zero_grad()

# Convergence curve of the (negated) dual objective.
pl.figure(4)
pl.plot(losses)
pl.grid()
pl.title("Dual objective (negative)")
pl.xlabel("Iterations")

# Recover the transport plan implied by the optimized dual potentials.
Gq = ot.stochastic.plan_dual_quadratic(u, v, xs, xt, reg=reg)

Iter: 0, loss=-0.0018442196020623663
Iter: 10, loss=-19.547285885712565
Iter: 20, loss=-31.057615403590688
Iter: 30, loss=-34.89097651180298
Iter: 40, loss=-37.29103437586375
Iter: 50, loss=-38.569937077546115
Iter: 60, loss=-39.02825856308446
Iter: 70, loss=-39.24808806518085
Iter: 80, loss=-39.33623605652886
Iter: 90, loss=-39.369584545680794
Iter: 100, loss=-39.38703972407801
Iter: 110, loss=-39.39412287103031
Iter: 120, loss=-39.39875320026663
Iter: 130, loss=-39.40171357789038
Iter: 140, loss=-39.403580603792
Iter: 150, loss=-39.404816930818924
Iter: 160, loss=-39.40573701839666
Iter: 170, loss=-39.40645795658864
Iter: 180, loss=-39.407026894168844
Iter: 190, loss=-39.40748112780192
Plot the estimated quadratic OT plan
# Overlay the (sparser) quadratic transport plan on the data.
pl.figure(5, (10, 5))
pl.clf()
ot.plot.plot2D_samples_mat(Xs, Xt, Gq.detach().numpy(), alpha=0.1)
for pts, mark, lbl in ((Xs, "+", "Source samples"), (Xt, "o", "Target samples")):
    pl.scatter(pts[:, 0], pts[:, 1], marker=mark, label=lbl, zorder=2)
pl.legend(loc=0)
pl.title("OT plan with quadratic regularization")

Text(0.5, 1.0, 'OT plan with quadratic regularization')
Total running time of the script: (0 minutes 9.961 seconds)