
A total variation loss for denoising a 1-D signal in PyTorch
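The script below adds Gaussian noise to a piecewise-constant signal and then recovers a smooth estimate y by gradient descent. As a sketch of the objective (with \tilde{x} the noisy observation, called x_ in the code, and \lambda the weight called lamda), the energy being minimized is

    E(y) = \sum_i \left( y_i - \tilde{x}_i \right)^2 + \lambda \sum_i \left( y_{i+1} - y_i \right)^2

Strictly speaking, the squared differences make the second term a quadratic (Tikhonov-style) smoothness penalty; classical total variation uses the absolute difference |y_{i+1} - y_i| instead, which preserves sharp jumps better.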

import matplotlib.pyplot  # needed for the plotting calls below
import torch

# piecewise-constant test signal
x = torch.FloatTensor([1,1,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3,1,1,1,1,1,0,0,1,1,1,1])
# alternative with a much larger jump:
#x = torch.FloatTensor([1,1,1,1,1,20,20,20,20,20,20,3,3,3,3,3,3,3,3,1,1,1,1,1,0,0,1,1,1,1])
# zero-mean Gaussian noise with standard deviation 0.3
m = torch.distributions.normal.Normal(torch.tensor([0.0]), torch.tensor([0.3]))

# one sample per element of x; squeeze away the trailing singleton dimension
noise = torch.squeeze(m.sample((x.size()[0],)))

x_ = x + noise  # noisy observation (torch.autograd.Variable is deprecated; a plain tensor works)
#matplotlib.pyplot.plot(x_.numpy())
#matplotlib.pyplot.plot(x.numpy())

# the estimate we optimize, initialized to zeros
y = torch.zeros(x_.size(0), requires_grad=True)

#optimizer = torch.optim.Adam([{'params': [y]}])
# signature: torch.optim.Adam(params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)
optimizer = torch.optim.SGD([{'params': [y], 'lr': 1.0e-2}])
# signature: torch.optim.SGD(params, lr=<required parameter>, momentum=0, dampening=0, weight_decay=0, nesterov=False)

lamda = 0.3  # regularization weight; 0.5 gives noticeably stronger smoothing



mse_loss = torch.nn.MSELoss(reduction='sum')  # data-fidelity term

for iter_ in range(5000):
    optimizer.zero_grad()
    # smoothness term: squared differences of neighboring samples
    tv_loss = torch.pow(y[1:] - y[:-1], 2).sum()
    # total energy: data fidelity plus weighted smoothness penalty
    E_ = mse_loss(y, x_) + lamda * tv_loss
    E_.backward()
    optimizer.step()
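To inspect the result, one option is to reuse the plotting calls commented out above and overlay the three signals (y must be detached before converting to NumPy):

    matplotlib.pyplot.plot(x.numpy())           # clean signal
    matplotlib.pyplot.plot(x_.numpy())          # noisy observation
    matplotlib.pyplot.plot(y.detach().numpy())  # recovered estimate
    matplotlib.pyplot.show()

With lamda = 0.3 the estimate should track the plateaus while averaging out the noise; a larger lamda smooths the jumps more aggressively.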