
Automatic Gradients (torch.autograd and Variable)

1. torch.autograd and Variable

The torch.autograd package provides automatic differentiation: wrapping a tensor in a Variable records the operations performed on it, so the gradient of every node in the resulting computation graph can be obtained with a single call to backward().

import torch
from torch.autograd import Variable

batch_n = 100        # batch size
hidden_layer = 100   # hidden layer width
input_data = 1000    # input feature dimension
output_data = 10     # output dimension

# Inputs and targets do not need gradients
x = Variable(torch.randn(batch_n, input_data), requires_grad=False)
y = Variable(torch.randn(batch_n, output_data), requires_grad=False)

# The weights are updated during training, so they do need gradients
w1 = Variable(torch.randn(input_data, hidden_layer), requires_grad=True)
w2 = Variable(torch.randn(hidden_layer, output_data), requires_grad=True)

epoch_n = 20
learning_rate = 1e-6

for epoch in range(epoch_n):
    # Forward pass: linear -> ReLU (clamp at 0) -> linear
    y_pred = x.mm(w1).clamp(min=0).mm(w2)
    loss = (y_pred - y).pow(2).sum()
    print("Epoch:{},Loss:{:.4f}".format(epoch, loss.data))

    # Automatically compute the gradient of every node in the graph
    # and retain the ones that were requested
    loss.backward()

    # Gradient-descent update on the raw tensors
    w1.data -= learning_rate * w1.grad.data
    w2.data -= learning_rate * w2.grad.data

    # Zero the parameter gradients so they do not accumulate
    # into the next iteration
    w1.grad.data.zero_()
    w2.grad.data.zero_()
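
Note that since PyTorch 0.4 the Variable wrapper is deprecated and has been merged into Tensor, so requires_grad can be set on tensors directly. A minimal sketch of the modern equivalent, reusing the shapes and hyperparameters above (torch.no_grad() replaces the .data manipulation; this is an updated rewrite, not the original listing):

import torch

x = torch.randn(100, 1000)                         # inputs: no gradient tracking
y = torch.randn(100, 10)                           # targets: no gradient tracking
w1 = torch.randn(1000, 100, requires_grad=True)    # weights tracked by autograd
w2 = torch.randn(100, 10, requires_grad=True)

learning_rate = 1e-6
for epoch in range(20):
    y_pred = x.mm(w1).clamp(min=0).mm(w2)          # same forward pass as above
    loss = (y_pred - y).pow(2).sum()
    print("Epoch:{},Loss:{:.4f}".format(epoch, loss.item()))
    loss.backward()
    with torch.no_grad():                          # update outside the autograd graph
        w1 -= learning_rate * w1.grad
        w2 -= learning_rate * w2.grad
        w1.grad.zero_()                            # clear gradients before the next step
        w2.grad.zero_()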

2. Defining a Custom Propagation Function

Subclassing torch.nn.Module lets us define the forward propagation ourselves; the backward propagation is still derived automatically by autograd.

import torch
from torch.autograd import Variable

batch_n = 64
hidden_layer = 100
input_data = 1000
output_data = 10

class Model(torch.nn.Module):
    def __init__(self):
        # Initialize the parent nn.Module
        super(Model, self).__init__()

    def forward(self, input, w1, w2):
        # The forward function implements the matrix operations
        # of the model's forward pass
        x = torch.mm(input, w1)
        x = torch.clamp(x, min=0)   # ReLU
        x = torch.mm(x, w2)
        return x

    def backward(self):
        # No explicit backward is needed here: autograd derives the
        # backward pass from the operations used in forward
        pass

model = Model()

# Training and parameter optimization

x = Variable(torch.randn(batch_n, input_data), requires_grad=False)
y = Variable(torch.randn(batch_n, output_data), requires_grad=False)

w1 = Variable(torch.randn(input_data, hidden_layer), requires_grad=True)
w2 = Variable(torch.randn(hidden_layer, output_data), requires_grad=True)

epoch_n = 30
learning_rate = 1e-6

for epoch in range(epoch_n):

    y_pred = model(x, w1, w2)   # calling the instance invokes forward()

    loss = (y_pred - y).pow(2).sum()
    print("Epoch:{},Loss:{:.4f}".format(epoch, loss.data))
    loss.backward()

    w1.data -= learning_rate * w1.grad.data
    w2.data -= learning_rate * w2.grad.data

    w1.grad.data.zero_()
    w2.grad.data.zero_()
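
The manual .data updates above can also be delegated to a torch.optim optimizer. A minimal sketch, assuming the Model instance and the tensors defined above (the choice of SGD here is illustrative, not from the original):

import torch

# Any torch.optim optimizer works the same way; SGD matches the manual updates
optimizer = torch.optim.SGD([w1, w2], lr=1e-6)

for epoch in range(epoch_n):
    y_pred = model(x, w1, w2)
    loss = (y_pred - y).pow(2).sum()
    optimizer.zero_grad()   # replaces w1.grad.data.zero_() / w2.grad.data.zero_()
    loss.backward()
    optimizer.step()        # replaces the manual w.data -= lr * w.grad.data updates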
