Bootstrap

Pytorch深度学习-优化器(小土堆)

  1. API
#基类
#params (iterable) – 由 `torch.Tensor` 或 `dict` 组成的可迭代对象,指定需要优化的张量。
#defaults (Dict[str, Any]) – 字典,包含各优化选项的默认值(当某个参数组未显式指定这些选项时使用)。
torch.optim.Optimizer(params, defaults)
#可以将Optimizer换为各种算法
  1. 代码示例
import torch  
from torch import nn,optim  
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear  
import torchvision  
from torch.utils.data import DataLoader  
  
  
# Load the CIFAR-10 test split as tensors, downloading to ./dataset on first run.
dataset = torchvision.datasets.CIFAR10("./dataset",train=False,transform=torchvision.transforms.ToTensor(),download=True)  
  
# Batch the images 4096 at a time (no shuffling — DataLoader default).
dataloader = DataLoader(dataset=dataset,batch_size=4096)  
class seq(nn.Module):
    """CIFAR-10 classifier: three conv/pool stages followed by two linear layers."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # Input is (N, 3, 32, 32); every MaxPool2d(2) halves the spatial size,
        # so the tensor reaching Flatten is (N, 64, 4, 4).
        self.model1 = Sequential(
            Conv2d(in_channels=3, out_channels=32, kernel_size=5, padding=2),
            MaxPool2d(kernel_size=2),
            Conv2d(in_channels=32, out_channels=32, kernel_size=5, stride=1, padding=2),
            MaxPool2d(kernel_size=2),
            Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1, padding=2),
            MaxPool2d(kernel_size=2),
            Flatten(),
            Linear(in_features=64 * 4 * 4, out_features=64),
            Linear(in_features=64, out_features=10),
        )

    def forward(self, x):
        """Map a batch of images (N, 3, 32, 32) to raw class logits (N, 10)."""
        return self.model1(x)
# Cross-entropy loss for 10-way classification (expects raw logits).
loss = nn.CrossEntropyLoss()
seq_test = seq()
# SGD over all network parameters with learning rate 0.01.
# Named `optimizer` so it does not shadow the `optim` module imported above.
optimizer = torch.optim.SGD(seq_test.parameters(), lr=0.01)
for epoch in range(2):
    running_loss = 0.0
    for data in dataloader:
        imgs, targets = data
        outputs = seq_test(imgs)
        results = loss(outputs, targets)
        # Gradients accumulate by default; clear them before each backward pass.
        optimizer.zero_grad()
        # Populate .grad on every parameter.
        results.backward()
        # Apply one SGD update step.
        optimizer.step()
        # .item() extracts a plain float — without it, the accumulator would
        # keep every batch's autograd graph alive and leak memory.
        running_loss = running_loss + results.item()
        print(running_loss)