"""---------------逻辑回归·----------------------------"""
import torch
from torch import nn, optim
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
import time
import numpy as np
import matplotlib.pyplot as plt
# Hyperparameters
batch_size = 32
learning_rate = 1e-3
num_epoches = 1
# 1. Download the MNIST handwritten-digit dataset -----------------------------
"""The operations in torchvision.transforms preprocess the loaded images: for example, randomly cropping a (50, 50) window,
randomly flipping, or taking the central (50, 50) region, and so on. transforms.ToTensor() must always be included,
because it converts a PIL image into a tensor that PyTorch can work with."""
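# A minimal sketch of chaining several transforms with transforms.Compose, only to
# illustrate the API described above; the datasets below use ToTensor() alone, and
# example_transform is a hypothetical name that is not used anywhere else:
example_transform = transforms.Compose([
    transforms.RandomCrop(28, padding=2),  # pad by 2 pixels, then randomly crop back to 28x28
    transforms.ToTensor(),                 # convert the PIL image to a float tensor in [0, 1]
])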
train_dataset = datasets.MNIST(
    root='F:/PycharmProjects/pytorch-beginner-master/02-Logistic Regression/data', train=True, transform=transforms.ToTensor(), download=True)
test_dataset = datasets.MNIST(
    root='F:/PycharmProjects/pytorch-beginner-master/02-Logistic Regression/data', train=False, transform=transforms.ToTensor())
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
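# A quick sanity check (an added illustration, not in the original script): each batch
# from train_loader yields images of shape (batch_size, 1, 28, 28) and integer labels
# of shape (batch_size,).
sample_img, sample_label = next(iter(train_loader))
print('sample batch:', sample_img.size(), sample_label.size())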
# 2. Define the logistic regression model
class Logstic_Regression(nn.Module):
    def __init__(self, in_dim, n_class):
        super(Logstic_Regression, self).__init__()
        self.logstic = nn.Linear(in_dim, n_class)  # in_dim is the input dimensionality, n_class the number of classes

    def forward(self, x):
        out = self.logstic(x)
        return out
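# Note: forward() returns raw logits (no softmax layer), because the
# nn.CrossEntropyLoss used below applies log-softmax internally. If class
# probabilities were needed at inference time, a sketch would be
# F.softmax(model(x), dim=1) for a flattened input batch x (not used here).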
# 3. Instantiate the model
model = Logstic_Regression(28 * 28, 10)  # MNIST images are 28x28
use_gpu = torch.cuda.is_available()  # check whether GPU acceleration is available
if use_gpu:
    model = model.cuda()
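# Note: torch.autograd.Variable is kept below to match the original tutorial style;
# since PyTorch 0.4, plain tensors carry autograd state, so an equivalent modern
# idiom (a sketch, not used here) would be:
#   device = torch.device('cuda' if use_gpu else 'cpu')
#   model = model.to(device)
#   img, label = img.to(device), label.to(device)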
# 4. Define the loss (criterion) and the optimizer
criterion = nn.CrossEntropyLoss()  # per sample: loss(x, class) = -x[class] + log(sum_j exp(x[j]))
optimizer = optim.SGD(model.parameters(), lr=learning_rate)
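# A commonly used alternative optimizer for this task (an assumption, not part of
# the original script) would be Adam, e.g.:
#   optimizer = optim.Adam(model.parameters(), lr=learning_rate)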
# 5. Training
for epoch in range(num_epoches):
    model.train()  # ensure training mode, since model.eval() is called at the end of each epoch
    print('*' * 20)
    print('epoch {}'.format(epoch + 1))
    since = time.time()
    running_loss = 0.0
    running_acc = 0.0
    for i, data in enumerate(train_loader, 1):
        img, label = data
        # print("img.size(0)", img.size(0), "size: ", img.data.size())
        img = img.view(img.size(0), -1)  # flatten each 1x28x28 image into a 784-dim vector
        if use_gpu:
            img = Variable(img).cuda()
            label = Variable(label).cuda()
        else:
            img = Variable(img)
            label = Variable(label)
        # Forward pass
        out = model(img)
        loss = criterion(out, label)  # compute the loss
        running_loss += loss.item() * label.size(0)  # loss.item() is the batch mean, so weight it by the batch size before summing
        _, pred = torch.max(out, 1)  # index of the largest logit in each row = predicted class
        # print("pred: ", pred)
        num_correct = (pred == label).sum()
        running_acc += num_correct.item()
        # Backward pass and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if i % 300 == 0:
            print('[{}/{}] Loss: {:.6f}, Acc: {:.6f}'.format(
                epoch + 1, num_epoches, running_loss / (batch_size * i),
                running_acc / (batch_size * i)))
    print('Finish {} epoch, Loss: {:.6f}, Acc: {:.6f}'.format(
        epoch + 1, running_loss / (len(train_dataset)), running_acc / (len(
            train_dataset))))
    print("loss: ", loss.item())  # mean loss of the last training batch
    # Evaluate on the test set at the end of the epoch
    model.eval()
    eval_loss = 0.
    eval_acc = 0.
    with torch.no_grad():  # no gradients are needed during evaluation
        for data in test_loader:
            img, label = data
            img = img.view(img.size(0), -1)
            if use_gpu:
                img = Variable(img).cuda()
                label = Variable(label).cuda()
            else:
                img = Variable(img)
                label = Variable(label)
            out = model(img)
            loss = criterion(out, label)
            eval_loss += loss.item() * label.size(0)
            _, pred = torch.max(out, 1)
            num_correct = (pred == label).sum()
            eval_acc += num_correct.item()
    print('Test Loss: {:.6f}, Acc: {:.6f}'.format(eval_loss / (len(
        test_dataset)), eval_acc / (len(test_dataset))))
    print('Time:{:.1f} s'.format(time.time() - since))
    print()
# 6. Save the trained model parameters
torch.save(model.state_dict(), 'F:/PycharmProjects/pytorch-beginner-master/02-Logistic Regression/logstic.pth')
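# A minimal sketch of restoring the saved weights later for inference (assumes the
# same path and model definition as above; not part of the original script):
#   model = Logstic_Regression(28 * 28, 10)
#   model.load_state_dict(torch.load('F:/PycharmProjects/pytorch-beginner-master/02-Logistic Regression/logstic.pth'))
#   model.eval()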