import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
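# Build the XOR truth table: four 2-bit inputs and their 1-bit targets.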
x = [
    [0, 0],
    [0, 1],
    [1, 0],
    [1, 1],
]
y = [[0], [1], [1], [0]]
x = np.array(x)
y = np.array(y)
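# Convert to float32 tensors so they can be fed through nn.Linear layers.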
x = torch.from_numpy(x).float()
y = torch.from_numpy(y).float()
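# Pair inputs with targets and wrap them in DataLoaders; with only 4 samples, each loader yields a single batch per epoch.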
train_data = zip(x, y)
test_data = zip(x, y)
train_data = DataLoader(list(train_data), batch_size=32, shuffle=True)
test_data = DataLoader(list(test_data), batch_size=64, shuffle=False)
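# A small back-propagation (BP) network: 2 inputs -> 5 hidden units with ReLU -> 1 output.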
class BPNNModel(torch.nn.Module):
    def __init__(self):
        super(BPNNModel, self).__init__()
        self.layer1 = nn.Sequential(nn.Linear(2, 5), nn.ReLU())
        self.layer4 = nn.Sequential(nn.Linear(5, 1))

    def forward(self, img):
        img = self.layer1(img)
        img = self.layer4(img)
        return img
model = BPNNModel()
print(model)
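# Mean-squared-error loss optimized with plain SGD.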
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=3e-3)
train_losses = []
eval_losses = []
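# Train for 10000 epochs, recording the average per-batch loss on the train and test loaders each epoch.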
for e in range(10000):
    train_loss = 0
    model.train()
    for im, label in train_data:
        out = model(im)
        loss = criterion(out, label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
    train_losses.append(train_loss / len(train_data))

    eval_loss = 0
    model.eval()
    with torch.no_grad():
        for im, label in test_data:
            out = model(im)
            loss = criterion(out, label)
            eval_loss += loss.item()
    eval_losses.append(eval_loss / len(test_data))

    print('epoch: {}, Train Loss: {:.6f}, Eval Loss: {:.6f}'
          .format(e, train_loss / len(train_data), eval_loss / len(test_data)))
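# Plot how the training and evaluation losses evolve over the epochs.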
plt.title('train / eval loss')
plt.plot(np.arange(len(train_losses)), train_losses, label='train loss')
plt.plot(np.arange(len(eval_losses)), eval_losses, label='eval loss')
plt.xlabel('epoch')
plt.ylabel('MSE loss')
plt.legend()
plt.show()
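# Feed each XOR input through the trained network and print its prediction.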
for i in range(len(x)):
    out = model(x[i, :])
    print("input:", x[i, :].numpy(), "predict:", out.detach().numpy())