A simple binary classifier in PyTorch (ResNet-34 backbone, one sigmoid logit trained with BCELoss)
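
The two drafts below differ mainly in how the binary head is wired: the commented-out first attempt uses two output logits with CrossEntropyLoss, while the final scripts use one sigmoid logit with BCELoss. A minimal sketch of the two setups (shapes and loss calls only; the tensors are random placeholders, not the project's data):

import torch
from torch import nn

x = torch.randn(4, 512)                      # a batch of 4 backbone features
hard_labels = torch.tensor([0, 1, 1, 0])

# Variant A (first draft): two logits + CrossEntropyLoss, integer class labels
head2 = nn.Linear(512, 2)
loss_a = nn.CrossEntropyLoss()(head2(x), hard_labels)

# Variant B (final scripts): one logit + sigmoid + BCELoss, float targets in [0, 1]
head1 = nn.Linear(512, 1)
probs = torch.sigmoid(head1(x))
loss_b = nn.BCELoss()(probs, hard_labels.float().unsqueeze(1))

If numerical stability matters, nn.BCEWithLogitsLoss on the raw logit folds the sigmoid into the loss and avoids saturating near probabilities of 0 or 1.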

train.py

An earlier two-class draft using CrossEntropyLoss is kept commented out for reference:

# from torchvision.models.resnet import resnet34
# from dataset import ImageDataset
# from torch.optim.lr_scheduler import StepLR
# from torch.nn import CrossEntropyLoss
# import torch
# from torch.utils.tensorboard import SummaryWriter
#
# train_root = '/home/lixuan/sensors/baopai'
# batch_size = 20
# lr = 1e-3
# weight_decay = 1e-5
# lr_step = 10
# lr_decay = 0.5  # when val_loss increase, lr = lr*lr_decay
# max_epoch = 100
#
# model = resnet34(pretrained=False,num_classes=2)
# traindataset = ImageDataset(train_root)
# trainloader = torch.utils.data.DataLoader(traindataset,batch_size=batch_size,shuffle=True,num_workers=4)
#
# criterion = CrossEntropyLoss()
#
# model.cuda()
#
# optimizer = torch.optim.Adam([{'params':model.parameters()}],lr=lr,weight_decay = weight_decay)
#
# scheduler = StepLR(optimizer,step_size=lr_step,gamma=lr_decay)
# tb_writer = SummaryWriter('runs/scalar_example')
# for epoch in range(max_epoch):
#     model.train()
#     for batch, (data_input,label) in enumerate(trainloader):
#         data_input = data_input.cuda()
#         label = label.cuda()
#         output = model(data_input)
#         loss = criterion(output,label)
#         optimizer.zero_grad()
#         loss.backward()
#         optimizer.step()
#         iters = epoch * len(trainloader) + batch
#         if iters % 20 == 0:
#             tb_writer.add_scalar('iter_loss',loss,iters)
#     scheduler.step()  # step the LR schedule once per epoch



The final version predicts a single sigmoid logit and trains it with BCELoss:

from model import resnet34
from dataset import ImageDataset
from torch.optim.lr_scheduler import StepLR
from torch.nn import BCELoss  # SmoothL1Loss is another option for the soft targets
import torch
from torch.utils.tensorboard import SummaryWriter
from torch import nn

train_root = '/home/lixuan/sensors/baopai'
batch_size = 200
lr = 1e-3
weight_decay = 1e-5
lr_step = 10
lr_decay = 0.5  # intended: lr = lr * lr_decay when val loss rises; StepLR below just decays every lr_step epochs
max_epoch = 100

model = resnet34(pretrained=False)            # custom resnet34 from model.py that returns 512-d features
metric_fc = nn.Linear(512, 1)                 # single-logit binary head
# ImageNet weights; strict=False tolerates classifier keys missing from a feature-only backbone (assumption about model.py)
model.load_state_dict(torch.load('resnet34-333f7ec4.pth'), strict=False)
traindataset = ImageDataset(train_root)
trainloader = torch.utils.data.DataLoader(traindataset, batch_size=batch_size, shuffle=True, num_workers=4)

criterion = BCELoss()  # SmoothL1Loss() would also accept the soft targets

model.cuda()
metric_fc.cuda()

optimizer = torch.optim.Adam(
    [{'params': model.parameters()}, {'params': metric_fc.parameters()}],
    lr=lr, weight_decay=weight_decay)

scheduler = StepLR(optimizer, step_size=lr_step, gamma=lr_decay)
tb_writer = SummaryWriter('runs/scalar_example')
for epoch in range(max_epoch):
    model.train()
    for batch, (data_input, label) in enumerate(trainloader):
        data_input = data_input.cuda()
        label = label.cuda()
        feature = model(data_input)                     # 512-d backbone feature
        output = torch.sigmoid(metric_fc(feature))      # torch.sigmoid; nn.functional.sigmoid is deprecated
        label = label.type_as(output).unsqueeze(1)      # BCELoss needs float targets shaped like output
        loss = criterion(output, label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        iters = epoch * len(trainloader) + batch
        if iters % 5 == 0:
            print(loss.item(), label[:3], output[:3], '#' * 20)
            tb_writer.add_scalar('iter_loss', loss.item(), iters)
    scheduler.step()                                    # advance the LR schedule once per epoch
    if epoch % 5 == 0:
        torch.save(model.state_dict(), 'net.pth')
        torch.save(metric_fc.state_dict(), 'linear.pth')
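
The lr_decay comment says the learning rate should drop when the validation loss rises, but StepLR only decays every lr_step epochs regardless. The behaviour the comment describes is ReduceLROnPlateau. A minimal sketch of a drop-in replacement, assuming a validation loader valloader built like trainloader (valloader is a hypothetical name, not in the original script):

from torch.optim.lr_scheduler import ReduceLROnPlateau

scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=lr_decay, patience=2)

# run once at the end of each epoch instead of scheduler.step():
model.eval()
val_loss = 0.0
with torch.no_grad():
    for data_input, label in valloader:                 # hypothetical hold-out loader
        output = torch.sigmoid(metric_fc(model(data_input.cuda())))
        val_loss += criterion(output, label.cuda().type_as(output).unsqueeze(1)).item()
scheduler.step(val_loss / len(valloader))               # decays lr only when val loss stops improving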

test.py

Inference walks a folder and moves every image the model scores below 0.2 into a second folder:

import os
import shutil
from model import resnet34
import torch
from torch import nn
from torchvision import transforms as T
import cv2

transforms = T.Compose([
    lambda x: cv2.resize(x, (224, 224)),     # same preprocessing as training
    T.ToTensor(),
    T.Normalize(mean=[0.5], std=[0.5]),      # one value broadcasts across the three BGR channels
])

model = resnet34(pretrained=False)
metric_fc = nn.Linear(512,1)
metric_fc.load_state_dict(torch.load('linear.pth'))
model.load_state_dict(torch.load('net.pth'))
model.cuda()
metric_fc.cuda()
model.eval()
metric_fc.eval()
src_dir = '/media/lixuan/Data/MSCOCO/浙江非摆拍'
dst_dir = '/media/lixuan/Data/MSCOCO/浙江摆拍2'
with torch.no_grad():                                    # inference only: no autograd bookkeeping
    for file in os.listdir(src_dir):
        img = cv2.imread(os.path.join(src_dir, file))
        if img is None:                                  # skip unreadable / non-image files
            continue
        data_input = torch.unsqueeze(transforms(img), 0).cuda()
        feature = model(data_input)
        output = torch.sigmoid(metric_fc(feature)).item()   # nn.functional.sigmoid is deprecated
        if output < 0.2:
            print(file)
            shutil.move(os.path.join(src_dir, file), dst_dir)  # safer than shelling out to mv
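
The 0.2 cut-off above is hard-coded. With a labelled hold-out folder it is cheap to see how different thresholds behave before moving any files. A minimal sketch reusing model, metric_fc and transforms from this script (val_dir and score_image are hypothetical, not part of the original code):

import numpy as np

val_dir = '/path/to/val'                                 # hypothetical: same 0/1 sub-folder layout as the training data

def score_image(path):
    # one forward pass through the same preprocessing and model as above
    with torch.no_grad():
        x = torch.unsqueeze(transforms(cv2.imread(path)), 0).cuda()
        return torch.sigmoid(metric_fc(model(x))).item()

scores, labels = [], []
for d in ('0', '1'):
    for f in os.listdir(os.path.join(val_dir, d)):
        scores.append(score_image(os.path.join(val_dir, d, f)))
        labels.append(int(d))

scores, labels = np.array(scores), np.array(labels)
for thr in (0.1, 0.2, 0.3, 0.5):
    print(thr, ((scores >= thr) == labels).mean())       # accuracy at each candidate threshold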

dataset.py

The first draft (commented out) returned hard integer labels for CrossEntropyLoss:

# from torch.utils import data
# import os
# import cv2
# from torchvision import transforms as T
#
# class ImageDataset(data.Dataset):
#     def __init__(self, path):
#         self.img_path = []
#         self.labels = []
#         self.transforms = T.Compose([
#             lambda x: cv2.resize(x, (224,224)),
#             T.ToTensor(),
#             T.Normalize(mean=[0.5], std=[0.5]),
#         ])
#         for dir in os.listdir(path):
#             for file in os.listdir(os.path.join(path,dir)):
#                 self.img_path.append(os.path.join(path,dir,file))
#                 self.labels.append(int(dir))
#
#     def __len__(self):
#         return len(self.img_path)
#
#     def __getitem__(self, index):
#         img = cv2.imread(self.img_path[index])
#         img = self.transforms(img)
#         return img, self.labels[index]


The final version returns smoothed float targets for BCELoss:

from torch.utils import data
import os
import cv2
from torchvision import transforms as T

class ImageDataset(data.Dataset):
    def __init__(self, path):
        self.img_path = []
        self.labels = []
        self.transforms = T.Compose([
            lambda x: cv2.resize(x, (224,224)),
            T.ToTensor(),
            T.Normalize(mean=[0.5], std=[0.5]),
        ])
        for label_dir in os.listdir(path):               # sub-folders named '0' and '1'
            for file in os.listdir(os.path.join(path, label_dir)):
                self.img_path.append(os.path.join(path, label_dir, file))
                # folder '0' -> 0.001, folder '1' -> 0.999: smoothed float targets for BCELoss
                self.labels.append(abs(int(label_dir) - 0.001))

    def __len__(self):
        return len(self.img_path)

    def __getitem__(self, index):
        img = cv2.imread(self.img_path[index])
        img = self.transforms(img)
        return img, self.labels[index]
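
The label expression maps folder '0' to 0.001 and folder '1' to 0.999: lightly smoothed targets that keep BCELoss from pushing the sigmoid into full saturation. A quick self-contained check of the effect:

import torch
from torch.nn import BCELoss

criterion = BCELoss()
pred = torch.tensor([[0.99]])

hard = torch.tensor([[1.0]])
soft = torch.tensor([[0.999]])           # what this dataset actually emits for class 1

print(criterion(pred, hard).item())      # ≈ 0.0101: minimised only at pred = 1.0
print(criterion(pred, soft).item())      # ≈ 0.0146: minimum sits at pred = 0.999, not at a fully saturated sigmoid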
