import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# Prefer the GPU but fall back to CPU: the original hard-coded "cuda:0",
# which raises at the first .to(device) on machines without CUDA.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# CIFAR-10: 50,000 training / 10,000 test images of shape 3x32x32,
# converted to float tensors in [0, 1] by ToTensor().
train_data = torchvision.datasets.CIFAR10(
    root='./dataset', train=True, download=True,
    transform=torchvision.transforms.ToTensor())
test_data = torchvision.datasets.CIFAR10(
    root='./dataset', train=False, download=True,
    transform=torchvision.transforms.ToTensor())

train_data_size = len(train_data)
test_data_size = len(test_data)
print("训练数据集的长度为:{}".format(train_data_size))
print("测试数据集的长度为:{}".format(test_data_size))

# Mini-batches of 64; default (sequential) sampling as in the original.
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)
class Net(nn.Module):
    """Small CNN for CIFAR-10 classification.

    Three conv + max-pool stages followed by two fully connected
    layers producing 10 class logits.
    """

    def __init__(self):
        super().__init__()
        # Spatial flow: 32x32x3 -> 16x16x32 -> 8x8x32 -> 4x4x64,
        # then flatten (64*4*4 = 1024) -> 64 -> 10 logits.
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(64 * 4 * 4, 64),
            nn.Linear(64, 10),
        )

    def forward(self, x):
        """Map a (N, 3, 32, 32) batch to (N, 10) unnormalized logits."""
        return self.model(x)
# --- Model, loss, optimizer --------------------------------------------------
net = Net()
net.to(device)

loss_fn = nn.CrossEntropyLoss()
loss_fn.to(device)

learning_rate = 1e-2
optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate)

total_train_step = 0  # cumulative optimizer steps across all epochs
total_test_step = 0   # completed evaluation passes (one per epoch)
epoch = 10

writer = SummaryWriter(log_dir='./logs_train')

for i in range(epoch):
    print("-------第{}轮训练开始-------".format(i + 1))

    # ---- Training phase ----
    net.train(True)
    for imgs, targets in train_dataloader:
        imgs = imgs.to(device)
        targets = targets.to(device)
        output = net(imgs)
        loss = loss_fn(output, targets)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_train_step += 1
        if total_train_step % 100 == 0:
            print("训练次数:{},loss:{}".format(total_train_step, loss.item()))
            writer.add_scalar('train_loss', loss.item(), total_train_step)

    # ---- Evaluation phase (no gradients) ----
    net.eval()
    total_test_loss = 0.0
    total_accuracy = 0
    with torch.no_grad():
        for imgs, targets in test_dataloader:
            imgs = imgs.to(device)
            targets = targets.to(device)
            output = net(imgs)
            loss = loss_fn(output, targets)
            total_test_loss += loss.item()
            # .item() keeps the running count as a plain Python int; the
            # original accumulated a device tensor, which prints as
            # "tensor(...)" and needlessly holds onto the device.
            total_accuracy += (output.argmax(1) == targets).sum().item()

    # (Fixed doubled colon "Loss::" in the original output string.)
    print("整体测试集上的Loss:{}".format(total_test_loss))
    print("整体测试集上的正确率:{}".format(total_accuracy / test_data_size))
    writer.add_scalar('test_loss', total_test_loss, total_test_step)
    writer.add_scalar('test_accuracy', total_accuracy / test_data_size,
                      total_test_step)
    total_test_step += 1

    # NOTE(review): torch.save(net, ...) pickles the whole module object.
    # Saving net.state_dict() is the recommended, more portable format —
    # kept as-is here to preserve the existing checkpoint file layout.
    torch.save(net, "train_{}.pth".format(i))
    print("模型已保存")

writer.close()