Day4 学习笔记 7/20
实现了一个简单的Lenet模型:
- import torch
- import torch.nn as nn
- import time
- from torch.utils.data import DataLoader
- from torchvision.datasets import MNIST
- from torchvision import transforms
- from torch import optim
def loadMNIST(batch_size, num_workers=10):
    """Load the MNIST dataset (28x28 grayscale digit images) as tensors.

    Args:
        batch_size: number of samples per mini-batch for both loaders.
        num_workers: worker subprocesses per DataLoader (default 10,
            matching the previously hard-coded value).

    Returns:
        Tuple of (trainset, testset, trainloader, testloader).
    """
    trans_img = transforms.Compose([transforms.ToTensor()])
    trainset = MNIST('./data', train=True, transform=trans_img, download=True)
    testset = MNIST('./data', train=False, transform=trans_img, download=True)
    # Shuffle only the training data; evaluation order does not matter.
    trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True,
                             num_workers=num_workers)
    testloader = DataLoader(testset, batch_size=batch_size, shuffle=False,
                            num_workers=num_workers)
    return trainset, testset, trainloader, testloader
- # 定义Lenet卷积神经网络
- class Lenet(nn.Module):
- def __init__(self):
- super(Lenet,self).__init__()
- layer1 = nn.Sequential()
- layer1.add_module('conv1',nn.Conv2d(1,6,5,padding=1))
- # 第一层是卷积神经网络,输入是1张图像,输出是6张特征图,卷积核是5*5,填充为1
- layer1.add_module('ac1',nn.ReLU())
- # 激活函数为ReLU
- layer1.add_module('pool1',nn.MaxPool2d(2,2))
- # 2*2的池化层
- self.layer1 = layer1
-
- layer2 = nn.Sequential()
- layer2.add_module('conv2',nn.Conv2d(6,16,5,padding=1))
- # 输入是6个通道输出是16个特征图
- layer2.add_module('ac1',nn.ReLU())
- layer2.add_module('pool2',nn.MaxPool2d(2,2))
- self.layer2 = layer2
-
- # 全连接,优化提取参数
- layer3 = nn.Sequential()
- layer3.add_module('fc1',nn.Linear(400,120))
- layer3.add_module('ac1',nn.ReLU())
- layer3.add_module('fc2',nn.Linear(120,84))
- layer3.add_module('ac2',nn.ReLU())
- layer3.add_module('fc3',nn.Linear(84,10)) # 提取完之后可以得到对应0、1、2...9的10个参数
- self.layer3 = layer3
-
- def forward(self,x):
- x = self.layer1(x)
- x = self.layer2(x)
- x = x.view(x.size(0),-1)
- x = self.layer3(x)
- return x
if __name__ == '__main__':
    t_start = time.time()

    # Hyper-parameters.
    learning_rate = 0.001  # SGD step size
    batch_size = 200       # samples fed to the network per mini-batch
    epoches = 50           # passes over the training set

    lenet = Lenet()
    trainset, testset, trainloader, testloader = loadMNIST(batch_size)
    # reduction='sum' accumulates per-sample losses; dividing by the
    # dataset size below reports the mean loss per sample.
    criterion = nn.CrossEntropyLoss(reduction='sum')
    optimizer = optim.SGD(lenet.parameters(), lr=learning_rate)

    for i in range(epoches):
        running_loss = 0.
        running_acc = 0.
        for img, label in trainloader:
            optimizer.zero_grad()            # clear stale gradients before backward
            output = lenet(img)              # forward pass
            loss = criterion(output, label)  # summed cross-entropy over the batch
            loss.backward()                  # back-propagate
            optimizer.step()                 # apply the gradient update

            running_loss += loss.item()
            # argmax over the class dimension gives the predicted digit
            # (replaces the old torch.max call whose first result was unused).
            predict = output.argmax(dim=1)
            running_acc += (predict == label).sum().item()

        running_loss /= len(trainset)  # mean loss per training sample
        running_acc /= len(trainset)   # fraction of correct predictions
        time_spend = time.time() - t_start
        print("[%d/%d] Loss:%.5f, Acc:%.2f,Time:%.2f" % (i+1, epoches, running_loss, 100*running_acc, time_spend))
复制代码
|