
Deep Learning Notes (2): Implementing MNIST Classification with PyTorch (FNN, CNN, RNN, LSTM, GRU)

Posted: 2023-04-11 01:13:57


Table of Contents

0 Preface
1 Data Preprocessing
2 FNN (Feedforward Neural Network)
3 CNN (Convolutional Neural Network)
4 RNN (Recurrent Neural Network)
5 LSTM (Long Short-Term Memory)
6 GRU (Gated Recurrent Unit)
7 Complete Code

0 Preface

With the new semester about to start, I spent an evening reviewing basic deep learning code: classification on the classic MNIST handwritten digit dataset, implemented with FNN, CNN, RNN, LSTM, and GRU.

1 Data Preprocessing

import matplotlib.pyplot as plt
import torch
import time
import torch.nn.functional as F
from torch import nn, optim
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, ToTensor, Normalize, Resize
from torch.utils.data import DataLoader
from sklearn.metrics import accuracy_score

# Hyperparameters
BATCH_SIZE = 64  # batch size
EPOCHS = 5       # number of epochs

# Device
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

# Data transforms: to tensor, then normalize with the MNIST mean/std
transformers = Compose(transforms=[ToTensor(), Normalize(mean=(0.1307,), std=(0.3081,))])

# Load the data (set download=True on the first run if ./data is empty)
dataset_train = MNIST(root=r'./data', train=True, download=False, transform=transformers)
dataset_test = MNIST(root=r'./data', train=False, download=False, transform=transformers)
dataloader_train = DataLoader(dataset=dataset_train, batch_size=BATCH_SIZE, shuffle=True)
dataloader_test = DataLoader(dataset=dataset_test, batch_size=BATCH_SIZE, shuffle=True)
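As a quick sanity check (a small sketch of my own, run right after the block above), one batch from the loader should come out as 64 normalized 1x28x28 tensors:

# Inspect one batch (assumes the preprocessing block above has been run)
datas, labels = next(iter(dataloader_train))
print(datas.shape)   # torch.Size([64, 1, 28, 28])
print(labels.shape)  # torch.Size([64])
print(datas.mean().item(), datas.std().item())  # roughly 0 and 1 after Normalize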

2 FNN (Feedforward Neural Network)

# FNN
class FNN(nn.Module):
    # Define the network structure
    def __init__(self):
        super(FNN, self).__init__()
        self.layer1 = nn.Linear(28 * 28, 28)  # hidden layer
        self.out = nn.Linear(28, 10)          # output layer

    # Forward pass
    def forward(self, x):
        # Input shape: [batch_size, 1, 28, 28]
        x = x.view(-1, 28 * 28)         # flatten each image to a 784-vector
        x = torch.relu(self.layer1(x))  # ReLU activation
        x = self.out(x)                 # output layer (logits)
        return x
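The view(-1, 28 * 28) call flattens each image into a 784-dimensional vector before the two linear layers. A minimal shape check (my own sketch, assuming the imports from section 1):

# Feed a dummy batch through the FNN to verify the output shape
model = FNN()
dummy = torch.randn(64, 1, 28, 28)  # fake batch of 64 grayscale images
print(model(dummy).shape)           # torch.Size([64, 10]), one logit vector per image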

Result: (loss/accuracy curves; figure not reproduced here)

3 CNN (Convolutional Neural Network)

# CNN
class CNN(nn.Module):
    # Define the network structure
    def __init__(self):
        super(CNN, self).__init__()
        # Convolution + pooling + convolution
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=(3, 3), stride=(1, 1), padding=1)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        # Dropout
        self.dropout = nn.Dropout(p=0.25)
        # Fully connected layers
        self.fc1 = nn.Linear(64 * 7 * 7, 512)
        self.fc2 = nn.Linear(512, 64)
        self.fc3 = nn.Linear(64, 10)

    # Forward pass
    def forward(self, x):
        # Input shape: [batch_size, 1, 28, 28]
        x = self.pool(F.relu(self.conv1(x)))  # -> [batch_size, 32, 14, 14]
        x = self.dropout(x)
        x = self.pool(F.relu(self.conv2(x)))  # -> [batch_size, 64, 7, 7]
        x = x.view(-1, 64 * 7 * 7)            # flatten
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
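The 64 * 7 * 7 input size of fc1 follows from shape arithmetic: a 3x3 convolution with stride 1 and padding 1 preserves the 28x28 resolution, and each 2x2 max pool halves it (28 -> 14 -> 7). A small verification sketch (my own, assuming the imports from section 1):

# Trace the feature-map shapes through the two conv/pool stages
model = CNN()
x = torch.randn(2, 1, 28, 28)
x = model.pool(F.relu(model.conv1(x)))
print(x.shape)  # torch.Size([2, 32, 14, 14])
x = model.pool(F.relu(model.conv2(x)))
print(x.shape)  # torch.Size([2, 64, 7, 7]) -> 64 * 7 * 7 features once flattened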

Result: (loss/accuracy curves; figure not reproduced here)

4 RNN (Recurrent Neural Network)

# RNN
class RNN(nn.Module):
    # Define the network structure
    def __init__(self):
        super(RNN, self).__init__()
        self.rnn = nn.RNN(input_size=28, hidden_size=64, num_layers=1, batch_first=True)  # RNN
        self.dropout = nn.Dropout(p=0.25)
        self.out = nn.Linear(64, 10)  # fully connected layer

    # Forward pass
    def forward(self, x):
        x = x.view(-1, 28, 28)  # treat each image as a sequence of 28 rows
        x = self.dropout(x)
        r_out, _ = self.rnn(x, None)
        x = self.out(r_out[:, -1, :])  # classify from the last time step
        return x
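The view(-1, 28, 28) call reads each image row by row: every row of 28 pixels becomes one time step, so the RNN processes a length-28 sequence and only the hidden state of the final step is classified. A sketch of the intermediate shapes (my own, assuming the imports from section 1):

# Show what nn.RNN returns for one image-as-sequence batch
model = RNN()
x = torch.randn(64, 1, 28, 28).view(-1, 28, 28)  # [64, 28, 28]
r_out, h_n = model.rnn(x, None)                  # None -> zero-initialized hidden state
print(r_out.shape)            # torch.Size([64, 28, 64]): output at every time step
print(r_out[:, -1, :].shape)  # torch.Size([64, 64]): last step, fed to the linear layer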

Result: (loss/accuracy curves; figure not reproduced here)

5 LSTM (Long Short-Term Memory)

# LSTM
class LSTM(nn.Module):
    # Define the network structure
    def __init__(self):
        super(LSTM, self).__init__()
        self.lstm = nn.LSTM(input_size=28, hidden_size=64, num_layers=1, batch_first=True)  # LSTM
        self.dropout = nn.Dropout(p=0.25)
        self.out = nn.Linear(64, 10)  # fully connected layer

    # Forward pass
    def forward(self, x):
        x = x.view(-1, 28, 28)  # [64, 28, 28]
        x = self.dropout(x)
        r_out, _ = self.lstm(x, None)
        x = self.out(r_out[:, -1, :])  # [64, 10]
        return x
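Unlike the plain RNN, nn.LSTM also carries a cell state, so its second return value is an (h_n, c_n) tuple; passing None initializes both to zeros. A quick look at what the discarded _ holds (my own sketch, assuming the imports from section 1):

# The LSTM returns per-step outputs plus the final hidden and cell states
model = LSTM()
x = torch.randn(64, 28, 28)
r_out, (h_n, c_n) = model.lstm(x, None)
print(h_n.shape)  # torch.Size([1, 64, 64]): [num_layers, batch, hidden_size]
print(c_n.shape)  # torch.Size([1, 64, 64]): same shape for the cell state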

Result: (loss/accuracy curves; figure not reproduced here)

6 GRU (Gated Recurrent Unit)

class GRU(nn.Module):
    # Define the network structure
    def __init__(self):
        super(GRU, self).__init__()
        self.gru = nn.GRU(input_size=28, hidden_size=64, num_layers=1, batch_first=True)  # GRU
        self.dropout = nn.Dropout(p=0.25)
        self.out = nn.Linear(64, 10)  # fully connected layer

    def forward(self, x):
        x = x.view(-1, 28, 28)
        x = self.dropout(x)
        r_out, _ = self.gru(x, None)
        x = self.out(r_out[:, -1, :])
        return x
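Since the three recurrent models share the same interface here, one way to compare them is by parameter count: per hidden unit the LSTM learns four gate matrices and the GRU three, versus one for the plain RNN. A small sketch (my own, assuming the three classes above are defined):

# Compare the number of trainable parameters across the recurrent variants
for m in (RNN(), LSTM(), GRU()):
    total = sum(p.numel() for p in m.parameters())
    print(type(m).__name__, total)  # RNN < GRU < LSTM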

Result: (loss/accuracy curves; figure not reproduced here)

7 Complete Code

Overall assessment: all five models reach over 96% accuracy. The CNN performs best, the FNN comes next, and the RNN, LSTM, and GRU fluctuate more.

The code wraps several hand-rolled helper functions (reinventing the wheel a bit): get_accuracy(), train(), test(), run(), and initialize().

"""MNIST数据集分类尝试使用FNN、CNN、RNN、LSTM、GRU"""import matplotlib.pyplot as pltimport torchimport timeimport torch.nn.functional as Ffrom torch import nn, optimfrom torchvision.datasets import MNISTfrom torchvision.transforms import Compose, ToTensor, Normalize, Resizefrom torch.utils.data import DataLoaderfrom sklearn.metrics import accuracy_score# 超参数BATCH_SIZE = 64 # 批次大小EPOCHS = 5 # 迭代轮数# 设备DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'# 数据转换transformers = Compose(transforms=[ToTensor(), Normalize(mean=(0.1307,), std=(0.3081,))])# 数据装载dataset_train = MNIST(root=r'./data', train=True, download=False, transform=transformers)dataset_test = MNIST(root=r'./data', train=False, download=False, transform=transformers)dataloader_train = DataLoader(dataset=dataset_train, batch_size=BATCH_SIZE, shuffle=True)dataloader_test = DataLoader(dataset=dataset_test, batch_size=BATCH_SIZE, shuffle=True)# FNNclass FNN(nn.Module):# 定义网络结构def __init__(self):super(FNN, self).__init__()self.layer1 = nn.Linear(28 * 28, 28) # 隐藏层self.out = nn.Linear(28, 10) # 输出层# 计算def forward(self, x):# 初始形状[batch_size, 1, 28, 28]x = x.view(-1, 28 * 28)x = torch.relu(self.layer1(x)) # 使用relu函数激活x = self.out(x) # 输出层return x# CNNclass CNN(nn.Module):# 定义网络结构def __init__(self):super(CNN, self).__init__()# 卷积层+池化层+卷积层self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=(3, 3), stride=(1, 1), padding=1)self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding=1)self.pool = nn.MaxPool2d(2, 2)# dropoutself.dropout = nn.Dropout(p=0.25)# 全连接层self.fc1 = nn.Linear(64 * 7 * 7, 512)self.fc2 = nn.Linear(512, 64)self.fc3 = nn.Linear(64, 10)# 计算def forward(self, x):# 初始形状[batch_size, 1, 28, 28]x = self.pool(F.relu(self.conv1(x)))x = self.dropout(x)x = self.pool(F.relu(self.conv2(x)))x = x.view(-1, 64 * 7 * 7)x = F.relu(self.fc1(x))x = F.relu(self.fc2(x))x = self.fc3(x)return x# RNNclass RNN(nn.Module):# 定义网络结构def __init__(self):super(RNN, self).__init__()self.rnn = nn.RNN(input_size=28, hidden_size=64, num_layers=1, batch_first=True) # RNNself.dropout = nn.Dropout(p=0.25)self.out = nn.Linear(64, 10) # 全连接层# 计算def forward(self, x):x = x.view(-1, 28, 28)x = self.dropout(x)r_out, _ = self.rnn(x, None)x = self.out(r_out[:, -1, :])return x# LSTMclass LSTM(nn.Module):# 定义网络结构def __init__(self):super(LSTM, self).__init__()self.lstm = nn.LSTM(input_size=28, hidden_size=64, num_layers=1, batch_first=True) # LSTMself.dropout = nn.Dropout(p=0.25)self.out = nn.Linear(64, 10) # 全连接层# 计算def forward(self, x):x = x.view(-1, 28, 28) # [64, 28, 28]x = self.dropout(x)r_out, _ = self.lstm(x, None)x = self.out(r_out[:, -1, :]) # [64, 10]return xclass GRU(nn.Module):# 定义网络结构def __init__(self):super(GRU, self).__init__()self.gru = nn.GRU(input_size=28, hidden_size=64, num_layers=1, batch_first=True) # GRUself.dropout = nn.Dropout(p=0.25)self.out = nn.Linear(64, 10) # 全连接层def forward(self, x):x = x.view(-1, 28, 28)x = self.dropout(x)r_out, _ = self.gru(x, None)x = self.out(r_out[:, -1, :])return xloss_func = nn.CrossEntropyLoss() # 交叉熵损失函数# 记录损失值、准确率loss_list, accuracy_list = [], []# 计算准确率def get_accuracy(model, datas, labels):out = torch.softmax(model(datas), dim=1, dtype=torch.float32)predictions = torch.max(input=out, dim=1)[1] # 最大值的索引y_predict = predictions.to('cpu').data.numpy()y_true = labels.to('cpu').data.numpy()# accuracy = float(np.sum(y_predict == y_true)) / float(y_true.size) # 准确率accuracy = accuracy_score(y_true, y_predict) # 准确率return accuracy# 训练def train(model, optimizer, epoch):model.train() # 
模型训练for i, (datas, labels) in enumerate(dataloader_train):# 设备转换datas = datas.to(DEVICE)labels = labels.to(DEVICE)# 计算结果out = model(datas)# 计算损失值loss = loss_func(out, labels)# 梯度清零optimizer.zero_grad()# 反向传播loss.backward()# 梯度更新optimizer.step()# 打印损失值if i % 100 == 0:print('Train Epoch:%d Loss:%0.6f' % (epoch, loss.item()))loss_list.append(loss.item())# 测试def test(model, epoch):model.eval()with torch.no_grad():for i, (datas, labels) in enumerate(dataloader_test):# 设备转换datas = datas.to(DEVICE)labels = labels.to(DEVICE)# 打印信息if i % 20 == 0:accuracy = get_accuracy(model, datas, labels)print('Test Epoch:%d Accuracy:%0.6f' % (epoch, accuracy))accuracy_list.append(accuracy)# 运行def run(model, optimizer, model_name):t1 = time.time()for epoch in range(EPOCHS):train(model, optimizer, epoch)test(model, epoch)t2 = time.time()print(f'共耗时{t2 - t1}秒')# 绘制Loss曲线plt.rcParams['figure.figsize'] = (16, 8)plt.subplots(1, 2)plt.subplot(1, 2, 1)plt.plot(range(len(loss_list)), loss_list)plt.title('Loss Curve')plt.subplot(1, 2, 2)plt.plot(range(len(accuracy_list)), accuracy_list)plt.title('Accuracy Cure')# plt.show()plt.savefig(f'./figure/mnist_{model_name}.png')def initialize(model, model_name):print(f'Start {model_name}')# 查看分配显存print('GPU_Allocated:%d' % torch.cuda.memory_allocated())# 优化器optimizer = optim.Adam(params=model.parameters(), lr=0.001)run(model, optimizer, model_name)if __name__ == '__main__':models = [FNN().to(DEVICE),CNN().to(DEVICE),LSTM().to(DEVICE),RNN().to(DEVICE),GRU().to(DEVICE)]model_names = ['FNN', 'CNN', 'RNN', 'LSTM', 'GRU']for model, model_name in zip(models, model_names):initialize(model, model_name)# 保存模型torch.save(model.state_dict(), f'./model/mnist_{model_name}.pkl')
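After training, the saved weights can be restored for inference. A minimal sketch of my own (it assumes the CNN class and dataloader_test from the script above, and that ./model/mnist_CNN.pkl was written by a previous run):

# Reload a trained model and predict on one test batch
model = CNN()
model.load_state_dict(torch.load('./model/mnist_CNN.pkl', map_location='cpu'))
model.eval()  # switch off dropout for inference
with torch.no_grad():
    datas, labels = next(iter(dataloader_test))
    predictions = model(datas).argmax(dim=1)
    print(predictions[:10])  # predicted digits
    print(labels[:10])       # ground-truth digits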

Likes, bookmarks, and follows are welcome.

I'm still a beginner in deep learning; if anything here is wrong, please leave a comment and correct me.
