Preface

Through fits and starts, my first machine learning project, recognizing the MNIST handwritten digit dataset, has finally wrapped up. Along the way I fell into plenty of pits, big and small, and this article summarizes them.

Model configuration

The model has 4 layers in total. The input layer has 784 neurons, one for each pixel of a 28x28 MNIST image (normalized in advance); there are 2 hidden layers with 16 neurons each; and the output layer has 10 neurons, one for each digit 0~9.
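
For reference, a minimal sketch of the same layer stack written with nn.Sequential (equivalent to the NeuralNet class in the source code below; the ReLU hidden activations and sigmoid output follow that code):

import torch.nn as nn

model_sketch = nn.Sequential(
    nn.Linear(784, 16), nn.ReLU(),    # input layer -> hidden layer 1
    nn.Linear(16, 16), nn.ReLU(),     # hidden layer 1 -> hidden layer 2
    nn.Linear(16, 10), nn.Sigmoid(),  # hidden layer 2 -> output, digits 0~9
)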

The model structure is shown in the figure below:

PyTorch version: 1.8.1+cu102

The model's final accuracy on the validation set is 95%:

Key points

1: Saving a PyTorch model:

save_path = './model/mnist_net'
torch.save(mnist_net, save_path)
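
Because this pickles the whole module, loading it back later (as the source code below does) only needs torch.load, with the NeuralNet class definition in scope. A minimal sketch; the state_dict lines are a commonly used alternative, not what this project uses:

mnist_net = torch.load(save_path)  # reload the whole pickled module

# alternative: save/load only the parameters
# torch.save(mnist_net.state_dict(), save_path)
# mnist_net = NeuralNet(input_size, hidden_size, out_put_size)
# mnist_net.load_state_dict(torch.load(save_path))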

2: Model training tips

During the initial phase of training I used a fairly large batch size (e.g. 128); after roughly 100 epochs the accuracy on the validation set reached about 90%. Beyond that point, with the large batch the loss fell very slowly and the accuracy stopped improving, so I continued training with smaller batches (e.g. 32, then 1) and eventually reached 95% accuracy, as sketched below.
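
Switching batch sizes between stages just means rebuilding the DataLoader. A minimal sketch, using the variable names from the source code below and the batch sizes mentioned above:

from torch.utils.data import DataLoader

# stage 1: coarse training with a large batch for ~100 epochs
usermnist_train_loader = DataLoader(dataset=usermnist_train, batch_size=128, shuffle=True)
# stage 2: continue with a smaller batch once the loss plateaus
usermnist_train_loader = DataLoader(dataset=usermnist_train, batch_size=32, shuffle=True)
# stage 3: finish with batch_size=1
usermnist_train_loader = DataLoader(dataset=usermnist_train, batch_size=1, shuffle=True)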

Source code

The related files can be downloaded after following my WeChat official account; the file structure is shown in the figure below:


# coding: utf-8

# In[2]:


import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import torchvision.transforms as transforms
import struct
import matplotlib.pyplot as plt
import sys
import warnings
from torch.utils.data import Dataset,DataLoader

# print the PyTorch version
print(torch.__version__)
# make float64 the default tensor dtype (the dataset below returns float64 numpy arrays)
torch.set_default_tensor_type(torch.DoubleTensor)


# In[3]:


user_train_imgs_path = './dataset/train-images.idx3-ubyte'    # 60k training images
user_train_labels_path = './dataset/train-labels.idx1-ubyte'

user_validate_imgs_path = './dataset/t10k-images.idx3-ubyte'  # 10k validation images
user_validate_labels_path = './dataset/t10k-labels.idx1-ubyte'

save_path = './model/mnist_net'

#hyperparameters
input_size = 784 #28*28
hidden_size = 16
user_batch_size = 1
out_put_size = 10 #0~9


# In[4]:


class UserMNIST(Dataset):
    def __init__(self, imgs_path, labels_path, root='', train=True):
        super(UserMNIST, self).__init__()
        self.train = train  # whether this is the training split
        self.train_nums = int(6e4)     # 60,000 training samples
        self.validate_nums = int(1e4)  # 10,000 validation samples
        # note: scientific-notation literals are floats, hence the explicit int() casts above

        #load files path
        self.imgs_folder_path = imgs_path
        self.labels_folder_path = labels_path
        if self.train :      
            self.img_nums = self.train_nums
        else:
            self.img_nums = self.validate_nums

        #load dataset
        with open(self.imgs_folder_path, 'rb') as _imgs:
            self._train_images = _imgs.read()
        with open(self.labels_folder_path, 'rb') as _labs:
            self._train_labels = _labs.read()


    def __getitem__(self, index):
        image = self.getImages(self._train_images, index)
        label = torch.zeros(out_put_size)  # one-hot encode the digit label
        temp = self.getLabels(self._train_labels, index)
        label[temp] = 1
        return image, label

    def __len__(self):
        return self.img_nums

    def getImages(self, image, index):
        # IDX image file: 16-byte header (magic number, image count, rows, cols), then 784 bytes per image
        img_size_bit = struct.calcsize('>784B')
        start_index = struct.calcsize('>IIII') + index * img_size_bit
        temp = struct.unpack_from('>784B', image, start_index)
        img = self.normalization(np.array(temp, dtype=float))
        return img

    def getLabels(self, label, index):
        # IDX label file: 8-byte header (magic number, item count), then one byte per label
        lab_size_bit = struct.calcsize('>1B')
        start_index = struct.calcsize('>II') + index * lab_size_bit
        lab = struct.unpack_from('>1B', label, start_index)
#         lab = torch.Tensor(lab)
        return lab

    def normalization(self, x):
        # scale raw pixel values from [0, 255] down to [0, 1]
        return x / 255.0



# In[5]:


usermnist_train = UserMNIST(user_train_imgs_path, user_train_labels_path, train=True)  # Dataset: defines how one sample is fetched (numpy image, one-hot tensor label)
usermnist_train_loader = DataLoader(dataset=usermnist_train, batch_size=user_batch_size, shuffle=True)  # DataLoader: batches samples and returns tensors

usermnist_validate = UserMNIST(user_validate_imgs_path, user_validate_labels_path, train=False)
usermnist_validate_loader = DataLoader(dataset=usermnist_validate, batch_size=user_batch_size, shuffle=True)


# In[6]:


img, lab = usermnist_train[6]  # get the 7th sample (index 6)
print(type(img))
print(type(lab))
# plt.imshow(img)
# plt.show()


# In[7]:


dataiter = iter(usermnist_train_loader)
images, labels = next(dataiter)  # fetch one batch
# print(images.shape, labels)
print(images.size(), labels.size(), labels.size(1))
print(type(images), type(labels))
# print(dataiter.batch_size)


# In[8]:


#set up NeuralNet
class NeuralNet(nn.Module):
    def __init__(self, input_size, hidden_size, out_put_size):
        super(NeuralNet, self).__init__()
        # record the hyperparameters
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.out_put_size = out_put_size

        # three linear layers: input -> hidden 1 -> hidden 2 -> output
        self.gap0 = nn.Linear(input_size, hidden_size)
        self.gap1 = nn.Linear(hidden_size, hidden_size)
        self.gap2 = nn.Linear(hidden_size, out_put_size)


    def forward(self, x):
        out = self.gap0(x)
        out = torch.relu(out)
        out = self.gap1(out)
        out = torch.relu(out)
        out = self.gap2(out)
        out = torch.sigmoid(out)
        return out
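
# Quick sanity check of the forward pass (a hypothetical snippet, not part of the original
# notebook): a dummy batch of 4 flattened images should produce an output of shape [4, 10].
_check_net = NeuralNet(input_size, hidden_size, out_put_size)
print(_check_net(torch.zeros(4, input_size)).size())  # expected: torch.Size([4, 10])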


# In[9]:


# First run: create a fresh network, e.g. mnist_net = NeuralNet(input_size, hidden_size, out_put_size)
# Later runs: reload the previously saved model and continue training it
mnist_net = torch.load(save_path)
# print(mnist_net)


# In[10]:


learning_rate = 1e-2
optimizer = torch.optim.SGD(mnist_net.parameters(), lr=learning_rate)


# In[11]:


for name,parameters in mnist_net.named_parameters():
    print(name,':',parameters.size())
#     print(parameters)


# In[12]:


device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device
mnist_net.to(device)
print(next(mnist_net.parameters()).device)


# In[13]:


def validate_data(usermnist_validate_loader):
    with torch.no_grad():
        total = float(0)
        correct = float(0)
        for i, (imgs, labs) in enumerate(usermnist_validate_loader):
            sys.stdout.write('\r' + str(i))  # overwrite a single line with the running sample counter
            imgs = imgs.to(device)
            labs = labs.to(device)
            outputs = mnist_net(imgs)
            predicted, predicted_index = torch.max(outputs, 1)  # predicted digit = argmax of the 10 outputs
            sample, sample_index = torch.max(labs, 1)           # true digit = argmax of the one-hot label
            total += labs.size(0)
            correct += (predicted_index == sample_index).sum()
#             print(predicted, predicted_index, sample, sample_index)
    print('validation accuracy: %.2f %%' % (100 * correct / total))

validate_data(usermnist_validate_loader)


# In[13]:


criterion = nn.MSELoss()
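# The dataset returns one-hot labels (see UserMNIST.__getitem__), so MSELoss compares the
# sigmoid outputs with the one-hot targets elementwise, both shaped [batch, 10].
# Hypothetical illustration (not part of the original notebook):
#   criterion(torch.tensor([[0.9, 0.1]]), torch.tensor([[1.0, 0.0]]))  # ((-0.1)**2 + 0.1**2) / 2 = 0.01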


# In[ ]:


epochs = 600
for epoch in range(epochs):
    torch.save(mnist_net, save_path)  # checkpoint the model at the start of every epoch
    print('current epoch = %d' % epoch)
    validate_data(usermnist_validate_loader)
    for i, (images, labels) in enumerate(usermnist_train_loader):  

        images = images.to(device)#move to gpu
        labels = labels.to(device)

        optimizer.zero_grad()  
        outputs = mnist_net(images)  
        with warnings.catch_warnings():#ignore some warnings
            warnings.simplefilter("ignore")
            loss = criterion(outputs, labels)  # calculate loss
        loss.backward()  
        optimizer.step()
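
# A hypothetical addition (not in the original run): accumulate the loss to watch it fall, e.g.
#   epoch_loss += loss.item()
# inside the inner loop, then print epoch_loss / len(usermnist_train_loader) after it.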


# In[80]:


torch.save(mnist_net, save_path)

Next, I will move on to studying and implementing some trajectory planning algorithms. Stay tuned.