Introduction
Lately I have often been experimenting with how different network parameters and structures affect results, so I wanted a template that is easy to modify. The idea is to implement a classifier on the MNIST dataset, so that when I later test the performance of Dropout, Batch Normalization, and so on, I can edit it directly.
The full code is posted below.
Detailed Code
Detailed comments are included in the code, whose main body was adapted from the feedforward_neural_network example.
# Reference: https://discuss.pytorch.org/t/dynamically-add-or-delete-layers/10447
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms

# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Hyper-parameters
input_size = 784
hidden_size = 500
num_classes = 10
n_layers = 2
num_epochs = 5
batch_size = 100
learning_rate = 0.001

# MNIST dataset
train_dataset = torchvision.datasets.MNIST(root='./data',
                                           train=True,
                                           transform=transforms.ToTensor(),
                                           download=True)
test_dataset = torchvision.datasets.MNIST(root='./data',
                                          train=False,
                                          transform=transforms.ToTensor())

# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)

# Fully connected neural network with a configurable number of hidden layers
class NeuralNet(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes, n_layers):
        super(NeuralNet, self).__init__()
        # Build the hidden layers in a loop so their count is configurable
        layers = []
        for i in range(n_layers):
            layers.append(nn.Linear(hidden_size, hidden_size))
            layers.append(nn.ReLU())
            layers.append(nn.BatchNorm1d(hidden_size))
            # layers.append(nn.Dropout(0.5))
        self.inLayer = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.hiddenLayer = nn.Sequential(*layers)
        self.outLayer = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        out = self.inLayer(x)
        out = self.relu(out)
        out = self.hiddenLayer(out)
        # Return raw logits: nn.CrossEntropyLoss applies log-softmax internally,
        # so applying nn.Softmax here as well would hurt training
        out = self.outLayer(out)
        return out

model = NeuralNet(input_size, hidden_size, num_classes, n_layers).to(device)
# Print the model structure
print(model)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Train the model
model.train()
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Move tensors to the configured device
        images = images.reshape(-1, 28*28).to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i+1) % 100 == 0:
            # Compute the accuracy on the current batch
            _, predicted = torch.max(outputs.data, 1)
            total = labels.size(0)
            correct = (predicted == labels).sum().item()
            acc = 100 * correct / total
            # Print the results
            print('Epoch [{}/{}], Step [{}/{}], Accuracy: {:.2f} %, Loss: {:.4f}'
                  .format(epoch+1, num_epochs, i+1, total_step, acc, loss.item()))

# Test the model
# In the test phase we don't need to compute gradients (for memory efficiency);
# model.eval() also switches BatchNorm (and Dropout) to inference mode
model.eval()
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.reshape(-1, 28*28).to(device)
        labels = labels.to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    print('Accuracy of the network on the 10000 test images: {} %'.format(100 * correct / total))

# Save the model checkpoint
# torch.save(model.state_dict(), 'model.ckpt')
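Since the stated goal is to test Dropout and Batch Normalization, a natural refinement is to expose them as constructor flags instead of commenting lines in and out. Below is a minimal sketch of that idea; the class name ConfigurableNet and the parameters use_batchnorm, use_dropout, and dropout_p are my own additions, not part of the original code.

# Sketch: the same layer-building loop, with toggles for the techniques under test.
# ConfigurableNet, use_batchnorm, use_dropout and dropout_p are hypothetical names.
class ConfigurableNet(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes, n_layers,
                 use_batchnorm=True, use_dropout=False, dropout_p=0.5):
        super(ConfigurableNet, self).__init__()
        layers = []
        for i in range(n_layers):
            layers.append(nn.Linear(hidden_size, hidden_size))
            layers.append(nn.ReLU())
            if use_batchnorm:
                layers.append(nn.BatchNorm1d(hidden_size))
            if use_dropout:
                layers.append(nn.Dropout(dropout_p))
        self.inLayer = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.hiddenLayer = nn.Sequential(*layers)
        self.outLayer = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        out = self.relu(self.inLayer(x))
        out = self.hiddenLayer(out)
        return self.outLayer(out)

# Example: the training and test loops above work unchanged,
# only the model construction line differs
# model = ConfigurableNet(input_size, hidden_size, num_classes, n_layers,
#                         use_batchnorm=False, use_dropout=True).to(device)

The training and test loops above work unchanged with this variant; only the line that constructs the model differs between experiments.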
By adjusting n_layers we can conveniently change the number of hidden layers in the network, which makes later experiments easier; a small demonstration follows.
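For instance, printing two models built with different depths makes the effect of n_layers visible. This snippet assumes the NeuralNet class and hyper-parameters defined above:

shallow = NeuralNet(input_size, hidden_size, num_classes, n_layers=1)
deep = NeuralNet(input_size, hidden_size, num_classes, n_layers=4)
# Each hidden layer contributes three modules (Linear, ReLU, BatchNorm1d),
# so hiddenLayer holds 3 modules for shallow and 12 for deep
print(shallow)
print(deep)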
Complete Code
The complete code can be found at the following address: Dynamically_add_or_delete_layers