PyTorch Example: CNN Image Classification on FashionMNIST
Version info
- PyTorch: 1.12.1
- Python: 3.7.13
Imports
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision.transforms import ToTensor, Normalize, Compose
import torchvision.datasets as datasets
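A quick optional sanity check (not part of the original code) to confirm the environment matches the versions listed above:
import sys
print(torch.__version__)   # expected: 1.12.1
print(sys.version)         # expected: 3.7.x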
Dataset: exploring FashionMNIST
- First load the FashionMNIST data (the first run downloads it automatically, so it may take a moment)
explore_data = datasets.FashionMNIST(
    root="./data",
    train=True,
    download=True
)
# take the first sample (index 0)
explore_data[0]
(<PIL.Image.Image image mode=L size=28x28>, 9)
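Each sample is a (PIL image, label) pair; label 9 maps to 'Ankle boot'. The index-to-name mapping lives in the dataset's classes attribute, which you can print as a quick check:
print(explore_data.classes)
# ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']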
- Print a few more images to get a feel for the data
import matplotlib.pyplot as plt

def show_images(n_rows, n_cols, x_data):
    assert n_rows * n_cols < len(x_data)
    plt.figure(figsize=(n_cols * 1.5, n_rows * 1.5))
    for row in range(n_rows):
        for col in range(n_cols):
            index = row * n_cols + col
            plt.subplot(n_rows, n_cols, index + 1)
            plt.imshow(x_data[index][0], cmap="binary", interpolation="nearest")  # image
            plt.axis("off")
            label = explore_data.classes[x_data[index][1]]
            plt.title(label)  # label
    plt.show()
show_images(3, 5, explore_data)
Dataset: loading and preprocessing
transform_funcs = Compose([
    ToTensor()
])

train_data = datasets.FashionMNIST(
    root="./data",
    train=True,
    download=True,
    transform=transform_funcs
)
test_data = datasets.FashionMNIST(
    root="./data",
    train=False,
    download=True,
    transform=transform_funcs
)
print(train_data.data.shape)
print(test_data.data.shape)
torch.Size([60000, 28, 28])
torch.Size([10000, 28, 28])
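Note that Normalize is imported but not used here; ToTensor alone scales pixel values to [0, 1]. If you want standardized inputs, you could extend the pipeline as in the sketch below. The mean/std values are commonly quoted FashionMNIST statistics and are an assumption, not something used later in this post:
transform_funcs = Compose([
    ToTensor(),
    Normalize((0.2860,), (0.3530,))  # assumed dataset mean/std; recompute from train_data if in doubt
])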
Building the model: CNN
- A simple CNN with two convolutional blocks
class CNNModel(nn.Module):
    def __init__(self):
        super(CNNModel, self).__init__()
        self.module1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.module2 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.flatten = nn.Flatten()
        self.linear1 = nn.Linear(7 * 7 * 64, 64)
        self.linear2 = nn.Linear(64, 10)
        self.relu = nn.ReLU()

    def forward(self, x):
        out = self.module1(x)
        out = self.module2(out)
        out = self.flatten(out)
        out = self.linear1(out)
        out = self.relu(out)
        out = self.linear2(out)
        return out
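Why 7 * 7 * 64 for linear1? Each convolution keeps the spatial size (kernel 5, stride 1, padding 2), and each max-pooling halves it, so a 28x28 input becomes 14x14 after module1 and 7x7 after module2, with 64 channels. A quick check:
# 28x28 input, two 2x2 max-pools with stride 2, size-preserving convolutions in between
feature_size = 28 // 2 // 2                 # 7
print(feature_size * feature_size * 64)     # 3136, the in_features of linear1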
- Print the model structure
print(CNNModel())
CNNModel(
(module1): Sequential(
(0): Conv2d(1, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
(1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU()
(3): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
(module2): Sequential(
(0): Conv2d(32, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
(1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU()
(3): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
(flatten): Flatten(start_dim=1, end_dim=-1)
(linear1): Linear(in_features=3136, out_features=64, bias=True)
(linear2): Linear(in_features=64, out_features=10, bias=True)
(relu): ReLU()
)
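An optional shape check (not in the original post): feed a dummy batch through the model to confirm it maps (N, 1, 28, 28) inputs to (N, 10) logits.
dummy = torch.randn(4, 1, 28, 28)    # a fake batch of 4 grayscale 28x28 images
print(CNNModel()(dummy).shape)       # torch.Size([4, 10])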
Training
# Hyperparameters
epoch_num = 10
batch_size = 64
learning_rate = 0.005
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Data
train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)

# Model
model = CNNModel().to(device)

# CrossEntropyLoss applies softmax internally, so the model does not need a softmax layer
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Training loop
model.train()
loss_list = []
for epoch in range(epoch_num):
    for i, (X_train, y_train) in enumerate(train_loader):
        out = model(X_train.to(device))
        l = loss(out, y_train.to(device))
        optimizer.zero_grad()
        l.backward()
        optimizer.step()
        if (i + 1) % 100 == 0:
            print(f"Train... [epoch {epoch + 1}/{epoch_num}, step {i + 1}/{len(train_loader)}]\t[loss {l.item()}]")
    loss_list.append(l.item())  # record the last batch loss of each epoch (used for the curve below)
Train... [epoch 1/10, step 100/938] [loss 0.5055983662605286]
Train... [epoch 1/10, step 200/938] [loss 0.4261328876018524]
Train... [epoch 1/10, step 300/938] [loss 0.31329622864723206]
Train... [epoch 1/10, step 400/938] [loss 0.4195505678653717]
Train... [epoch 1/10, step 500/938] [loss 0.4172752797603607]
Train... [epoch 1/10, step 600/938] [loss 0.4623292088508606]
Train... [epoch 1/10, step 700/938] [loss 0.44347164034843445]
Train... [epoch 1/10, step 800/938] [loss 0.22579336166381836]
Train... [epoch 1/10, step 900/938] [loss 0.34102970361709595]
Train... [epoch 2/10, step 100/938] [loss 0.3091823160648346]
Train... [epoch 2/10, step 200/938] [loss 0.3477814793586731]
Train... [epoch 2/10, step 300/938] [loss 0.26896584033966064]
Train... [epoch 2/10, step 400/938] [loss 0.3499389886856079]
Train... [epoch 2/10, step 500/938] [loss 0.2202177345752716]
Train... [epoch 2/10, step 600/938] [loss 0.11520379781723022]
Train... [epoch 2/10, step 700/938] [loss 0.2193664163351059]
Train... [epoch 2/10, step 800/938] [loss 0.24063025414943695]
Train... [epoch 2/10, step 900/938] [loss 0.29591041803359985]
Train... [epoch 3/10, step 100/938] [loss 0.4646122455596924]
Train... [epoch 3/10, step 200/938] [loss 0.23519711196422577]
Train... [epoch 3/10, step 300/938] [loss 0.17583540081977844]
Train... [epoch 3/10, step 400/938] [loss 0.22302353382110596]
Train... [epoch 3/10, step 500/938] [loss 0.21519353985786438]
Train... [epoch 3/10, step 600/938] [loss 0.19604721665382385]
Train... [epoch 3/10, step 700/938] [loss 0.27179521322250366]
Train... [epoch 3/10, step 800/938] [loss 0.2679252326488495]
Train... [epoch 3/10, step 900/938] [loss 0.19720779359340668]
...
Train... [epoch 10/10, step 100/938] [loss 0.04720400273799896]
Train... [epoch 10/10, step 200/938] [loss 0.09908416867256165]
Train... [epoch 10/10, step 300/938] [loss 0.15088242292404175]
Train... [epoch 10/10, step 400/938] [loss 0.12225984781980515]
Train... [epoch 10/10, step 500/938] [loss 0.16585980355739594]
Train... [epoch 10/10, step 600/938] [loss 0.19017626345157623]
Train... [epoch 10/10, step 700/938] [loss 0.16856203973293304]
Train... [epoch 10/10, step 800/938] [loss 0.163274884223938]
Train... [epoch 10/10, step 900/938] [loss 0.13963419198989868]
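If you want to keep the trained weights, a minimal sketch (the file name is arbitrary):
torch.save(model.state_dict(), "fashion_cnn.pth")              # hypothetical file name
# later: model.load_state_dict(torch.load("fashion_cnn.pth"))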
Plotting the training curve
import matplotlib.pyplot as plt
plt.plot(range(epoch_num), loss_list)
plt.xlabel("epoch")
plt.ylabel("loss")
plt.show()
Testing: computing accuracy
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True)

model.eval()
with torch.no_grad():
    correct = 0
    total = 0
    for X_test, y_test in test_loader:
        X_test = X_test.to(device)
        y_test = y_test.to(device)
        output = model(X_test)
        _, pred = torch.max(output, 1)   # index of the highest logit = predicted class
        total += y_test.size(0)
        correct += (pred == y_test).sum().item()
print(f'total = {total}, accuracy = {100 * correct / total}%')
total = 10000, accuracy = 91.42%
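To see the model on a single example (an optional sketch, not from the original post), take one test sample, add a batch dimension, and map the predicted index back to a class name:
X_sample, y_sample = test_data[0]                      # already a tensor thanks to ToTensor
with torch.no_grad():
    logits = model(X_sample.unsqueeze(0).to(device))   # add the batch dimension
pred_idx = logits.argmax(dim=1).item()
print(f"predicted: {test_data.classes[pred_idx]}, actual: {test_data.classes[y_sample]}")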