An Engineering Approach to Building a Multi-Layer Perceptron (MLP)

This post builds a multi-layer perceptron with an engineering-style project layout.

The code is split into three files: model.py, util.py, and train.py, which hold the model definition, the helper functions, and the main training script, respectively. The assumed directory layout is sketched below.
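A minimal sketch of the project layout (the folder names are just this post's convention, not anything PyTorch requires; model_save/ is where train.py will write checkpoints):

mlp_addition/
├── model.py      # model definition
├── util.py       # helper functions
├── train.py      # main training script
└── model_save/   # saved checkpoints (created by train.py)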

Below, we use a neural network that learns the rule of "addition" as the running example:

model.py

import torch
import torch.nn as nn


# Define the multi-layer perceptron model
class MLP(nn.Module):
    def __init__(self, in_dim, out_dim):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(in_dim, 16)  # input layer to first hidden layer
        self.fc2 = nn.Linear(16, 8)       # first hidden layer to second hidden layer
        self.fc3 = nn.Linear(8, out_dim)  # second hidden layer to output layer

    def forward(self, x):
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        x = self.fc3(x)  # no activation on the output: this is a regression task
        return x
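A quick sanity check of the model, as a minimal sketch (the two input pairs are made up for illustration): instantiate the network and confirm that a batch of shape (2, 2) comes out as shape (2, 1).

import torch
from model import MLP

net = MLP(in_dim=2, out_dim=1)
dummy = torch.tensor([[1.0, 2.0], [300.0, 450.0]])  # two hypothetical (a, b) pairs
print(net(dummy).shape)  # torch.Size([2, 1])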

util.py

import numpy as np


def process_data(x, y):
    # Shuffle x and y with the same seed so the (x, y) pairing is preserved
    np.random.seed(0)
    np.random.shuffle(x)
    np.random.seed(0)
    np.random.shuffle(y)
    # Split into train / validation / test sets (70% / 10% / 20%)
    data = {}
    length = len(x)
    data['x_train'] = x[:int(length * 0.7), :]
    data['y_train'] = y[:int(length * 0.7)]
    data['x_val'] = x[int(length * 0.7):int(length * 0.8), :]
    data['y_val'] = y[int(length * 0.7):int(length * 0.8)]
    data['x_test'] = x[int(length * 0.8):, :]
    data['y_test'] = y[int(length * 0.8):]
    return data
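To see the 70% / 10% / 20% split in action, here is a minimal sketch (the ten toy samples are made up for illustration):

import numpy as np
import util

x = np.arange(20).reshape(10, 2)       # 10 samples, 2 features each
y = np.sum(x, axis=-1, keepdims=True)  # their sums, shape (10, 1)
data = util.process_data(x, y)
print(len(data['x_train']), len(data['x_val']), len(data['x_test']))  # 7 1 2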

train.py

import os

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt

import util
from model import MLP

# Generate data: pairs of integers and their sums
num_samples = 10000
x_data = np.random.randint(0, 1000, (num_samples, 2))
y_data = np.sum(x_data, axis=-1, keepdims=True)
# y_data = np.expand_dims(y_data, axis=-1)  # alternative to keepdims=True above; use one or the other

# Process the data
data = util.process_data(x_data, y_data)
x_train = torch.tensor(data['x_train'], dtype=torch.float32)
y_train = torch.tensor(data['y_train'], dtype=torch.float32)
x_val = torch.tensor(data['x_val'], dtype=torch.float32)
y_val = torch.tensor(data['y_val'], dtype=torch.float32)
x_test = torch.tensor(data['x_test'], dtype=torch.float32)
y_test = torch.tensor(data['y_test'], dtype=torch.float32)

# Create the model instance
net = MLP(in_dim=2, out_dim=1)

# Hyperparameters
max_epochs = 5000
learn_rate = 0.01
criterion = nn.MSELoss()  # mean squared error: sum((yi - y)^2) / n
optimizer = optim.Adam(net.parameters(), lr=learn_rate)
loss_all = []
val_acc = []

# Training loop (full-batch gradient descent on the training set)
for epoch in range(max_epochs):
    net.train()
    # Forward pass
    outputs = net(x_train)
    loss = criterion(outputs, y_train)

    # Backward pass and optimization
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if (epoch + 1) % 100 == 0:
        loss_all.append(loss.item())
        net.eval()
        # Evaluate on the validation set
        with torch.no_grad():
            outputs = net(x_val)
        res = (torch.round(outputs) == y_val)
        acc = (torch.sum(res) / y_val.shape[0]).item()
        val_acc.append(acc)
        print(f'Epoch [{epoch + 1}/{max_epochs}], Loss = {loss.item():.4f}, val_acc = {acc * 100:.2f}%')

# Save the model
os.makedirs('model_save', exist_ok=True)
torch.save(net.state_dict(), f'model_save/finish_{loss_all[-1]:.2f}.pth')

# Load the model
# net = MLP(2, 1)
# net.load_state_dict(torch.load('model_save/finish_0.00.pth'))

# Test
net.eval()
with torch.no_grad():
    outputs = net(x_test)
res = (torch.round(outputs) == y_test)
acc = (torch.sum(res) / y_test.shape[0]).item()
print(f'test_acc = {acc * 100:.2f}%')

# Visualization: training loss and validation accuracy curves
x = np.arange(len(loss_all))
plt.plot(x, np.array(loss_all), linestyle='-', color='b', label='loss')
plt.legend()
plt.show()
plt.plot(x, np.array(val_acc), linestyle='-', color='r', label='val_acc')
plt.legend()
plt.show()
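Once training converges, the network can be queried like a calculator. A minimal inference sketch (the checkpoint filename depends on the final loss value, so adjust it to whatever train.py actually saved):

import torch
from model import MLP

net = MLP(in_dim=2, out_dim=1)
net.load_state_dict(torch.load('model_save/finish_0.00.pth'))  # adjust to your saved file
net.eval()
with torch.no_grad():
    pred = net(torch.tensor([[123.0, 456.0]]))
print(torch.round(pred).item())  # ideally 579.0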

For the final visualization step, you can refer to my blog post on plotting.
