# Deeplearning/Qtorch/Models/Qmlp.py

import numpy as np
import torch.nn as nn
from Qtorch.Models.Qnn import Qnn


class Qmlp(Qnn):
    """Multilayer perceptron classifier built on the Qnn training base class."""

    def __init__(self, data,
                 hidden_layers,
                 labels=None,
                 dropout_rate=0.3,
                 test_size=0.2,
                 random_state=None,
                 batch_size=64,
                 learning_rate=1e-5,
                 weight_decay=1e-5,
                 lr_scheduler_patience=10,
                 early_stop_patience=100,
                 early_stop_threshold=0.99,
                 ):
        super(Qmlp, self).__init__(
            data=data,
            labels=labels,
            test_size=test_size,
            random_state=random_state,
            batch_size=batch_size,
            learning_rate=learning_rate,
            weight_decay=weight_decay,
            lr_scheduler_patience=lr_scheduler_patience,
            early_stop_patience=early_stop_patience,
            early_stop_threshold=early_stop_threshold,
        )
        self.hidden_layers = hidden_layers
        self.dropout_rate = dropout_rate
        # Build the MLP architecture; build_model populates self.layers.
        self.build_model(input_shape=self.X_train.shape[1:], num_classes=self.num_classes)
        self._model_built = True

    def build_model(self, input_shape, num_classes):
        if not self.hidden_layers:
            raise ValueError("'hidden_layers' must contain at least one layer size.")
        # Flatten any input shape (e.g. image dimensions) into one feature size.
        input_size = int(np.prod(input_shape))
        self.layers = nn.ModuleList()
        # Connect the input layer to the first hidden layer.
        self.layers.append(nn.Linear(input_size, self.hidden_layers[0]))
        self.layers.append(nn.BatchNorm1d(self.hidden_layers[0]))
        self.layers.append(nn.ReLU())
        self.layers.append(nn.Dropout(self.dropout_rate))
        # Create the remaining hidden layers.
        for i in range(1, len(self.hidden_layers)):
            self.layers.append(nn.Linear(self.hidden_layers[i - 1], self.hidden_layers[i]))
            self.layers.append(nn.BatchNorm1d(self.hidden_layers[i]))
            self.layers.append(nn.ReLU())
            self.layers.append(nn.Dropout(self.dropout_rate))
        # Create the output layer.
        self.layers.append(nn.Linear(self.hidden_layers[-1], num_classes))
        self.__init_weights()

    def forward(self, x):
        # Flatten each sample to a vector, then apply the layers in order.
        x = x.view(x.size(0), -1)
        for layer in self.layers:
            x = layer(x)
        return x

    def __init_weights(self):
        # Xavier initialization for all linear layers.
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                if m.bias is not None:  # guard against bias-free layers
                    nn.init.zeros_(m.bias)
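

if __name__ == "__main__":
    # Illustrative smoke test, not part of the original module. It assumes the
    # Qnn base class accepts a plain feature array as `data` and a label array
    # as `labels`, and that it derives X_train and num_classes from them; these
    # assumptions are hypothetical and should be checked against Qnn itself.
    import torch

    rng = np.random.default_rng(0)
    X = rng.standard_normal((256, 20)).astype(np.float32)  # 256 samples, 20 features
    y = rng.integers(0, 3, size=256)                       # 3 classes

    model = Qmlp(data=X, hidden_layers=[128, 64], labels=y)
    model.eval()  # disable dropout; BatchNorm uses running statistics
    with torch.no_grad():
        logits = model(torch.from_numpy(X[:8]))
    print(logits.shape)  # expected: torch.Size([8, num_classes])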