import torch
import torch.nn as nn
from Qtorch.Models.Qnn import Qnn
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix
import pandas as pd

class Qmlp(Qnn):
    """Multi-layer perceptron classifier built on the Qnn base class."""

    def __init__(self, X_train, y_train, X_test, y_test,
                 hidden_layers,
                 labels=None,
                 dropout_rate=0.3):
        super().__init__()

        self.LABEL_ENCODER = LabelEncoder()

        self.X_train, self.y_train, self.X_test, self.y_test = \
            X_train, y_train, X_test, y_test
        self.labels = labels

        input_size = X_train.shape[1]    # one input unit per feature
        num_classes = len(set(y_train))  # one output unit per distinct label
        self.layers = nn.ModuleList()

        # Input layer to first hidden layer
        self.layers.append(nn.Linear(input_size, hidden_layers[0]))
        self.layers.append(nn.BatchNorm1d(hidden_layers[0]))
        self.layers.append(nn.ReLU())
        self.layers.append(nn.Dropout(dropout_rate))

        # Create hidden layers
        for i in range(1, len(hidden_layers)):
            self.layers.append(nn.Linear(hidden_layers[i - 1], hidden_layers[i]))
            self.layers.append(nn.BatchNorm1d(hidden_layers[i]))
            self.layers.append(nn.ReLU())
            self.layers.append(nn.Dropout(dropout_rate))

        # Output layer
        self.layers.append(nn.Linear(hidden_layers[-1], num_classes))
        self.__init_weights()

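    # For illustration (hypothetical sizes): hidden_layers=[64, 32] with
    # 10 input features and 3 classes yields the stack
    #   Linear(10, 64) -> BatchNorm1d(64) -> ReLU -> Dropout(0.3)
    #   -> Linear(64, 32) -> BatchNorm1d(32) -> ReLU -> Dropout(0.3)
    #   -> Linear(32, 3)
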
    def forward(self, x):
        # Pass the input through every layer in order; the result is
        # raw, unnormalized class logits (no softmax applied here).
        for layer in self.layers:
            x = layer(x)
        return x

    def __init_weights(self):
        # Xavier-uniform initialization for every linear layer, zero biases.
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                nn.init.zeros_(m.bias)
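
# --- Usage sketch (illustrative only; not part of the original file) ---
# A minimal smoke test, assuming the Qtorch package is importable and that
# Qnn subclasses nn.Module. All data shapes and label counts below are
# made-up placeholders.
if __name__ == "__main__":
    import numpy as np

    X_train = np.random.randn(32, 10).astype("float32")  # 32 samples, 10 features
    y_train = np.arange(32) % 3                           # exactly 3 classes
    X_test = np.random.randn(8, 10).astype("float32")
    y_test = np.arange(8) % 3

    model = Qmlp(X_train, y_train, X_test, y_test, hidden_layers=[64, 32])
    model.eval()  # eval mode so BatchNorm1d accepts any batch size
    with torch.no_grad():
        logits = model(torch.from_numpy(X_test))
    print(logits.shape)  # expected: torch.Size([8, 3])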