Deeplearning/Qtorch/Models/Qcnn.py

import torch.nn as nn
from Qtorch.Models.Qnn import Qnn
from sklearn.preprocessing import LabelEncoder

class QCNN(Qnn):
    def __init__(self, X_train, y_train, X_test, y_test,
                 labels=None,
                 dropout_rate=0.3):
        super(QCNN, self).__init__()
        self.LABEL_ENCODER = LabelEncoder()
        self.X_train, self.y_train, self.X_test, self.y_test = X_train, y_train, X_test, y_test
        self.labels = labels

        input_size = X_train.shape[1]
        num_classes = len(set(y_train))

        self.layers = nn.ModuleList()

        # First convolutional block: padding=1 preserves the sequence length,
        # then stride-2 max pooling halves it -> (batch, 32, input_size // 2)
        self.layers.append(nn.Conv1d(in_channels=1, out_channels=32, kernel_size=3, stride=1, padding=1))
        self.layers.append(nn.ReLU())
        self.layers.append(nn.MaxPool1d(kernel_size=2, stride=2))

        # Second convolutional block: halves the length again -> (batch, 32, input_size // 4)
        self.layers.append(nn.Conv1d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1))
        self.layers.append(nn.ReLU())
        self.layers.append(nn.MaxPool1d(kernel_size=2, stride=2))

        # Size after the two pooling layers with stride 2
        conv_output_size = input_size // 4
        # Flatten (batch, 32, conv_output_size) -> (batch, 32 * conv_output_size)
        # so the fully connected layers receive a 2-D tensor
        self.layers.append(nn.Flatten())
        self.layers.append(nn.Linear(32 * conv_output_size, 128))
        self.layers.append(nn.ReLU())
        self.layers.append(nn.Dropout(dropout_rate))

        # Output layer
        self.layers.append(nn.Linear(128, num_classes))

        self.__init_weights()

    def forward(self, x):
        # Expects input of shape (batch, 1, input_size)
        for layer in self.layers:
            x = layer(x)
        return x

    def __init_weights(self):
        # Xavier-initialise conv and linear weights; biases get a small constant
        for m in self.modules():
            if isinstance(m, (nn.Conv1d, nn.Linear)):
                nn.init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    m.bias.data.fill_(0.01)
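
# Minimal usage sketch, illustrative only: it assumes Qnn subclasses nn.Module
# (QCNN relies on nn.Module machinery, so this should hold) and fabricates
# synthetic data; the sample count, feature size, and class count are made up.
if __name__ == "__main__":
    import numpy as np
    import torch

    X_train = np.random.randn(64, 128).astype(np.float32)  # 64 samples, 128 features
    y_train = np.arange(64) % 3                             # 3 synthetic classes
    X_test = np.random.randn(16, 128).astype(np.float32)
    y_test = np.arange(16) % 3

    model = QCNN(X_train, y_train, X_test, y_test)
    model.eval()  # disable dropout for a deterministic shape check

    batch = torch.from_numpy(X_train[:8]).unsqueeze(1)  # (8, 1, 128): add channel dim
    logits = model(batch)
    print(logits.shape)  # expected: torch.Size([8, 3])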