From 847bdae9f6bd3fd5abfbd5173d586adbdb210a05 Mon Sep 17 00:00:00 2001
From: newbie
Date: Thu, 28 Nov 2024 22:56:06 +0800
Subject: [PATCH] save

---
 Qtorch/Models/Qcnn.py | 68 ++++++++++++++++++++++++++-----------------
 Qtorch/Models/Qmlp.py |  3 ---
 main.py               | 20 ++++++++-----
 3 files changed, 54 insertions(+), 37 deletions(-)

diff --git a/Qtorch/Models/Qcnn.py b/Qtorch/Models/Qcnn.py
index ca5f55f..83ea21a 100644
--- a/Qtorch/Models/Qcnn.py
+++ b/Qtorch/Models/Qcnn.py
@@ -1,32 +1,48 @@
-import torch
 import torch.nn as nn
-import torch.optim as optim
+from Qtorch.Models.Qnn import Qnn
+from sklearn.preprocessing import LabelEncoder
 
-class Simple1DCNN(nn.Module):
-    def __init__(self, input_size, num_classes):
-        super(Simple1DCNN, self).__init__()
-        self.conv1 = nn.Conv1d(in_channels=1, out_channels=32, kernel_size=3, stride=1, padding=1)
-        self.relu = nn.ReLU()
-        self.pool = nn.MaxPool1d(kernel_size=2, stride=2)
-        self.conv2 = nn.Conv1d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1)
-        self.fc1 = nn.Linear(64 * (input_size // 4), 128) # 假设经过两次池化后,长度减半两次
-        self.fc2 = nn.Linear(128, num_classes)
+class QCNN(Qnn):
+    def __init__(self, X_train, y_train, X_test, y_test,
+                 labels=None,
+                 dropout_rate=0.3
+                 ):
+        super(QCNN, self).__init__()
 
-    def forward(self, x):
-        x = self.pool(self.relu(self.conv1(x)))
-        x = self.pool(self.relu(self.conv2(x)))
-        x = x.view(-1, 64 * (self.input_size // 4)) # 展平特征图
-        x = self.relu(self.fc1(x))
-        x = self.fc2(x)
-        return x
+        self.LABEL_ENCODER = LabelEncoder()
+
+        self.X_train, self.y_train, self.X_test, self.y_test = X_train, y_train, X_test, y_test
+
+        self.labels = labels
+
+        input_size = X_train.shape[1]
+        num_classes = len(set(y_train))
+
+        self.layers = nn.ModuleList()
 
-# 实例化模型
-input_size = 100 # 假设n=100
-num_classes = 10 # 假设有10个类别
-model = Simple1DCNN(input_size, num_classes)
+        # Input layer to first Convolutional layer
+        self.layers.append(nn.Conv1d(in_channels=1, out_channels=32, kernel_size=3, stride=1, padding=1))
+        self.layers.append(nn.ReLU())
+        self.layers.append(nn.MaxPool1d(kernel_size=2, stride=2))
+
+        # Calculate the size after convolutions and pooling
+        conv_output_size = input_size // 4 # Assuming two pooling layers with stride 2
+        self.layers.append(nn.Linear(32 * conv_output_size, 128))
+        self.layers.append(nn.ReLU())
+        self.layers.append(nn.Dropout(dropout_rate))
+
+        # Output layer
+        self.layers.append(nn.Linear(128, num_classes))
+        self.__init_weights()
 
-# 定义损失函数和优化器
-criterion = nn.CrossEntropyLoss()
-optimizer = optim.Adam(model.parameters(), lr=0.001)
+    def forward(self, x):
+        for layer in self.layers:
+            x = layer(x)
+        return x
 
-# 训练和评估模型的代码与之前类似,这里不再赘述。
\ No newline at end of file
+    def __init_weights(self):
+        for m in self.modules():
+            if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
+                nn.init.xavier_uniform_(m.weight)
+                if m.bias is not None:
+                    m.bias.data.fill_(0.01)
diff --git a/Qtorch/Models/Qmlp.py b/Qtorch/Models/Qmlp.py
index 1bfccc5..72b42a9 100644
--- a/Qtorch/Models/Qmlp.py
+++ b/Qtorch/Models/Qmlp.py
@@ -1,9 +1,6 @@
-import torch
 import torch.nn as nn
 from Qtorch.Models.Qnn import Qnn
 from sklearn.preprocessing import LabelEncoder
-from sklearn.metrics import confusion_matrix
-import pandas as pd
 
 
 class Qmlp(Qnn):
diff --git a/main.py b/main.py
index 56bea2c..f1e5db9 100644
--- a/main.py
+++ b/main.py
@@ -1,25 +1,29 @@
 from Qtorch.Models.Qmlp import Qmlp
+from Qtorch.Models.Qcnn import QCNN
 from Qfunctions.divSet import divSet
 from Qfunctions.loaData import load_data
 from Qfunctions.saveToxlsx import save_to_xlsx as save_to_xlsx
 
 def main():
-    projet_name = '20241112Numbers' # 输入元数据文件夹名称
-    label_names =['1', '2', '3', '4', '5', '6', '7' ,'8', '9'] # 请在[]内输入每一个分类的名称
+    projet_name = '20241112Numbers'  # 输入元数据文件夹名称
+    label_names =['1', '2', '3', '4', '5', '6', '7' ,'8', '9']  # 请在[]内输入每一个分类的名称
 
     data = load_data(projet_name, label_names, isDir=False, fileClass='xls')
     X_train, X_test, y_train, y_test, encoder = divSet(
         data=data, 
         labels=label_names, 
         test_size= 0.3
     )
-    model = Qmlp(
-        X_train=X_train, X_test=X_test, y_train=y_train, y_test= y_test,
-        hidden_layers=[128, 128],
-        dropout_rate=0
-    )
+    # model = Qmlp(
+    #     X_train=X_train, X_test=X_test, y_train=y_train, y_test= y_test,
+    #     hidden_layers = [128],
+    #     dropout_rate=0
+    # )
+    model = QCNN(
+        X_train=X_train, X_test=X_test, y_train=y_train, y_test= y_test,
+        dropout_rate=0
+    )
    pca_2d, pca_3d = model.get_PCA()
-    model.fit(300)
     cm = model.get_cm()