import torch
import torch.nn as nn
import torch.optim as optim

from Qtorch.Models.QSVM import QSVM as svm

class QSVM_BRF(svm):
    def __init__(self, data, labels=None, test_size=0.2, random_state=None,
                 gamma=1.0, C=100, batch_size=64, learning_rate=0.01):
        super().__init__(data, labels, test_size, random_state)
        # Feature count excludes the label, which is assumed to sit in the
        # last column of `data` (hence shape[1] - 1, not shape[0] - 1).
        self.gamma, self.C, self.n_features = gamma, C, data.shape[1] - 1
        # Every training sample acts as a support vector; the coefficients
        # alpha and the bias b are learned by gradient descent.
        self.support_vectors = torch.cat([batch[0] for batch in self.train_loader])
        self.alpha = nn.Parameter(torch.zeros(self.support_vectors.shape[0]))
        self.b = nn.Parameter(torch.zeros(1))
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.optimizer = optim.SGD(self.parameters(), lr=self.learning_rate)
    def rbf_kernel(self, X, Y):
        # Pairwise squared Euclidean distances via the expansion
        # ||x - y||^2 = ||x||^2 + ||y||^2 - 2 * x . y
        X_norm = (X ** 2).sum(1).view(-1, 1)
        Y_norm = (Y ** 2).sum(1).view(1, -1)
        dist = X_norm + Y_norm - 2.0 * torch.mm(X, Y.t())
        # Guard against small negative values from floating-point cancellation.
        dist = torch.clamp(dist, min=0.0)
        return torch.exp(-self.gamma * dist)
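    # The expansion above matches, up to floating-point error,
    #   torch.exp(-self.gamma * torch.cdist(X, Y) ** 2)
    # but avoids materialising the (n, m, d) difference tensor that a naive
    # broadcasted implementation would allocate.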
    def forward(self, X):
        # Kernel SVM decision function: f(x) = sum_i alpha_i * K(x, x_i) + b
        K = self.rbf_kernel(X, self.support_vectors)
        return torch.mm(K, self.alpha.unsqueeze(1)).squeeze() + self.b
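    # With +/-1 labels, hard predictions come from the sign of the margin,
    # e.g. torch.sign(model(X)); train_model() and evaluate() below rely on
    # this convention.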
    def hinge_loss(self, outputs, targets):
        # Standard hinge loss; targets are expected to be +/-1.
        return torch.mean(torch.clamp(1 - outputs * targets, min=0))

    def regularization(self):
        # L2 penalty on the dual coefficients alpha.
        return 0.5 * (self.alpha ** 2).sum()
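    # Together these give the training objective
    #   mean_i max(0, 1 - y_i * f(x_i)) + C/2 * ||alpha||^2,
    # so here C scales the penalty term, unlike the usual C-SVM convention
    # where C weights the data-fit (hinge) term.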
    def train_model(self, epoch_times=100, learning_rate=0.01):
        # Apply the requested learning rate to the existing optimizer so the
        # argument actually takes effect.
        for group in self.optimizer.param_groups:
            group['lr'] = learning_rate

        losses, train_accs, test_accs = [], [], []

        for epoch in range(epoch_times):
            self.train()
            epoch_loss, correct_train, total_train = 0, 0, 0

            for batch_X, batch_y in self.train_loader:
                self.optimizer.zero_grad()
                outputs = self(batch_X)
                loss = self.hinge_loss(outputs, batch_y) + self.C * self.regularization()
                loss.backward()
                self.optimizer.step()

                epoch_loss += loss.item()
                predicted = torch.sign(outputs)
                correct_train += (predicted == batch_y).sum().item()
                total_train += batch_y.size(0)

            train_acc = correct_train / total_train
            test_acc = self.evaluate()

            losses.append(epoch_loss / len(self.train_loader))
            train_accs.append(train_acc)
            test_accs.append(test_acc)
            print(f'Epoch [{epoch+1}/{epoch_times}], Loss: {losses[-1]:.4f}, '
                  f'Train Acc: {train_acc:.4f}, Test Acc: {test_acc:.4f}')

        return losses, train_accs, test_accs
    def evaluate(self):
        # Accuracy on the held-out test split.
        self.eval()
        correct = 0
        total = 0
        with torch.no_grad():
            for batch_X, batch_y in self.test_loader:
                outputs = self(batch_X)
                predicted = torch.sign(outputs)
                correct += (predicted == batch_y).sum().item()
                total += batch_y.size(0)
        return correct / total
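

# Minimal usage sketch on synthetic data. This assumes the QSVM base class is
# an nn.Module whose constructor builds self.train_loader / self.test_loader
# yielding (features, +/-1 label) batches, with the label stored in the last
# column of `data`; adjust to the actual Qtorch API if it differs.
if __name__ == "__main__":
    torch.manual_seed(0)
    n = 200
    # Two Gaussian blobs with labels in {-1, +1}.
    X = torch.cat([torch.randn(n, 2) + 2.0, torch.randn(n, 2) - 2.0])
    y = torch.cat([torch.ones(n), -torch.ones(n)]).unsqueeze(1)
    data = torch.cat([X, y], dim=1)

    model = QSVM_BRF(data, gamma=0.5, C=1.0, learning_rate=0.01)
    model.train_model(epoch_times=20)
    print(f'Final test accuracy: {model.evaluate():.4f}')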