import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, classification_report


class ELM(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim):
        super(ELM, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        # Random, fixed input-to-hidden weights (He initialization); these are
        # never trained, which is the defining property of an ELM
        self.hidden_weights = nn.Parameter(
            torch.randn(input_dim, hidden_dim) * np.sqrt(2 / input_dim),
            requires_grad=False,
        )
        # Hidden-to-output weights, solved in closed form by fit()
        self.output_weights = nn.Parameter(
            torch.zeros(hidden_dim, output_dim), requires_grad=True
        )

    def forward(self, x):
        # Hidden-layer activations with the ReLU activation function
        h = torch.relu(x @ self.hidden_weights)
        # Linear output layer (no activation function)
        out = h @ self.output_weights
        return out

    def fit(self, x, y, alpha=1e-5):
        # y must be a float tensor of shape (n_samples, output_dim),
        # e.g. one-hot encoded class labels
        h = torch.relu(x @ self.hidden_weights)
        # Ridge-regularized pseudo-inverse: (H^T H + alpha * I)^-1 H^T
        h_pseudo_inverse = torch.pinverse(h.T @ h + alpha * torch.eye(self.hidden_dim)) @ h.T
        # Closed-form least-squares solution for the output weights
        self.output_weights.data = h_pseudo_inverse @ y

    def predict(self, x):
        with torch.no_grad():
            out = self.forward(x)
            return torch.argmax(out, dim=1)
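
# --- Usage example: a minimal sketch, not from the original source ---
# The dataset is an assumption: make_classification stands in for whatever
# data the original used. It exercises the sklearn helpers imported above
# (train_test_split, accuracy_score, f1_score, classification_report) and
# shows that training is a single closed-form solve rather than an
# epoch-based loop.
from sklearn.datasets import make_classification

X, y = make_classification(n_samples=1000, n_features=20, n_classes=3,
                           n_informative=10, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

X_train_t = torch.tensor(X_train, dtype=torch.float32)
X_test_t = torch.tensor(X_test, dtype=torch.float32)
# fit() expects one-hot targets of shape (n_samples, output_dim)
y_train_onehot = torch.eye(3)[torch.as_tensor(y_train)]

model = ELM(input_dim=20, hidden_dim=256, output_dim=3)
model.fit(X_train_t, y_train_onehot)  # one closed-form solve, no epochs

y_pred = model.predict(X_test_t).numpy()
print("Accuracy:", accuracy_score(y_test, y_pred))
print("Macro F1:", f1_score(y_test, y_pred, average="macro"))
print(classification_report(y_test, y_pred))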