# Pasted from: mask-ddpm/example/window_models.py (66 lines, 1.9 KiB, Python)
#!/usr/bin/env python3
"""Small neural models used by the evaluation suite."""
from __future__ import annotations
import torch
import torch.nn as nn
class MLPClassifier(nn.Module):
    """Two-hidden-layer MLP emitting one logit per sample.

    Architecture: input_dim -> hidden_dim -> max(hidden_dim // 2, 32) -> 1,
    with GELU activations and dropout after each hidden layer. The trailing
    singleton output axis is squeezed away in ``forward``.
    """

    def __init__(self, input_dim: int, hidden_dim: int = 256, dropout: float = 0.1):
        super().__init__()
        # Second hidden layer is half-width, but never narrower than 32 units.
        narrow = max(hidden_dim // 2, 32)
        layers = [
            nn.Linear(input_dim, hidden_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, narrow),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(narrow, 1),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return logits of shape ``x.shape[:-1]`` (last axis squeezed)."""
        logits = self.net(x)
        return logits.squeeze(-1)
class MLPRegressor(nn.Module):
    """Two-hidden-layer MLP mapping ``input_dim`` features to ``output_dim``.

    Architecture: input_dim -> hidden_dim -> max(hidden_dim // 2, 32)
    -> output_dim, with GELU activations and dropout after each hidden layer.
    """

    def __init__(self, input_dim: int, output_dim: int, hidden_dim: int = 256, dropout: float = 0.1):
        super().__init__()
        # Taper the second hidden layer, with a floor of 32 units.
        bottleneck = max(hidden_dim // 2, 32)
        widths = [(input_dim, hidden_dim), (hidden_dim, bottleneck)]
        stack: list[nn.Module] = []
        for fan_in, fan_out in widths:
            stack.extend([nn.Linear(fan_in, fan_out), nn.GELU(), nn.Dropout(dropout)])
        stack.append(nn.Linear(bottleneck, output_dim))
        self.net = nn.Sequential(*stack)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return predictions of shape ``(*x.shape[:-1], output_dim)``."""
        return self.net(x)
class MLPAutoencoder(nn.Module):
    """Symmetric MLP autoencoder with a single hidden layer on each side.

    Encoder: input_dim -> hidden_dim -> latent_dim (GELU after each linear,
    dropout after the first). Decoder mirrors it back to ``input_dim`` with
    no activation on the reconstruction output.
    """

    def __init__(self, input_dim: int, hidden_dim: int = 256, latent_dim: int = 64, dropout: float = 0.1):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, latent_dim),
            nn.GELU(),
        )
        self.decoder = nn.Sequential(
            nn.Linear(latent_dim, hidden_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, input_dim),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Encode ``x`` to the latent space and decode the reconstruction."""
        latent = self.encoder(x)
        recon = self.decoder(latent)
        return recon