"""
The core wrapper assembles the submodules of the Autoformer classification model
and takes over the forward progress of the algorithm.
"""

# Created by Wenjie Du <[email protected]>
# License: BSD-3-Clause

import torch
import torch.nn as nn

from ...nn.modules import ModelCore
from ...nn.modules.loss import Criterion
from ...nn.modules.autoformer import AutoformerEncoder
from ...nn.modules.saits import SaitsEmbedding


class _Autoformer(ModelCore):
    """The core model of the Autoformer-based classifier for partially-observed time series."""

    def __init__(
        self,
        n_classes: int,
        n_steps: int,
        n_features: int,
        n_layers: int,
        d_model: int,
        n_heads: int,
        d_ffn: int,
        factor: int,
        moving_avg_window_size: int,
        dropout: float,
        training_loss: Criterion,
        validation_metric: Criterion,
    ):
        super().__init__()

        self.n_steps = n_steps
        self.d_model = d_model
        self.n_layers = n_layers
        self.training_loss = training_loss
        if validation_metric.__class__.__name__ == "Criterion":
            # in this case, only the base Criterion() was passed in, just so that
            # _train_model() can read validation_metric.lower_better, so fall back to
            # training_loss for the actual metric computation during validation
            self.validation_metric = self.training_loss
        else:
            self.validation_metric = validation_metric

        self.saits_embedding = SaitsEmbedding(
            n_features * 2,  # observed values are concatenated with the missing mask, hence * 2
            d_model,
            with_pos=False,
            dropout=dropout,
        )
        self.encoder = AutoformerEncoder(
            n_layers,
            d_model,
            n_heads,
            d_ffn,
            factor,
            moving_avg_window_size,
            dropout,
            "relu",
        )
        # flatten all time steps into one vector per sample, then map to class logits
        self.projection = nn.Linear(d_model * n_steps, n_classes)

    def forward(
        self,
        inputs: dict,
        calc_criterion: bool = False,
    ) -> dict:
        X, missing_mask = inputs["X"], inputs["missing_mask"]

        # embed the observed values together with the missing mask
        enc_out = self.saits_embedding(X, missing_mask)

        # Autoformer encoder processing
        enc_out, attns = self.encoder(enc_out)

        # flatten the time dimension and project the representation onto class logits
        logits = self.projection(enc_out.reshape(-1, self.n_steps * self.d_model))
        classification_proba = torch.softmax(logits, dim=1)

        results = {
            "classification_proba": classification_proba,
            "logits": logits,
        }

        if calc_criterion:
            if self.training:
                # in the training stage, `loss` is the item back-propagated to update the model
                results["loss"] = self.training_loss(logits, inputs["y"])
            else:
                # in the eval mode (the validation stage), return the validation_metric result
                results["metric"] = self.validation_metric(logits, inputs["y"])

        return results
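
# A minimal usage sketch (not part of the module itself): the tensor shapes, the
# batch size, and the `CrossEntropy` criterion below are illustrative assumptions
# only, not a definitive API reference.
#
#     import torch
#     from pypots.nn.modules.loss import CrossEntropy
#
#     model = _Autoformer(
#         n_classes=2, n_steps=24, n_features=10, n_layers=2,
#         d_model=64, n_heads=4, d_ffn=128, factor=3,
#         moving_avg_window_size=5, dropout=0.1,
#         training_loss=CrossEntropy(), validation_metric=CrossEntropy(),
#     )
#     inputs = {
#         "X": torch.randn(8, 24, 10),            # [batch, n_steps, n_features]
#         "missing_mask": torch.ones(8, 24, 10),  # 1 = observed, 0 = missing
#         "y": torch.randint(0, 2, (8,)),         # labels, needed when calc_criterion=True
#     }
#     results = model(inputs)                     # keys: "classification_proba", "logits"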