Commit a478836

Merge pull request #326 from WenjieDu/dev

Adding PatchTST, renaming d_inner to d_ffn, and refactoring Autoformer

2 parents bf53667 + 2a33326

32 files changed: +774 −85 lines changed

docs/examples.rst (+1 −1)

@@ -45,7 +45,7 @@ You can also find a simple and quick-start tutorial notebook on Google Colab
     n_features=37,
     n_layers=2,
     d_model=256,
-    d_inner=128,
+    d_ffn=128,
     n_heads=4,
     d_k=64,
     d_v=64,
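
For downstream users, the only change in this example is the keyword name. A minimal sketch of the updated call, assuming the surrounding docs snippet instantiates pypots.imputation.SAITS (the d_k/d_v arguments suggest so) and using n_steps=48 as a placeholder value:

from pypots.imputation import SAITS

# n_steps=48 is an assumed placeholder; the remaining values mirror the diff above
saits = SAITS(
    n_steps=48,
    n_features=37,
    n_layers=2,
    d_model=256,
    d_ffn=128,  # renamed from d_inner in this PR
    n_heads=4,
    d_k=64,
    d_v=64,
)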

pypots/classification/raindrop/model.py (+3 −3)

@@ -46,7 +46,7 @@ class Raindrop(BaseNNClassifier):
         The dimension of the Transformer encoder backbone.
         It is the input dimension of the multi-head self-attention layers.
 
-    d_inner :
+    d_ffn :
         The dimension of the layer in the Feed-Forward Networks (FFN).
 
     n_heads :
@@ -121,7 +121,7 @@ def __init__(
         n_classes,
         n_layers,
         d_model,
-        d_inner,
+        d_ffn,
         n_heads,
         dropout,
         d_static=0,
@@ -156,7 +156,7 @@ def __init__(
         n_features,
         n_layers,
         d_model,
-        d_inner,
+        d_ffn,
         n_heads,
         n_classes,
         dropout,
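
Because the parameter was renamed rather than reordered, keyword arguments are the safest way to construct the classifier after upgrading. A hedged sketch (all values are illustrative, and the arguments not visible in the hunks above, such as n_steps and n_features, are assumed from the public PyPOTS API):

from pypots.classification import Raindrop

# placeholder hyperparameters for illustration, not taken from this commit
classifier = Raindrop(
    n_steps=48,      # assumed required argument, not shown in the hunks above
    n_features=37,   # assumed required argument, not shown in the hunks above
    n_classes=2,
    n_layers=2,
    d_model=256,
    d_ffn=128,       # formerly d_inner
    n_heads=4,
    dropout=0.3,
)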

pypots/classification/raindrop/modules/core.py (+4 −4)

@@ -44,7 +44,7 @@ def __init__(
         n_features,
         n_layers,
         d_model,
-        d_inner,
+        d_ffn,
         n_heads,
         n_classes,
         dropout=0.3,
@@ -59,7 +59,7 @@ def __init__(
         self.n_layers = n_layers
         self.n_features = n_features
         self.d_model = d_model
-        self.d_inner = d_inner
+        self.d_ffn = d_ffn
         self.n_heads = n_heads
         self.n_classes = n_classes
         self.dropout = dropout
@@ -84,13 +84,13 @@ def __init__(
             dim_check = n_features * (self.d_ob + d_pe)
             assert dim_check % n_heads == 0, "dim_check must be divisible by n_heads"
             encoder_layers = TransformerEncoderLayer(
-                n_features * (self.d_ob + d_pe), n_heads, d_inner, dropout
+                n_features * (self.d_ob + d_pe), n_heads, d_ffn, dropout
             )
         else:
             dim_check = d_model + d_pe
             assert dim_check % n_heads == 0, "dim_check must be divisible by n_heads"
             encoder_layers = TransformerEncoderLayer(
-                d_model + d_pe, n_heads, d_inner, dropout
+                d_model + d_pe, n_heads, d_ffn, dropout
             )
         self.transformer_encoder = TransformerEncoder(encoder_layers, n_layers)
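
In these hunks d_ffn is forwarded as the third positional argument of TransformerEncoderLayer, i.e. the feed-forward width. A minimal sketch of that mapping, assuming the layer comes from torch.nn as in the reference Raindrop implementation:

import torch
from torch.nn import TransformerEncoder, TransformerEncoderLayer

d_model, n_heads, d_ffn, dropout = 64, 4, 128, 0.3

# the third positional argument is dim_feedforward, which is what d_ffn configures
layer = TransformerEncoderLayer(d_model, n_heads, d_ffn, dropout)
encoder = TransformerEncoder(layer, num_layers=2)

x = torch.randn(100, 16, d_model)  # (seq_len, batch, d_model); batch_first=False by default
out = encoder(x)                   # same shape: (100, 16, 64)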

pypots/imputation/__init__.py (+2)

@@ -14,6 +14,7 @@
 from .transformer import Transformer
 from .timesnet import TimesNet
 from .autoformer import Autoformer
+from .patchtst import PatchTST
 from .usgan import USGAN
 
 # naive imputation methods
@@ -26,6 +27,7 @@
     "SAITS",
     "Transformer",
     "TimesNet",
+    "PatchTST",
     "Autoformer",
     "BRITS",
     "MRNN",

pypots/imputation/autoformer/data.py (+1 −1)

@@ -1,5 +1,5 @@
 """
-Dataset class for TimesNet.
+Dataset class for Autoformer.
 """
 
 # Created by Wenjie Du <[email protected]>

pypots/imputation/autoformer/model.py (+6 −6)

@@ -1,5 +1,5 @@
 """
-The implementation of Transformer for the partially-observed time-series imputation task.
+The implementation of Autoformer for the partially-observed time-series imputation task.
 
 Refer to the paper "Wu, H., Xu, J., Wang, J., & Long, M. (2021).
 Autoformer: Decomposition transformers with auto-correlation for long-term series forecasting. NeurIPS 2021.".
@@ -31,7 +31,7 @@
 
 class Autoformer(BaseNNImputer):
     """The PyTorch implementation of the Autoformer model.
-    TimesNet is originally proposed by Wu et al. in :cite:`wu2021autoformer`.
+    Autoformer is originally proposed by Wu et al. in :cite:`wu2021autoformer`.
 
     Parameters
     ----------
@@ -56,7 +56,7 @@ class Autoformer(BaseNNImputer):
     factor :
         The factor of the auto correlation mechanism for the Autoformer model.
 
-    moving_avg_kernel_size :
+    moving_avg_window_size :
         The window size of moving average.
 
     dropout :
@@ -120,7 +120,7 @@ def __init__(
         d_model: int,
         d_ffn: int,
         factor: int,
-        moving_avg_kernel_size: int,
+        moving_avg_window_size: int,
         dropout: float = 0,
         batch_size: int = 32,
         epochs: int = 100,
@@ -149,7 +149,7 @@ def __init__(
         self.d_model = d_model
         self.d_ffn = d_ffn
         self.factor = factor
-        self.moving_avg_kernel_size = moving_avg_kernel_size
+        self.moving_avg_window_size = moving_avg_window_size
         self.dropout = dropout
 
         # set up the model
@@ -161,7 +161,7 @@ def __init__(
             self.d_model,
             self.d_ffn,
             self.factor,
-            self.moving_avg_kernel_size,
+            self.moving_avg_window_size,
             self.dropout,
         )
         self._send_model_to_given_device()
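
After the rename, constructing the imputer looks like the following sketch (values are illustrative; the leading n_steps/n_features/n_layers/n_heads arguments are assumed from the public PyPOTS API rather than shown in these hunks):

from pypots.imputation import Autoformer

# placeholder hyperparameters for illustration only
imputer = Autoformer(
    n_steps=48,      # assumed required argument
    n_features=37,   # assumed required argument
    n_layers=2,      # assumed required argument
    n_heads=4,       # assumed required argument
    d_model=256,
    d_ffn=128,
    factor=3,
    moving_avg_window_size=25,  # renamed from moving_avg_kernel_size
    dropout=0.1,
)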

pypots/imputation/autoformer/modules/core.py (+3 −3)

@@ -29,7 +29,7 @@ def __init__(
         d_model,
         d_ffn,
         factor,
-        moving_avg_kernel_size,
+        moving_avg_window_size,
         dropout,
         activation="relu",
         output_attention=False,
@@ -38,7 +38,7 @@ def __init__(
 
         self.seq_len = n_steps
         self.n_layers = n_layers
-        self.series_decomp = SeriesDecompositionBlock(moving_avg_kernel_size)
+        self.series_decomp = SeriesDecompositionBlock(moving_avg_window_size)
         self.enc_embedding = DataEmbedding_wo_Pos(
             n_features,
             d_model,
@@ -54,7 +54,7 @@ def __init__(
             ),
             d_model,
             d_ffn,
-            moving_avg_kernel_size,
+            moving_avg_window_size,
             dropout,
             activation,
         )
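
For context, the renamed argument is the window of the moving average that splits each series into trend and seasonal parts. A minimal sketch of that idea, not the library's exact SeriesDecompositionBlock:

import torch
import torch.nn.functional as F

def series_decomp(x: torch.Tensor, window_size: int):
    # x: (batch, time, features); pad both ends so the moving average
    # keeps the original sequence length
    front = x[:, :1, :].repeat(1, (window_size - 1) // 2, 1)
    end = x[:, -1:, :].repeat(1, window_size // 2, 1)
    padded = torch.cat([front, x, end], dim=1)
    # trend = sliding mean over time; seasonal = residual
    trend = F.avg_pool1d(
        padded.transpose(1, 2), kernel_size=window_size, stride=1
    ).transpose(1, 2)
    return x - trend, trend  # (seasonal, trend)

seasonal, trend = series_decomp(torch.randn(8, 48, 37), window_size=25)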
pypots/imputation/patchtst/__init__.py (+17)

@@ -0,0 +1,17 @@
+"""
+The package of the partially-observed time-series imputation model PatchTST.
+
+Refer to the paper "Nie, Y., Nguyen, N. H., Sinthong, P., & Kalagnanam, J. (2023).
+A Time Series is Worth 64 Words: Long-term Forecasting with Transformers. ICLR 2023.".
+
+"""
+
+# Created by Wenjie Du <[email protected]>
+# License: BSD-3-Clause
+
+
+from .model import PatchTST
+
+__all__ = [
+    "PatchTST",
+]

pypots/imputation/patchtst/data.py (+24)

@@ -0,0 +1,24 @@
+"""
+Dataset class for PatchTST.
+"""
+
+# Created by Wenjie Du <[email protected]>
+# License: BSD-3-Clause
+
+from typing import Union
+
+from ..saits.data import DatasetForSAITS
+
+
+class DatasetForPatchTST(DatasetForSAITS):
+    """PatchTST uses the same data strategy as SAITS: it needs MIT (masked imputation task) for training."""
+
+    def __init__(
+        self,
+        data: Union[dict, str],
+        return_X_ori: bool,
+        return_labels: bool,
+        file_type: str = "h5py",
+        rate: float = 0.2,
+    ):
+        super().__init__(data, return_X_ori, return_labels, file_type, rate)
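
A hedged usage sketch: since the class inherits everything from DatasetForSAITS, it should randomly mask a fraction rate of the observed values so the model is trained to reconstruct them (X below is a placeholder array):

import numpy as np

X = np.random.randn(100, 48, 37)  # placeholder: (n_samples, n_steps, n_features)

# behavior inherited from DatasetForSAITS; rate=0.2 masks 20% of observed values
dataset = DatasetForPatchTST(
    data={"X": X},
    return_X_ori=True,
    return_labels=False,
    rate=0.2,
)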
