Skip to content

Commit 812951d

Browse files
committed
Merge branch 'master' of github.com:learnables/learn2learn
2 parents 443c8f7 + 06893e8 commit 812951d

File tree

7 files changed

+141
-33
lines changed

7 files changed

+141
-33
lines changed

CHANGELOG.md

+13-1
Original file line numberDiff line numberDiff line change
@@ -10,15 +10,27 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
1010

1111
### Added
1212

13+
### Changed
14+
15+
### Fixed
16+
17+
18+
## v0.1.6
19+
20+
### Added
21+
1322
* PyTorch Lightning interface to MAML, ANIL, ProtoNet, MetaOptNet.
14-
* Automatic batcher for Lighting: `l2l.data.EpisodicBatcher`.
23+
* Automatic batcher for Lightning: `l2l.data.EpisodicBatcher`.
1524
* `l2l.nn.PrototypicalClassifier` and `l2l.nn.SVMClassifier`.
1625
* Add `l2l.vision.models.WRN28`.
1726
* Separate modules for `CNN4Backbone`, `ResNet12Backbone`, `WRN28Backbones` w/ pretrained weights.
1827
* Add `l2l.data.OnDeviceDataset` and implement `device` parameter for benchmarks.
28+
* (Beta) Add `l2l.data.partition_task` and `l2l.data.InfiniteIterator`.
1929

2030
### Changed
2131

32+
* Renamed and clarified dropout parameters for `ResNet12`.
33+
2234
### Fixed
2335

2436
* Improved support for 1D inputs in `l2l.nn.KroneckerLinear`. (@timweiland)

docs/pydocmd.yml

+4
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,10 @@ generate:
2525
- learn2learn.data.transforms.FusedNWaysKShots
2626
- learn2learn.data.transforms.RemapLabels
2727
- learn2learn.data.transforms.ConsecutiveLabels
28+
- learn2learn.data.utils:
29+
- learn2learn.data.utils.OnDeviceDataset
30+
- learn2learn.data.utils.InfiniteIterator
31+
- learn2learn.data.utils.partition_task
2832
- docs/learn2learn.algorithms.md:
2933
- learn2learn.algorithms:
3034
- learn2learn.algorithms.MAML++

learn2learn/_version.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
__version__ = '0.1.5'
1+
__version__ = '0.1.6'

learn2learn/data/utils.py

+79-3
Original file line numberDiff line numberDiff line change
@@ -49,6 +49,26 @@ def save_response_content(response, destination):
4949

5050
class InfiniteIterator(object):
5151

52+
"""
53+
[[Source]](https://github.com/learnables/learn2learn/blob/master/learn2learn/data/utils.py)
54+
55+
**Description**
56+
57+
Infinitely loops over a given iterator.
58+
59+
**Arguments**
60+
61+
* **dataloader** (iterator) - Iterator to loop over.
62+
63+
**Example**
64+
~~~python
65+
dataloader = DataLoader(dataset, shuffle=True, batch_size=32)
66+
inf_dataloader = InfiniteIterator(dataloader)
67+
for iteration in range(10000): # guaranteed to reach 10,000 regardless of len(dataloader)
68+
X, y = next(inf_dataloader)
69+
~~~
70+
"""
71+
5272
def __init__(self, dataloader):
5373
self.dataloader = dataloader
5474
self.iterator = iter(self.dataloader)
@@ -64,11 +84,35 @@ def __next__(self):
6484
self.iterator = iter(self.dataloader)
6585

6686

67-
def partition_task(data, labels, shots=1, ways=None):
87+
def partition_task(data, labels, shots=1):
88+
89+
"""
90+
[[Source]](https://github.com/learnables/learn2learn/blob/master/learn2learn/data/utils.py)
91+
92+
**Description**
93+
94+
Partitions a classification task into support and query sets.
95+
96+
The support set will contain `shots` samples per class, the query will take the remaining samples.
97+
98+
Assumes each class in `labels` is associated with the same number of samples in `data`.
99+
100+
**Arguments**
101+
102+
* **data** (Tensor) - Data to be partitioned into support and query.
103+
* **labels** (Tensor) - Labels of each data sample, used for partitioning.
104+
* **shots** (int, *optional*, default=1) - Number of data samples per class in the support set.
105+
106+
**Example**
107+
~~~python
108+
X, y = taskset.sample()
109+
(X_support, y_support), (X_query, y_query) = partition_task(X, y, shots=5)
110+
~~~
111+
"""
112+
68113
assert data.size(0) == labels.size(0)
69114
unique_labels = labels.unique()
70-
if ways is None:
71-
ways = unique_labels.numel()
115+
ways = unique_labels.numel()
72116
data_shape = data.shape[1:]
73117
num_support = ways * shots
74118
num_query = data.size(0) - num_support
@@ -119,6 +163,35 @@ def partition_task(data, labels, shots=1, ways=None):
119163

120164
class OnDeviceDataset(torch.utils.data.TensorDataset):
121165

166+
"""
167+
[[Source]](https://github.com/learnables/learn2learn/blob/master/learn2learn/data/utils.py)
168+
169+
**Description**
170+
171+
Converts an entire dataset into a TensorDataset, and optionally puts it on the desired device.
172+
173+
Useful to accelerate training with relatively small datasets.
174+
If the device is cpu and cuda is available, the TensorDataset will live in pinned memory.
175+
176+
**Arguments**
177+
178+
* **dataset** (Dataset) - Dataset to put on a device.
179+
* **device** (torch.device, *optional*, default=None) - Device of dataset. Defaults to CPU.
180+
* **transform** (transform, *optional*, default=None) - Transform to apply on the first variate of the dataset's samples X.
181+
182+
**Example**
183+
~~~python
184+
transforms = transforms.Compose([
185+
transforms.ToTensor(),
186+
transforms.Normalize((0.1307,), (0.3081,)),
187+
lambda x: x.view(1, 28, 28),
188+
])
189+
mnist = MNIST('~/data')
190+
mnist_ondevice = OnDeviceDataset(mnist, device='cuda', transform=transforms)
191+
mnist_meta = MetaDataset(mnist_ondevice)
192+
~~~
193+
"""
194+
122195
def __init__(self, dataset, device=None, transform=None):
123196
data = []
124197
labels = []
@@ -130,6 +203,9 @@ def __init__(self, dataset, device=None, transform=None):
130203
if device is not None:
131204
data = data.to(device)
132205
labels = labels.to(device)
206+
if data.device == torch.device('cpu') and torch.cuda.is_available():
207+
data = data.pin_memory()
208+
labels = labels.pin_memory()
133209
super(OnDeviceDataset, self).__init__(data, labels)
134210
self.transform = transform
135211
if hasattr(dataset, '_bookkeeping_path'):

learn2learn/vision/models/cnn4.py

+20-3
Original file line numberDiff line numberDiff line change
@@ -227,6 +227,22 @@ def forward(self, x):
227227

228228
class CNN4Backbone(ConvBase):
229229

230+
def __init__(
231+
self,
232+
hidden_size=64,
233+
layers=4,
234+
channels=3,
235+
max_pool=False,
236+
max_pool_factor=1.0,
237+
):
238+
super(CNN4Backbone, self).__init__(
239+
hidden=hidden_size,
240+
layers=layers,
241+
channels=channels,
242+
max_pool=max_pool,
243+
max_pool_factor=max_pool_factor,
244+
)
245+
230246
def forward(self, x):
231247
x = super(CNN4Backbone, self).forward(x)
232248
x = x.reshape(x.size(0), -1)
@@ -265,18 +281,19 @@ class CNN4(torch.nn.Module):
265281
def __init__(
266282
self,
267283
output_size,
268-
hidden_size=32,
284+
hidden_size=64,
269285
layers=4,
270286
channels=3,
287+
max_pool=True,
271288
embedding_size=None,
272289
):
273290
super(CNN4, self).__init__()
274291
if embedding_size is None:
275292
embedding_size = 25 * hidden_size
276293
self.features = CNN4Backbone(
277-
hidden=hidden_size,
294+
hidden_size=hidden_size,
278295
channels=channels,
279-
max_pool=True,
296+
max_pool=max_pool,
280297
layers=layers,
281298
max_pool_factor=4 // layers,
282299
)

learn2learn/vision/models/resnet12.py

+19-21
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
#!/usr/bin/env python3
22

3-
import os
43
import torch
54
import torch.nn as nn
65
import torch.nn.functional as F
@@ -167,10 +166,10 @@ class ResNet12Backbone(nn.Module):
167166

168167
def __init__(
169168
self,
170-
keep_prob=1.0, # dropout for embedding
171169
avg_pool=True, # Set to False for 16000-dim embeddings
172170
wider=True, # True mimics MetaOptNet, False mimics TADAM
173-
drop_rate=0.1, # dropout for residual layers
171+
embedding_dropout=0.0, # dropout for embedding
172+
dropblock_dropout=0.1, # dropout for residual layers
174173
dropblock_size=5,
175174
channels=3,
176175
):
@@ -186,27 +185,27 @@ def __init__(
186185
block,
187186
num_filters[0],
188187
stride=2,
189-
drop_rate=drop_rate,
188+
dropblock_dropout=dropblock_dropout,
190189
)
191190
self.layer2 = self._make_layer(
192191
block,
193192
num_filters[1],
194193
stride=2,
195-
drop_rate=drop_rate,
194+
dropblock_dropout=dropblock_dropout,
196195
)
197196
self.layer3 = self._make_layer(
198197
block,
199198
num_filters[2],
200199
stride=2,
201-
drop_rate=drop_rate,
200+
dropblock_dropout=dropblock_dropout,
202201
drop_block=True,
203202
block_size=dropblock_size,
204203
)
205204
self.layer4 = self._make_layer(
206205
block,
207206
num_filters[3],
208207
stride=2,
209-
drop_rate=drop_rate,
208+
dropblock_dropout=dropblock_dropout,
210209
drop_block=True,
211210
block_size=dropblock_size,
212211
)
@@ -215,10 +214,10 @@ def __init__(
215214
else:
216215
self.avgpool = l2l.nn.Lambda(lambda x: x)
217216
self.flatten = l2l.nn.Flatten()
218-
self.keep_prob = keep_prob
217+
self.embedding_dropout = embedding_dropout
219218
self.keep_avg_pool = avg_pool
220-
self.dropout = nn.Dropout(p=1.0 - self.keep_prob, inplace=False)
221-
self.drop_rate = drop_rate
219+
self.dropout = nn.Dropout(p=self.embedding_dropout, inplace=False)
220+
self.dropblock_dropout = dropblock_dropout
222221

223222
for m in self.modules():
224223
if isinstance(m, nn.Conv2d):
@@ -236,7 +235,7 @@ def _make_layer(
236235
block,
237236
planes,
238237
stride=1,
239-
drop_rate=0.0,
238+
dropblock_dropout=0.0,
240239
drop_block=False,
241240
block_size=1,
242241
):
@@ -253,7 +252,7 @@ def _make_layer(
253252
planes,
254253
stride,
255254
downsample,
256-
drop_rate,
255+
dropblock_dropout,
257256
drop_block,
258257
block_size)
259258
)
@@ -301,14 +300,14 @@ class ResNet12(nn.Module):
301300
302301
**Arguments**
303302
304-
* **output_size** (int) - The dimensionality of the output.
303+
* **output_size** (int) - The dimensionality of the output (e.g., number of classes).
305304
* **hidden_size** (list, *optional*, default=640) - Size of the embedding once features are extracted.
306305
(640 is for mini-ImageNet; used for the classifier layer)
307-
* **keep_prob** (float, *optional*, default=1.0) - Dropout rate on the embedding layer.
308306
* **avg_pool** (bool, *optional*, default=True) - Set to False for the 16k-dim embeddings of Lee et al, 2019.
309307
* **wider** (bool, *optional*, default=True) - True uses (64, 160, 320, 640) filters akin to Lee et al, 2019.
310308
False uses (64, 128, 256, 512) filters, akin to Oreshkin et al, 2018.
311-
* **drop_rate** (float, *optional*, default=0.1) - Dropout rate for the residual layers.
309+
* **embedding_dropout** (float, *optional*, default=0.0) - Dropout rate on the flattened embedding layer.
310+
* **dropblock_dropout** (float, *optional*, default=0.1) - Dropout rate for the residual layers.
312311
* **dropblock_size** (int, *optional*, default=5) - Size of drop blocks.
313312
314313
**Example**
@@ -321,19 +320,19 @@ def __init__(
321320
self,
322321
output_size,
323322
hidden_size=640, # mini-ImageNet images, used for the classifier
324-
keep_prob=1.0, # dropout for embedding
325323
avg_pool=True, # Set to False for 16000-dim embeddings
326324
wider=True, # True mimics MetaOptNet, False mimics TADAM
327-
drop_rate=0.1, # dropout for residual layers
325+
embedding_dropout=0.0, # dropout for embedding
326+
dropblock_dropout=0.1, # dropout for residual layers
328327
dropblock_size=5,
329328
channels=3,
330329
):
331330
super(ResNet12, self).__init__()
332331
self.features = ResNet12Backbone(
333-
keep_prob=keep_prob,
334332
avg_pool=avg_pool,
335333
wider=wider,
336-
drop_rate=drop_rate,
334+
embedding_dropout=embedding_dropout,
335+
dropblock_dropout=dropblock_dropout,
337336
dropblock_size=dropblock_size,
338337
channels=channels,
339338
)
@@ -346,10 +345,9 @@ def forward(self, x):
346345

347346

348347
if __name__ == '__main__':
349-
model = ResNet12(output_size=5, avg_pool=False, drop_rate=0.0)
348+
model = ResNet12(output_size=5, avg_pool=False, dropblock_dropout=0.0)
350349
img = torch.randn(5, 3, 84, 84)
351350
model = model.to('cuda')
352351
img = img.to('cuda')
353352
out = model.features(img)
354353
print(out.shape)
355-
__import__('pdb').set_trace()

setup.py

+5-4
Original file line numberDiff line numberDiff line change
@@ -42,10 +42,11 @@
4242
]
4343

4444
if use_cython:
45-
compiler_directives = {'language_level': 3,
46-
'embedsignature': True,
47-
# 'profile': True,
48-
# 'binding': True,
45+
compiler_directives = {
46+
'language_level': 3,
47+
'embedsignature': True,
48+
# 'profile': True,
49+
# 'binding': True,
4950
}
5051
extensions = cythonize(extensions, compiler_directives=compiler_directives)
5152

0 commit comments

Comments
 (0)