import numpy as np
import torch


def init_embedding(embeddings):
    """
    Fills embedding tensor with values from the uniform distribution.

    :param embeddings: embedding tensor
    """
    bias = np.sqrt(3.0 / embeddings.size(1))
    torch.nn.init.uniform_(embeddings, -bias, bias)
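
# Usage sketch (the vocabulary size and dimension below are hypothetical):
# emb = torch.nn.Embedding(5000, 300)
# init_embedding(emb.weight.data)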


def load_embeddings(emb_file, word_map):
    """
    Creates an embedding tensor for the specified word map, for loading into the model.

    :param emb_file: file containing embeddings (stored in GloVe format)
    :param word_map: word map
    :return: embeddings in the same order as the words in the word map, dimension of embeddings
    """
    # Find embedding dimension
    with open(emb_file, 'r') as f:
        emb_dim = len(f.readline().split(' ')) - 1

    vocab = set(word_map.keys())

    # Create tensor to hold embeddings, initialize with random values so that
    # words absent from the embedding file still get a sensible vector
    embeddings = torch.FloatTensor(len(vocab), emb_dim)
    init_embedding(embeddings)

    # Read embedding file
    print("\nLoading embeddings...")
    with open(emb_file, 'r') as f:
        for line in f:
            line = line.split(' ')

            emb_word = line[0]
            embedding = list(map(lambda t: float(t), filter(lambda n: n and not n.isspace(), line[1:])))

            # Ignore word if not in train_vocab
            if emb_word not in vocab:
                continue

            embeddings[word_map[emb_word]] = torch.FloatTensor(embedding)

    return embeddings, emb_dim
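
# Usage sketch (the file name and word map below are hypothetical):
# word_map = {'<unk>': 0, 'a': 1, 'cat': 2}
# embeddings, emb_dim = load_embeddings('glove.6B.300d.txt', word_map)
# decoder.embedding.weight = torch.nn.Parameter(embeddings)  # assumes a decoder with an 'embedding' layer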


def clip_gradient(optimizer, grad_clip):
    """
    Clips gradients computed during backpropagation to avoid explosion of gradients.

    :param optimizer: optimizer with the gradients to be clipped
    :param grad_clip: clip value
    """
    for group in optimizer.param_groups:
        for param in group['params']:
            if param.grad is not None:
                param.grad.data.clamp_(-grad_clip, grad_clip)
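
# Note: this clamps each gradient element to [-grad_clip, grad_clip] (value
# clipping), not the gradient norm. Typical call order (the clip value of 5.
# is a hypothetical choice):
# loss.backward()
# clip_gradient(decoder_optimizer, 5.)
# decoder_optimizer.step()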


def save_checkpoint(data_name, epoch, epochs_since_improvement, encoder, decoder, encoder_optimizer, decoder_optimizer,
                    bleu4, is_best):
    """
    Saves model checkpoint.

    :param data_name: base name of processed dataset
    :param epoch: epoch number
    :param epochs_since_improvement: number of epochs since last improvement in BLEU-4 score
    :param encoder: encoder model
    :param decoder: decoder model
    :param encoder_optimizer: optimizer to update encoder's weights, if fine-tuning
    :param decoder_optimizer: optimizer to update decoder's weights
    :param bleu4: validation BLEU-4 score for this epoch
    :param is_best: is this checkpoint the best so far?
    """
    state = {'epoch': epoch,
             'epochs_since_improvement': epochs_since_improvement,
             'bleu-4': bleu4,
             'encoder': encoder,
             'decoder': decoder,
             'encoder_optimizer': encoder_optimizer,
             'decoder_optimizer': decoder_optimizer}
    filename = 'checkpoint_' + data_name + '.pth.tar'
    torch.save(state, filename)
    # If this checkpoint is the best so far, store a copy so it doesn't get overwritten by a worse checkpoint
    if is_best:
        torch.save(state, 'BEST_' + filename)
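
# Loading sketch: whole model and optimizer objects (not just state_dicts) are
# pickled above, so torch.load needs the defining classes importable.
# 'data_name' below is whatever was passed to save_checkpoint:
# checkpoint = torch.load('BEST_checkpoint_' + data_name + '.pth.tar')
# encoder = checkpoint['encoder']
# decoder = checkpoint['decoder']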


class AverageMeter(object):
    """
    Keeps track of most recent, average, sum, and count of a metric.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
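
# Usage sketch (the variable names below are hypothetical):
# losses = AverageMeter()
# losses.update(loss.item(), n=batch_size)  # 'avg' is the running mean weighted by n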


def adjust_learning_rate(optimizer, shrink_factor):
    """
    Shrinks learning rate by a specified factor.

    :param optimizer: optimizer whose learning rate must be shrunk
    :param shrink_factor: factor in interval (0, 1) to multiply learning rate with
    """
    print("\nDECAYING learning rate.")
    for param_group in optimizer.param_groups:
        param_group['lr'] = param_group['lr'] * shrink_factor
    print("The new learning rate is %f\n" % (optimizer.param_groups[0]['lr'],))


def accuracy(scores, targets, k):
    """
    Computes top-k accuracy, from predicted and true labels.

    :param scores: scores from the model
    :param targets: true labels
    :param k: k in top-k accuracy
    :return: top-k accuracy
    """
    batch_size = targets.size(0)
    # Indices of the k largest scores along the class dimension (dim=1)
    _, ind = scores.topk(k, 1, True, True)
    # True wherever one of the top-k predictions matches the target
    correct = ind.eq(targets.view(-1, 1).expand_as(ind))
    correct_total = correct.view(-1).float().sum()  # 0D tensor
    return correct_total.item() * (100.0 / batch_size)
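
# Worked example (hypothetical tensors): with scores of shape (batch, classes)
# and targets of shape (batch,), row 0's best score is at index 1 (a hit) and
# row 1's is at index 0 (a miss), so top-1 accuracy is 50%:
# scores = torch.tensor([[0.1, 0.5, 0.4], [0.9, 0.05, 0.05]])
# targets = torch.tensor([1, 2])
# accuracy(scores, targets, 1)  # -> 50.0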