regularizer.py
import numpy as np

from MLlib import Tensor
import MLlib.nn as nn
from MLlib.functional import absolute
from MLlib.loss_func import MSELoss
from MLlib.optim import SGDWithMomentum


class L1_Regularizer:
    """
    Implements the L1 Regularizer, a.k.a. Lasso Regression.

    ATTRIBUTES
    ==========

    params: tuple of Tensor
        Parameters which need to be regularized

    Lambda: float
        Regularization rate

    METHODS
    =======

    get_loss()
        Calculates and returns the L1 regularization loss
    """

    def __init__(self, parameters, Lambda):
        """
        PARAMETERS
        ==========

        parameters: Tensor, list or iterator
            Parameters which need to be regularized

        Lambda: float
            Regularization rate
        """
        if isinstance(parameters, Tensor):
            self.params = (parameters,)
        else:
            self.params = tuple(parameters)
        self.Lambda = Lambda

    def get_loss(self):
        """
        Calculates and returns the L1 regularization loss,
        Lambda * sum(|param|) over all regularized parameters.
        """
        reg_loss = Tensor(0., requires_grad=True)
        for param in self.params:
            reg_loss += absolute(param).sum()
        return reg_loss * self.Lambda
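
# A minimal usage sketch (illustrative only; it assumes Tensor wraps a
# NumPy array and exposes its value via .data, as get_loss() above
# suggests). The L1 penalty is Lambda * sum(|w|):
#
#     w = Tensor(np.array([1., -2., 3.]), requires_grad=True)
#     reg = L1_Regularizer(w, Lambda=0.1)
#     reg.get_loss().data  # 0.1 * (1 + 2 + 3) = 0.6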


class L2_Regularizer:
    """
    Implements the L2 Regularizer, a.k.a. Ridge Regression.

    ATTRIBUTES
    ==========

    params: tuple of Tensor
        Parameters which need to be regularized

    Lambda: float
        Regularization rate

    METHODS
    =======

    get_loss()
        Calculates and returns the L2 regularization loss
    """

    def __init__(self, parameters, Lambda):
        """
        PARAMETERS
        ==========

        parameters: Tensor, list or iterator
            Parameters which need to be regularized

        Lambda: float
            Regularization rate
        """
        if isinstance(parameters, Tensor):
            self.params = (parameters,)
        else:
            self.params = tuple(parameters)
        self.Lambda = Lambda

    def get_loss(self):
        """
        Calculates and returns the L2 regularization loss,
        Lambda * sum(param**2) over all regularized parameters.
        """
        reg_loss = Tensor(0., requires_grad=True)
        for param in self.params:
            reg_loss += (param**2).sum()
        return reg_loss * self.Lambda
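
# Usage mirrors L1_Regularizer; only the penalty term changes
# (again a sketch, under the same Tensor assumptions as above):
#
#     w = Tensor(np.array([1., -2., 3.]), requires_grad=True)
#     reg = L2_Regularizer(w, Lambda=0.1)
#     reg.get_loss().data  # 0.1 * (1 + 4 + 9) = 1.4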


class LinearRegWith_Regularization(nn.Module):
    """
    LinearRegWith_Regularization

    Inherits from nn.Module and implements linear regression with a
    pluggable regularizer (L1_Regularizer or L2_Regularizer).
    """

    def __init__(self,
                 in_features,
                 regularizer,
                 loss_fn=MSELoss,
                 optimizer=SGDWithMomentum,
                 Lambda=10):
        """
        PARAMETERS
        ==========

        in_features: int
            Number of input features

        regularizer: class
            One of the regularizer classes, e.g.
            L1_Regularizer or L2_Regularizer

        loss_fn: class
            One of the loss function classes, e.g.
            MSELoss

        optimizer: class
            One of the optimizer classes, e.g.
            SGD or SGDWithMomentum

        Lambda: float
            Regularization rate
        """
        super().__init__()
        self.linear_layer = nn.Linear(in_features, 1)
        self.loss_fn = loss_fn()
        self.regularizer = regularizer(self.linear_layer.weights, Lambda)
        self.optimizer = optimizer(self.linear_layer.parameters())

    def forward(self, input):
        """
        Forward pass through the linear layer.
        """
        return self.linear_layer(input)

    def fit(self, x, y, epochs=1):
        """
        Trains the LinearRegWith_Regularization model, adding the
        scaled loss of its associated regularizer to the data loss.

        PARAMETERS
        ==========

        x: Tensor
            Input dataset

        y: Tensor
            Target values

        epochs: int
            Number of iterations of the loop that computes the
            loss and optimizes the weights.

        RETURNS
        =======

        output: ndarray(dtype=float, ndim=1)
            Total loss, recorded once every 100 epochs
        """
        output = []
        for i in range(epochs):
            prediction = self(x)
            loss = (self.loss_fn(prediction, y)
                    + self.regularizer.get_loss() / (2 * prediction.shape[0]))
            if (i + 1) % 100 == 0:
                output.append(loss.data)
            loss.backward()
            self.optimizer.step()
            self.optimizer.zero_grad()
        return np.array(output)
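

if __name__ == '__main__':
    # Minimal end-to-end sketch (illustrative, not part of the library
    # API; it assumes Tensor accepts a NumPy array and that nn.Linear
    # takes inputs of shape (n_samples, in_features)).
    np.random.seed(0)
    x_data = np.random.randn(200, 3)
    true_w = np.array([[2.], [-1.], [0.5]])
    y_data = x_data.dot(true_w) + 0.1 * np.random.randn(200, 1)

    model = LinearRegWith_Regularization(3,
                                         regularizer=L2_Regularizer,
                                         Lambda=1.0)
    losses = model.fit(Tensor(x_data), Tensor(y_data), epochs=500)
    print(losses)  # loss recorded every 100 epochs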