
Commit 2136dd6

Implemented L1(Lasso) and L2(Ridge) Regularizer
1 parent a65a1b0
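
In brief, the two penalties added here are the standard ones: L1 (Lasso) adds Lambda * sum(|w_i|) to the loss, and L2 (Ridge) adds Lambda * sum(w_i**2). For concreteness, a plain-NumPy sketch of what the two get_loss() methods below compute (illustrative weights, independent of MLlib):

import numpy as np

w = np.array([0.5, -1.2, 3.0])           # illustrative weight vector
Lambda = 7                               # regularization rate

l1_penalty = Lambda * np.abs(w).sum()    # Lasso: Lambda * sum(|w_i|)
l2_penalty = Lambda * (w ** 2).sum()     # Ridge: Lambda * sum(w_i**2)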

File tree

Examples/regularizer_example.py
MLlib/regularizer.py

2 files changed: +215 -0 lines changed

Examples/regularizer_example.py (+20)

@@ -0,0 +1,20 @@
from MLlib import Tensor
from MLlib.regularizer import LinearRegWith_Regularization
from MLlib.regularizer import L1_Regularizer
from MLlib.optim import SGDWithMomentum
from MLlib.utils.misc_utils import printmat
import numpy as np

np.random.seed(5322)

x = Tensor.randn(10, 8)  # (batch_size, features)

y = Tensor.randn(10, 1)

reg = LinearRegWith_Regularization(8, L1_Regularizer,
                                   optimizer=SGDWithMomentum,
                                   Lambda=7)

# Regularizer, optimizer and Lambda as per user's choice

printmat("Total Loss", reg.fit(x, y, 800))
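
Note: fit (defined in MLlib/regularizer.py below) records the loss once every 100 epochs, so the call above trains for 800 epochs and hands printmat an array of 8 loss values.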

MLlib/regularizer.py (+195)

@@ -0,0 +1,195 @@
from MLlib import Tensor
import MLlib.nn as nn
from MLlib.loss_func import MSELoss
from MLlib.optim import SGDWithMomentum
import numpy as np
from MLlib.functional import absolute


class L1_Regularizer:
    """
    Implements the L1 Regularizer (as used in Lasso Regression)

    ATTRIBUTES
    ==========

    None

    METHODS
    =======

    get_loss()
        Calculates and returns the L1 regularization loss

    """

    def __init__(self, parameters, Lambda):
        """
        PARAMETERS
        ==========

        parameters: Tensor, list or iterator
            Parameters which need to be regularized

        Lambda: float
            Regularization rate

        """
        if type(parameters).__name__ == 'Tensor':
            self.params = (parameters,)
        else:
            self.params = tuple(parameters)

        self.Lambda = Lambda

    def get_loss(self):
        """
        Calculates and returns the L1 regularization loss

        """
        reg_loss = Tensor(0., requires_grad=True)

        for param in self.params:
            reg_loss += absolute(param).sum()

        return (reg_loss * self.Lambda)

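Aside: a minimal numerical cross-check of L1_Regularizer.get_loss, assuming Tensor exposes its underlying NumPy array as .data (as the loss.data usage in fit below suggests):

w = Tensor.randn(3, 1)
reg = L1_Regularizer(w, Lambda=7)
# get_loss() should equal 7 * sum(|w|) computed on the raw array
assert np.isclose(reg.get_loss().data, 7 * np.abs(w.data).sum())
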
class L2_Regularizer:
    """
    Implements the L2 Regularizer (as used in Ridge Regression)

    ATTRIBUTES
    ==========

    None

    METHODS
    =======

    get_loss()
        Calculates and returns the L2 regularization loss

    """

    def __init__(self, parameters, Lambda):
        """
        PARAMETERS
        ==========

        parameters: Tensor, list or iterator
            Parameters which need to be regularized

        Lambda: float
            Regularization rate

        """
        if type(parameters).__name__ == 'Tensor':
            self.params = (parameters,)
        else:
            self.params = tuple(parameters)

        self.Lambda = Lambda

    def get_loss(self):
        """
        Calculates and returns the L2 regularization loss

        """
        reg_loss = Tensor(0., requires_grad=True)
        for param in self.params:
            reg_loss += (param**2).sum()
        return (reg_loss * self.Lambda)

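The same cross-check works for L2_Regularizer with the squared penalty. The design difference between the two: the L2 penalty is differentiable everywhere (gradient 2 * Lambda * w) and shrinks weights smoothly, while the L1 penalty's |w| term can drive weights exactly to zero, producing sparse models.

w = Tensor.randn(3, 1)
reg = L2_Regularizer(w, Lambda=7)
assert np.isclose(reg.get_loss().data, 7 * (w.data ** 2).sum())
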
class LinearRegWith_Regularization(nn.Module):
    """
    LinearRegWith_Regularization

    It inherits the class Module.

    It implements Linear Regression with different types of
    Regularizers (L1 and L2).

    """

    def __init__(self,
                 in_features,
                 regularizer,
                 loss_fn=MSELoss,
                 optimizer=SGDWithMomentum,
                 Lambda=10):
        """
        PARAMETERS
        ==========

        in_features: int
            Number of features

        regularizer: class
            Class of one of the Regularizers, like
            L1_Regularizer and L2_Regularizer

        optimizer: class
            Class of one of the Optimizers, like
            SGD and SGDWithMomentum

        loss_fn: class
            Class of one of the loss functions, like
            MSELoss

        Lambda: float
            Regularization rate

        """
        super().__init__()
        self.linear_layer = nn.Linear(in_features, 1)
        self.loss_fn = loss_fn()
        self.regularizer = regularizer(self.linear_layer.weights, Lambda)
        self.optimizer = optimizer(self.linear_layer.parameters())

    def forward(self, input):
        """
        Forward pass
        """
        return self.linear_layer(input)

    def fit(self, x, y, epochs=1):
        """
        Train the LinearRegWith_Regularization model
        by fitting its associated Regularizer

        PARAMETERS
        ==========

        x: Tensor
            Input dataset

        y: Tensor
            Target values

        epochs: int
            Number of times the loop that calculates the loss
            and optimizes the weights will run

        RETURNS
        =======

        output: ndarray(dtype=float, ndim=1)
            Total loss, recorded once every 100 epochs

        """
        output = []
        for i in range(epochs):
            # for batch in train_batches:
            prediction = self(x)
            loss = self.loss_fn(prediction, y) \
                + self.regularizer.get_loss()/(2*prediction.shape[0])
            if (i+1) % 100 == 0:
                output.append(loss.data)
            loss.backward()
            self.optimizer.step()
            self.optimizer.zero_grad()

        return np.array(output)
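
For reference, fit minimizes loss_fn(prediction, y) + get_loss() / (2 * batch_size); the 1/(2N) factor keeps the penalty on the same per-sample scale as the mean squared error. A self-contained plain-NumPy sketch of the equivalent ridge objective (assuming MSELoss is the batch mean of squared errors; bias and momentum omitted, names illustrative):

def ridge_gd(X, y, Lambda=7.0, lr=0.01, epochs=800):
    # Minimizes mean((X @ w - y)**2) + Lambda * sum(w**2) / (2 * N),
    # i.e. the objective fit() builds from MSELoss + L2_Regularizer.
    N, d = X.shape
    w = np.zeros((d, 1))
    for _ in range(epochs):
        err = X @ w - y                # (N, 1) residuals
        grad = 2 * X.T @ err / N       # gradient of the MSE term
        grad += Lambda * w / N         # gradient of Lambda*sum(w**2)/(2*N)
        w -= lr * grad
    return w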
