Skip to content

Commit 84f3adf

Browse files
Akshaya Purohit and copybara-github
authored and committed
No public description
PiperOrigin-RevId: 712590121 Change-Id: I5998e535ee492df506ca81a777c52d39a4453cab
1 parent 4eedf1e commit 84f3adf

File tree

11 files changed

+119
-102
lines changed

11 files changed

+119
-102
lines changed

qkeras/autoqkeras/forgiving_metrics/forgiving_bits.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ def __init__(
3535
self.ref_size = {}
3636
self.config = config if config else {}
3737

38-
super(ForgivingFactorBits, self).__init__(delta_p, delta_n, rate)
38+
super().__init__(delta_p, delta_n, rate)
3939

4040
def _param_size(self, layer):
4141
"""Computes size of parameters of a layer in bits."""

qkeras/autoqkeras/forgiving_metrics/forgiving_energy.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@ def __init__(self, delta_p, delta_n, rate, stress=1.0, **kwargs):
5050
# energy calculation
5151
# keras_layer_quantizer: quantizer for keras layers in hybrid models
5252

53-
super(ForgivingFactorPower, self).__init__(delta_p, delta_n, rate)
53+
super().__init__(delta_p, delta_n, rate)
5454

5555
self.stress = stress
5656
# process: horowitz... - must be present in config_json

qkeras/callbacks.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,7 @@ def __init__(self,
5858
not.
5959
log_dir: Str. log directory to save qnoise_factor every epoch end.
6060
"""
61-
super(QNoiseScheduler, self).__init__()
61+
super().__init__()
6262

6363
self.start = start
6464
self.finish = finish

qkeras/qconv2d_batchnorm.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -109,7 +109,7 @@ def __init__(
109109
"""
110110

111111
# intialization the qconv2d part of the composite layer
112-
super(QConv2DBatchnorm, self).__init__(
112+
super().__init__(
113113
filters=filters,
114114
kernel_size=kernel_size,
115115
strides=strides,
@@ -126,7 +126,8 @@ def __init__(
126126
bias_constraint=bias_constraint,
127127
kernel_quantizer=kernel_quantizer,
128128
bias_quantizer=bias_quantizer,
129-
**kwargs)
129+
**kwargs
130+
)
130131

131132
# initialization of batchnorm part of the composite layer
132133
self.batchnorm = layers.BatchNormalization(

qkeras/qconvolutional.py

Lines changed: 18 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -170,7 +170,7 @@ def __init__(self,
170170
if activation is not None:
171171
activation = get_quantizer(activation)
172172

173-
super(QConv1D, self).__init__(
173+
super().__init__(
174174
filters=filters,
175175
kernel_size=kernel_size,
176176
strides=strides,
@@ -185,7 +185,8 @@ def __init__(self,
185185
activity_regularizer=activity_regularizer,
186186
kernel_constraint=kernel_constraint,
187187
bias_constraint=bias_constraint,
188-
**kwargs)
188+
**kwargs
189+
)
189190

190191
def call(self, inputs):
191192
if self.kernel_quantizer:
@@ -323,7 +324,7 @@ def __init__(self,
323324
if activation is not None:
324325
activation = get_quantizer(activation)
325326

326-
super(QConv2D, self).__init__(
327+
super().__init__(
327328
filters=filters,
328329
kernel_size=kernel_size,
329330
strides=strides,
@@ -339,7 +340,8 @@ def __init__(self,
339340
activity_regularizer=activity_regularizer,
340341
kernel_constraint=kernel_constraint,
341342
bias_constraint=bias_constraint,
342-
**kwargs)
343+
**kwargs
344+
)
343345

344346
def call(self, inputs):
345347
if self.kernel_quantizer:
@@ -463,7 +465,7 @@ def __init__(self,
463465
if activation is not None:
464466
activation = get_quantizer(activation)
465467

466-
super(QConv2DTranspose, self).__init__(
468+
super().__init__(
467469
filters=filters,
468470
kernel_size=kernel_size,
469471
strides=strides,
@@ -480,7 +482,8 @@ def __init__(self,
480482
activity_regularizer=activity_regularizer,
481483
kernel_constraint=kernel_constraint,
482484
bias_constraint=bias_constraint,
483-
**kwargs)
485+
**kwargs
486+
)
484487

485488
def call(self, inputs):
486489
inputs_shape = array_ops.shape(inputs)
@@ -650,7 +653,7 @@ def __init__(self,
650653
if activation is not None:
651654
activation = get_quantizer(activation)
652655

653-
super(QSeparableConv1D, self).__init__(
656+
super().__init__(
654657
filters=filters,
655658
kernel_size=kernel_size,
656659
strides=strides,
@@ -670,7 +673,8 @@ def __init__(self,
670673
depthwise_constraint=constraints.get(depthwise_constraint),
671674
pointwise_constraint=constraints.get(pointwise_constraint),
672675
bias_constraint=constraints.get(bias_constraint),
673-
**kwargs)
676+
**kwargs
677+
)
674678

675679
def call(self, inputs):
676680
if self.padding == 'causal':
@@ -828,7 +832,7 @@ def __init__(self,
828832
if activation is not None:
829833
activation = get_quantizer(activation)
830834

831-
super(QSeparableConv2D, self).__init__(
835+
super().__init__(
832836
filters=filters,
833837
kernel_size=kernel_size,
834838
strides=strides,
@@ -848,7 +852,8 @@ def __init__(self,
848852
depthwise_constraint=constraints.get(depthwise_constraint),
849853
pointwise_constraint=constraints.get(pointwise_constraint),
850854
bias_constraint=constraints.get(bias_constraint),
851-
**kwargs)
855+
**kwargs
856+
)
852857

853858
def call(self, inputs):
854859
# Apply the actual ops.
@@ -986,7 +991,7 @@ def __init__(self,
986991
if activation is not None:
987992
activation = get_quantizer(activation)
988993

989-
super(QDepthwiseConv2D, self).__init__(
994+
super().__init__(
990995
kernel_size=kernel_size,
991996
strides=strides,
992997
padding=padding,
@@ -1002,7 +1007,8 @@ def __init__(self,
10021007
depthwise_constraint=depthwise_constraint,
10031008
bias_constraint=bias_constraint,
10041009
dilation_rate=dilation_rate,
1005-
**kwargs)
1010+
**kwargs
1011+
)
10061012

10071013
def build(self, input_shape):
10081014
if len(input_shape) < 4:

qkeras/qdepthwiseconv2d_batchnorm.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -107,7 +107,7 @@ def __init__(
107107
"""
108108

109109
# intialization the QDepthwiseConv2d part of the composite layer
110-
super(QDepthwiseConv2DBatchnorm, self).__init__(
110+
super().__init__(
111111
kernel_size=kernel_size,
112112
strides=strides,
113113
padding=padding,
@@ -127,7 +127,8 @@ def __init__(
127127
bias_quantizer=bias_quantizer,
128128
depthwise_range=depthwise_range,
129129
bias_range=bias_range,
130-
**kwargs)
130+
**kwargs
131+
)
131132

132133
# initialization of batchnorm part of the composite layer
133134
self.batchnorm = layers.BatchNormalization(

qkeras/qlayers.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -155,7 +155,7 @@ class QActivation(Layer, PrunableLayer):
155155
# object if string is given as activation.
156156
def __init__(self, activation, **kwargs):
157157

158-
super(QActivation, self).__init__(**kwargs)
158+
super().__init__(**kwargs)
159159

160160
self.activation = activation
161161

@@ -267,7 +267,7 @@ def __init__(self,
267267
that this param is ignored if the activation is not quantized_relu
268268
**kwargs: Args passed to the Layer class.
269269
"""
270-
super(QAdaptiveActivation, self).__init__(**kwargs)
270+
super().__init__(**kwargs)
271271

272272
self.total_bits = total_bits
273273
self.symmetric = symmetric
@@ -630,7 +630,7 @@ def __init__(self,
630630
if activation is not None:
631631
activation = get_quantizer(activation)
632632

633-
super(QDense, self).__init__(
633+
super().__init__(
634634
units=units,
635635
activation=activation,
636636
use_bias=use_bias,
@@ -641,7 +641,8 @@ def __init__(self,
641641
activity_regularizer=activity_regularizer,
642642
kernel_constraint=kernel_constraint,
643643
bias_constraint=bias_constraint,
644-
**kwargs)
644+
**kwargs,
645+
)
645646

646647
def call(self, inputs):
647648
if self.kernel_quantizer:

qkeras/qnormalization.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -154,7 +154,7 @@ def __init__(
154154
'in qkeras qnormalization.py.')
155155
del kwargs['adjustment']
156156

157-
super(QBatchNormalization, self).__init__(
157+
super().__init__(
158158
axis=axis,
159159
momentum=momentum,
160160
epsilon=epsilon,
@@ -172,7 +172,8 @@ def __init__(
172172
renorm=False,
173173
virtual_batch_size=None,
174174
adjustment=None,
175-
**kwargs)
175+
**kwargs
176+
)
176177

177178
def call(self, inputs, training=None):
178179
if self.scale and self.gamma_quantizer:

qkeras/qpooling.py

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -45,9 +45,13 @@ def __init__(self, pool_size=(2, 2),
4545
else:
4646
self.activation = activation
4747

48-
super(QAveragePooling2D, self).__init__(
49-
pool_size=pool_size, strides=strides, padding=padding,
50-
data_format=data_format, **kwargs)
48+
super().__init__(
49+
pool_size=pool_size,
50+
strides=strides,
51+
padding=padding,
52+
data_format=data_format,
53+
**kwargs
54+
)
5155

5256
def call(self, inputs):
5357
"""Performs quantized AveragePooling followed by QActivation.
@@ -142,8 +146,7 @@ def __init__(self, data_format=None,
142146
else:
143147
self.activation = activation
144148

145-
super(QGlobalAveragePooling2D, self).__init__(
146-
data_format=data_format, **kwargs)
149+
super().__init__(data_format=data_format, **kwargs)
147150

148151
def compute_pooling_area(self, input_shape):
149152
if not isinstance(input_shape, tuple):

0 commit comments

Comments (0)