This repository was archived by the owner on Nov 8, 2022. It is now read-only.

Commit b721947

Merge pull request #129 from ofirzaf/fix-q8bert-bug
Fix QuantizedBertLayer is_decoder Attribute
2 parents: 214b15d + 3be33df

File tree: 1 file changed, +4 −0 lines

nlp_architect/models/transformers/quantized_bert.py (+4, −0)
@@ -159,6 +159,10 @@ class QuantizedBertLayer(BertLayer):
     def __init__(self, config):
         super(BertLayer, self).__init__()
         self.attention = QuantizedBertAttention(config)
+        self.is_decoder = config.is_decoder
+        if self.is_decoder:
+            logger.warning("Using QuantizedBertLayer as decoder was not tested.")
+            self.crossattention = QuantizedBertAttention(config)
         self.intermediate = QuantizedBertIntermediate(config)
         self.output = QuantizedBertOutput(config)
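For context, the reason the added lines are needed is visible in the diff itself: QuantizedBertLayer calls super(BertLayer, self).__init__(), which resolves past BertLayer in the MRO, so BertLayer's own constructor (which presumably sets is_decoder in upstream transformers) never runs, leaving the attribute missing until this fix re-sets it. Below is a small, self-contained sketch of that inheritance pattern; the class names (BertLayerLike, QuantizedLayerBroken, QuantizedLayerFixed) are illustrative placeholders, not code from this repository.

# Sketch of the "skipped parent constructor" pattern behind this fix.
# Class names are hypothetical, not the library's classes.
class BertLayerLike:
    def __init__(self):
        # Attribute normally provided by the parent's constructor.
        self.is_decoder = False


class QuantizedLayerBroken(BertLayerLike):
    def __init__(self):
        # super(BertLayerLike, self) skips BertLayerLike.__init__ entirely
        # (it resolves to object.__init__), so is_decoder is never set.
        super(BertLayerLike, self).__init__()


class QuantizedLayerFixed(BertLayerLike):
    def __init__(self, is_decoder=False):
        super(BertLayerLike, self).__init__()
        # The fix: re-set the attribute the skipped constructor would have set.
        self.is_decoder = is_decoder


print(hasattr(QuantizedLayerBroken(), "is_decoder"))    # False -> AttributeError when read later
print(QuantizedLayerFixed(is_decoder=True).is_decoder)  # True, attribute now always present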

0 commit comments