Skip to content

Commit 1197741

Browse files
committed
Fix loading issue of fine-tuned NER models
1 parent b8a165c commit 1197741

File tree

3 files changed

+3
-3
lines changed

hanlp/components/taggers/transformers/transformer_tagger.py

+1-1
@@ -151,7 +151,7 @@ def build_model(self, training=True, extra_embeddings: Embedding = None, finetune
 151 151              self.config.get('secondary_encoder', None),
 152 152              extra_embeddings=extra_embeddings.module(self.vocabs) if extra_embeddings else None,
 153 153          )
 154     -        if finetune:
     154 +        if finetune and self.model:
 155 155              model_state = model.state_dict()
 156 156              load_state = self.model.state_dict()
 157 157              safe_state = filter_state_dict_safely(model_state, load_state)

hanlp/version.py

+1-1
@@ -2,7 +2,7 @@
 2 2  # Author: hankcs
 3 3  # Date: 2019-12-28 19:26
 4 4
 5   -__version__ = '2.1.0-beta.62'
   5 +__version__ = '2.1.0-beta.63'
 6 6  """HanLP version"""
 7 7
 8 8

setup.py

+1-1
@@ -64,7 +64,7 @@
 64 64      'transformers>=4.1.1',
 65 65      'sentencepiece>=0.1.91',  # Essential for tokenization_bert_japanese
 66 66      'torch>=1.6.0',
 67    -    'hanlp-common>=0.0.20',
    67 +    'hanlp-common>=0.0.21',
 68 68      'hanlp-trie>=0.0.4',
 69 69      'hanlp-downloader',
 70 70      *TOKENIZERS,

0 commit comments

Comments (0)