generated from fastai/nbdev_template
-
Notifications
You must be signed in to change notification settings - Fork 2.2k
Labels
🐛 bug — Something isn't working
Description
CI fails on Python 3.13 with the following error message: https://github.com/huggingface/trl/actions/runs/18338979358/job/52229557647
IndentationError: expected an indented block after function definition on line 3
FAILED tests/test_judges.py::TestJudges::test_pair_rm_judge - File "<unknown>", line 3
def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
^
IndentationError: expected an indented block after function definition on line 3
FAILED tests/test_judges.py::TestJudges::test_pair_rm_judge_return_scores - File "<unknown>", line 3
def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
^
IndentationError: expected an indented block after function definition on line 3
Stacktrace:
tests/test_judges.py:56: in load_pair_rm_judge
return PairRMJudge()
^^^^^^^^^^^^^
trl/trainer/judges.py:218: in __init__
self.blender.loadranker("llm-blender/PairRM", device=Accelerator().device)
.venv/lib/python3.13/site-packages/llm_blender/blender/blender.py:156: in loadranker
self.ranker, self.ranker_tokenizer, self.ranker_collator = load_ranker(ranker_config)
^^^^^^^^^^^^^^^^^^^^^^^^^^
.venv/lib/python3.13/site-packages/llm_blender/blender/blender_utils.py:65: in load_ranker
ranker = build_ranker(
.venv/lib/python3.13/site-packages/llm_blender/pair_ranker/model_util.py:101: in build_ranker
pretrained_model = build_pretrained_model(model_type, model_name, cache_dir=cache_dir)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.venv/lib/python3.13/site-packages/llm_blender/pair_ranker/model_util.py:48: in build_pretrained_model
model = AutoModel.from_pretrained(model_name, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.venv/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py:601: in from_pretrained
model_class = _get_model_class(config, cls._model_mapping)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.venv/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py:394: in _get_model_class
supported_models = model_mapping[type(config)]
^^^^^^^^^^^^^^^^^^^^^^^^^^^
.venv/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py:807: in __getitem__
return self._load_attr_from_module(model_type, model_name)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.venv/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py:821: in _load_attr_from_module
return getattribute_from_module(self._modules[module_name], attr)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.venv/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py:733: in getattribute_from_module
if hasattr(module, attr):
^^^^^^^^^^^^^^^^^^^^^
.venv/lib/python3.13/site-packages/transformers/utils/import_utils.py:2317: in __getattr__
module = self._get_module(self._class_to_module[name])
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.venv/lib/python3.13/site-packages/transformers/utils/import_utils.py:2347: in _get_module
raise e
.venv/lib/python3.13/site-packages/transformers/utils/import_utils.py:2345: in _get_module
return importlib.import_module("." + module_name, self.__name__)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
/__w/_tool/Python/3.13.8/x64/lib/python3.13/importlib/__init__.py:88: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
<frozen importlib._bootstrap>:1387: in _gcd_import
???
<frozen importlib._bootstrap>:1360: in _find_and_load
???
<frozen importlib._bootstrap>:1331: in _find_and_load_unlocked
???
<frozen importlib._bootstrap>:935: in _load_unlocked
???
<frozen importlib._bootstrap_external>:1027: in exec_module
???
<frozen importlib._bootstrap>:488: in _call_with_frames_removed
???
.venv/lib/python3.13/site-packages/transformers/models/deberta_v2/modeling_deberta_v2.py:105: in <module>
@torch.jit.script
^^^^^^^^^^^^^^^^
.venv/lib/python3.13/site-packages/torch/jit/_script.py:1443: in script
ret = _script_impl(
.venv/lib/python3.13/site-packages/torch/jit/_script.py:1211: in _script_impl
ast = get_jit_def(obj, obj.__name__)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.venv/lib/python3.13/site-packages/torch/jit/frontend.py:341: in get_jit_def
parsed_def = parse_def(fn) if not isinstance(fn, _ParsedDef) else fn
^^^^^^^^^^^^^
.venv/lib/python3.13/site-packages/torch/_sources.py:127: in parse_def
py_ast = ast.parse(dedent_src)
^^^^^^^^^^^^^^^^^^^^^
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
source = '@torch.jit.script\n# Copied from transformers.models.deberta.modeling_deberta.c2p_dynamic_expand\ndef c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):\n'
filename = '<unknown>', mode = 'exec'
def parse(source, filename='<unknown>', mode='exec', *,
type_comments=False, feature_version=None, optimize=-1):
"""
Parse the source into an AST node.
Equivalent to compile(source, filename, mode, PyCF_ONLY_AST).
Pass type_comments=True to get back type comments where the syntax allows.
"""
flags = PyCF_ONLY_AST
if optimize > 0:
flags |= PyCF_OPTIMIZED_AST
if type_comments:
flags |= PyCF_TYPE_COMMENTS
if feature_version is None:
feature_version = -1
elif isinstance(feature_version, tuple):
major, minor = feature_version # Should be a 2-tuple.
if major != 3:
raise ValueError(f"Unsupported major version: {major}")
feature_version = minor
# Else it should be an int giving the minor version for 3.x.
> return compile(source, filename, mode, flags,
_feature_version=feature_version, optimize=optimize)
E File "<unknown>", line 3
E def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
E ^
E IndentationError: expected an indented block after function definition on line 3
Metadata
Assignees
Labels
🐛 bug — Something isn't working