Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@
from ....cache_utils import Cache
from ....modeling_outputs import MoECausalLMOutputWithPast, MoEModelOutputWithPastAndCrossAttentions
from ....modeling_utils import PreTrainedModel
from ....utils import DUMMY_INPUTS, DUMMY_MASK, auto_docstring
from ....utils import DUMMY_INPUTS, DUMMY_MASK
from .configuration_gptsan_japanese import GPTSanJapaneseConfig


Expand Down Expand Up @@ -635,7 +635,6 @@ def __init__(self, config: GPTSanJapaneseConfig):
def set_input_embeddings(self, new_embeddings):
    """Replace the model's input token-embedding module with `new_embeddings`."""
    # Rebind the embedding attribute; callers pass a fully constructed embedding module.
    setattr(self, "embed_tokens", new_embeddings)

@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
Expand Down
13 changes: 0 additions & 13 deletions src/transformers/models/nougat/tokenization_nougat_fast.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,9 +23,7 @@

import numpy as np

from transformers.tokenization_utils_base import INIT_TOKENIZER_DOCSTRING
from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
from transformers.utils import add_end_docstrings

from ...utils import is_levenshtein_available, is_nltk_available, logging, requires_backends

Expand All @@ -40,16 +38,6 @@
logger = logging.get_logger(__name__)


INIT_TOKENIZER_DOCSTRING += """
tokenizer_object ([`tokenizers.Tokenizer`]):
A [`tokenizers.Tokenizer`] object from 🤗 tokenizers to instantiate from. See [Using tokenizers from 🤗
tokenizers](../fast_tokenizers) for more information.
tokenizer_file ([`str`]):
A path to a local JSON file representing a previously serialized [`tokenizers.Tokenizer`] object from 🤗
tokenizers.
"""


VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}


Expand Down Expand Up @@ -358,7 +346,6 @@ def remove_slice_from_lines(lines, clean_text, slice) -> str:
return to_delete.strip()


@add_end_docstrings(INIT_TOKENIZER_DOCSTRING)
class NougatTokenizerFast(PreTrainedTokenizerFast):
"""
Fast tokenizer for Nougat (backed by HuggingFace tokenizers library).
Expand Down
4 changes: 2 additions & 2 deletions src/transformers/utils/auto_docstring.py
Original file line number Diff line number Diff line change
Expand Up @@ -1729,9 +1729,9 @@ def auto_method_docstring(
model_name_lowercase, class_name, config_class = _get_model_info(func, parent_class)
func_documentation = func.__doc__
if custom_args is not None and func_documentation is not None:
func_documentation = set_min_indent(custom_args, indent_level + 4) + "\n" + func_documentation
func_documentation = "\n" + set_min_indent(custom_args.strip("\n"), 0) + "\n" + func_documentation
elif custom_args is not None:
func_documentation = custom_args
func_documentation = "\n" + set_min_indent(custom_args.strip("\n"), 0)

# Add intro to the docstring before args description if needed
if custom_intro is not None:
Expand Down
4 changes: 4 additions & 0 deletions utils/check_docstrings.py
Original file line number Diff line number Diff line change
Expand Up @@ -1378,6 +1378,10 @@ def check_auto_docstrings(overwrite: bool = False, check_all: bool = False):
print(f"[ERROR] Docstring needs to be filled for the following arguments in {candidate_file}:")
for warning in fill_docstring_args_warnings:
print(warning)
if missing_docstring_args_warnings or docstring_args_ro_remove_warnings or fill_docstring_args_warnings:
raise ValueError(
"There was at least one problem when checking docstrings of objects decorated with @auto_docstring."
)


def check_docstrings(overwrite: bool = False, check_all: bool = False):
Expand Down