Commit 6fbd432

py : logging and flake8 suppression refactoring (ggml-org#7081)
Set one script as executable and add basicConfig() to another. Also add noqa tags to the test scripts.
1 parent: 8425001

5 files changed: 29 additions, 13 deletions

.flake8  (+14 -1)

@@ -1,4 +1,17 @@
 [flake8]
 max-line-length = 125
 ignore = E203,E211,E221,E225,E231,E241,E251,E261,E266,E501,E701,E704,W503
-exclude = examples/*,examples/*/**,*/**/__init__.py,scripts/gen-unicode-data.py,tests/test-tokenizer-0.py
+exclude =
+    # Do not traverse examples
+    examples,
+    # Do not include package initializers
+    __init__.py,
+    # No need to traverse our git directory
+    .git,
+    # There's no value in checking cache directories
+    __pycache__,
+    # No need to include the build path
+    build,
+    # This contains builds that we don't want to check
+    dist # This is generated with `python build .` for package releases
+# max-complexity = 10
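The exclude list now uses bare directory and file names instead of glob paths. Below is a rough sketch of how that behaves, assuming fnmatch-style matching of each path component; this approximates flake8's exclusion logic rather than reproducing its actual implementation, and the example paths are illustrative only.

# Hedged sketch: approximate flake8's exclude handling by fnmatch-ing each
# path component against the configured patterns. flake8 actually prunes
# excluded directories while walking the tree, but the visible effect is similar.
from fnmatch import fnmatch
from pathlib import PurePosixPath

EXCLUDE = ["examples", "__init__.py", ".git", "__pycache__", "build", "dist"]

def is_excluded(path: str) -> bool:
    return any(fnmatch(part, pattern)
               for part in PurePosixPath(path).parts
               for pattern in EXCLUDE)

print(is_excluded("examples/server/demo.py"))      # True  - anything under examples/
print(is_excluded("gguf-py/gguf/__init__.py"))     # True  - package initializer
print(is_excluded("scripts/gen-unicode-data.py"))  # False - now linted again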

convert-hf-to-gguf-update.py  (mode 100644 -> 100755, +2)

@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
 # This script downloads the tokenizer models of the specified models from Huggingface and
 # generates the get_vocab_base_pre() function for convert-hf-to-gguf.py
 #
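The shebang added above only takes effect together with the 100644 -> 100755 mode change: once the executable bit is set, the script can be launched directly instead of being passed to the interpreter. A minimal sketch with a hypothetical file name tool.py (not part of this commit):

#!/usr/bin/env python3
# With the executable bit set (chmod +x tool.py, recorded by git as mode 100755),
# this runs as ./tool.py; without it, it still runs via `python3 tool.py`.
import sys


def main() -> int:
    print("running", sys.argv[0])
    return 0


if __name__ == "__main__":
    raise SystemExit(main())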

convert-lora-to-ggml.py  (+1)

@@ -16,6 +16,7 @@
 sys.path.insert(1, str(Path(__file__).parent / 'gguf-py' / 'gguf'))
 import gguf
 
+logging.basicConfig(level=logging.DEBUG)
 logger = logging.getLogger("lora-to-gguf")
 
 NUMPY_TYPE_TO_FTYPE: dict[str, int] = {"float32": 0, "float16": 1}
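The single added line matters because logging.getLogger() on its own attaches no handler, so records below WARNING are effectively dropped. basicConfig(level=logging.DEBUG) installs a stream handler on the root logger, which the named logger propagates to. A minimal standard-library sketch of the same pattern:

import logging

# Configure the root logger once, then emit through a named child logger that
# propagates to it - the same pattern convert-lora-to-ggml.py now uses.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("lora-to-gguf")

logger.debug("visible because the root level is DEBUG")
logger.info("also visible")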

scripts/gen-unicode-data.py  (+7 -7)

@@ -41,20 +41,20 @@ def get_matches(regex_expr):
 
 
 def print_cat(cat, ranges):
-    print("const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_{} = {{".format(cat))
+    print("const std::vector<std::pair<uint32_t, uint32_t>> unicode_ranges_{} = {{".format(cat))  # noqa: NP100
     cnt = 0
     for start, end in ranges:
         if cnt % 4 != 0:
-            print(" ", end="")
-        print("{{0x{:08X}, 0x{:08X}}},".format(start, end), end="")
+            print(" ", end="")  # noqa: NP100
+        print("{{0x{:08X}, 0x{:08X}}},".format(start, end), end="")  # noqa: NP100
         if cnt % 4 == 3:
-            print("")
+            print("")  # noqa: NP100
         cnt += 1
 
     if cnt % 4 != 0:
-        print("")
-    print("};")
-    print("")
+        print("")  # noqa: NP100
+    print("};")  # noqa: NP100
+    print("")  # noqa: NP100
 
 
 print_cat("number", get_matches(r'\p{N}'))
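NP100 is not a stock pycodestyle/pyflakes code; it appears to come from a project-specific flake8 plugin that flags bare print() calls (an assumption based on where the tag is applied). The suppression mechanism itself is plain flake8: a trailing `# noqa: <code>` silences exactly that code on that line. A small standalone sketch using the widely known E501 code under flake8's default 79-column limit:

# `# noqa: E501` silences only the line-length check on this line; a bare
# `# noqa` would silence every check here, which is usually too broad.
settings = {"alpha": 1, "bravo": 2, "charlie": 3, "delta": 4, "echo": 5, "foxtrot": 6, "golf": 7}  # noqa: E501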

tests/test-tokenizer-0.py  (+5 -5)

@@ -13,15 +13,15 @@
 
 tokenizer = AutoTokenizer.from_pretrained(dir_tokenizer)
 
-print('tokenizing file: ', fname_tok)
+print('tokenizing file: ', fname_tok)  # noqa: NP100
 fname_out = fname_tok + '.tok'
 with open(fname_tok, 'r', encoding='utf-8') as f:
     lines = f.readlines()
     s = ''.join(lines)
     t_start = time.time()
     res = tokenizer.encode(s, add_special_tokens=False)
     t_end = time.time()
-    print('\nmain : tokenized in', "{:.3f}".format(1000.0 * (t_end - t_start)), 'ms (py)')
+    print('\nmain : tokenized in', "{:.3f}".format(1000.0 * (t_end - t_start)), 'ms (py)')  # noqa: NP100
     with open(fname_out, 'w', encoding='utf-8') as f:
         for x in res:
             # LLaMA v3 for some reason strips the space for these tokens (and others)

@@ -41,6 +41,6 @@
             # f.write(str(x) + ' \'' + tokenizer.decode(x) + '\'\n')
             # f.write(str(x) + ' \'' + tokenizer.decode(x).strip() + '\'\n')
             f.write(str(x) + '\n')
-print('len(res): ', len(res))
-print('len(lines): ', len(lines))
-print('results written to: ', fname_out)
+print('len(res): ', len(res))  # noqa: NP100
+print('len(lines): ', len(lines))  # noqa: NP100
+print('results written to: ', fname_out)  # noqa: NP100
