4 changes: 2 additions & 2 deletions src/json_stream/loader.py
@@ -3,9 +3,9 @@
 from json_stream.select_tokenizer import default_tokenizer
 
 
-def load(fp_or_iterable, persistent=False, tokenizer=default_tokenizer):
+def load(fp_or_iterable, persistent=False, tokenizer=default_tokenizer, buffering=-1, strings_as_files=False):
     fp = ensure_file(fp_or_iterable)
-    token_stream = tokenizer(fp)
+    token_stream = tokenizer(fp, buffering=buffering, strings_as_files=strings_as_files)
     token_type, token = next(token_stream)
     if token_type == TokenType.OPERATOR:
        return StreamingJSONBase.factory(token, token_stream, persistent)
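The updated `load()` simply forwards the two new keyword arguments to whichever tokenizer is selected. A minimal usage sketch, assuming the installed tokenizer accepts both keywords (as the updated call requires) and that `buffering=0` disables read buffering — a value only inferred from the test change below:

```python
# Minimal sketch of the extended load() signature. The data and access
# pattern mirror the updated test; the effect of buffering=0 is assumed,
# not documented in this diff.
import json_stream

json_bytes = b'{"tasks":[{"id":1,"title":"task1"},{"id":2,"title":"task2"}]}'

def chunks(data, size=15):
    # Feed the parser in small pieces, like data_in_chunks() in the test.
    for i in range(0, len(data), size):
        yield data[i:i + size]

# buffering and strings_as_files are passed straight through to the tokenizer.
stream = json_stream.load(chunks(json_bytes), buffering=0)
for task in stream["tasks"]:
    # Keys are accessed in document order, as required for transient streams.
    print(task["id"], task["title"])
```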
6 changes: 3 additions & 3 deletions src/json_stream/tests/test_buffering.py
@@ -12,9 +12,9 @@ def test_buffering(self):
         self._test_buffering(tokenizer=rust_tokenizer_or_raise())
 
     def test_buffering_python_tokenizer(self):
-        self._test_buffering(tokenizer=tokenize)
+        self._test_buffering(tokenizer=tokenize, buffering=0)
 
-    def _test_buffering(self, tokenizer):
+    def _test_buffering(self, tokenizer, **load_args):
         happenings = []
 
         def data_in_chunks(data, chunk_size=15):
@@ -24,7 +24,7 @@ def data_in_chunks(data, chunk_size=15):
             yield part
 
         json_string = b'{"tasks":[{"id":1,"title":"task1"},{"id":2,"title":"task2"},{"id":3,"title":"task3"}]}'
-        stream = json_stream.load(data_in_chunks(json_string), tokenizer=tokenizer)
+        stream = json_stream.load(data_in_chunks(json_string), tokenizer=tokenizer, **load_args)
 
         for task in stream["tasks"]:
            happenings.append(('item', to_standard_types(task)))
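The second new parameter, `strings_as_files`, is not exercised by the updated test. A purely hypothetical sketch of the kind of usage the name suggests, assuming that enabling it exposes string values as file-like objects readable in increments (the file name and `"body"` key are illustrative only):

```python
# Hypothetical only: the diff adds strings_as_files to load() but does not
# show its behaviour. Everything after the load() call is an assumption.
import json_stream

with open("large.json", "rb") as f:
    stream = json_stream.load(f, strings_as_files=True)
    body = stream["body"]        # assumed: a readable, file-like value
    first_kb = body.read(1024)   # assumed: the string can be read in chunks
```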