
Commit 5a2e8ec

[pre-commit.ci] pre-commit suggestions (#266)
* [pre-commit.ci] pre-commit suggestions

  updates:
  - [github.com/astral-sh/ruff-pre-commit: v0.5.6 → v0.6.3](astral-sh/ruff-pre-commit@v0.5.6...v0.6.3)

* [pre-commit.ci] auto fixes from pre-commit.com hooks

  for more information, see https://pre-commit.ci

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
1 parent 38d0ee1 commit 5a2e8ec
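The version bump itself is the single-line change to .pre-commit-config.yaml below; the remaining seven files are the auto fixes the updated hooks applied across the test suite, all of the same shape: @pytest.fixture() and @pytest.mark.asyncio() lose their empty parentheses. This looks like the work of ruff's flake8-pytest-style rules (PT001/PT023), whose default preference switched to the no-parentheses form around ruff 0.6; that attribution is an inference from the diff, not something the commit states. A minimal before/after sketch (names are illustrative; pytest-asyncio is assumed, as in the repo's tests):

import pytest

# Old style, now flagged and auto-fixed by the updated hook:
#   @pytest.fixture()
#   def simple_litapi(): ...
#   @pytest.mark.asyncio()
#   async def test_simple_api(): ...

# New style after the auto fix: bare decorators, no empty parentheses.
@pytest.fixture
def simple_litapi():
    return object()  # hypothetical stand-in for SimpleLitAPI() in tests/conftest.py


@pytest.mark.asyncio  # needs the pytest-asyncio plugin, which the repo's tests already use
async def test_simple_api(simple_litapi):
    assert simple_litapi is not None  # placeholder assertion; the real tests exercise LitServer endpoints

The behavior of fixtures and marks is identical either way; the rewrite is purely stylistic.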


8 files changed, +39 -39 lines changed


.pre-commit-config.yaml
Lines changed: 1 addition & 1 deletion

@@ -59,7 +59,7 @@ repos:
           - pydocstyle

   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.5.6
+    rev: v0.6.3
     hooks:
       - id: ruff-format
         args: ["--preview"]
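For context, this kind of bump is what pre-commit autoupdate produces (it rewrites the rev fields to the latest tags), after which pre-commit run --all-files lets the updated hooks rewrite the tracked files; that is the standard pre-commit workflow rather than anything recorded in this commit.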

tests/conftest.py
Lines changed: 11 additions & 11 deletions

@@ -83,35 +83,35 @@ def unbatch(self, output):
         yield from output


-@pytest.fixture()
+@pytest.fixture
 def simple_litapi():
     return SimpleLitAPI()


-@pytest.fixture()
+@pytest.fixture
 def simple_stream_api():
     return SimpleStreamAPI()


-@pytest.fixture()
+@pytest.fixture
 def simple_batched_stream_api():
     return SimpleBatchedStreamAPI()


-@pytest.fixture()
+@pytest.fixture
 def lit_server(simple_litapi):
     server = LitServer(simple_litapi, accelerator="cpu", devices=1, timeout=10)
     with wrap_litserve_start(server) as s:
         yield s


-@pytest.fixture()
+@pytest.fixture
 def sync_testclient(lit_server):
     with TestClient(lit_server.app) as client:
         yield client


-@pytest.fixture()
+@pytest.fixture
 def killall():
     def _run(process):
         parent = psutil.Process(process.pid)
@@ -122,7 +122,7 @@ def _run(process):
     return _run


-@pytest.fixture()
+@pytest.fixture
 def openai_request_data():
     return {
         "model": "",
@@ -139,7 +139,7 @@ def openai_request_data():
     }


-@pytest.fixture()
+@pytest.fixture
 def openai_response_data():
     return {
         "id": "chatcmpl-9dEtoQu4g45g3431SZ2s98S",
@@ -164,7 +164,7 @@ def openai_response_data():
     }


-@pytest.fixture()
+@pytest.fixture
 def openai_request_data_with_image():
     return {
         "model": "lit",
@@ -192,7 +192,7 @@ def openai_request_data_with_image():
     }


-@pytest.fixture()
+@pytest.fixture
 def openai_request_data_with_tools():
     return {
         "model": "lit",
@@ -226,7 +226,7 @@ def openai_request_data_with_tools():
     }


-@pytest.fixture()
+@pytest.fixture
 def openai_request_data_with_response_format():
     return {
         "model": "lit",

tests/test_batch.py
Lines changed: 2 additions & 2 deletions

@@ -82,7 +82,7 @@ def encode_response(self, output) -> Response:
         return {"output": float(output)}


-@pytest.mark.asyncio()
+@pytest.mark.asyncio
 async def test_batched():
     api = SimpleBatchLitAPI()
     server = LitServer(api, accelerator="cpu", devices=1, timeout=10, max_batch_size=2, batch_timeout=4)
@@ -97,7 +97,7 @@ async def test_batched():
     assert response2.json() == {"output": 11.0}


-@pytest.mark.asyncio()
+@pytest.mark.asyncio
 async def test_unbatched():
     api = SimpleTorchAPI()
     server = LitServer(api, accelerator="cpu", devices=1, timeout=10, max_batch_size=1)

tests/test_examples.py
Lines changed: 10 additions & 10 deletions

@@ -14,7 +14,7 @@
 import litserve as ls


-@pytest.mark.asyncio()
+@pytest.mark.asyncio
 async def test_simple_pytorch_api():
     api = ls.test_examples.SimpleTorchAPI()
     server = ls.LitServer(api, accelerator="cpu")
@@ -24,7 +24,7 @@ async def test_simple_pytorch_api():
     assert response.json() == {"output": 9.0}


-@pytest.mark.asyncio()
+@pytest.mark.asyncio
 async def test_simple_batched_api():
     api = ls.test_examples.SimpleBatchedAPI()
     server = ls.LitServer(api, max_batch_size=4, batch_timeout=0.1)
@@ -34,7 +34,7 @@ async def test_simple_batched_api():
     assert response.json() == {"output": 16.0}


-@pytest.mark.asyncio()
+@pytest.mark.asyncio
 async def test_simple_api():
     api = ls.test_examples.SimpleLitAPI()
     server = ls.LitServer(api)
@@ -44,15 +44,15 @@ async def test_simple_api():
     assert response.json() == {"output": 16.0}


-@pytest.mark.asyncio()
+@pytest.mark.asyncio
 async def test_simple_api_without_server():
     api = ls.test_examples.SimpleLitAPI()
     api.setup(None)
     assert api.model is not None, "Model should be loaded after setup"
     assert api.predict(4) == 16, "Model should be able to predict"


-@pytest.mark.asyncio()
+@pytest.mark.asyncio
 async def test_simple_pytorch_api_without_server():
     api = ls.test_examples.SimpleTorchAPI()
     api.setup("cpu")
@@ -63,7 +63,7 @@ async def test_simple_pytorch_api_without_server():
     assert api.encode_response(9) == {"output": 9}, "Response should be encoded"


-@pytest.mark.asyncio()
+@pytest.mark.asyncio
 async def test_simple_stream_api_without_server():
     api = SimpleStreamAPI()
     api.setup(None)
@@ -77,7 +77,7 @@ async def test_simple_stream_api_without_server():
     ], "Response should be encoded"


-@pytest.mark.asyncio()
+@pytest.mark.asyncio
 async def test_openai_with_usage():
     api = OpenAIWithUsage()
     api.setup(None)
@@ -93,7 +93,7 @@ async def test_openai_with_usage():
     ], "Response should match expected output"


-@pytest.mark.asyncio()
+@pytest.mark.asyncio
 async def test_openai_with_usage_encode_response():
     api = OpenAIWithUsageEncodeResponse()
     api.setup(None)
@@ -114,7 +114,7 @@ async def test_openai_with_usage_encode_response():
     ], "Encoded response should match expected output"


-@pytest.mark.asyncio()
+@pytest.mark.asyncio
 async def test_openai_batching_with_usage():
     api = OpenAIBatchingWithUsage()
     api.setup(None)
@@ -136,7 +136,7 @@ async def test_openai_batching_with_usage():
     ], "Encoded batched response should match expected output"


-@pytest.mark.asyncio()
+@pytest.mark.asyncio
 async def test_openai_batch_context():
     api = OpenAIBatchContext()
     api.setup(None)

tests/test_lit_server.py
Lines changed: 3 additions & 3 deletions

@@ -60,7 +60,7 @@ def test_device_identifiers(lifespan_mock, simple_litapi):
     assert server.devices[1][0] == "cuda:2"


-@pytest.mark.asyncio()
+@pytest.mark.asyncio
 async def test_stream(simple_stream_api):
     server = LitServer(simple_stream_api, stream=True, timeout=10)
     expected_output1 = "prompt=Hello generated_output=LitServe is streaming output".lower().replace(" ", "")
@@ -81,7 +81,7 @@ async def test_stream(simple_stream_api):
     ), "Server returns input prompt and generated output which didn't match."


-@pytest.mark.asyncio()
+@pytest.mark.asyncio
 async def test_batched_stream_server(simple_batched_stream_api):
     server = LitServer(simple_batched_stream_api, stream=True, max_batch_size=4, batch_timeout=2, timeout=30)
     expected_output1 = "Hello LitServe is streaming output".lower().replace(" ", "")
@@ -315,7 +315,7 @@ def encode_response(self, output, context):
         return {"output": input}


-@pytest.mark.asyncio()
+@pytest.mark.asyncio
 @patch("litserve.server.load_and_raise")
 async def test_inject_context(mocked_load_and_raise):
     def dummy_load_and_raise(resp):

tests/test_loops.py
Lines changed: 1 addition & 1 deletion

@@ -28,7 +28,7 @@
 from litserve.utils import LitAPIStatus


-@pytest.fixture()
+@pytest.fixture
 def loop_args():
     requests_queue = Queue()
     requests_queue.put((0, "uuid-123", time.monotonic(), 1))  # response_queue_id, uid, timestamp, x_enc

tests/test_simple.py
Lines changed: 2 additions & 2 deletions

@@ -119,7 +119,7 @@ def unbatch(self, output):
         return list(output)


-@pytest.mark.asyncio()
+@pytest.mark.asyncio
 async def test_timeout():
     # Scenario: first request completes, second request times out in queue
     api = SlowLitAPI()  # takes 2 seconds for each prediction
@@ -146,7 +146,7 @@ async def test_timeout():
     ), "Server takes longer than specified timeout and request should timeout"


-@pytest.mark.asyncio()
+@pytest.mark.asyncio
 async def test_batch_timeout():
     # Scenario: first 2 requests finish as a batch and third request times out in queue
     server = LitServer(

tests/test_specs.py
Lines changed: 9 additions & 9 deletions

@@ -30,7 +30,7 @@
 from litserve.specs.openai import OpenAISpec, ChatMessage


-@pytest.mark.asyncio()
+@pytest.mark.asyncio
 async def test_openai_spec(openai_request_data):
     spec = OpenAISpec()
     server = ls.LitServer(TestAPI(), spec=spec)
@@ -45,7 +45,7 @@ async def test_openai_spec(openai_request_data):


 # OpenAIWithUsage
-@pytest.mark.asyncio()
+@pytest.mark.asyncio
 @pytest.mark.parametrize(
     ("api", "batch_size"),
     [
@@ -72,7 +72,7 @@ async def test_openai_token_usage(api, batch_size, openai_request_data, openai_r
     assert result["usage"] == openai_response_data["usage"]


-@pytest.mark.asyncio()
+@pytest.mark.asyncio
 async def test_openai_spec_with_image(openai_request_data_with_image):
     server = ls.LitServer(TestAPI(), spec=OpenAISpec())
     with wrap_litserve_start(server) as server:
@@ -85,7 +85,7 @@ async def test_openai_spec_with_image(openai_request_data_with_image):
     ), "LitAPI predict response should match with the generated output"


-@pytest.mark.asyncio()
+@pytest.mark.asyncio
 async def test_override_encode(openai_request_data):
     server = ls.LitServer(TestAPIWithCustomEncode(), spec=OpenAISpec())
     with wrap_litserve_start(server) as server:
@@ -98,7 +98,7 @@ async def test_override_encode(openai_request_data):
     ), "LitAPI predict response should match with the generated output"


-@pytest.mark.asyncio()
+@pytest.mark.asyncio
 async def test_openai_spec_with_tools(openai_request_data_with_tools):
     spec = OpenAISpec()
     server = ls.LitServer(TestAPIWithToolCalls(), spec=spec)
@@ -118,7 +118,7 @@ async def test_openai_spec_with_tools(openai_request_data_with_tools):
     ], "LitAPI predict response should match with the generated output"


-@pytest.mark.asyncio()
+@pytest.mark.asyncio
 async def test_openai_spec_with_response_format(openai_request_data_with_response_format):
     spec = OpenAISpec()
     server = ls.LitServer(TestAPIWithStructuredOutput(), spec=spec)
@@ -148,7 +148,7 @@ def encode_response(self, output):
         return ChatMessage(role="assistant", content="This is a generated output")


-@pytest.mark.asyncio()
+@pytest.mark.asyncio
 async def test_openai_spec_validation(openai_request_data):
     server = ls.LitServer(IncorrectAPI1(), spec=OpenAISpec())
     with pytest.raises(ValueError, match="predict is not a generator"), wrap_litserve_start(server) as server:
@@ -172,7 +172,7 @@ def predict(self, prompt, context):
         return


-@pytest.mark.asyncio()
+@pytest.mark.asyncio
 async def test_oai_prepopulated_context(openai_request_data):
     openai_request_data["max_tokens"] = 3
     spec = OpenAISpec()
@@ -194,7 +194,7 @@ def predict(self, prompt):
         raise HTTPException(501, "test LitAPI.predict error")


-@pytest.mark.asyncio()
+@pytest.mark.asyncio
 async def test_fail_http(openai_request_data):
     server = ls.LitServer(WrongLitAPI(), spec=ls.OpenAISpec())
     with wrap_litserve_start(server) as server:
