1
1
absl-py==2.3.0 # via rouge-score
2
2
accelerate==1.7.0 # via lm-eval, peft, -r requirements.txt
3
3
aiohappyeyeballs==2.6.1 # via aiohttp
4
- aiohttp==3.12.11 # via fsspec, langchain-community, vllm
4
+ aiohttp==3.12.13 # via fsspec, langchain-community, vllm
5
5
aiosignal==1.3.2 # via aiohttp, ray
6
6
airportsdata==20250523 # via outlines
7
7
annotated-types==0.7.0 # via pydantic
@@ -15,15 +15,15 @@ beautifulsoup4==4.13.4 # via pyspelling
15
15
blake3==1.0.5 # via vllm
16
16
bracex==2.5.post1 # via wcmatch
17
17
cachetools==6.0.0 # via tox
18
- certifi==2025.4.26 # via httpcore, httpx, requests
18
+ certifi==2025.6.15 # via httpcore, httpx, requests
19
19
cfgv==3.4.0 # via pre-commit
20
20
chardet==5.2.0 # via mbstrdecoder, tox
21
21
charset-normalizer==3.4.2 # via requests
22
22
click==8.2.1 # via nltk, ray, rich-toolkit, typer, uvicorn
23
23
cloudpickle==3.1.1 # via outlines, vllm
24
24
colorama==0.4.6 # via sacrebleu, tox, tqdm-multiprocess
25
25
compressed-tensors==0.9.1 # via vllm
26
- coverage==7.8.2 # via pytest-cov
26
+ coverage==7.9.1 # via pytest-cov
27
27
cupy-cuda12x==13.4.1 # via ray
28
28
dataclasses-json==0.6.7 # via langchain-community
29
29
dataproperty==1.1.0 # via pytablewriter, tabledata
@@ -41,7 +41,7 @@ fastapi==0.115.12 # via vllm
41
41
fastapi-cli==0.0.7 # via fastapi
42
42
fastrlock==0.8.3 # via cupy-cuda12x
43
43
filelock==3.18.0 # via datasets, huggingface-hub, ray, torch, tox, transformers, triton, virtualenv, vllm
44
- frozenlist==1.6.2 # via aiohttp, aiosignal, ray
44
+ frozenlist==1.7.0 # via aiohttp, aiosignal, ray
45
45
fsspec==2025.3.0 # via datasets, evaluate, huggingface-hub, torch
46
46
gguf==0.10.0 # via vllm
47
47
gitdb==4.0.12 # via gitpython
@@ -54,7 +54,7 @@ httpcore==1.0.9 # via httpx
54
54
httptools==0.6.4 # via uvicorn
55
55
httpx==0.28.1 # via fastapi, langsmith, openai, -r requirements.txt
56
56
httpx-sse==0.4.0 # via langchain-community
57
- huggingface-hub==0.32.4 # via accelerate, datasets, evaluate, peft, tokenizers, transformers
57
+ huggingface-hub==0.33.0 # via accelerate, datasets, evaluate, peft, tokenizers, transformers
58
58
identify==2.6.12 # via pre-commit
59
59
idna==3.10 # via anyio, email-validator, httpx, requests, yarl
60
60
immutabledict==4.2.1 # via lm-eval
@@ -71,9 +71,9 @@ jsonpointer==3.0.0 # via jsonpatch
71
71
jsonschema==4.24.0 # via mistral-common, outlines, outlines-core, ray
72
72
jsonschema-specifications==2025.4.1 # via jsonschema
73
73
langchain==0.3.25 # via langchain-community, ragas
74
- langchain-community==0.3.24 # via ragas
75
- langchain-core==0.3.64 # via langchain, langchain-community, langchain-openai, langchain-text-splitters, ragas
76
- langchain-openai==0.3.21 # via ragas
74
+ langchain-community==0.3.25 # via ragas
75
+ langchain-core==0.3.65 # via langchain, langchain-community, langchain-openai, langchain-text-splitters, ragas
76
+ langchain-openai==0.3.23 # via ragas
77
77
langchain-text-splitters==0.3.8 # via langchain
78
78
langdetect==1.0.9 # via lm-eval
79
79
langsmith==0.3.45 # via langchain, langchain-community, langchain-core
@@ -91,10 +91,10 @@ math-verify==0.7.0 # via lm-eval
91
91
mbstrdecoder==1.1.4 # via dataproperty, pytablewriter, typepy
92
92
mccabe==0.7.0 # via pylint
93
93
mdurl==0.1.2 # via markdown-it-py
94
- mistral-common==1.5.6 # via vllm
94
+ mistral-common==1.6.2 # via vllm
95
95
more-itertools==10.7.0 # via lm-eval
96
96
mpmath==1.3.0 # via sympy
97
- msgpack==1.1.0 # via ray
97
+ msgpack==1.1.1 # via ray
98
98
msgspec==0.19.0 # via vllm
99
99
multidict==6.4.4 # via aiohttp, yarl
100
100
multiprocess==0.70.16 # via datasets, evaluate
@@ -105,7 +105,7 @@ networkx==3.5 # via torch
105
105
nltk==3.9.1 # via lm-eval, rouge-score
106
106
nodeenv==1.9.1 # via pre-commit
107
107
numba==0.60.0 # via vllm
108
- numexpr==2.10.2 # via lm-eval
108
+ numexpr==2.11.0 # via lm-eval
109
109
numpy==1.26.4 # via accelerate, cupy-cuda12x, datasets, evaluate, gguf, langchain-community, mistral-common, numba, numexpr, opencv-python-headless, outlines, pandas, pandas-stubs, peft, ragas, rouge-score, sacrebleu, scikit-learn, scipy, torchvision, transformers, vllm, xformers
110
110
nvidia-cublas-cu12==12.4.5.8 # via nvidia-cudnn-cu12, nvidia-cusolver-cu12, torch
111
111
nvidia-cuda-cupti-cu12==12.4.127 # via torch
@@ -119,7 +119,7 @@ nvidia-cusparse-cu12==12.3.1.170 # via nvidia-cusolver-cu12, torch
119
119
nvidia-nccl-cu12==2.21.5 # via torch
120
120
nvidia-nvjitlink-cu12==12.4.127 # via nvidia-cusolver-cu12, nvidia-cusparse-cu12, torch
121
121
nvidia-nvtx-cu12==12.4.127 # via torch
122
- openai==1.84.0 # via langchain-openai, ragas, vllm, -r requirements.txt
122
+ openai==1.86.0 # via langchain-openai, ragas, vllm, -r requirements.txt
123
123
opencv-python-headless==4.11.0.86 # via mistral-common
124
124
orjson==3.10.18 # via langsmith
125
125
outlines==0.1.11 # via vllm
@@ -129,23 +129,23 @@ pandas==2.3.0 # via datasets, evaluate, -r requirements.txt
129
129
pandas-stubs==2.2.3.250527 # via -r requirements.txt
130
130
partial-json-parser==0.2.1.1.post5 # via vllm
131
131
pathspec==0.12.1 # via mypy
132
- pathvalidate==3.2.3 # via pytablewriter
132
+ pathvalidate==3.3.1 # via pytablewriter
133
133
peft==0.15.2 # via lm-eval
134
134
pillow==11.2.1 # via mistral-common, torchvision, vllm
135
135
platformdirs==4.3.8 # via pylint, tox, virtualenv
136
- pluggy==1.6.0 # via pytest, tox
137
- portalocker==3.1.1 # via sacrebleu
136
+ pluggy==1.6.0 # via pytest, pytest-cov, tox
137
+ portalocker==3.2.0 # via sacrebleu
138
138
pre-commit==4.2.0 # via -r requirements-dev.txt
139
139
prometheus-client==0.22.1 # via prometheus-fastapi-instrumentator, vllm
140
140
prometheus-fastapi-instrumentator==7.1.0 # via vllm
141
- propcache==0.3.1 # via aiohttp, yarl
141
+ propcache==0.3.2 # via aiohttp, yarl
142
142
protobuf==6.31.1 # via ray, vllm
143
143
psutil==7.0.0 # via accelerate, peft, vllm, -r requirements.txt
144
144
py-cpuinfo==9.0.0 # via vllm
145
145
pyarrow==20.0.0 # via datasets
146
146
pybind11==2.13.6 # via lm-eval, xgrammar
147
147
pycountry==24.6.1 # via outlines
148
- pydantic==2.11.5 # via compressed-tensors, fastapi, langchain, langchain-core, langsmith, lm-format-enforcer, mistral-common, openai, outlines, pydantic-settings, pylint-pydantic, ragas, vllm, xgrammar
148
+ pydantic==2.11.7 # via compressed-tensors, fastapi, langchain, langchain-core, langsmith, lm-format-enforcer, mistral-common, openai, outlines, pydantic-settings, pylint-pydantic, ragas, vllm, xgrammar
149
149
pydantic-core==2.33.2 # via pydantic
150
150
pydantic-settings==2.9.1 # via langchain-community
151
151
pygments==2.19.1 # via pytest, rich
@@ -157,20 +157,20 @@ pyspelling==2.10 # via -r requirements-dev.txt
157
157
pytablewriter==1.2.1 # via lm-eval
158
158
pytest==8.4.0 # via pytest-asyncio, pytest-cov, pytest-html, pytest-metadata, xgrammar, -r requirements-dev.txt
159
159
pytest-asyncio==1.0.0 # via -r requirements-dev.txt
160
- pytest-cov==6.1.1 # via -r requirements-dev.txt
160
+ pytest-cov==6.2.1 # via -r requirements-dev.txt
161
161
pytest-html==4.1.1 # via -r requirements-dev.txt
162
162
pytest-metadata==3.1.1 # via pytest-html
163
163
python-dateutil==2.9.0.post0 # via pandas, typepy
164
164
python-dotenv==1.1.0 # via pydantic-settings, uvicorn
165
165
python-multipart==0.0.20 # via fastapi
166
166
pytz==2025.2 # via pandas, typepy
167
167
pyyaml==6.0.2 # via accelerate, datasets, gguf, huggingface-hub, langchain, langchain-community, langchain-core, lm-format-enforcer, peft, pre-commit, pyspelling, ray, transformers, uvicorn, vllm
168
- pyzmq==26.4.0 # via vllm
168
+ pyzmq==27.0.0 # via vllm
169
169
ragas==0.2.15 # via -r requirements.txt
170
170
ray==2.40.0 # via vllm
171
171
referencing==0.36.2 # via jsonschema, jsonschema-specifications, outlines
172
172
regex==2024.11.6 # via nltk, sacrebleu, tiktoken, transformers
173
- requests==2.32.3 # via datasets, evaluate, huggingface-hub, langchain, langchain-community, langsmith, mistral-common, outlines, ray, requests-toolbelt, tiktoken, transformers, vllm
173
+ requests==2.32.4 # via datasets, evaluate, huggingface-hub, langchain, langchain-community, langsmith, mistral-common, outlines, ray, requests-toolbelt, tiktoken, transformers, vllm
174
174
requests-toolbelt==1.0.0 # via langsmith
175
175
rich==14.0.0 # via rich-toolkit, typer
176
176
rich-toolkit==0.14.7 # via fastapi-cli
@@ -213,7 +213,7 @@ typepy==1.3.4 # via dataproperty, pytablewriter, tabledata
213
213
typer==0.16.0 # via fastapi-cli
214
214
types-pytz==2025.2.0.20250516 # via pandas-stubs
215
215
types-pyyaml==6.0.12.20250516 # via -r requirements-dev.txt
216
- types-requests==2.32.0.20250602 # via types-tqdm
216
+ types-requests==2.32.4.20250611 # via types-tqdm
217
217
types-tqdm==4.67.0.20250516 # via -r requirements-dev.txt
218
218
typing-extensions==4.14.0 # via anyio, beautifulsoup4, fastapi, huggingface-hub, langchain-core, mistral-common, mypy, openai, outlines, pydantic, pydantic-core, referencing, rich-toolkit, sqlalchemy, torch, typer, typing-inspect, typing-inspection, vllm
219
219
typing-inspect==0.9.0 # via dataclasses-json
@@ -224,14 +224,14 @@ uvicorn==0.34.3 # via fastapi, fastapi-cli
224
224
uvloop==0.21.0 # via uvicorn
225
225
virtualenv==20.31.2 # via pre-commit, tox
226
226
vllm==0.7.3 # via lm-eval, -r requirements-leaderboard.txt
227
- watchfiles==1.0.5 # via uvicorn
227
+ watchfiles==1.1.0 # via uvicorn
228
228
wcmatch==10.0 # via pyspelling
229
229
webencodings==0.5.1 # via html5lib
230
230
websockets==15.0.1 # via uvicorn
231
231
word2number==1.1 # via lm-eval
232
232
xformers==0.0.28.post3 # via vllm
233
233
xgrammar==0.1.11 # via vllm
234
234
xxhash==3.5.0 # via datasets, evaluate
235
- yarl==1.20.0 # via aiohttp
235
+ yarl==1.20.1 # via aiohttp
236
236
zipp==3.23.0 # via importlib-metadata
237
237
zstandard==0.23.0 # via langsmith, lm-eval
0 commit comments