Skip to content

Commit 0865e52

Browse files
fix(proxy_server.py): get master key from environment, if not set in general settings or general settings not set at all (#9617)
* fix(proxy_server.py): get master key from environment, if not set in general settings or general settings not set at all * test: mark flaky test * test(test_proxy_server.py): mock prisma client * ci: add new github workflow for testing just the mock tests * fix: fix linting error * ci(conftest.py): add conftest.py to isolate proxy tests * build(pyproject.toml): add respx to dev dependencies * build(pyproject.toml): add prisma to dev dependencies * test: fix mock prompt management tests to use a mock anthropic key * ci(test-litellm.yml): parallelize mock testing make it run faster * build(pyproject.toml): add hypercorn as dev dep * build(pyproject.toml): separate proxy vs. core dev dependencies make it easier for non-proxy contributors to run tests locally - e.g. no need to install hypercorn * ci(test-litellm.yml): pin python version * test(test_rerank.py): move test - cannot be mocked, requires aws credentials for e2e testing * ci: add thank you message to ci * test: add mock env var to test * test: add autouse to tests * test: test mock env vars for e2e tests
1 parent 69e28b9 commit 0865e52

File tree

14 files changed

+479
-284
lines changed

14 files changed

+479
-284
lines changed

.github/workflows/test-litellm.yml

+35
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
name: LiteLLM Tests
2+
3+
on:
4+
pull_request:
5+
branches: [ main ]
6+
7+
jobs:
8+
test:
9+
runs-on: ubuntu-latest
10+
timeout-minutes: 5
11+
12+
steps:
13+
- uses: actions/checkout@v4
14+
15+
- name: Thank You Message
16+
run: |
17+
echo "### 🙏 Thank you for contributing to LiteLLM!" >> $GITHUB_STEP_SUMMARY
18+
echo "Your PR is being tested now. We appreciate your help in making LiteLLM better!" >> $GITHUB_STEP_SUMMARY
19+
20+
- name: Set up Python
21+
uses: actions/setup-python@v4
22+
with:
23+
python-version: '3.12'
24+
25+
- name: Install Poetry
26+
uses: snok/install-poetry@v1
27+
28+
- name: Install dependencies
29+
run: |
30+
poetry install --with dev,proxy-dev --extras proxy
31+
poetry run pip install pytest-xdist
32+
33+
- name: Run tests
34+
run: |
35+
poetry run pytest tests/litellm -x -vv -n 4

Makefile

+3
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,9 @@ help:
1414
install-dev:
1515
poetry install --with dev
1616

17+
install-proxy-dev:
18+
poetry install --with dev,proxy-dev
19+
1720
lint: install-dev
1821
poetry run pip install types-requests types-setuptools types-redis types-PyYAML
1922
cd litellm && poetry run mypy . --ignore-missing-imports

litellm/llms/xai/common_utils.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
from typing import Optional
1+
from typing import List, Optional
22

33
import httpx
44

@@ -22,7 +22,7 @@ def get_base_model(model: str) -> Optional[str]:
2222

2323
def get_models(
2424
self, api_key: Optional[str] = None, api_base: Optional[str] = None
25-
) -> list[str]:
25+
) -> List[str]:
2626
api_base = self.get_api_base(api_base)
2727
api_key = self.get_api_key(api_key)
2828
if api_base is None or api_key is None:

litellm/proxy/proxy_server.py

+2
Original file line numberDiff line numberDiff line change
@@ -462,6 +462,8 @@ async def proxy_startup_event(app: FastAPI):
462462
if premium_user is False:
463463
premium_user = _license_check.is_premium()
464464

465+
## CHECK MASTER KEY IN ENVIRONMENT ##
466+
master_key = get_secret_str("LITELLM_MASTER_KEY")
465467
### LOAD CONFIG ###
466468
worker_config: Optional[Union[str, dict]] = get_secret("WORKER_CONFIG") # type: ignore
467469
env_config_yaml: Optional[str] = get_secret_str("CONFIG_FILE_PATH")

poetry.lock

+216-228
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

pyproject.toml

+5
Original file line numberDiff line numberDiff line change
@@ -99,6 +99,11 @@ mypy = "^1.0"
9999
pytest = "^7.4.3"
100100
pytest-mock = "^3.12.0"
101101
pytest-asyncio = "^0.21.1"
102+
respx = "^0.20.2"
103+
104+
[tool.poetry.group.proxy-dev.dependencies]
105+
prisma = "0.11.0"
106+
hypercorn = "^0.15.0"
102107

103108
[build-system]
104109
requires = ["poetry-core", "wheel"]

tests/litellm/conftest.py

+63
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,63 @@
1+
# conftest.py
2+
3+
import importlib
4+
import os
5+
import sys
6+
7+
import pytest
8+
9+
sys.path.insert(
10+
0, os.path.abspath("../..")
11+
) # Adds the parent directory to the system path
12+
import litellm
13+
14+
15+
@pytest.fixture(scope="function", autouse=True)
16+
def setup_and_teardown():
17+
"""
18+
This fixture reloads litellm before every function. To speed up testing by removing callbacks being chained.
19+
"""
20+
curr_dir = os.getcwd() # Get the current working directory
21+
sys.path.insert(
22+
0, os.path.abspath("../..")
23+
) # Adds the project directory to the system path
24+
25+
import litellm
26+
from litellm import Router
27+
28+
importlib.reload(litellm)
29+
30+
try:
31+
if hasattr(litellm, "proxy") and hasattr(litellm.proxy, "proxy_server"):
32+
import litellm.proxy.proxy_server
33+
34+
importlib.reload(litellm.proxy.proxy_server)
35+
except Exception as e:
36+
print(f"Error reloading litellm.proxy.proxy_server: {e}")
37+
38+
import asyncio
39+
40+
loop = asyncio.get_event_loop_policy().new_event_loop()
41+
asyncio.set_event_loop(loop)
42+
print(litellm)
43+
# from litellm import Router, completion, aembedding, acompletion, embedding
44+
yield
45+
46+
# Teardown code (executes after the yield point)
47+
loop.close() # Close the loop created earlier
48+
asyncio.set_event_loop(None) # Remove the reference to the loop
49+
50+
51+
def pytest_collection_modifyitems(config, items):
52+
# Separate tests in 'test_amazing_proxy_custom_logger.py' and other tests
53+
custom_logger_tests = [
54+
item for item in items if "custom_logger" in item.parent.name
55+
]
56+
other_tests = [item for item in items if "custom_logger" not in item.parent.name]
57+
58+
# Sort tests based on their names
59+
custom_logger_tests.sort(key=lambda x: x.name)
60+
other_tests.sort(key=lambda x: x.name)
61+
62+
# Reorder the items list
63+
items[:] = custom_logger_tests + other_tests

tests/litellm/integrations/test_custom_prompt_management.py

+6-1
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,11 @@
1919
from litellm.types.utils import StandardCallbackDynamicParams
2020

2121

22+
@pytest.fixture(autouse=True)
23+
def setup_anthropic_api_key(monkeypatch):
24+
monkeypatch.setenv("ANTHROPIC_API_KEY", "sk-ant-some-key")
25+
26+
2227
class TestCustomPromptManagement(CustomPromptManagement):
2328
def get_chat_completion_prompt(
2429
self,
@@ -50,7 +55,7 @@ def get_chat_completion_prompt(
5055

5156

5257
@pytest.mark.asyncio
53-
async def test_custom_prompt_management_with_prompt_id():
58+
async def test_custom_prompt_management_with_prompt_id(monkeypatch):
5459
custom_prompt_management = TestCustomPromptManagement()
5560
litellm.callbacks = [custom_prompt_management]
5661

tests/litellm/proxy/spend_tracking/test_spend_management_endpoints.py

+6-1
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,11 @@ def client():
2626
return TestClient(app)
2727

2828

29+
@pytest.fixture(autouse=True)
30+
def add_anthropic_api_key_to_env(monkeypatch):
31+
monkeypatch.setenv("ANTHROPIC_API_KEY", "sk-ant-api03-1234567890")
32+
33+
2934
@pytest.mark.asyncio
3035
async def test_ui_view_spend_logs_with_user_id(client, monkeypatch):
3136
# Mock data for the test
@@ -500,7 +505,7 @@ def mock_anthropic_response(*args, **kwargs):
500505
return mock_response
501506

502507
@pytest.mark.asyncio
503-
async def test_spend_logs_payload_success_log_with_api_base(self):
508+
async def test_spend_logs_payload_success_log_with_api_base(self, monkeypatch):
504509
from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler
505510

506511
litellm.callbacks = [_ProxyDBLogger(message_logging=False)]

tests/litellm/proxy/test_proxy_server.py

+90
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
import click
1010
import httpx
1111
import pytest
12+
import yaml
1213
from fastapi import FastAPI
1314
from fastapi.testclient import TestClient
1415

@@ -74,3 +75,92 @@ async def test_initialize_scheduled_jobs_credentials(monkeypatch):
7475
call[0] for call in mock_proxy_config.get_credentials.mock_calls
7576
]
7677
assert len(mock_scheduler_calls) > 0
78+
79+
80+
# Mock Prisma
81+
class MockPrisma:
82+
def __init__(self, database_url=None, proxy_logging_obj=None, http_client=None):
83+
self.database_url = database_url
84+
self.proxy_logging_obj = proxy_logging_obj
85+
self.http_client = http_client
86+
87+
async def connect(self):
88+
pass
89+
90+
async def disconnect(self):
91+
pass
92+
93+
94+
mock_prisma = MockPrisma()
95+
96+
97+
@patch(
98+
"litellm.proxy.proxy_server.ProxyStartupEvent._setup_prisma_client",
99+
return_value=mock_prisma,
100+
)
101+
@pytest.mark.asyncio
102+
async def test_aaaproxy_startup_master_key(mock_prisma, monkeypatch, tmp_path):
103+
"""
104+
Test that master_key is correctly loaded from either config.yaml or environment variables
105+
"""
106+
import yaml
107+
from fastapi import FastAPI
108+
109+
# Import happens here - this is when the module probably reads the config path
110+
from litellm.proxy.proxy_server import proxy_startup_event
111+
112+
# Mock the Prisma import
113+
monkeypatch.setattr("litellm.proxy.proxy_server.PrismaClient", MockPrisma)
114+
115+
# Create test app
116+
app = FastAPI()
117+
118+
# Test Case 1: Master key from config.yaml
119+
test_master_key = "sk-12345"
120+
test_config = {"general_settings": {"master_key": test_master_key}}
121+
122+
# Create a temporary config file
123+
config_path = tmp_path / "config.yaml"
124+
with open(config_path, "w") as f:
125+
yaml.dump(test_config, f)
126+
127+
print(f"SET ENV VARIABLE - CONFIG_FILE_PATH, str(config_path): {str(config_path)}")
128+
# Second setting of CONFIG_FILE_PATH to a different value
129+
monkeypatch.setenv("CONFIG_FILE_PATH", str(config_path))
130+
print(f"config_path: {config_path}")
131+
print(f"os.getenv('CONFIG_FILE_PATH'): {os.getenv('CONFIG_FILE_PATH')}")
132+
async with proxy_startup_event(app):
133+
from litellm.proxy.proxy_server import master_key
134+
135+
assert master_key == test_master_key
136+
137+
# Test Case 2: Master key from environment variable
138+
test_env_master_key = "sk-67890"
139+
140+
# Create empty config
141+
empty_config = {"general_settings": {}}
142+
with open(config_path, "w") as f:
143+
yaml.dump(empty_config, f)
144+
145+
monkeypatch.setenv("LITELLM_MASTER_KEY", test_env_master_key)
146+
print("test_env_master_key: {}".format(test_env_master_key))
147+
async with proxy_startup_event(app):
148+
from litellm.proxy.proxy_server import master_key
149+
150+
assert master_key == test_env_master_key
151+
152+
# Test Case 3: Master key with os.environ prefix
153+
test_resolved_key = "sk-resolved-key"
154+
test_config_with_prefix = {
155+
"general_settings": {"master_key": "os.environ/CUSTOM_MASTER_KEY"}
156+
}
157+
158+
# Create config with os.environ prefix
159+
with open(config_path, "w") as f:
160+
yaml.dump(test_config_with_prefix, f)
161+
162+
monkeypatch.setenv("CUSTOM_MASTER_KEY", test_resolved_key)
163+
async with proxy_startup_event(app):
164+
from litellm.proxy.proxy_server import master_key
165+
166+
assert master_key == test_resolved_key

tests/litellm/rerank_api/test_rerank_main.py

-51
This file was deleted.

tests/litellm/test_main.py

+11-1
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,15 @@
1414
import litellm
1515

1616

17+
@pytest.fixture(autouse=True)
18+
def add_api_keys_to_env(monkeypatch):
19+
monkeypatch.setenv("ANTHROPIC_API_KEY", "sk-ant-api03-1234567890")
20+
monkeypatch.setenv("OPENAI_API_KEY", "sk-openai-api03-1234567890")
21+
monkeypatch.setenv("AWS_ACCESS_KEY_ID", "my-fake-aws-access-key-id")
22+
monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "my-fake-aws-secret-access-key")
23+
monkeypatch.setenv("AWS_REGION", "us-east-1")
24+
25+
1726
@pytest.fixture
1827
def openai_api_response():
1928
mock_response_data = {
@@ -130,7 +139,8 @@ def test_completion_missing_role(openai_api_response):
130139
)
131140
@pytest.mark.parametrize("sync_mode", [True, False])
132141
@pytest.mark.asyncio
133-
async def test_url_with_format_param(model, sync_mode):
142+
async def test_url_with_format_param(model, sync_mode, monkeypatch):
143+
134144
from litellm import acompletion, completion
135145
from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
136146

tests/llm_translation/test_cohere.py

+1
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@
2222

2323

2424
@pytest.mark.parametrize("stream", [True, False])
25+
@pytest.mark.flaky(retries=3, delay=1)
2526
@pytest.mark.asyncio
2627
async def test_chat_completion_cohere_citations(stream):
2728
try:

0 commit comments

Comments (0)