@@ -1,5 +1,6 @@
-from typing import List
+from typing import List, Optional
 from uuid import UUID
+from enum import Enum
 
 from fastapi import APIRouter, Depends, HTTPException, Query, Response
 from fastapi.responses import StreamingResponse
@@ -13,6 +14,7 @@
     get_test_results_for_test_run,
     test_run_results_to_csv,
 )
+from rhesis.backend.app.services.stats.test_run import get_test_run_stats
 from rhesis.backend.app.utils.decorators import with_count_header
 from rhesis.backend.app.utils.schema_factory import create_detailed_schema
 
@@ -23,6 +25,17 @@
     include_nested_relationships={"test_configuration": {"endpoint": ["project"], "test_set": []}},
 )
 
+
+class TestRunStatsMode(str, Enum):
+    ALL = "all"
+    STATUS = "status"
+    RESULTS = "results"
+    TEST_SETS = "test_sets"
+    EXECUTORS = "executors"
+    TIMELINE = "timeline"
+    SUMMARY = "summary"
+
+
 router = APIRouter(
     prefix="/test_runs",
     tags=["test_runs"],
@@ -67,6 +80,204 @@ def read_test_runs(
     return test_runs
 
 
+@router.get("/stats", response_model=schemas.TestRunStatsResponse)
+def generate_test_run_stats(
+    mode: TestRunStatsMode = Query(
+        TestRunStatsMode.ALL,
+        description="Data mode: 'summary' (lightweight), 'status' (status distribution), "
+        "'results' (result distribution), 'test_sets' (most run test sets), "
+        "'executors' (top executors), 'timeline' (trends), 'all' (complete)",
+    ),
+    top: Optional[int] = Query(
+        None, description="Max items per dimension (e.g., top 10 executors)"
+    ),
+    months: Optional[int] = Query(
+        6, description="Months of historical data to include (default: 6)"
+    ),
+    # Test run filters
+    test_run_ids: Optional[List[UUID]] = Query(None, description="Filter by specific test run IDs"),
+    # User-related filters
+    user_ids: Optional[List[UUID]] = Query(None, description="Filter by executor user IDs"),
+    # Configuration filters
+    endpoint_ids: Optional[List[UUID]] = Query(None, description="Filter by endpoint IDs"),
+    test_set_ids: Optional[List[UUID]] = Query(None, description="Filter by test set IDs"),
+    # Status filters
+    status_list: Optional[List[str]] = Query(None, description="Filter by test run statuses"),
+    # Date range filters
+    start_date: Optional[str] = Query(
+        None, description="Start date (ISO format, overrides months parameter)"
+    ),
+    end_date: Optional[str] = Query(
+        None, description="End date (ISO format, overrides months parameter)"
+    ),
+    db: Session = Depends(get_db),
+    current_user: User = Depends(require_current_user_or_token),
+):
+    """Get test run statistics with configurable data modes for optimal performance.
+
+    ## Available Modes
+
+    ### Performance-Optimized Modes (recommended for specific use cases):
+
+    **`summary`** - Ultra-lightweight (~5% of full data size)
+    - Returns: `overall_summary` + `metadata`
+    - Use case: Dashboard widgets, quick overviews
+    - Response time: ~50ms
+
+    **`status`** - Test run status distribution (~15% of full data size)
+    - Returns: `status_distribution` + `metadata`
+    - Contains: Count and percentage of runs by status (pending, running, completed, failed)
+    - Use case: Status monitoring dashboards, operational views
+
+    **`results`** - Test run result distribution (~15% of full data size)
+    - Returns: `result_distribution` + `metadata`
+    - Contains: Pass/fail rates and counts for test runs
+    - Use case: Success rate tracking, quality metrics
+
+    **`test_sets`** - Most run test sets analysis (~20% of full data size)
+    - Returns: `most_run_test_sets` + `metadata`
+    - Contains: Test sets ranked by execution frequency
+    - Use case: Popular test set identification, usage analytics
+
+    **`executors`** - Top test executors (~20% of full data size)
+    - Returns: `top_executors` + `metadata`
+    - Contains: Users ranked by test run execution count
+    - Use case: User activity tracking, workload distribution
+
+    **`timeline`** - Trend analysis (~40% of full data size)
+    - Returns: `timeline` + `metadata`
+    - Contains: Monthly test run counts and status/result breakdowns
+    - Use case: Trend charts, historical analysis, capacity planning
+
+    ### Complete Dataset Mode:
+
+    **`all`** - Complete dataset (default, full data size)
+    - Returns: All sections above combined
+    - Use case: Comprehensive dashboards, full analytics
+    - Response time: ~200-500ms depending on data volume
+
+    ## Response Structure Examples
+
+    ### Summary Mode Response:
+    ```json
+    {
+        "overall_summary": {
+            "total_runs": 150,
+            "unique_test_sets": 25,
+            "unique_executors": 8,
+            "most_common_status": "completed",
+            "pass_rate": 85.5
+        },
+        "metadata": {
+            "mode": "summary",
+            "total_test_runs": 150,
+            "available_statuses": ["completed", "failed", "running"],
+            ...
+        }
+    }
+    ```
+
+    ### Status Mode Response:
+    ```json
+    {
+        "status_distribution": [
+            {
+                "status": "completed",
+                "count": 90,
+                "percentage": 60.0
+            },
+            {
+                "status": "failed",
+                "count": 30,
+                "percentage": 20.0
+            }
+        ],
+        "metadata": { "mode": "status", ... }
+    }
+    ```
+
+    ## Comprehensive Filtering System
+
+    ### Test Run Filters
+    - `test_run_ids`: Filter specific test runs - `?test_run_ids={uuid1}&test_run_ids={uuid2}`
+
+    ### User-Related Filters
+    - `user_ids`: Filter by executors - `?user_ids={uuid1}&user_ids={uuid2}`
+
+    ### Configuration Filters
+    - `endpoint_ids`: Filter by endpoints - `?endpoint_ids={uuid1}&endpoint_ids={uuid2}`
+    - `test_set_ids`: Filter by test sets - `?test_set_ids={uuid1}&test_set_ids={uuid2}`
+
+    ### Status Filters
+    - `status_list`: Filter by statuses - `?status_list=completed&status_list=failed`
+
+    ### Date Range Filters
+    - `start_date`/`end_date`: Date range - `?start_date=2024-01-01&end_date=2024-12-31`
+
+    ## Usage Examples
+
+    ### Basic Usage
+    - Dashboard widget: `?mode=summary`
+    - Status monitoring: `?mode=status&months=1`
+    - Timeline charts: `?mode=timeline&months=6`
+    - Full analytics: `?mode=all` (or omit the mode parameter)
+
+    ### Filtered Analysis
+    - User activity: `?mode=executors&user_ids={uuid}&months=3`
+    - Test set popularity: `?mode=test_sets&test_set_ids={uuid1}&test_set_ids={uuid2}`
+    - Endpoint performance: `?mode=results&endpoint_ids={uuid}`
+    - Status trends: `?mode=timeline&status_list=failed&months=12`
+
+    Args:
+        mode: Data mode to retrieve (default: 'all'). See mode descriptions above.
+        top: Optional number of top items to show per dimension
+        months: Number of months to include in historical timeline
+            (default: 6, overridden by the date range filters)
+
+        # Test run filters
+        test_run_ids: Optional list of test run UUIDs to include
+
+        # User-related filters
+        user_ids: Optional list of user UUIDs (test run executors) to include
+
+        # Configuration filters
+        endpoint_ids: Optional list of endpoint UUIDs to include
+        test_set_ids: Optional list of test set UUIDs to include
+
+        # Status filters
+        status_list: Optional list of test run statuses to include
+
+        # Date range filters
+        start_date: Optional start date (ISO format, overrides months parameter)
+        end_date: Optional end date (ISO format, overrides months parameter)
+
+        db: Database session
+        current_user: Current authenticated user
+
+    Returns:
+        Dict: Response structure varies by mode (see examples above); serialized
+        as TestRunStatsResponse
+    """
+    return get_test_run_stats(
+        db=db,
+        organization_id=str(current_user.organization_id) if current_user.organization_id else None,
+        months=months,
+        mode=mode.value,
+        top=top,
+        # Test run filters (avoid shadowing the built-in `id` in comprehensions)
+        test_run_ids=[str(run_id) for run_id in test_run_ids] if test_run_ids else None,
+        # User-related filters
+        user_ids=[str(user_id) for user_id in user_ids] if user_ids else None,
+        # Configuration filters
+        endpoint_ids=[str(endpoint_id) for endpoint_id in endpoint_ids] if endpoint_ids else None,
+        test_set_ids=[str(test_set_id) for test_set_id in test_set_ids] if test_set_ids else None,
+        # Status filters
+        status_list=status_list,
+        # Date range filters
+        start_date=start_date,
+        end_date=end_date,
+    )
+
+
 @router.get("/{test_run_id}", response_model=TestRunDetailSchema)
 def read_test_run(
     test_run_id: UUID,