Skip to content

Commit a05211f

Browse files
authored
Merge branch 'dev' into swiftyos/get-linear-issues
2 parents 2941b4c + 746dbba commit a05211f

File tree

35 files changed

+1211
-431
lines changed

35 files changed

+1211
-431
lines changed

autogpt_platform/backend/backend/data/execution.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -460,6 +460,7 @@ def to_node_execution_entry(
460460
async def get_graph_executions(
461461
graph_exec_id: Optional[str] = None,
462462
graph_id: Optional[str] = None,
463+
graph_version: Optional[int] = None,
463464
user_id: Optional[str] = None,
464465
statuses: Optional[list[ExecutionStatus]] = None,
465466
created_time_gte: Optional[datetime] = None,
@@ -476,6 +477,8 @@ async def get_graph_executions(
476477
where_filter["userId"] = user_id
477478
if graph_id:
478479
where_filter["agentGraphId"] = graph_id
480+
if graph_version is not None:
481+
where_filter["agentGraphVersion"] = graph_version
479482
if created_time_gte or created_time_lte:
480483
where_filter["createdAt"] = {
481484
"gte": created_time_gte or datetime.min.replace(tzinfo=timezone.utc),

autogpt_platform/backend/backend/executor/activity_status_generator.py

Lines changed: 128 additions & 82 deletions
Large diffs are not rendered by default.

autogpt_platform/backend/backend/server/v2/admin/execution_analytics_routes.py

Lines changed: 138 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77
from fastapi import APIRouter, HTTPException, Security
88
from pydantic import BaseModel, Field
99

10+
from backend.blocks.llm import LlmModel
1011
from backend.data.execution import (
1112
ExecutionStatus,
1213
GraphExecutionMeta,
@@ -15,6 +16,8 @@
1516
)
1617
from backend.data.model import GraphExecutionStats
1718
from backend.executor.activity_status_generator import (
19+
DEFAULT_SYSTEM_PROMPT,
20+
DEFAULT_USER_PROMPT,
1821
generate_activity_status_for_execution,
1922
)
2023
from backend.executor.manager import get_db_async_client
@@ -30,12 +33,21 @@ class ExecutionAnalyticsRequest(BaseModel):
3033
created_after: Optional[datetime] = Field(
3134
None, description="Optional created date lower bound"
3235
)
33-
model_name: Optional[str] = Field(
34-
"gpt-4o-mini", description="Model to use for generation"
35-
)
36+
model_name: str = Field("gpt-4o-mini", description="Model to use for generation")
3637
batch_size: int = Field(
3738
10, description="Batch size for concurrent processing", le=25, ge=1
3839
)
40+
system_prompt: Optional[str] = Field(
41+
None, description="Custom system prompt (default: built-in prompt)"
42+
)
43+
user_prompt: Optional[str] = Field(
44+
None,
45+
description="Custom user prompt with {{GRAPH_NAME}} and {{EXECUTION_DATA}} placeholders (default: built-in prompt)",
46+
)
47+
skip_existing: bool = Field(
48+
True,
49+
description="Whether to skip executions that already have activity status and correctness score",
50+
)
3951

4052

4153
class ExecutionAnalyticsResult(BaseModel):
@@ -58,13 +70,120 @@ class ExecutionAnalyticsResponse(BaseModel):
5870
results: list[ExecutionAnalyticsResult]
5971

6072

73+
class ModelInfo(BaseModel):
    """Metadata for one selectable LLM, as shown in the admin UI."""

    # Raw enum value the client sends back when selecting this model.
    value: str
    # Human-friendly display name (provider-prefixed).
    label: str
    # Provider identifier, used for grouping/sorting in the UI.
    provider: str
78+
79+
class ExecutionAnalyticsConfig(BaseModel):
    """Configuration payload returned by the execution-analytics config endpoint."""

    # All selectable models with display metadata.
    available_models: list[ModelInfo]
    # Built-in prompts the UI can pre-fill for the admin to edit.
    default_system_prompt: str
    default_user_prompt: str
    # Enum value of the model the UI should pre-select.
    recommended_model: str
85+
6186
router = APIRouter(
6287
prefix="/admin",
6388
tags=["admin", "execution_analytics"],
6489
dependencies=[Security(requires_admin_user)],
6590
)
6691

6792

93+
@router.get(
    "/execution_analytics/config",
    response_model=ExecutionAnalyticsConfig,
    summary="Get Execution Analytics Configuration",
)
async def get_execution_analytics_config(
    admin_user_id: str = Security(get_user_id),
):
    """
    Get the configuration for execution analytics including:
    - Available AI models with metadata
    - Default system and user prompts
    - Recommended model selection
    """
    logger.info(f"Admin user {admin_user_id} requesting execution analytics config")

    # Generate model list from LlmModel enum with provider information.
    available_models = []

    def generate_model_label(model: LlmModel) -> str:
        """Generate a user-friendly label from the model enum value.

        e.g. "gpt-4-turbo" -> "GPT 4 Turbo",
             "claude-3-haiku-20240307" -> "Claude 3 Haiku"
        """
        value = model.value

        # Strip a provider prefix (e.g. "google/", "x-ai/") if present, then
        # split on both underscores and hyphens.
        model_name = value.split("/", 1)[1] if "/" in value else value
        parts = model_name.replace("_", "-").split("-")

        formatted_parts: list[str] = []
        # Once a date-like component (8-digit date or >=2020 year) has been
        # seen, remaining 1-2 digit parts are month/day fragments to drop.
        in_date_suffix = False
        for part in parts:
            if part.isdigit():
                if len(part) >= 8:  # long date format like "20240307"
                    in_date_suffix = True
                    continue
                if len(part) == 4 and int(part) >= 2020:  # year like "2024"
                    in_date_suffix = True
                    continue
                if in_date_suffix and len(part) <= 2 and int(part) <= 31:
                    # Month/day component like "04", "16" after a year — skip.
                    continue
                # BUGFIX: short digits *before* any date component are model
                # version numbers ("gpt-4-turbo", "claude-3-haiku"); the old
                # unconditional <=31 check dropped them, producing "GPT Turbo"
                # instead of the documented "GPT 4 Turbo".
                formatted_parts.append(part)
            elif part.replace(".", "").isdigit():
                # Dotted version numbers like "1.5" are kept as-is.
                formatted_parts.append(part)
            else:
                # Capitalize normal words; keep known acronyms upper-case.
                formatted_parts.append(
                    part.upper()
                    if part.upper() in ["GPT", "LLM", "API", "V0"]
                    else part.capitalize()
                )

        model_name = " ".join(formatted_parts)

        # Format provider name for better display and prefix it for clarity.
        provider_name = model.provider.replace("_", " ").title()
        return f"{provider_name}: {model_name}"

    # Include all LlmModel values (no filtering by a hardcoded list).
    recommended_model = LlmModel.GPT4O_MINI.value
    for model in LlmModel:
        label = generate_model_label(model)
        # Add "(Recommended)" suffix to the recommended model.
        if model.value == recommended_model:
            label += " (Recommended)"

        available_models.append(
            ModelInfo(
                value=model.value,
                label=label,
                provider=model.provider,
            )
        )

    # Sort models by provider and name for better UX.
    available_models.sort(key=lambda x: (x.provider, x.label))

    return ExecutionAnalyticsConfig(
        available_models=available_models,
        default_system_prompt=DEFAULT_SYSTEM_PROMPT,
        default_user_prompt=DEFAULT_USER_PROMPT,
        recommended_model=recommended_model,
    )
185+
186+
68187
@router.post(
69188
"/execution_analytics",
70189
response_model=ExecutionAnalyticsResponse,
@@ -100,6 +219,7 @@ async def generate_execution_analytics(
100219
# Fetch executions to process
101220
executions = await get_graph_executions(
102221
graph_id=request.graph_id,
222+
graph_version=request.graph_version,
103223
user_id=request.user_id,
104224
created_time_gte=request.created_after,
105225
statuses=[
@@ -113,21 +233,20 @@ async def generate_execution_analytics(
113233
f"Found {len(executions)} total executions for graph {request.graph_id}"
114234
)
115235

116-
# Filter executions that need analytics generation (missing activity_status or correctness_score)
236+
# Filter executions that need analytics generation
117237
executions_to_process = []
118238
for execution in executions:
239+
# Skip if we should skip existing analytics and both activity_status and correctness_score exist
119240
if (
120-
not execution.stats
121-
or not execution.stats.activity_status
122-
or execution.stats.correctness_score is None
241+
request.skip_existing
242+
and execution.stats
243+
and execution.stats.activity_status
244+
and execution.stats.correctness_score is not None
123245
):
246+
continue
124247

125-
# If version is specified, filter by it
126-
if (
127-
request.graph_version is None
128-
or execution.graph_version == request.graph_version
129-
):
130-
executions_to_process.append(execution)
248+
# Add execution to processing list
249+
executions_to_process.append(execution)
131250

132251
logger.info(
133252
f"Found {len(executions_to_process)} executions needing analytics generation"
@@ -152,9 +271,7 @@ async def generate_execution_analytics(
152271
f"Processing batch {batch_idx + 1}/{total_batches} with {len(batch)} executions"
153272
)
154273

155-
batch_results = await _process_batch(
156-
batch, request.model_name or "gpt-4o-mini", db_client
157-
)
274+
batch_results = await _process_batch(batch, request, db_client)
158275

159276
for result in batch_results:
160277
results.append(result)
@@ -212,7 +329,7 @@ async def generate_execution_analytics(
212329

213330

214331
async def _process_batch(
215-
executions, model_name: str, db_client
332+
executions, request: ExecutionAnalyticsRequest, db_client
216333
) -> list[ExecutionAnalyticsResult]:
217334
"""Process a batch of executions concurrently."""
218335

@@ -237,8 +354,11 @@ async def process_single_execution(execution) -> ExecutionAnalyticsResult:
237354
db_client=db_client,
238355
user_id=execution.user_id,
239356
execution_status=execution.status,
240-
model_name=model_name, # Pass model name parameter
357+
model_name=request.model_name,
241358
skip_feature_flag=True, # Admin endpoint bypasses feature flags
359+
system_prompt=request.system_prompt or DEFAULT_SYSTEM_PROMPT,
360+
user_prompt=request.user_prompt or DEFAULT_USER_PROMPT,
361+
skip_existing=request.skip_existing,
242362
)
243363

244364
if not activity_response:

0 commit comments

Comments (0)