Skip to content

User experiment migration tool #1724

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 13 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 6 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -1,12 +1,9 @@
from uuid import uuid4

from django.core.management.base import BaseCommand
from django.db import transaction
from django.db.models import Q

from apps.experiments.helper import convert_non_pipeline_experiment_to_pipeline
from apps.experiments.models import Experiment
from apps.pipelines.models import Pipeline
from apps.pipelines.nodes.nodes import AssistantNode, LLMResponseWithPrompt
from apps.teams.models import Flag


Expand Down Expand Up @@ -104,7 +101,7 @@ def handle(self, *args, **options):
for experiment in experiments_to_convert:
try:
with transaction.atomic():
self._convert_experiment(experiment)
convert_non_pipeline_experiment_to_pipeline(experiment)
converted_count += 1
self.stdout.write(self.style.SUCCESS(f"Success: {experiment.name}"))
except Exception as e:
Expand All @@ -122,79 +119,8 @@ def _get_experiment_type(self, experiment):
return "LLM"
else:
return "Unknown"

def _convert_experiment(self, experiment):
    """Attach a freshly built pipeline to *experiment* and clear its legacy fields.

    Raises:
        ValueError: if the experiment is neither assistant- nor LLM-backed.
    """
    if experiment.assistant:
        build_pipeline = self._create_assistant_pipeline
    elif experiment.llm_provider:
        build_pipeline = self._create_llm_pipeline
    else:
        raise ValueError(f"Unknown experiment type for experiment {experiment.id}")

    experiment.pipeline = build_pipeline(experiment)
    # The pipeline now owns the configuration, so null out the legacy fields.
    experiment.assistant = None
    experiment.llm_provider = None
    experiment.llm_provider_model = None
    experiment.save()

def _get_chatbots_flag_team_ids(self):
    """Return the ids of every team that has the ``flag_chatbots`` flag."""
    flag = Flag.objects.get(name="flag_chatbots")
    team_ids = flag.teams.values_list("id", flat=True)
    return list(team_ids)

def _create_pipeline_with_node(self, experiment, node_type, node_label, node_params):
    """Create a pipeline with start -> custom_node -> end structure."""
    node_config = {
        "id": str(uuid4()),
        "type": "pipelineNode",
        "position": {"x": 400, "y": 200},
        "data": {
            "type": node_type,
            "label": node_label,
            "params": node_params,
        },
    }
    return Pipeline._create_pipeline_with_nodes(
        team=experiment.team,
        name=f"{experiment.name} Pipeline",
        middle_nodes_config=node_config,
    )

def _create_llm_pipeline(self, experiment):
    """Create a start -> LLMResponseWithPrompt -> end nodes pipeline for an LLM experiment."""
    source_material = experiment.source_material
    operations = experiment.custom_action_operations.select_related("custom_action").all()
    custom_action_ids = [operation.get_model_id(False) for operation in operations]

    params = {
        "name": "llm",
        "llm_provider_id": experiment.llm_provider.id,
        "llm_provider_model_id": experiment.llm_provider_model.id,
        "llm_temperature": experiment.temperature,
        "history_type": "global",
        "history_name": None,
        "history_mode": "summarize",
        "user_max_token_limit": experiment.llm_provider_model.max_token_limit,
        "max_history_length": 10,
        "source_material_id": source_material.id if source_material else None,
        "prompt": experiment.prompt_text or "",
        "tools": list(experiment.tools) if experiment.tools else [],
        "custom_actions": custom_action_ids,
        "built_in_tools": [],
        "tool_config": {},
    }
    return self._create_pipeline_with_node(
        experiment=experiment,
        node_type=LLMResponseWithPrompt.__name__,
        node_label="LLM",
        node_params=params,
    )

def _create_assistant_pipeline(self, experiment):
    """Create a start -> AssistantNode -> end nodes pipeline for an assistant experiment."""
    assistant_params = {
        "name": "assistant",
        # Node params carry the assistant reference as a string id.
        "assistant_id": str(experiment.assistant.id),
        "citations_enabled": experiment.citations_enabled,
        # Empty string (not None) when no input formatter is configured.
        "input_formatter": experiment.input_formatter or "",
    }

    return self._create_pipeline_with_node(
        experiment=experiment,
        node_type=AssistantNode.__name__,
        node_label="OpenAI Assistant",
        node_params=assistant_params,
    )
16 changes: 0 additions & 16 deletions apps/experiments/tasks.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@

from celery.app import shared_task
from django.core.files.base import ContentFile
from django.core.management import call_command
from django.utils import timezone
from field_audit.models import AuditAction
from langchain_core.messages import AIMessage, HumanMessage
Expand Down Expand Up @@ -154,18 +153,3 @@ def _convert_prompt_builder_history(messages_history):
elif message["author"] == "Assistant":
history.append(AIMessage(content=message["message"]))
return history


@shared_task(bind=True, base=TaskbadgerTask)
def migrate_experiment_to_pipeline_task(self, experiment_id: int) -> dict:
    """
    Migrate a single assistant or llm experiment to pipeline experiment.

    Args:
        experiment_id: Primary key of the ``Experiment`` to migrate.

    Returns:
        A summary dict with ``success``, ``experiment_id`` and ``experiment_name``.

    Raises:
        Re-raises any exception from the lookup or the management command
        (after logging it with traceback) so Celery marks the task as failed.
    """
    try:
        experiment = Experiment.objects.get(id=experiment_id)
        call_command(
            "migrate_nonpipeline_to_pipeline_experiments",
            experiment_id=experiment_id,
            skip_confirmation=True,
        )
        return {"success": True, "experiment_id": experiment_id, "experiment_name": experiment.name}
    except Exception:
        # logger.exception already appends the traceback (which includes the
        # exception text), so use lazy %-style args instead of an eager f-string.
        logger.exception("Failed to migrate experiment %s", experiment_id)
        raise
12 changes: 8 additions & 4 deletions apps/experiments/views/experiment.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
from celery_progress.backend import Progress
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import permission_required
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.core.exceptions import PermissionDenied, ValidationError
from django.db import transaction
Expand Down Expand Up @@ -87,7 +87,6 @@
async_create_experiment_version,
async_export_chat,
get_response_for_webchat_task,
migrate_experiment_to_pipeline_task,
)
from apps.experiments.views.prompt import PROMPT_DATA_SESSION_KEY
from apps.files.forms import get_file_formset
Expand Down Expand Up @@ -1504,15 +1503,20 @@ def get_release_status_badge(request, team_slug: str, experiment_id: int):
return render(request, "experiments/components/unreleased_badge.html", context)


@login_required
@permission_required(("experiments.change_experiment", "pipelines.add_pipeline"))
def migrate_experiment_view(request, team_slug, experiment_id):
from apps.pipelines.helper import convert_non_pipeline_experiment_to_pipeline

experiment = get_object_or_404(Experiment, id=experiment_id, team__slug=team_slug)
failed_url = reverse(
"experiments:single_experiment_home",
kwargs={"team_slug": team_slug, "experiment_id": experiment_id},
)
try:
task = migrate_experiment_to_pipeline_task.delay(experiment_id)
task.get(timeout=60)
with transaction.atomic():
experiment = Experiment.objects.get(id=experiment_id)
convert_non_pipeline_experiment_to_pipeline(experiment)
messages.success(request, f'Successfully migrated experiment "{experiment.name}" to chatbot!')
return redirect("chatbots:single_chatbot_home", team_slug=team_slug, experiment_id=experiment_id)
except Exception as e:
Expand Down
84 changes: 84 additions & 0 deletions apps/pipelines/helper.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,3 +35,87 @@ def duplicate_pipeline_with_new_ids(pipeline_data):
edge["target"] = new_target_id

return new_data, old_to_new_node_ids


# TODO: function is temporary and can be deleted after the exp -> chatbot transition is complete
def convert_non_pipeline_experiment_to_pipeline(experiment):
    """Convert a legacy (assistant- or LLM-backed) experiment into a pipeline experiment.

    Builds an equivalent pipeline, attaches it to the experiment, and clears the
    legacy configuration fields before saving.

    Raises:
        ValueError: if the experiment already has a pipeline attached, or is
            neither assistant- nor LLM-backed.
    """
    # Extra-paranoid validation: never clobber an existing pipeline.
    if experiment.pipeline:
        raise ValueError(f"Experiment already has a pipeline attached: {experiment.id}")
    elif experiment.assistant:
        pipeline = _create_assistant_pipeline(experiment)
    elif experiment.llm_provider:
        pipeline = _create_llm_pipeline(experiment)
    else:
        raise ValueError(f"Unknown experiment type for experiment {experiment.id}")

    experiment.pipeline = pipeline
    experiment.assistant = None
    experiment.llm_provider = None
    experiment.llm_provider_model = None
    experiment.save()


# TODO: function is temporary and can be deleted after the exp -> chatbot transition is complete
def _create_pipeline_with_node(experiment, node_type, node_label, node_params):
    """Create a pipeline with start -> custom_node -> end structure."""
    # NOTE: the docstring must be the first statement; previously it sat after
    # this import and was a no-op string expression. Import is deferred to
    # avoid a circular import at module load time — confirm against models.py.
    from .models import Pipeline

    pipeline_name = f"{experiment.name} Pipeline"
    middle_node_config = {
        "id": str(uuid4()),
        "type": "pipelineNode",
        "position": {"x": 400, "y": 200},
        "data": {"type": node_type, "label": node_label, "params": node_params},
    }

    return Pipeline._create_pipeline_with_nodes(
        team=experiment.team, name=pipeline_name, middle_nodes_config=middle_node_config
    )


# TODO: function is temporary and can be deleted after the exp -> chatbot transition is complete
def _create_llm_pipeline(experiment):
    """Create a start -> LLMResponseWithPrompt -> end nodes pipeline for an LLM experiment."""
    # Docstring moved before the import: previously it followed the import and
    # was a no-op string expression rather than the function's docstring.
    from apps.pipelines.nodes.nodes import LLMResponseWithPrompt

    llm_params = {
        "name": "llm",
        "llm_provider_id": experiment.llm_provider.id,
        "llm_provider_model_id": experiment.llm_provider_model.id,
        "llm_temperature": experiment.temperature,
        "history_type": "global",
        "history_name": None,
        "history_mode": "summarize",
        "user_max_token_limit": experiment.llm_provider_model.max_token_limit,
        "max_history_length": 10,
        "source_material_id": experiment.source_material.id if experiment.source_material else None,
        "prompt": experiment.prompt_text or "",
        "tools": list(experiment.tools) if experiment.tools else [],
        "custom_actions": [
            op.get_model_id(False) for op in experiment.custom_action_operations.select_related("custom_action").all()
        ],
        "built_in_tools": [],
        "tool_config": {},
    }

    return _create_pipeline_with_node(
        experiment=experiment, node_type=LLMResponseWithPrompt.__name__, node_label="LLM", node_params=llm_params
    )


# TODO: function is temporary and can be deleted after the exp -> chatbot transition is complete
def _create_assistant_pipeline(experiment):
    """Create a start -> AssistantNode -> end nodes pipeline for an assistant experiment."""
    # Docstring moved before the import: previously it followed the import and
    # was a no-op string expression rather than the function's docstring.
    from apps.pipelines.nodes.nodes import AssistantNode

    assistant_params = {
        "name": "assistant",
        "assistant_id": str(experiment.assistant.id),
        "citations_enabled": experiment.citations_enabled,
        "input_formatter": experiment.input_formatter or "",
    }

    return _create_pipeline_with_node(
        experiment=experiment,
        node_type=AssistantNode.__name__,
        node_label="OpenAI Assistant",
        node_params=assistant_params,
    )