diff --git a/antora.yml b/antora.yml
index 919e0a80..63374faf 100644
--- a/antora.yml
+++ b/antora.yml
@@ -13,24 +13,25 @@ asciidoc:
serverlessoperatorname: OpenShift Serverless Operator
serverlessproductname: OpenShift Serverless
product_name: OpenShift Serverless Logic
- kogito_version_redhat: 1.38.0.Final-redhat-00002
- operator_name: Kogito Serverless Workflow Operator
+ kogito_version_redhat: 1.44.0.Final-redhat-00005
+ operator_name: Serverless Logic Operator
quarkus_platform: com.redhat.quarkus.platform
kogito_sw_ga: >-
org.kie.kogito:kogito-quarkus-serverless-workflow
- quarkus_version: 2.13.7.Final-redhat-00003
- quarkus_platform_version: 2.13.7.Final-redhat-00003
+ quarkus_version: 2.13.8.SP3-redhat-00001
+ quarkus_platform_version: 2.13.8.SP3-redhat-00001
java_min_version: 11+
maven_min_version: 3.8.6
graalvm_min_version: 22.3.0
spec_version: 0.8
- vscode_version: 1.66.0
- kn_cli_version: 0.28.0
+ vscode_version: 1.84.0
+ kn_cli_version: 0.32.0
docker_min_version: 20.10.7
docker_compose_min_version: 1.27.2
- operator_version: v1.38.0
+ operator_version: 1.31
kogito_examples_repository_url: 'https://github.com/kiegroup/kogito-examples'
- kogito_sw_examples_url: https://github.com/kiegroup/kogito-examples/tree/1.38.x/serverless-workflow-examples
+ kogito_sw_operator_examples_url: https://github.com/apache/incubator-kie-kogito-examples/tree/1.44.x/serverless-operator-examples
+ kogito_sw_examples_url: https://github.com/kiegroup/kogito-examples/tree/1.44.x/serverless-workflow-examples
kogito_examples_url: 'https://github.com/kiegroup/kogito-examples.git'
kogito_apps_url: https://github.com/kiegroup/kogito-apps/tree/main
quarkus_cli_url: 'https://quarkus.io/guides/cli-tooling'
@@ -79,5 +80,5 @@ asciidoc:
kaoto_url: https://marketplace.visualstudio.com/items?itemName=redhat.vscode-kaoto
minikube_url: https://minikube.sigs.k8s.io
- kogito_serverless_operator_url: https://github.com/kiegroup/kogito-serverless-operator/
+ kogito_serverless_operator_url: https://github.com/apache/incubator-kie-kogito-serverless-operator/
docs_issues_url: https://github.com/kiegroup/kogito-docs/issues/new
\ No newline at end of file
diff --git a/modules/ROOT/nav.adoc b/modules/ROOT/nav.adoc
index 479c766a..6843e39d 100644
--- a/modules/ROOT/nav.adoc
+++ b/modules/ROOT/nav.adoc
@@ -12,27 +12,24 @@
** xref:serverless-logic:about.adoc[About OpenShift Serverless Logic]
** User Guides
*** Getting Started
+**** xref:serverless-logic:getting-started/create-your-first-workflow-service-with-kn-cli-and-vscode.adoc[Creating your first workflow service with KN CLI and VS Code]
**** xref:serverless-logic:getting-started/create-your-first-workflow-service.adoc[Creating your first workflow service]
-**** xref:serverless-logic:getting-started/cncf-serverless-workflow-specification-support.adoc[CNCF Serverless Workflow specification]
**** xref:serverless-logic:getting-started/getting-familiar-with-our-tooling.adoc[Getting familiar with tooling]
*** Core
+**** xref:serverless-logic:core/cncf-serverless-workflow-specification-support.adoc[CNCF Serverless Workflow specification]
+**** xref:serverless-logic:core/handling-events-on-workflows.adoc[Events]
+**** xref:serverless-logic:core/working-with-callbacks.adoc[Callbacks]
**** xref:serverless-logic:core/understanding-jq-expressions.adoc[jq expressions]
**** xref:serverless-logic:core/understanding-workflow-error-handling.adoc[Error handling]
-**** xref:serverless-logic:core/working-with-parallelism.adoc[Parallelism]
**** xref:serverless-logic:core/configuration-properties.adoc[Configuration properties]
-//**** xref:serverless-logic:core/accessing-workflow-metainformation-in-runtime.adoc[Accessing workflow metainformation in runtime]
**** xref:serverless-logic:core/defining-an-input-schema-for-workflows.adoc[Defining an input schema for your workflows]
**** xref:serverless-logic:core/custom-functions-support.adoc[Custom functions for your service]
**** xref:serverless-logic:core/timeouts-support.adoc[Timeouts]
+**** xref:serverless-logic:core/working-with-parallelism.adoc[Parallelism]
*** Tooling
**** xref:serverless-logic:tooling/serverless-workflow-editor/swf-editor-overview.adoc[Serverless Workflow editor]
***** xref:serverless-logic:tooling/serverless-workflow-editor/swf-editor-vscode-extension.adoc[VS Code extension for Serverless Workflow editor]
***** xref:serverless-logic:tooling/serverless-workflow-editor/swf-editor-chrome-extension.adoc[Chrome GitHub extension for Serverless Workflow editor]
-**** xref:serverless-logic:tooling/quarkus-dev-ui-extension/quarkus-dev-ui-overview.adoc[Kogito Serverless Workflow Tools extension in Quarkus Dev UI]
-***** xref:serverless-logic:tooling/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-instances-page.adoc[Workflow Instances in Kogito Serverless Workflow Tools extension]
-***** xref:serverless-logic:tooling/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-definition-page.adoc[Workflow Definitions in Kogito Serverless Workflow Tools extension]
-***** xref:serverless-logic:tooling/quarkus-dev-ui-extension/quarkus-dev-ui-custom-dashboard-page.adoc[Dashboards in Kogito Serverless Workflow Tools extension]
-**** xref:serverless-logic:tooling/kn-plugin-workflow-overview.adoc[{product_name} plug-in for Knative CLI]
**** xref:serverless-logic:tooling/serverless-logic-web-tools/serverless-logic-web-tools-overview.adoc[{serverless_logic_web_tools_name}]
***** xref:serverless-logic:tooling/serverless-logic-web-tools/serverless-logic-web-tools-github-integration.adoc[GitHub integration]
***** xref:serverless-logic:tooling/serverless-logic-web-tools/serverless-logic-web-tools-openshift-integration.adoc[OpenShift integration]
@@ -41,48 +38,51 @@
***** xref:serverless-logic:tooling/serverless-logic-web-tools/serverless-logic-web-tools-enable-kogito-swf-visualization.adoc[Kogito Serverless Workflow Visualization]
*** Service Orchestration
**** xref:serverless-logic:service-orchestration/orchestration-of-openapi-based-services.adoc[Orchestrating the OpenAPI services]
-**** xref:serverless-logic:service-orchestration/configuring-openapi-services-endpoints.adoc[Configuring the OpenAPI services endpoints]
-**** xref:serverless-logic:service-orchestration/orchestration-of-asyncapi-based-services.adoc[Orchestrating the AsyncAPI services]
+***** xref:serverless-logic:service-orchestration/configuring-openapi-services-endpoints.adoc[Configuring the OpenAPI services endpoints]
+***** xref:serverless-logic:service-orchestration/working-with-openapi-callbacks.adoc[OpenAPI callback in {product_name}]
**** xref:serverless-logic:service-orchestration/orchestration-of-grpc-services.adoc[Orchestration of gRPC based services]
-*** Eventing
-**** xref:serverless-logic:eventing/handling-events-on-workflows.adoc[Event State]
-**** xref:serverless-logic:eventing/consume-producing-events-with-kafka.adoc[Consuming and producing events using Apache Kafka]
-**** xref:serverless-logic:eventing/consume-produce-events-with-knative-eventing.adoc[Consuming and producing events on Knative Eventing]
+*** Event Orchestration
+**** xref:serverless-logic:eventing/orchestration-of-asyncapi-callbacks.adoc[Orchestrating AsyncAPI Services]
**** xref:serverless-logic:eventing/event-correlation-with-workflows.adoc[Event Correlation]
-**** xref:serverless-logic:eventing/working-with-callbacks.adoc[Callback State]
-**** xref:serverless-logic:eventing/working-with-openapi-callbacks.adoc[OpenAPI Callback]
+**** Quarkus
+***** xref:serverless-logic:eventing/consume-producing-events-with-kafka.adoc[Consuming and producing events using Apache Kafka]
+***** xref:serverless-logic:eventing/consume-produce-events-with-knative-eventing.adoc[Consuming and producing events on Knative Eventing]
*** Security
**** xref:serverless-logic:security/authention-support-for-openapi-services.adoc[Authentication for OpenAPI services]
**** xref:serverless-logic:security/orchestrating-third-party-services-with-oauth2.adoc[Orchestration of third-party services using OAuth 2.0 authentication]
-*** Testing and Troubleshooting
-**** xref:serverless-logic:testing-and-troubleshooting/mocking-http-cloudevents-with-wiremock.adoc[Mocking HTTP CloudEvents sink using WireMock]
-**** xref:serverless-logic:testing-and-troubleshooting/mocking-openapi-services-with-wiremock.adoc[Mocking OpenAPI services using WireMock]
-**** xref:serverless-logic:testing-and-troubleshooting/basic-integration-tests-with-restassured.adoc[Testing using REST Assured]
-//**** xref:serverless-logic:testing-and-troubleshooting/debugging-workflow-execution-runtime.adoc[Debugging the workflow execution in runtime]
-**** xref:serverless-logic:testing-and-troubleshooting/integration-tests-with-postgresql.adoc[Integration tests using PostgreSQL]
-//**** xref:serverless-logic:testing-and-troubleshooting/development-tools-for-troubleshooting.adoc[Development tools for troubleshooting]
+*** Executing, Testing and Troubleshooting
+**** Executing and Testing Workflows
+***** xref:serverless-logic:testing-and-troubleshooting/quarkus-dev-ui-extension/quarkus-dev-ui-overview.adoc[Developer UI]
+****** xref:serverless-logic:testing-and-troubleshooting/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-instances-page.adoc[Workflow Instances]
+****** xref:serverless-logic:testing-and-troubleshooting/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-definition-page.adoc[Workflow Definitions]
+****** xref:serverless-logic:testing-and-troubleshooting/quarkus-dev-ui-extension/quarkus-dev-ui-monitoring-page.adoc[Monitoring]
+****** xref:serverless-logic:testing-and-troubleshooting/quarkus-dev-ui-extension/quarkus-dev-ui-custom-dashboard-page.adoc[Dashboards]
+***** xref:serverless-logic:testing-and-troubleshooting/kn-plugin-workflow-overview.adoc[Command Line]
+**** Quarkus
+***** xref:serverless-logic:testing-and-troubleshooting/mocking-http-cloudevents-with-wiremock.adoc[Mocking HTTP CloudEvents sink using WireMock]
+***** xref:serverless-logic:testing-and-troubleshooting/mocking-openapi-services-with-wiremock.adoc[Mocking OpenAPI services using WireMock]
+***** xref:serverless-logic:testing-and-troubleshooting/basic-integration-tests-with-restassured.adoc[Testing using REST Assured]
+
*** Persistence
-**** xref:serverless-logic:persistence/persistence-with-postgresql.adoc[Running a workflow service using PostgreSQL]
-**** xref:serverless-logic:persistence/postgresql-flyway-migration.adoc[Migrate your PostgreSQL database]
-//**** xref:serverless-logic:persistence/workflow-database-for-db-admins.adoc[Workflows database for DB admins]
-// **** xref:serverless-logic:persistence/data-consistency.adoc[Data consistency]
+**** Quarkus
+***** xref:serverless-logic:persistence/persistence-with-postgresql.adoc[Running a workflow service using PostgreSQL]
+***** xref:serverless-logic:persistence/postgresql-flyway-migration.adoc[Migrate your PostgreSQL database]
+***** xref:serverless-logic:persistence/integration-tests-with-postgresql.adoc[Integration Tests with PostgreSQL]
*** xref:serverless-logic:cloud/index.adoc[Cloud]
**** Operator
***** xref:serverless-logic:cloud/operator/install-serverless-operator.adoc[Installation]
-***** xref:serverless-logic:cloud/operator/developing-workflows.adoc[Developing Workflows]
-***** xref:serverless-logic:cloud/operator/configuring-workflows.adoc[Configuring Workflows]
-***** xref:serverless-logic:cloud/operator/workflow-status-conditions.adoc[Workflows Status]
+***** xref:serverless-logic:cloud/operator/developing-workflows.adoc[Development Mode]
+***** xref:serverless-logic:cloud/operator/referencing-resource-files.adoc[Referencing Workflow Resources]
+***** xref:serverless-logic:cloud/operator/configuring-workflows.adoc[Configuration]
***** xref:serverless-logic:cloud/operator/build-and-deploy-workflows.adoc[Building and Deploying Workflows]
+***** xref:serverless-logic:cloud/operator/workflow-status-conditions.adoc[Custom Resource Status]
***** xref:serverless-logic:cloud/operator/known-issues.adoc[Known Issues]
**** Quarkus
-***** xref:serverless-logic:cloud/quarkus/build-workflow-image-with-quarkus-cli.adoc[Building workflow images using Quarkus CLI]
-// *** xref:serverless-logic:cloud/build-workflow-images-with-tekton.adoc[Building Workflow Images with Tekton Pipelines]
+***** xref:serverless-logic:cloud/quarkus/build-workflow-image-with-quarkus-cli.adoc[Building workflow images]
***** xref:serverless-logic:cloud/quarkus/deploying-on-minikube.adoc[Deploying Workflows on Minikube]
***** xref:serverless-logic:cloud/quarkus/deploying-on-kubernetes.adoc[Deploying Workflows on Kubernetes]
-// *** xref:serverless-logic:cloud/versioning-workflows-in-knative.adoc[Versioning workflows in Knative]
+***** xref:serverless-logic:cloud/quarkus/deploying-on-openshift.adoc[Deploying Workflows on OpenShift]
***** xref:serverless-logic:cloud/quarkus/kubernetes-service-discovery.adoc[Kubernetes service discovery]
-//**** xref:serverless-logic:cloud/build-and-deploy-with-serverless-operator-on-kubernetes.adoc[Buiding and deploying a {product_name} application on Kubernetes using the {product_name} Serverless Operator]
-
*** Integrations
**** xref:serverless-logic:integrations/camel-routes-integration.adoc[Integrating with Camel Routes]
@@ -90,15 +90,25 @@
**** xref:serverless-logic:integrations/expose-metrics-to-prometheus.adoc[Exposing the workflow base metrics to Prometheus]
// **** xref:serverless-logic:integrations/camel-k-integration.adoc[Integrating with Camel-K]
**** xref:serverless-logic:integrations/serverless-dashboard-with-runtime-data.adoc[Displaying workflow data in dashboards]
+
*** Supporting Services
-**** xref:serverless-logic:supporting-services/jobs-service.adoc[Job Service]
+**** Data Index
+***** xref:serverless-logic:supporting-services/data-index/data-index-concepts.adoc[Data Index Core Concepts]
+***** xref:serverless-logic:supporting-services/data-index/data-index-service.adoc[Data Index Standalone Service]
+***** xref:serverless-logic:supporting-services/data-index/data-index-quarkus-extensions.adoc[Data Index Quarkus Extensions]
+***** Operator
+****** xref:serverless-logic:supporting-services/data-index/operator/data-index-usecase-singleton.adoc[Deploying Data Index and {product_name} application]
+****** xref:serverless-logic:supporting-services/data-index/operator/data-index-usecase-multi.adoc[Deploying Data Index and multiple {product_name} applications]
+**** Job Service
+***** xref:serverless-logic:supporting-services/jobs-service/core-concepts.adoc[Job Service Introduction]
+***** xref:serverless-logic:supporting-services/jobs-service/quarkus-extensions.adoc[Job Service Quarkus Extensions]
*** Use Cases
**** xref:serverless-logic:use-cases/orchestration-based-saga-pattern.adoc[Saga Orchestration Example]
-// **** xref:serverless-logic:use-cases/newsletter-subscription-example.adoc[Newsletter subscription example]
**** xref:serverless-logic:use-cases/timeout-showcase-example.adoc[Timeout Example]
+
** xref:serverless-logic:release-notes.adoc[Release notes for Serverless Logic]
+
* Buildpacks for Serverless Functions
** xref:functions/serverless-functions-about.adoc[About buildpacks for OpenShift Serverless Functions]
** xref:functions/serverless-functions-buildpacks.adoc[Building and deploying functions on the cluster]
-** xref:functions/serverless-developing-go-functions.adoc[Developing Go functions]
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-dashboard-details.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-dashboard-details.png
new file mode 100644
index 00000000..5c20958e
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-dashboard-details.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-dashboard-list.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-dashboard-list.png
new file mode 100644
index 00000000..2f171b1c
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-dashboard-list.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-dashboard-storage-path.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-dashboard-storage-path.png
new file mode 100644
index 00000000..2f2282e1
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-dashboard-storage-path.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-filters-in-dashboards.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-filters-in-dashboards.png
new file mode 100644
index 00000000..ec35e17a
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-filters-in-dashboards.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-monitoring-summary.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-monitoring-summary.png
new file mode 100644
index 00000000..ea18d61d
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-monitoring-summary.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-monitoring-workflows.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-monitoring-workflows.png
new file mode 100644
index 00000000..b6f40cc0
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-monitoring-workflows.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-quarkus-dev-ui.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-quarkus-dev-ui.png
new file mode 100644
index 00000000..5e5aa290
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-quarkus-dev-ui.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-start-workflow-fail-alert.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-start-workflow-fail-alert.png
new file mode 100644
index 00000000..60346651
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-start-workflow-fail-alert.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-start-workflow-form.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-start-workflow-form.png
new file mode 100644
index 00000000..2015d992
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-start-workflow-form.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-start-workflow-no-form.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-start-workflow-no-form.png
new file mode 100644
index 00000000..eb799286
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-start-workflow-no-form.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-start-workflow-success-alert.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-start-workflow-success-alert.png
new file mode 100644
index 00000000..2c794004
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-start-workflow-success-alert.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-trigger-cloud-event-start-error-alert.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-trigger-cloud-event-start-error-alert.png
new file mode 100644
index 00000000..2510f97e
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-trigger-cloud-event-start-error-alert.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-trigger-cloud-event-start-success-alert.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-trigger-cloud-event-start-success-alert.png
new file mode 100644
index 00000000..da69193e
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-trigger-cloud-event-start-success-alert.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-trigger-cloud-events.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-trigger-cloud-events.png
new file mode 100644
index 00000000..83786172
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-trigger-cloud-events.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-definitions-filter.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-definitions-filter.png
new file mode 100644
index 00000000..3f461a51
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-definitions-filter.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-definitions.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-definitions.png
new file mode 100644
index 00000000..06a60d14
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-definitions.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-details-page.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-details-page.png
new file mode 100644
index 00000000..115e1e0d
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-details-page.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-details-panel.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-details-panel.png
new file mode 100644
index 00000000..e325bb11
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-details-panel.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instance-completed.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instance-completed.png
new file mode 100644
index 00000000..3400b684
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instance-completed.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instances-cloud-event-failure.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instances-cloud-event-failure.png
new file mode 100644
index 00000000..7ff039c8
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instances-cloud-event-failure.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instances-cloud-event-success.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instances-cloud-event-success.png
new file mode 100644
index 00000000..7b599000
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instances-cloud-event-success.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instances-cloud-event.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instances-cloud-event.png
new file mode 100644
index 00000000..95ccf6cd
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instances-cloud-event.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instances-empty.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instances-empty.png
new file mode 100644
index 00000000..8b6f274f
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instances-empty.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instances-filters.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instances-filters.png
new file mode 100644
index 00000000..b1bcbb24
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instances-filters.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instances-kebab.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instances-kebab.png
new file mode 100644
index 00000000..db1db3a1
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instances-kebab.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-mermaid-diagram-panel.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-mermaid-diagram-panel.png
new file mode 100644
index 00000000..bd37035b
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-mermaid-diagram-panel.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-stunner-diagram-panel.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-stunner-diagram-panel.png
new file mode 100644
index 00000000..1e3a5e56
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-stunner-diagram-panel.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-timeline-panel.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-timeline-panel.png
new file mode 100644
index 00000000..abfeb190
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-timeline-panel.png differ
diff --git a/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-variables-panel.png b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-variables-panel.png
new file mode 100644
index 00000000..612bd8c7
Binary files /dev/null and b/modules/serverless-logic/assets/images/testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-variables-panel.png differ
diff --git a/modules/serverless-logic/pages/_common-content/downstream-post-create-project.adoc b/modules/serverless-logic/pages/_common-content/downstream-post-create-project.adoc
index 09ef72ae..48aad1d6 100644
--- a/modules/serverless-logic/pages/_common-content/downstream-post-create-project.adoc
+++ b/modules/serverless-logic/pages/_common-content/downstream-post-create-project.adoc
@@ -45,25 +45,3 @@
...
----
-+
-// Needed only for Developer Preview 3
-. *[Optional]* If you want to use Binary CloudEvents (where the event data is stored in the message body, and event attributes are stored as part of message metadata), you need to upgrade the version of the `quarkus-reactive-messaging-http` dependency:
-+
-.Override of `quarkus-reactive-messaging-http` in `pom.xml` file
-[source,xml,subs="attributes+"]
-----
-<project>
-  ...
-  <dependencies>
-    ...
-    <dependency>
-      <groupId>io.quarkiverse.reactivemessaging.http</groupId>
-      <artifactId>quarkus-reactive-messaging-http</artifactId>
-      <version>1.0.8</version>
-    </dependency>
-    ...
-  </dependencies>
-</project>
-----
diff --git a/modules/serverless-logic/pages/_common-content/report-issue.adoc b/modules/serverless-logic/pages/_common-content/report-issue.adoc
index a3e8a6f7..58a21f61 100644
--- a/modules/serverless-logic/pages/_common-content/report-issue.adoc
+++ b/modules/serverless-logic/pages/_common-content/report-issue.adoc
@@ -1,5 +1,4 @@
== _**Found an issue?**_
-:new_issue: https://github.com/kiegroup/kogito-docs/issues/new
If you find an issue or any misleading information, please feel free to report it link:{docs_issues_url}[here].
We really appreciate it!
diff --git a/modules/serverless-logic/pages/cloud/common/_common_proc_deploy_kubectl_oc.adoc b/modules/serverless-logic/pages/cloud/common/_common_proc_deploy_kubectl_oc.adoc
new file mode 100644
index 00000000..dcb37eab
--- /dev/null
+++ b/modules/serverless-logic/pages/cloud/common/_common_proc_deploy_kubectl_oc.adoc
@@ -0,0 +1,141 @@
+:quarkus_k8s_deploy_url: https://github.com/quarkusio/quarkus/issues/26385
+
+You can also use the `{command_line_tool}` command-line interface and plain Kubernetes objects to deploy your workflow application. +
+Instead of creating the Knative `yaml|json` descriptors manually, you can leverage the Quarkus Kubernetes extension and the {product_name} Knative add-on to generate them.
+
+.Prerequisites
+* A {product_name} workflow application that is ready to use.
+* The `{command_line_tool_name}` command-line tool is installed.
+* (Optional) Quarkus CLI is installed. +
+For more information about installing the Quarkus CLI, see link:{quarkus_cli_url}#installing-the-cli[Installing the Quarkus CLI].
+
+.Procedure
+. Add the Quarkus extensions to generate `knative` `yaml|json` descriptors.
++
+--
+To use the Quarkus Kubernetes extension and the {product_name} Knative add-on, ensure that the following dependencies are available in your `pom.xml` file or Gradle build file:
+
+[tabs]
+====
+pom.xml::
++
+[source,xml,subs="attributes+"]
+----
+<dependency>
+  <groupId>org.kie.kogito</groupId>
+  <artifactId>kogito-addons-quarkus-knative-eventing</artifactId>
+</dependency>
+<dependency>
+  <groupId>io.quarkus</groupId>
+  <artifactId>{quarkus-k8s-plugin}</artifactId>
+</dependency>
+----
+Gradle::
++
+[source,shell,subs="attributes+"]
+----
+implementation 'io.quarkus:{quarkus-k8s-plugin}:{quarkus_version}'
+implementation 'org.kie.kogito:kogito-addons-quarkus-knative-eventing:{page-component-version}'
+----
+Quarkus CLI::
++
+[source,shell,subs="attributes+"]
+----
+quarkus ext add org.kie.kogito:kogito-addons-quarkus-knative-eventing {quarkus-k8s-plugin}
+----
+====
+--
+
+. To generate the `knative` `yaml|json` descriptors, set the following properties in the `application.properties` file of your workflow application:
++
+--
+.System properties to generate knative descriptors
+[source,shell,subs="attributes+"]
+----
+quarkus.kubernetes.deployment-target=knative
+quarkus.knative.name=hello-workflow
+----
+--
+
+. Build your workflow application.
++
+--
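+The descriptors are generated as part of a normal application build. For reference, the following is a minimal sketch of that step, assuming a Maven project; the Quarkus CLI `quarkus build` command is an equivalent alternative:
+
+.Example of building the workflow application
+[source,shell]
+----
+# packages the application and generates the descriptors under target/kubernetes/
+mvn clean package
+----
+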
+Once you have built your application, you can find the generated descriptor files in the `target/kubernetes` directory:
+
+* `knative.json`
+* `knative.yml`
+
+[IMPORTANT]
+====
+The image used in this section is the one built in the following guide: xref:cloud/quarkus/build-workflow-image-with-quarkus-cli.adoc[Build Workflow Image with Quarkus CLI].
+====
+
+Following is an example of the generated files:
+
+.Knative descriptor example for a workflow application
+[source,yaml,subs="attributes+"]
+----
+---
+apiVersion: serving.knative.dev/v1
+kind: Service
+metadata:
+ annotations:
+ app.quarkus.io/commit-id: 06c3fe8e2dfc42e2211cbcc41224f5a3d6bd1f26
+ app.quarkus.io/build-timestamp: 2022-06-23 - 23:53:38 +0000
+ labels:
+ app.kubernetes.io/name: hello-workflow
+ name: hello-workflow
+spec:
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: hello-workflow
+ spec:
+ containerConcurrency: 0
+ containers:
+ - image: {k8s_registry}/{default_namespace}/serverless-workflow-greeting-quarkus:1.0-native
+ livenessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /q/health/live
+ port: null
+ scheme: HTTP
+ initialDelaySeconds: 0
+ periodSeconds: 30
+ successThreshold: 1
+ timeoutSeconds: 10
+ name: hello-workflow
+ ports:
+ - containerPort: 8080
+ name: http1
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /q/health/ready
+ port: null
+ scheme: HTTP
+ initialDelaySeconds: 0
+ periodSeconds: 30
+ successThreshold: 1
+ timeoutSeconds: 10
+----
+
+[NOTE]
+====
+Once the files are generated, you must add the `imagePullPolicy` manually before deploying the workflow application.
+====
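+
+For illustration, the following is a minimal sketch of that manual edit in the generated descriptor, assuming the `IfNotPresent` policy; adjust the value to your registry setup:
+
+.Setting `imagePullPolicy` in the generated descriptor
+[source,yaml,subs="attributes+"]
+----
+      containers:
+        - image: {k8s_registry}/{default_namespace}/serverless-workflow-greeting-quarkus:1.0-native
+          # added manually after generation
+          imagePullPolicy: IfNotPresent
+----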
+--
+
+. Enter the following command to deploy the workflow application using `{command_line_tool}`:
++
+--
+.Deploy the workflow application using `{command_line_tool}`
+[source,shell,subs="attributes+"]
+----
+{command_line_tool} apply -f target/kubernetes/knative.yml
+----
+--
+
+// verify deployed swf
+include::_verify_if_swf_is_deployed.adoc[]
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/cloud/common/_create_namespace_and_deploy_info.adoc b/modules/serverless-logic/pages/cloud/common/_create_namespace_and_deploy_info.adoc
index bec325f4..81d49e21 100644
--- a/modules/serverless-logic/pages/cloud/common/_create_namespace_and_deploy_info.adoc
+++ b/modules/serverless-logic/pages/cloud/common/_create_namespace_and_deploy_info.adoc
@@ -34,7 +34,6 @@ You can read the further sections on the different procedures to deploy your {pr
[NOTE]
====
-You can use the native image due to the faster startup. +
-For more information about installing the workflow application, see xref:cloud/quarkus/build-workflow-image-with-quarkus-cli.adoc[Building workflow images using Quarkus CLI] document.
+You can use the native image for faster startup times.
====
--
diff --git a/modules/serverless-logic/pages/cloud/common/_deploy_workflow_application_requisites.adoc b/modules/serverless-logic/pages/cloud/common/_deploy_workflow_application_requisites.adoc
index 510b15ac..feda2539 100644
--- a/modules/serverless-logic/pages/cloud/common/_deploy_workflow_application_requisites.adoc
+++ b/modules/serverless-logic/pages/cloud/common/_deploy_workflow_application_requisites.adoc
@@ -5,4 +5,5 @@ For more information, see {knative_procedure}.
* Knative CLI is installed.
* (Optional) Quarkus CLI is installed. +
For more information, see link:{quarkus_cli_url}[Building Quarkus Apps with Quarkus command line interface (CLI)].
-* {product_name} application container is ready.
+* Your xref:cloud/quarkus/build-workflow-image-with-quarkus-cli.adoc[{product_name} application] is ready to use.
+
diff --git a/modules/serverless-logic/pages/cloud/common/_prerequisites.adoc b/modules/serverless-logic/pages/cloud/common/_prerequisites.adoc
index 231c80a9..abbb39ec 100644
--- a/modules/serverless-logic/pages/cloud/common/_prerequisites.adoc
+++ b/modules/serverless-logic/pages/cloud/common/_prerequisites.adoc
@@ -1,4 +1,3 @@
-:kn_cli_url: https://knative.dev/docs/client/install-kn/
.Prerequisites
* Your {product_name} application is ready to use. +
@@ -7,9 +6,9 @@ For more information about building the application container, see xref:cloud/qu
* {environment_prereq}
* `kubectl` {kubectl_prereq}
* Knative CLI is installed. +
-For more information, see link:{kn_cli_url}[Install the Knative CLI].
+For more information, see link:{kn_cli_install_url}[Install the Knative CLI].
* Knative workflow CLI is installed. +
-For more information see xref:tooling/kn-plugin-workflow-overview.adoc[Serverless Workflow plug-in for Knative CLI].
+For more information see xref:testing-and-troubleshooting/kn-plugin-workflow-overview.adoc[Serverless Workflow plug-in for Knative CLI].
* (Optional) Quarkus CLI is installed. +
For more information, see link:{quarkus_cli_url}[Building Quarkus Apps with Quarkus command line interface (CLI)].
diff --git a/modules/serverless-logic/pages/cloud/common/_proc_deploy_sw_kn_cli.adoc b/modules/serverless-logic/pages/cloud/common/_proc_deploy_sw_kn_cli.adoc
index c00b3aa4..5d519000 100644
--- a/modules/serverless-logic/pages/cloud/common/_proc_deploy_sw_kn_cli.adoc
+++ b/modules/serverless-logic/pages/cloud/common/_proc_deploy_sw_kn_cli.adoc
@@ -1,13 +1,12 @@
[[proc-deploy-sw-application-knative-cli]]
=== Deploying your workflow application using Knative CLI
-Once you have pushed your workflow application into the {registry} registry, you can use the command-line tools, such
-as Knative CLI or `kubectl` to initiate the deployment process.
+Once you have pushed your workflow application into the {registry} registry, you can use command-line tools, such as the Knative CLI or `{command_line_tool_name}`, to initiate the deployment process.
.Prerequisites
* Workflow application is installed.
* Knative CLI is installed. +
-For more information, see link:{kn_cli_url}[Install the Knative CLI].
+For more information, see link:{kn_cli_install_url}[Install the Knative CLI].
.Procedure
. In a command terminal, execute the following command to deploy your workflow application using Knative CLI:
@@ -16,8 +15,8 @@ For more information, see link:{kn_cli_url}[Install the Knative CLI].
.Example of deploying workflow application using Knative CLI
[source,shell,subs="attributes+"]
----
-kn service create hello \
- --image={k8s_registry}/serverless-workflow-greeting-quarkus:1.0 \
+kn service create hello-workflow \
+ --image={k8s_registry}/{default_namespace}/serverless-workflow-greeting-quarkus:1.0 \
--pull-policy=IfNotPresent \
--port 8080
----
@@ -25,20 +24,25 @@ kn service create hello \
.Example output
[source,shell]
----
-Creating service 'hello' in namespace 'serverless-workflow-greeting-quarkus':
+Creating service 'hello-workflow' in namespace 'serverless-workflow-greeting-quarkus':
0.066s The Route is still working to reflect the latest desired specification.
0.099s ...
- 0.322s Configuration "hello" is waiting for a Revision to become ready.
+ 0.322s Configuration "hello-workflow" is waiting for a Revision to become ready.
4.885s ...
5.061s Ingress has not yet been reconciled.
5.322s Waiting for load balancer to be ready
5.460s Ready to serve.
-Service 'hello' created to latest revision 'hello-00001' is available at URL:
-http://hello.serverless-workflow-greeting-quarkus.10.103.94.37.sslip.io
+Service 'hello-workflow' created to latest revision 'hello-workflow-00001' is available at URL:
+http://hello-workflow.serverless-workflow-greeting-quarkus.10.103.94.37.sslip.io
----
--
+[NOTE]
+====
+Depending on the cluster where you deployed the workflow application, the service URL might differ. Check the command output and use the correct URL in the following steps.
+====
+
// verify deployed swf
include::_verify_if_swf_is_deployed.adoc[]
diff --git a/modules/serverless-logic/pages/cloud/common/_proc_deploy_sw_kubectl.adoc b/modules/serverless-logic/pages/cloud/common/_proc_deploy_sw_kubectl.adoc
index dc639033..01af593f 100644
--- a/modules/serverless-logic/pages/cloud/common/_proc_deploy_sw_kubectl.adoc
+++ b/modules/serverless-logic/pages/cloud/common/_proc_deploy_sw_kubectl.adoc
@@ -1,141 +1,6 @@
[[proc-deploy-sw-application-kubectl]]
-=== Deploying your workflow application using `kubectl`
-
-You can also use `kubectl` command-line interface and plain Kubernetes objects to deploy your workflow application. +
-And, instead of creating `knative` `yaml|json` descriptors, you can leverage the Quarkus Kubernetes extension and
-Kogito Knative add-on to generate the descriptors.
-
-.Prerequisites
-* Workflow application ready to use.
-* `kubectl` command-line tool is installed.
-* (Optional) Quarkus CLI is installed +
-For more information about installing the Quarkus CLI, see link:{quarkus_cli_url}#installing-the-cli[Installing the Quarkus CLI].
-
-.Procedure
-. Add the Quarkus extensions to generate `knative` `yaml|json` descriptors.
-+
---
-To use the Quarkus Kubernetes extension and Kogito Knative add-on, ensure that the following dependencies are available in the `pom.xml` file and Gradle:
-
-[tabs]
-====
-pom.xml::
-+
-[source,xml]
-----
-<dependency>
-  <groupId>org.kie.kogito</groupId>
-  <artifactId>kogito-addons-quarkus-knative-eventing</artifactId>
-</dependency>
-<dependency>
-  <groupId>io.quarkus</groupId>
-  <artifactId>quarkus-kubernetes</artifactId>
-</dependency>
-----
-Gradle::
-+
-[source,shell,subs="attributes+"]
-----
-quarkus-kubernetes 'io.quarkus:quarkus-kubernetes:{quarkus_version}'
-quarkus-kubernetes 'org.kie.kogito:kogito-addons-quarkus-knative-eventing:{page-component-version}'
-----
-====
---
-
-. To generate the `knative` `yaml|json` descriptors, set the following properties in the `application.properties`
-file of your workflow application:
-+
---
-.System properties to generate knative descriptors
-[source,shell,subs="attributes+"]
-----
-quarkus.kubernetes.deployment-target=knative
-quarkus.knative.name=greeting-quarkus-kubectl
-----
---
-
-. Build your workflow application.
-+
---
-Once you have built your application, you can find the generated descriptors files in the `target/kubernetes` directory:
-
-* `knative.json`
-* `knative.yml`
-
-Following is an example of the generated files:
-
-.Knative descriptor example for a workflow application
-[source,yaml,subs="attributes+"]
-----
----
-apiVersion: serving.knative.dev/v1
-kind: Service
-metadata:
- annotations:
- app.quarkus.io/commit-id: 06c3fe8e2dfc42e2211cbcc41224f5a3d6bd1f26
- app.quarkus.io/build-timestamp: 2022-06-23 - 23:53:38 +0000
- labels:
- app.kubernetes.io/name: greeting-quarkus-kubectl
- name: greeting-quarkus-kubectl
-spec:
- template:
- metadata:
- labels:
- app.kubernetes.io/name: greeting-quarkus-kubectl
- spec:
- containerConcurrency: 0
- containers:
- - image: {k8s_registry}/serverless-workflow-greeting-quarkus:1.0-native
- livenessProbe:
- failureThreshold: 3
- httpGet:
- path: /q/health/live
- port: null
- scheme: HTTP
- initialDelaySeconds: 0
- periodSeconds: 30
- successThreshold: 1
- timeoutSeconds: 10
- name: greeting-quarkus-kubectl
- ports:
- - containerPort: 8080
- name: http1
- protocol: TCP
- readinessProbe:
- failureThreshold: 3
- httpGet:
- path: /q/health/ready
- port: null
- scheme: HTTP
- initialDelaySeconds: 0
- periodSeconds: 30
- successThreshold: 1
- timeoutSeconds: 10
-----
-
-[NOTE]
-====
-Once the files are generated, you might be required to add the `imagePullPolicy` manually before deploying the workflow application.
-====
-
-[WARNING]
-====
-Some system properties are not working properly due to a bug in the `Decorate API`. For more information about the bug, see the link:https://github.com/quarkusio/quarkus/issues/23832[Quarkus issue].
-
-There is a small bug on the `Decorate API` where some system properties are not taking effect, for more information take a look at this link:https://github.com/quarkusio/quarkus/issues/23832[Quarkus issue]. After the file generation, you might be required to add the `imagePullPolicy` manually before deploying it.
-====
---
-
-. Enter the following command to deploy the workflow application using `kubectl`:
-+
---
-.Deploy the workflow application using `kubectl`
-[source,shell]
-----
-kubectl apply -f target/kubernetes/knative.yml
-----
---
+=== Deploying your workflow application using {command_line_tool_name}
// verify deployed swf
-include::_verify_if_swf_is_deployed.adoc[]
+include::_common_proc_deploy_kubectl_oc.adoc[]
diff --git a/modules/serverless-logic/pages/cloud/common/_proc_deploy_sw_oc.adoc b/modules/serverless-logic/pages/cloud/common/_proc_deploy_sw_oc.adoc
new file mode 100644
index 00000000..9e6921a0
--- /dev/null
+++ b/modules/serverless-logic/pages/cloud/common/_proc_deploy_sw_oc.adoc
@@ -0,0 +1,6 @@
+[[proc-deploy-sw-application-openshift-client]]
+=== Deploying your workflow application using {command_line_tool_name}
+
+// verify deployed swf
+include::_common_proc_deploy_kubectl_oc.adoc[]
+
diff --git a/modules/serverless-logic/pages/cloud/common/_proc_deploy_sw_quarkus_cli.adoc b/modules/serverless-logic/pages/cloud/common/_proc_deploy_sw_quarkus_cli.adoc
new file mode 100644
index 00000000..475b2b59
--- /dev/null
+++ b/modules/serverless-logic/pages/cloud/common/_proc_deploy_sw_quarkus_cli.adoc
@@ -0,0 +1,64 @@
+[[proc-deploy-sw-application-quarkus-cli]]
+=== Deploying your workflow application using Quarkus CLI
+
+.Prerequisites
+* Workflow application is ready.
+* Quarkus CLI is installed. +
+ For more information, see link:{quarkus_cli_url}[Building Quarkus Apps with Quarkus command line interface (CLI)].
+
+
+.Procedure
+. Add the Quarkus extensions to deploy the `knative` service.
++
+--
+You can add the {platform} and the {product_name} Knative extensions to your project with the Quarkus CLI:
+
+.Add {platform} and {product_name} Knative extensions to the project with Quarkus CLI
+[source,shell,subs="attributes+"]
+----
+quarkus ext add {quarkus-k8s-plugin} kogito-addons-quarkus-knative-eventing
+----
+--
+. To deploy the workflow application using Quarkus CLI, set the following system properties in `application.properties` file:
++
+--
+.Required system properties
+[source,properties,subs="attributes+"]
+----
+quarkus.knative.name=hello-workflow <1>
+quarkus.kubernetes.deployment-target=knative <2>
+quarkus.kubernetes.deploy=true <3>
+quarkus.container-image.push={container_push_prop} <4>
+quarkus.container-image.group={default_namespace} <5>
+quarkus.container-image.registry={k8s_registry} <6>
+quarkus.container-image.tag=1.0-SNAPSHOT <7>
+----
+
+<1> Property to set the Knative service name
+<2> Property to set the target deployment type
+<3> Property to set whether or not to deploy on an active {platform} cluster
+<4> {container_push}
+<5> Property to define the registry group/namespace that the built image belongs to
+<6> Property to define the registry to which the built image is pushed
+<7> Property to set the tag of the built container image
+
+[IMPORTANT]
+====
+This feature works with Quarkus 2.10.2.Final or later. For more information, see the
+link:{quarkus_k8s_deploy_url}[related Quarkus issue].
+====
+--
+
+.Build and Deploy your workflow application
+[source,shell,subs="attributes+"]
+----
+quarkus build -DskipTests
+----
+
+[NOTE]
+====
+The `kogito-examples` repository already has these extensions added by default; they can be activated with the `container` Maven profile.
+====
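+
+For example, the following is a sketch of building one of the examples with that profile enabled, assuming a standard Maven build:
+
+.Building an example with the `container` profile
+[source,shell]
+----
+# the container profile enables the container image build for the example
+mvn clean package -Pcontainer
+----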
+
+// verify deployed swf
+include::../common/_verify_if_swf_is_deployed.adoc[]
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/cloud/common/_verify_if_swf_is_deployed.adoc b/modules/serverless-logic/pages/cloud/common/_verify_if_swf_is_deployed.adoc
index 7192d01e..03371a66 100644
--- a/modules/serverless-logic/pages/cloud/common/_verify_if_swf_is_deployed.adoc
+++ b/modules/serverless-logic/pages/cloud/common/_verify_if_swf_is_deployed.adoc
@@ -2,17 +2,17 @@ Verify if the workflow application is deployed correctly:
--
[tabs]
====
-On kubectl::
+On {command_line_tool_name}::
+
-[source,shell]
+[source,shell,subs="attributes+"]
----
-kubectl get services.serving.knative.dev greeting-quarkus-cli
+{command_line_tool} get services.serving.knative.dev hello-workflow
----
On Knative CLI::
+
[source,shell]
----
-kn service list greeting-quarkus-cli
+kn service list hello-workflow
----
====
--
@@ -21,8 +21,8 @@ kn service list greeting-quarkus-cli
.Example output
[source,shell]
----
-NAME URL LATEST AGE CONDITIONS READY REASON
-greeting-quarkus-cli http://greeting-quarkus-cli.serverless-workflow-greeting-quarkus.10.103.94.37.sslip.io greeting-quarkus-cli-00001 7m6s 3 OK / 3 True
+NAME URL LATEST AGE CONDITIONS READY REASON
+hello-workflow http://hello-workflow.serverless-workflow-greeting-quarkus.10.103.94.37.sslip.io hello-workflow-00001 7m6s 3 OK / 3 True
----
--
@@ -31,7 +31,7 @@ greeting-quarkus-cli http://greeting-quarkus-cli.serverless-workflow-greeting-
.Example request
[source,shell]
----
-curl -X POST -H 'Content-Type:application/json' -H 'Accept:application/json' -d '{"name": "John", "language": "English"}' http://hello.serverless-workflow-greeting-quarkus.10.103.94.37.sslip.io/jsongreet
+curl -X POST -H 'Content-Type:application/json' -H 'Accept:application/json' -d '{"name": "John", "language": "English"}' http://hello-workflow.serverless-workflow-greeting-quarkus.10.103.94.37.sslip.io/jsongreet
----
.Example response
diff --git a/modules/serverless-logic/pages/cloud/index.adoc b/modules/serverless-logic/pages/cloud/index.adoc
index 203f6439..151eea6b 100644
--- a/modules/serverless-logic/pages/cloud/index.adoc
+++ b/modules/serverless-logic/pages/cloud/index.adoc
@@ -17,91 +17,115 @@ Eventually these two options will converge, the {operator_name} will also be abl
====
[.card-section]
-== Kubernetes with Quarkus
+== Kubernetes with the Operator
-For Java developers, you can use Quarkus and a few add-ons to help you build and deploy the application in a Kubernetes cluster. {product_name} also generates basic Kubernetes objects YAML files to help you getting started. The application should be managed by a Kubernetes administrator.
+For developers looking for a native Kubernetes approach, where you model workflows using YAML definitions and deploy them directly, you can use the {operator_name}. The operator registers a new Kubernetes resource in the cluster to manage your workflow development iteration cycle and the composition of services and events. The application is managed by the operator.
[.card]
--
[.card-title]
-xref:cloud/quarkus/build-workflow-image-with-quarkus-cli.adoc[Building workflow images using Quarkus CLI]
+xref:serverless-logic:cloud/operator/install-serverless-operator.adoc[]
[.card-description]
-Learn how to build images for your workflow applications using Quarkus CLI
+Learn how to install the {operator_name} in a Kubernetes cluster
--
[.card]
--
[.card-title]
-xref:cloud/quarkus/kubernetes-service-discovery.adoc[Kubernetes service discovery in {product_name}]
+xref:serverless-logic:cloud/operator/developing-workflows.adoc[]
[.card-description]
-Learn what is and how the Kubernetes service discovery for workflow application configuration works
+Learn how to deploy a workflow for development purposes
--
[.card]
--
[.card-title]
-xref:cloud/quarkus/deploying-on-minikube.adoc[Deploying your {product_name} application on Minikube]
+xref:serverless-logic:cloud/operator/referencing-resource-files.adoc[]
[.card-description]
-Learn how to deploy your workflow application on Minikube for local tests and development
+Learn how to reference resource files in your workflow application
--
[.card]
--
[.card-title]
-xref:cloud/quarkus/deploying-on-kubernetes.adoc[Deploying your {product_name} application on Kubernetes]
+xref:serverless-logic:cloud/operator/configuring-workflows.adoc[]
[.card-description]
-Learn how to deploy your workflow application on Kubernetes
+Learn how to configure workflows deployed with {operator_name}
--
-[.card-section]
-== Kubernetes with the Operator
+[.card]
+--
+[.card-title]
+xref:serverless-logic:cloud/operator/workflow-status-conditions.adoc[]
+[.card-description]
+Learn to interpret the workflow resource status conditions
+--
-For developers that are looking for a native Kubernetes approach where you can model workflows using YAML definitions and directly deploy them, you can use the {operator_name}. The operator registers a new Kubernetes resource in the cluster to manage your workflow development iteration cycle and composition of services and events. The application is managed by the operator.
+[.card]
+--
+[.card-title]
+xref:serverless-logic:cloud/operator/build-and-deploy-workflows.adoc[]
+[.card-description]
+Learn how to build and deploy workflow services with {operator_name}
+--
[.card]
--
[.card-title]
-xref:cloud/operator/install-serverless-operator.adoc[]
+xref:serverless-logic:cloud/operator/known-issues.adoc[]
[.card-description]
-Learn how to install the {operator_name} in a Kubernetes cluster
+Learn about the known issues and feature roadmap of the {operator_name}
--
[.card]
--
[.card-title]
-xref:cloud/operator/developing-workflows.adoc[]
+xref:serverless-logic:cloud/operator/migration-guide.adoc[]
[.card-description]
-Learn how to deploy a workflow for development purposes
+Migration guides
--
+[.card-section]
+== Kubernetes with Quarkus
+
+For Java developers, you can use Quarkus and a few add-ons to help you build and deploy the application in a Kubernetes cluster. {product_name} also generates basic Kubernetes object YAML files to help you get started. The application should be managed by a Kubernetes administrator.
+
[.card]
--
[.card-title]
-xref:cloud/operator/configuring-workflows.adoc[]
+xref:cloud/quarkus/build-workflow-image-with-quarkus-cli.adoc[Building workflow images using Quarkus CLI]
[.card-description]
-Learn how to configure workflows deployed with {operator_name}
+Learn how to build images for your workflow applications using Quarkus CLI
--
[.card]
--
[.card-title]
-xref:cloud/operator/workflow-status-conditions.adoc[]
+xref:cloud/quarkus/kubernetes-service-discovery.adoc[Kubernetes service discovery in {product_name}]
[.card-description]
-Learn to interpret the workflow resource status conditions
+Learn what the Kubernetes service discovery for workflow application configuration is and how it works
--
[.card]
--
[.card-title]
-xref:cloud/operator/build-and-deploy-workflows.adoc[]
+xref:cloud/quarkus/deploying-on-openshift.adoc[Deploying your {product_name} application on OpenShift]
[.card-description]
-Learn how to build and deploy workflow services with {operator_name}
+Learn how to deploy your workflow application on an OpenShift cluster
--
[.card]
--
[.card-title]
-xref:cloud/operator/known-issues.adoc[]
+xref:cloud/quarkus/deploying-on-minikube.adoc[Deploying your {product_name} application on Minikube]
[.card-description]
-Learn about the known issues and feature roadmap of the {operator_name}
+Learn how to deploy your workflow application on Minikube for local tests and development
+--
+
+[.card]
+--
+[.card-title]
+xref:serverless-logic:cloud/quarkus/deploying-on-kubernetes.adoc[Deploying your {product_name} application on Kubernetes]
+[.card-description]
+Learn how to deploy your workflow application on Kubernetes
--
diff --git a/modules/serverless-logic/pages/cloud/operator/build-and-deploy-workflows.adoc b/modules/serverless-logic/pages/cloud/operator/build-and-deploy-workflows.adoc
index e77457b6..9eeb2ae5 100644
--- a/modules/serverless-logic/pages/cloud/operator/build-and-deploy-workflows.adoc
+++ b/modules/serverless-logic/pages/cloud/operator/build-and-deploy-workflows.adoc
@@ -2,13 +2,25 @@
:compat-mode!:
// Metadata:
:description: Build and deploy with {operator_name}
-:keywords: kogito, workflow, serverless, operator, kubernetes, minikube, openshift, containers
+:keywords: kogito, sonataflow, workflow, serverless, operator, kubernetes, minikube, openshift, containers
// links
:kogito_serverless_operator_url: https://github.com/kiegroup/kogito-serverless-operator/
:kogito_greeting_example_url: https://github.com/kiegroup/kogito-examples/tree/stable/serverless-workflow-examples/serverless-workflow-greeting-quarkus
:kaniko_issue_url: https://github.com/GoogleContainerTools/kaniko/issues/2201
+:kaniko_url: https://github.com/GoogleContainerTools/kaniko
+:openshift_build_url: https://docs.openshift.com/container-platform/4.13/cicd/builds/understanding-image-builds.html
+:openshift_route_new_url: https://docs.openshift.com/container-platform/4.13/networking/routes/route-configuration.html#nw-creating-a-route_route-configuration
+:kubernetes_resource_management_url: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+:kubernetes_envvar_url: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/
+:minikube_registry_url: https://minikube.sigs.k8s.io/docs/handbook/registry/#enabling-insecure-registries
+:docker_doc_arg_url: https://docs.docker.com/engine/reference/builder/#arg
+:quarkus_extensions_url: https://quarkus.io/extensions/
-This document describes how to build and deploy your workflow on a Kubernetes cluster using the link:{kogito_serverless_operator_url}[{operator_name}] only by having a workflow definition.
+This document describes how to build and deploy your workflow on a cluster with the link:{kogito_serverless_operator_url}[{operator_name}], using only a `SonataFlow` custom resource.
+
+Every time you change the workflow definition, the system (re)builds a new immutable version of the workflow. If you're still in the development phase, see the xref:cloud/operator/developing-workflows.adoc[] guide.
+
+Follow the <<building-kubernetes>> or <<building-openshift>> sections of this document based on the cluster you wish to build your workflows on.
[IMPORTANT]
====
@@ -16,115 +28,495 @@ This document describes how to build and deploy your workflow on a Kubernetes cl
====
.Prerequisites
-* A workflow definition.
-* The {operator_name} installed. See xref:cloud/operator/install-serverless-operator.adoc[]
+* A Workflow definition.
+* The {operator_name} installed. See xref:cloud/operator/install-serverless-operator.adoc[] guide
-== Preparing for the build
+[#configure-build-system]
+== Configuring the build system
-You should follow these steps to create a container that you can deploy as a service on Kubernetes.
+The operator can build workflows on Kubernetes or OpenShift. On Kubernetes it uses link:{kaniko_url}[Kaniko], and on OpenShift a link:{openshift_build_url}[standard BuildConfig]. The operator build system is not tailored for advanced use cases and supports only a few customizations.
-=== Create a namespace for the building phase
+=== Using another Workflow base builder image
-Create a new namespace that will hold all the resources that the operator will create (pods, deployments, services, secretes, config map, and Custom Resources) in this guide.
+If your scenario has strict policies for image usage, such as security or hardening constraints, you can replace the default image used by the operator to build the final workflow container image. Alternatively, you might want to test a nightly build with a bug fix or a custom image containing your customizations.
-.Create a namespace for the application to build & run in
+By default, the operator will use the builder image distributed upstream to build the final workflow image. You can change this image by editing the `SonataFlowPlatform` custom resource in the namespace where you deployed your workflows:
+
+.Patching the current SonataFlowPlatform with the new builder image
[source,bash,subs="attributes+"]
----
-kubectl create namespace kogito-workflows
-# set the kogito-workflows namespace to your context
-kubectl config set-context --current --namespace=kogito-workflows
+# use `kubectl get sonataflowplatform` to get the SonataFlowPlatform name
+kubectl patch sonataflowplatform <name> --patch 'spec:\n  build:\n    config:\n      baseImage: <your new image full name with tag>' -n <your_namespace>
+----
+
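+If you prefer editing the resource directly instead of patching it, the relevant field is `spec.build.config.baseImage`. The following is a minimal sketch, with the image name as a placeholder:
+
+.Example of a SonataFlowPlatform using a custom builder image
+[source,yaml,subs="attributes+"]
+----
+apiVersion: sonataflow.org/v1alpha08
+kind: SonataFlowPlatform
+metadata:
+  name: sonataflow-platform
+spec:
+  build:
+    config:
+      # Placeholder: replace with the builder image the operator should use
+      baseImage: quay.io/acme/my-custom-swf-builder:latest
+----
+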
+=== Customize the base build Dockerfile
+
+The operator uses the sonataflow-operator-builder-config `ConfigMap` in the operator's installation namespace ({operator_installation_namespace}) to configure and run the workflow build process.
+You can change the `Dockerfile` entry in this `ConfigMap` to tailor the Dockerfile to your needs. Just be aware that this can break the build process.
+
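+For example, you can open the `ConfigMap` for editing with `kubectl` (on OpenShift, the equivalent `oc edit` command is shown in the <<building-openshift>> section):
+
+.Editing the builder ConfigMap on Kubernetes
+[source,bash,subs="attributes+"]
+----
+kubectl edit cm/sonataflow-operator-builder-config -n {operator_installation_namespace}
+----
+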
+.Example of the sonataflow-operator-builder-config `ConfigMap`
+[source,yaml,subs="attributes+"]
+----
+apiVersion: v1
+data:
+ DEFAULT_BUILDER_RESOURCE_NAME: Dockerfile
+ DEFAULT_WORKFLOW_EXTENSION: .sw.json
+ Dockerfile: "FROM quay.io/kiegroup/kogito-swf-builder-nightly:latest AS builder\n
+ \ \n # Copy from build context to skeleton resources project\nCOPY * ./resources/\n\nRUN
+ /home/kogito/launch/build-app.sh ./resources\n \n #=============================\n
+ \ # Runtime Run\n #=============================\nFROM registry.access.redhat.com/ubi8/openjdk-11:latest\n\nENV
+ LANG='en_US.UTF-8' LANGUAGE='en_US:en'\n \n # We make four distinct layers so
+ if there are application changes the library layers can be re-used\nCOPY --from=builder
+ --chown=185 /home/kogito/serverless-workflow-project/target/quarkus-app/lib/ /deployments/lib/\nCOPY
+ --from=builder --chown=185 /home/kogito/serverless-workflow-project/target/quarkus-app/*.jar
+ /deployments/\nCOPY --from=builder --chown=185 /home/kogito/serverless-workflow-project/target/quarkus-app/app/
+ /deployments/app/\nCOPY --from=builder --chown=185 /home/kogito/serverless-workflow-project/target/quarkus-app/quarkus/
+ /deployments/quarkus/\n\nEXPOSE 8080\nUSER 185\nENV AB_JOLOKIA_OFF=\"\"\nENV JAVA_OPTS=\"-Dquarkus.http.host=0.0.0.0
+ -Djava.util.logging.manager=org.jboss.logmanager.LogManager\"\nENV JAVA_APP_JAR=\"/deployments/quarkus-run.jar\"\n"
+kind: ConfigMap
+metadata:
+ name: sonataflow-operator-builder-config
+----
+
+[WARNING]
+====
+The excerpt above is just an example. The current installation might ship a slightly different Dockerfile. Don't copy this example into your installation.
+====
+
+=== Changing resources requirements
+
+You can create or edit a `SonataFlowPlatform` in the workflow namespace specifying the link:{kubernetes_resource_management_url}[resources requirements] for the internal builder pods:
+
+.Example of SonataFlowPlatform
+[source,yaml,subs="attributes+"]
+----
+apiVersion: sonataflow.org/v1alpha08
+kind: SonataFlowPlatform
+metadata:
+ name: sonataflow-platform
+spec:
+ build:
+ template:
+ resources:
+ requests:
+ memory: "64Mi"
+ cpu: "250m"
+ limits:
+ memory: "128Mi"
+ cpu: "500m"
----
-=== Create a secret for the container registry authentication
+From now on, every build process will reuse this configuration when starting new builder instances.
+
+[IMPORTANT]
+====
+Only one `SonataFlowPlatform` is allowed per namespace. Fetch the one the operator created for you and edit it instead of creating a new one.
+====
+
+You can fine-tune the resource requirements for a particular workflow. Every workflow instance will have a `SonataFlowBuild` instance created with the same name as the workflow. You can edit the `SonataFlowBuild` custom resource and specify the same parameters. For example:
+
+.Example of SonataFlowBuild
+[source,yaml,subs="attributes+"]
+----
+apiVersion: sonataflow.org/v1alpha08
+kind: SonataFlowBuild
+metadata:
+ name: my-workflow
+spec:
+ resources:
+ requests:
+ memory: "64Mi"
+ cpu: "250m"
+ limits:
+ memory: "128Mi"
+ cpu: "500m"
+----
+
+These parameters only apply to new build instances.
+
+=== Passing arguments to the internal builder
+
+You can pass build arguments (see link:{docker_doc_arg_url}[Dockerfile ARG]) to the `SonataFlowBuild` instance.
+
+.Procedure
+
+1. Create or edit an existing `SonataFlowBuild` instance. It has the same name as the `SonataFlow` object you're trying to build.
++
+.Checking if the SonataFlowBuild instance already exists
+[source,bash,subs="attributes+"]
+----
+kubectl edit sonataflowbuild/<name> -n <namespace>
+----
++
+2. Add the desired arguments to `.spec.buildArgs`.
++
+.Adding buildArgs to the SonataFlowBuild instance
+[source,yaml,subs="attributes+"]
+----
+apiVersion: sonataflow.org/v1alpha08
+kind: SonataFlowBuild
+metadata:
+ name:
+spec:
+ [...]
+ buildArgs:
+ - name: ARG1
+ value: value1
+ - name: ARG2
+ value: value2
+----
++
+3. Save the file and exit. A new build should start soon with the new configuration.
+
+Alternatively, you can set this information in the `SonataFlowPlatform`, so that every new build instance will use it as a template. For example:
+
+.Example of a SonataFlowPlatform instance with default build args for every build within the namespace
+[source,yaml,subs="attributes+"]
+----
+apiVersion: sonataflow.org/v1alpha08
+kind: SonataFlowPlatform
+metadata:
+ name:
+spec:
+ build:
+ template:
+ buildArgs:
+ - name: ARG1
+ value: value1
+ - name: ARG2
+ value: value2
+----
+
+[TIP]
+====
+Since the `buildArgs` attribute is an array of link:{kubernetes_envvar_url}[Kubernetes `EnvVar` objects], you can reference a `ConfigMap` or `Secret` to pass a value, for example. You're not restricted to plain values.
+
+On Minikube and Kubernetes, only plain values, `ConfigMap`, and `Secret` references are supported due to a restriction in the build system provided by these platforms.
+====
+
+The table below lists the Dockerfile arguments available in the default {operator_name} installation:
+
+.Default ARG definitions in the builder Dockerfile
+[cols="1,1,1"]
+|===
+| Argument | Description | Example
+
+|QUARKUS_EXTENSIONS | Comma-separated list of link:{quarkus_extensions_url}[Quarkus Extensions] that the builder should add to the workflow. | org.kie.kogito:kogito-addons-quarkus-persistence-infinispan:2.0.0-SNAPSHOT
+|QUARKUS_ADD_EXTENSION_ARGS | Arguments passed to the Quarkus CLI when adding extensions. Enabled only when `QUARKUS_EXTENSIONS` is not empty. | See the link:{quarkus_cli_url}#using-the-cli[Quarkus CLI documentation]
+|===
+
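+As an illustration only, the following sketch combines the `buildArgs` mechanism above with the `QUARKUS_EXTENSIONS` argument to add an extension to the built workflow; the object name is a placeholder and the extension coordinates are the example value from the table:
+
+.Example of a SonataFlowBuild adding a Quarkus extension at build time
+[source,yaml,subs="attributes+"]
+----
+apiVersion: sonataflow.org/v1alpha08
+kind: SonataFlowBuild
+metadata:
+  name: <name>
+spec:
+  buildArgs:
+    # Comma-separated list of extensions the builder should add to the workflow
+    - name: QUARKUS_EXTENSIONS
+      value: org.kie.kogito:kogito-addons-quarkus-persistence-infinispan:2.0.0-SNAPSHOT
+----
+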
+=== Setting environment variables in the internal builder
+
+You can set environment variables to the `SonataFlowBuild` internal builder pod.
+
+[IMPORTANT]
+====
+These environment variables are valid for the build context **only**. They are not set on the final built workflow image.
+====
+
+.Procedure
+
+1. Create or edit an existing `SonataFlowBuild` instance. It has the same name as the `SonataFlow` object you're trying to build.
++
+.Checking if the SonataFlowBuild instance already exists
+[source,bash,subs="attributes+"]
+----
+kubectl edit sonataflowbuild/<name> -n <namespace>
+----
++
+2. Add the desired environment variables to `.spec.envs`.
++
+.Setting environment variables in the SonataFlowBuild instance
+[source,yaml,subs="attributes+"]
+----
+apiVersion: sonataflow.org/v1alpha08
+kind: SonataFlowBuild
+metadata:
+ name:
+spec:
+ [...]
+ envs:
+ - name: MYENV1
+ value: value1
+ - name: MYENV2
+ value: value2
+----
++
+3. Save the file and exit. A new build should start soon with the new configuration.
+
+Alternatively, you can set this information in the `SonataFlowPlatform`, so that every new build instance will use it as a template. For example:
+
+.Example of a SonataFlowPlatform instance with default envs for every build within the namespace
+[source,yaml,subs="attributes+"]
+----
+apiVersion: sonataflow.org/v1alpha08
+kind: SonataFlowPlatform
+metadata:
+ name:
+spec:
+ build:
+ template:
+ envs:
+ - name: MYENV1
+ value: value1
+ - name: MYENV2
+ value: value2
+----
+
+[TIP]
+====
+Since the `envs` attribute is an array of link:{kubernetes_envvar_url}[Kubernetes `EnvVar` objects], you can reference a `ConfigMap` or `Secret` to pass a value, for example. You're not restricted to plain values.
+
+On Minikube and Kubernetes, only plain values, `ConfigMap`, and `Secret` references are supported due to a restriction in the build system provided by these platforms.
+====
+
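+For instance, here is a minimal sketch of an environment variable whose value comes from a `ConfigMap` key; the `ConfigMap` name and key are placeholders:
+
+.Example of an env entry referencing a ConfigMap key
+[source,yaml,subs="attributes+"]
+----
+spec:
+  envs:
+    - name: MYENV1
+      valueFrom:
+        configMapKeyRef:
+          # Placeholders: the ConfigMap and key must exist in the workflow namespace
+          name: my-build-config
+          key: my-key
+----
+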
+[#building-kubernetes]
+== Building on Kubernetes
+
+[TIP]
+====
+You can skip this section if you're building on <<building-openshift,OpenShift>>.
+====
+
+Follow these steps to configure your Kubernetes namespace to build workflow images with the operator.
+
+=== Create a Namespace for the building phase
+
+Create a new namespace that will hold all the resources the operator creates in this guide (Pods, Deployments, Services, Secrets, ConfigMaps, and Custom Resources).
+
+.Create a Namespace for the workflow to build and run in
+[source,bash,subs="attributes+"]
+----
+kubectl create namespace workflows
+# set the workflows namespace to your context
+kubectl config set-context --current --namespace=workflows
+----
+
+=== Create a Secret for the container registry authentication
+
+You can follow these steps to publish to an external registry that requires authentication. If you're running on Minikube, just link:{minikube_registry_url}[enable the internal registry] and skip the rest of this section, since the internal Minikube registry doesn't require authentication.
+
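+For reference, on Minikube the internal registry can be enabled with the registry addon (see the linked Minikube documentation for details):
+
+.Enabling the Minikube internal registry
+[source,bash,subs="attributes+"]
+----
+minikube addons enable registry
+----
+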
.Create a secret for the container registry authentication
[source,bash,subs="attributes+"]
----
-kubectl create secret docker-registry regcred --docker-server= --docker-username= --docker-password= --docker-email= -n kogito-workflows
+kubectl create secret docker-registry regcred --docker-server=<registry_server> --docker-username=<username> --docker-password=<password> --docker-email=<email> -n workflows
----
-or you can directly import your local docker config into your Kubernetes cluster:
+or you can directly import your local Docker config into your Kubernetes cluster:
-.Create a secret for the container registry authentication based on local docker config
+.Create a Secret for the container registry authentication based on local Docker config
[source,bash,subs="attributes+"]
----
-kubectl create secret generic regcred --from-file=.dockerconfigjson=$\{HOME\}/.docker/config.json --type=kubernetes.io/dockerconfigjson -n kogito-workflows
+kubectl create secret generic regcred --from-file=.dockerconfigjson=$\{HOME\}/.docker/config.json --type=kubernetes.io/dockerconfigjson -n workflows
----
[WARNING]
====
-Double check your `$\{HOME\}/.docker/config.json`. If you're using local desktop authentication, this configuration **won't work** in the cluster. You can initialize this by logging in in the target registry, e.g. `docker login`.
+Double-check your `$\{HOME\}/.docker/config.json`. If you're using local desktop authentication, this configuration **won't work** in the cluster. You can initialize this by logging in to the target registry, e.g. `docker login`.
====
-=== Configure the {operator_name} (i.e. registry address, secret) for building your workflows
+=== Configure the {operator_name} (i.e. registry address, secret) for building your Workflows
-The `KogitoServerlessPlatform` is the resource used to control the behavior of the {operator_name}.
-It defines the behavior of all Custom Resources (Workflow and Build) in the given namespace.
+The `SonataFlowPlatform` is the Custom Resource used to control the behavior of the {operator_name}.
+It defines the behavior of the operator when handling all {product_name} Custom Resources (Workflow and Build) in the given namespace.
-Since the operator is installed in global mode, you will need to specify a platform in each namespace where you want to deploy workflows.
-You can find a basic `KogitoServerlessPlatform` custom resource example in the link:https://github.com/kiegroup/kogito-serverless-operator/tree/{operator_version}/config/samples[`config/samples` folder] that you can simply apply to configure your operator.
+Since the operator is installed in global mode, you will need to specify a `SonataFlowPlatform` in each Namespace where you want to deploy Workflows.
-.Create a `KogitoServerlessPlatform`
-[source,bash,subs="attributes+"]
+[TIP]
+====
+If you have deployed a workflow for xref:cloud/operator/developing-workflows.adoc[development], you already have a `SonataFlowPlatform` custom resource since the operator created one for you. Just edit the one you have.
+====
+
+The following is a basic `SonataFlowPlatform` Custom Resource example for working on Kubernetes:
+
+.Example of a basic SonataFlowPlatform on Kubernetes
+[source,yaml,subs="attributes+"]
+----
+apiVersion: sonataflow.org/v1alpha08
+kind: SonataFlowPlatform
+metadata:
+ name: sonataflow-platform
+spec:
+ build:
+ config:
+ strategyOptions:
+ KanikoBuildCacheEnabled: "true"
+ registry:
+ address: quay.io/kiegroup <1>
+ secret: regcred <2>
+----
+
+<1> Your registry address
+<2> The secret name created in the steps above
+
+On Minikube, you can remove the `registry` information entirely since you don't need credentials for pushing to the internal registry:
+
+.Example of a basic SonataFlowPlatform on Minikube
+[source,yaml,subs="attributes+"]
----
-kubectl apply -f https://raw.githubusercontent.com/kiegroup/kogito-serverless-operator/{operator_version}/config/samples/sw.kogito_v1alpha08_kogitoserverlessplatform.yaml -n kogito-workflows
+apiVersion: sonataflow.org/v1alpha08
+kind: SonataFlowPlatform
+metadata:
+ name: sonataflow-platform
+spec:
+ build:
+ config:
+ strategyOptions:
+ KanikoBuildCacheEnabled: "true"
----
-[NOTE]
+[TIP]
====
-In this Custom Resource, `spec.platform.registry.secret` is the name of the secret you created just before.
+The `spec.build.config.strategyOptions.KanikoBuildCacheEnabled` option enables the internal Kaniko process to cache builder images, which can speed up build execution.
====
-You can also update "on-the-fly" the `KogitoServerlessPlatform` registry field with this command (change )
+You can save this file locally and run the following command:
-.Create a `KogitoServerlessPlatform` with a specific registry
+.Create a `SonataFlowPlatform`
[source,bash,subs="attributes+"]
----
-curl https://raw.githubusercontent.com/kiegroup/kogito-serverless-operator/{operator_version}/config/samples/sw.kogito_v1alpha08_kogitoserverlessplatform.yaml | sed "s|address: .*|address: " | kubectl apply -f -
+kubectl apply -f my-sonataflowplatform.yaml -n workflows
----
-In order to retrieve the Cluster IP address of Minikube's internal registry to configure your platform, you can use the following command:
+You can also update the `SonataFlowPlatform` registry field "on the fly" with this command (change `<YOUR_REGISTRY>` to your registry address):
-.Retrieve Minikube registry internal IP
+.Create a `SonataFlowPlatform` with a specific registry
[source,bash,subs="attributes+"]
----
-kubectl get svc registry -n kube-system -ojsonpath='{.spec.clusterIP}'
+cat my-sonataflowplatform.yaml | sed "s|address: .*|address: <YOUR_REGISTRY>|" | kubectl apply -f -
----
-== Build and deploy your workflow application
+[#building-openshift]
+== Building on OpenShift
+
+You don't need to do anything to build on OpenShift since the operator will configure everything for you. There are a few customizations you can make, described in the <<configure-build-system>> section.
-You can now send your workflow definition (`KogitoServerlessWorkflow`) to the operator.
+In general, the operator will create a link:{openshift_build_url}[`BuildConfig` to build] the workflow using the mapped xref:cloud/operator/referencing-resource-files.adoc[resource files] and your workflow definition. After the build is finished, the image will be pushed to the internal OpenShift registry backed by an `ImageStream` object.
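+
+If you want to inspect what the operator created for a given workflow, you can list these objects; the commands below assume the `greeting` workflow used later in this guide and a placeholder namespace:
+
+.Inspecting the build objects created on OpenShift
+[source,bash,subs="attributes+"]
+----
+# the BuildConfig is named after the workflow
+oc get buildconfig/greeting -n <your_namespace>
+# list the ImageStreams in the namespace; the workflow image is pushed to one of them
+oc get imagestream -n <your_namespace>
+----
+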
-You can find a basic `KogitoServerlessWorkflow` in the link:https://github.com/kiegroup/kogito-serverless-operator/tree/{operator_version}/config/samples[`config/samples` folder] that is defining the link:{kogito_greeting_example_url}[{product_name} Greeting example].
+=== Changing the base builder image
+
+If you are running on OpenShift, you have access to Red Hat's supported registry. You can change the default builder image by editing the `sonataflow-operator-builder-config` `ConfigMap`.
[source,bash,subs="attributes+"]
----
-kubectl apply -f https://raw.githubusercontent.com/kiegroup/kogito-serverless-operator/{operator_version}/config/samples/sw.kogito_v1alpha08_kogitoserverlessworkflow.yaml -n kogito-workflows
+oc edit cm/sonataflow-operator-builder-config -n {operator_installation_namespace}
----
-You can check the logs of the build of your workflow via:
-.Get the workflow application pod logs
+In your editor, change the first line in the `Dockerfile` entry where it reads `FROM quay.io/kiegroup/kogito-swf-builder-nightly:latest` to the desired image.
+
+This image must be compatible with your operator's installation.
+
+[#build-deploy-workflow]
+== Build and deploy your workflow
+
+You can now send your workflow definition (`SonataFlow`) to the operator.
+
+You can find a basic `SonataFlow` example below:
+
+.Example of the greetings workflow example
+[source,yaml,subs="attributes+"]
+----
+apiVersion: sonataflow.org/v1alpha08
+kind: SonataFlow
+metadata:
+ name: greeting
+ annotations:
+ sonataflow.org/description: Greeting example on k8s!
+ sonataflow.org/version: 0.0.1
+spec:
+ flow:
+ start: ChooseOnLanguage
+ functions:
+ - name: greetFunction
+ type: custom
+ operation: sysout
+ states:
+ - name: ChooseOnLanguage
+ type: switch
+ dataConditions:
+ - condition: "${ .language == \"English\" }"
+ transition: GreetInEnglish
+ - condition: "${ .language == \"Spanish\" }"
+ transition: GreetInSpanish
+ defaultCondition: GreetInEnglish
+ - name: GreetInEnglish
+ type: inject
+ data:
+ greeting: "Hello from JSON Workflow, "
+ transition: GreetPerson
+ - name: GreetInSpanish
+ type: inject
+ data:
+ greeting: "Saludos desde JSON Workflow, "
+ transition: GreetPerson
+ - name: GreetPerson
+ type: operation
+ actions:
+ - name: greetAction
+ functionRef:
+ refName: greetFunction
+ arguments:
+ message: ".greeting+.name"
+ end: true
+----
+
+Save this content to a local file named `greetings-workflow.yaml`, then run:
+
[source,bash,subs="attributes+"]
----
-kubectl logs kogito-greeting-builder -n kogito-workflows
+kubectl apply -f greetings-workflow.yaml -n workflows
+----
+
+You can check the logs of the build of your Workflow via:
+
+.Get the Workflow pod logs
+[source,bash,subs="attributes+"]
+----
+# on Kubernetes
+kubectl logs kogito-greeting-builder -n workflows
+
+# on OpenShift
+oc logs buildconfig/greeting -n workflows
----
The final pushed image is printed in the logs at the end of the build.
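+
+You can also follow the overall build status through the `SonataFlowBuild` resource, which is created with the same name as the workflow:
+
+.Checking the SonataFlowBuild status
+[source,bash,subs="attributes+"]
+----
+kubectl get sonataflowbuild greeting -n workflows
+----
+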
-== Check the workflow application is running
+== Check if the Workflow is running
+
+In order to check that the {product_name} Greeting workflow is up and running, you can perform a test HTTP call against the greeting service.
-In order to check that the {product_name} Greeting application is up and running, you can try to perform a test HTTP call, from the greeting pod.
+.Procedure
-.Check the greeting application is running
+1. Expose the workflow so you can access it:
++
+.Exposing the greeting workflow on Minikube
+[source,bash,subs="attributes+"]
+----
+# On Minikube you can use Nodeport
+kubectl patch svc greeting -n workflows -p '{"spec": {"type": "NodePort"}}'
+GREETING_SVC=$(minikube service greeting -n workflows --url)
+----
++
+.Exposing the greeting workflow on OpenShift
+[source,bash,subs="attributes+"]
+----
+# On OpenShift you can expose a route: {openshift_route_new_url}
+oc expose svc greeting -n workflows
+# get the public URL
+GREETING_SVC=$(oc get route/greeting --template='{{.spec.host}}')
+----
++
+2. Make the HTTP call using `curl`:
++
+.Check if the greeting workflow is running
[source,bash,subs="attributes+"]
----
-kubectl patch svc greeting -n kogito-workflows -p '{"spec": {"type": "NodePort"}}'
-GREETING_SVC=$(minikube service greeting -n kogito-workflows --url)
curl -X POST -H 'Content-Type:application/json' -H 'Accept:application/json' -d '{"name": "John", "language": "English"}' $GREETING_SVC/greeting
----
-
-If everything is working well you should receive a response like this:
-
-.Response from the greeting application
++
+If everything is working, you should receive a response like this:
++
+.Response from the greeting workflow
[source,json,subs="attributes+"]
----
{"id":"b5fbfaa3-b125-4e6c-9311-fe5a3577efdd","workflowdata":{"name":"John","language":"English","greeting":"Hello from JSON Workflow, "}}
diff --git a/modules/serverless-logic/pages/cloud/operator/configuring-workflows.adoc b/modules/serverless-logic/pages/cloud/operator/configuring-workflows.adoc
index f20b8cb6..89fa264a 100644
--- a/modules/serverless-logic/pages/cloud/operator/configuring-workflows.adoc
+++ b/modules/serverless-logic/pages/cloud/operator/configuring-workflows.adoc
@@ -2,20 +2,15 @@
:compat-mode!:
// Metadata:
:description: Configuration of Workflow Services deployed by the operator
-:keywords: kogito, workflow, serverless, operator, kubernetes, minikube, config, openshift, containers
+:keywords: kogito, sonataflow, workflow, serverless, operator, kubernetes, minikube, config, openshift, containers
-This document describes how to configure a workflow service with the {operator_name}.
-
-[IMPORTANT]
-====
-This feature is supported only for development profile. Configuring workflows not in development is mapped and tracked by link:https://issues.redhat.com/browse/KOGITO-8522[KOGITO-8522]. See xref:cloud/operator/known-issues.adoc[]
-====
+This document describes how to configure a Workflow service with the {operator_name}.
== Editing the Workflow Configuration
-When the operator deploys the workflow service, it also creates a `ConfigMap` named after the `KogitoServerlessWorkflow` object with the suffix `-props`. For example, if your workflow name is `greeting`, then the `ConfigMap` name is `greeting-props`.
+When the operator deploys the Workflow service, it also creates a `ConfigMap` named after the `SonataFlow` object with the suffix `-props`. For example, if your Workflow name is `greeting`, then the `ConfigMap` name is `greeting-props`.
-You can use the Kubernetes object editor of your preference to add or edit the link:https://en.wikipedia.org/wiki/.properties[properties] in the workflow configuration. Using `kubectl` you can do:
+You can use the Kubernetes object editor of your preference to add or edit the link:https://en.wikipedia.org/wiki/.properties[properties] in the Workflow configuration. Using `kubectl` you can do:
.Editing the Workflow Properties
[source,shell,subs="attributes+"]
@@ -25,7 +20,7 @@ kubectl edit cm -props
Note that it's important to respect the properties format, otherwise the operator will replace your configuration with the default one.
-Here's an example of a workflow properties:
+Here's an example of Workflow properties stored within a ConfigMap:
.Example of a Workflow ConfigMap Properties
[source,yaml,subs="attributes+"]
@@ -42,13 +37,13 @@ data:
my.properties.key = any-value
----
-The underlying runtime engine that executes the workflow service is based on link:{quarkus_url}[Quarkus]. So that, you can configure the workflow as you normally would any link:{quarkus_config_url}[Quarkus application].
+The underlying runtime engine that executes the Workflow service is based on link:{quarkus_url}[Quarkus]. The `application.properties` key in the ConfigMap means that you can configure the Workflow as you would any link:{quarkus_config_url}[Quarkus application].
Any xref:core/configuration-properties.adoc[{product_name} configuration that the documentation describes] or general Quarkus application property can be configured using this method.
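+
+As an illustration only, a sketch of how a standard Quarkus property would look inside the generated ConfigMap follows; the `greeting` workflow name is assumed:
+
+.Example of adding a Quarkus property to the Workflow ConfigMap
+[source,yaml,subs="attributes+"]
+----
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: greeting-props
+data:
+  application.properties: |
+    # any Quarkus or workflow property can be set here
+    quarkus.log.level = INFO
+    my.properties.key = any-value
+----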
== Immutable properties
-A few properties can not be changed in this configuration. Usually, they are already defined in the properties file. The table below lists them.
+A few properties cannot be changed in this configuration. Usually, they are already defined in the properties file. The table below lists them.
.List of immutable properties
[cols="2,1,1"]
@@ -63,7 +58,7 @@ A few properties can not be changed in this configuration. Usually, they are alr
|0.0.0.0
|all
-|org.kie.kogito.addons.knative.health-enabled
+|org.kie.kogito.addons.knative.eventing.health-enabled
|false
|dev
diff --git a/modules/serverless-logic/pages/cloud/operator/developing-workflows.adoc b/modules/serverless-logic/pages/cloud/operator/developing-workflows.adoc
index b4fb22b7..1cd0530b 100644
--- a/modules/serverless-logic/pages/cloud/operator/developing-workflows.adoc
+++ b/modules/serverless-logic/pages/cloud/operator/developing-workflows.adoc
@@ -1,12 +1,15 @@
-= Developing Workflow Services with the Operator
+= Developing Workflows with the Operator
:compat-mode!:
// Metadata:
-:description: Developing workflow service with the operator on Kubernetes
-:keywords: kogito, workflow, serverless, operator, kubernetes, minikube, devmode
+:description: Developing workflows with the operator on Kubernetes
+:keywords: kogito, sonataflow, workflow, serverless, operator, kubernetes, minikube, devmode
+//links
+:openshift_route_url: https://docs.openshift.com/container-platform/4.13/networking/routes/route-configuration.html
+:kubernetes_url: https://kubernetes.io/docs/concepts/services-networking/ingress/
-This document describes how you can develop your workflows directly on Kubernetes with the {operator_name}.
+This document describes how you can develop your Workflows directly on Kubernetes with the {operator_name}.
-Workflows in development profile are not tailored for production environments. To build and deploy an immutable workflow application with the operator, see xref:cloud/operator/build-and-deploy-workflows.adoc[].
+Workflows in the development profile are not tailored for production environments. To build and deploy an immutable Workflow with the operator, see xref:cloud/operator/build-and-deploy-workflows.adoc[].
[IMPORTANT]
====
@@ -15,70 +18,71 @@ Workflows in development profile are not tailored for production environments. T
== Introduction to the Development Profile
-The development profile is the easiest way to start playing around with workflows and the operator.
+The development profile is the easiest way to start playing around with Workflows and the operator.
-To get started, you can use an editor of your choice to create a new `KogitoServerlessWorkflow` Custom Resource YAML definition. For example:
+To get started, you can use an editor of your choice to create a new `SonataFlow` Custom Resource YAML definition. For example:
.Example of a Kubernetes {product_name} YAML definition
[source,yaml,subs="attributes+"]
----
-apiVersion: sw.kogito.kie.org/v1alpha08
-kind: KogitoServerlessWorkflow
+apiVersion: sonataflow.org/v1alpha08
+kind: SonataFlow
metadata:
name: greeting
annotations:
- sw.kogito.kie.org/description: Greeting example on k8s!
- sw.kogito.kie.org/version: 0.0.1
- sw.kogito.kie.org/profile: dev <1>
-spec: <2>
- start: ChooseOnLanguage
- functions:
- - name: greetFunction
- type: custom
- operation: sysout
- states:
- - name: ChooseOnLanguage
- type: switch
- dataConditions:
- - condition: "${ .language == \"English\" }"
- transition: GreetInEnglish
- - condition: "${ .language == \"Spanish\" }"
- transition: GreetInSpanish
- defaultCondition: GreetInEnglish
- - name: GreetInEnglish
- type: inject
- data:
- greeting: "Hello from JSON Workflow, "
- transition: GreetPerson
- - name: GreetInSpanish
- type: inject
- data:
- greeting: "Saludos desde JSON Workflow, "
- transition: GreetPerson
- - name: GreetPerson
- type: operation
- actions:
- - name: greetAction
- functionRef:
- refName: greetFunction
- arguments:
- message: ".greeting+.name"
- end: true
+ sonataflow.org/description: Greeting example on k8s!
+ sonataflow.org/version: 0.0.1
+ sonataflow.org/profile: dev <1>
+spec:
+ flow: <2>
+ start: ChooseOnLanguage
+ functions:
+ - name: greetFunction
+ type: custom
+ operation: sysout
+ states:
+ - name: ChooseOnLanguage
+ type: switch
+ dataConditions:
+ - condition: "${ .language == \"English\" }"
+ transition: GreetInEnglish
+ - condition: "${ .language == \"Spanish\" }"
+ transition: GreetInSpanish
+ defaultCondition: GreetInEnglish
+ - name: GreetInEnglish
+ type: inject
+ data:
+ greeting: "Hello from JSON Workflow, "
+ transition: GreetPerson
+ - name: GreetInSpanish
+ type: inject
+ data:
+ greeting: "Saludos desde JSON Workflow, "
+ transition: GreetPerson
+ - name: GreetPerson
+ type: operation
+ actions:
+ - name: greetAction
+ functionRef:
+ refName: greetFunction
+ arguments:
+ message: ".greeting+.name"
+ end: true
----
-<1> The annotation `sw.kogito.kie.org/profile: dev` tells the operator to deploy your workflow using the development profile. This means that the operator will build a running instance of the workflow ready to receive changes during your development cycle.
+<1> The annotation `sonataflow.org/profile: dev` tells the operator to deploy the Workflow using the development profile. This means that the operator will build a running instance of the Workflow ready to receive changes during your development cycle.
-<2> In the `spec` attribute goes the workflow definition as described by the xref:getting-started/cncf-serverless-workflow-specification-support.adoc[CNCF Serverless Workflow specification]. So if you already have a workflow definition, you can use it there. Alternatively, you can use the xref:tooling/serverless-workflow-editor/swf-editor-overview.adoc[editors to create your workflow definition].
+<2> In the `flow` attribute goes the Workflow definition as described by the xref:core/cncf-serverless-workflow-specification-support.adoc[CNCF Serverless Workflow specification]. So if you already have a workflow definition, you can use it there. Alternatively, you can use the xref:tooling/serverless-workflow-editor/swf-editor-overview.adoc[editors to create your workflow definition].
-== Deploying a New Workflow Service
+== Deploying a New Workflow
.Prerequisites
* You have xref:cloud/operator/install-serverless-operator.adoc[installed the {operator_name}]
* You have created a new {product_name} Kubernetes YAML file
-Having a new Kubernetes workflow definition in a YAML file (you can use the above example), you can deploy it in your cluster with the following command:
+Having a new Kubernetes Workflow definition in a YAML file (you can use the above example), you can deploy it in your cluster with the following command:
-.Deploying a new KogitoServerlessWorkflow custom resource in Kubernetes
+.Deploying a new SonataFlow Custom Resource in Kubernetes
[source,bash,subs="attributes+"]
----
kubectl apply -f <your_file> -n <your_namespace>
@@ -86,26 +90,26 @@ kubectl apply -f -n
Alternatively, you can try one of the examples available in the operator repository:
-.Deploying the greeting workflow example
+.Deploying the greeting Workflow example
[source,bash,subs="attributes+"]
----
-kubectl apply -f https://raw.githubusercontent.com/kiegroup/kogito-serverless-operator/{operator_version}/config/samples/sw.kogito_v1alpha08_kogitoserverlessworkflow_devmode.yaml -n
+kubectl apply -f https://raw.githubusercontent.com/kiegroup/kogito-serverless-operator/{operator_version}/test/testdata/sw.kogito_v1alpha08_sonataflow_devmode.yaml -n <your_namespace>
----
[TIP]
====
-Replace `` with the namespace you're using to deploy your applications
+Replace `<your_namespace>` with the Namespace you're using to deploy your workflows
====
-You can follow the workflow status to check if everything is fine with:
+You can follow the Workflow status to check if everything is fine with:
-.Checking the workflow status
+.Checking the Workflow status
[source,bash,subs="attributes+"]
----
kubectl get workflow -n <your_namespace> -w
----
-You should see the workflow conditions evolving to `READY` in a few seconds:
+You should see the Workflow conditions evolving to `READY` in a few seconds:
.Example workflow deployment status
[source,bash,subs="attributes+"]
@@ -117,46 +121,50 @@ greeting dev 0.0.1 True
[TIP]
====
-The `REASON` field gives you a cue about the current workflow status.
+The `REASON` field gives you a cue about the current Workflow status.
====
-You can make changes to the workflow YAML using any Kubernetes editor. For example you can use `kubectl` and the following commanda:
+You can make changes to the Workflow YAML using any Kubernetes editor. For example, you can use `kubectl` and the following command:
[source,bash,subs="attributes+"]
----
kubectl edit workflow/greeting -n <your_namespace>
----
-and changing the workflow definition inside the CustomResource Spec section.
+and changing the Workflow definition inside the Custom Resource Spec section.
-Otherwhise you can save the CustomResource definition file and edit it with your desired editor and re-applying it.
+Alternatively, you can save the Custom Resource definition file and edit it with your desired editor and re-apply it.
For example, using VS Code, these are the commands needed:
[source,bash,subs="attributes+"]
----
-curl -S https://raw.githubusercontent.com/kiegroup/kogito-serverless-operator/{operator_version}/config/samples/sw.kogito_v1alpha08_kogitoserverlessworkflow_devmode.yaml > workflow_devmode.yaml
+curl -S https://raw.githubusercontent.com/kiegroup/kogito-serverless-operator/{operator_version}/config/samples/sw.kogito_v1alpha08_sonataflow_devmode.yaml > workflow_devmode.yaml
code workflow_devmode.yaml
kubectl apply -f workflow_devmode.yaml -n <your_namespace>
----
-The operator ensures that the latest workflow definition is running and ready.
-This way, you can include the workflow application in your development scenario and start making requests to it.
+The operator ensures that the latest Workflow definition is running and ready.
+This way, you can include the Workflow in your development scenario and start making requests to it.
-== Check the workflow application is running
+== Check if the Workflow is running
-In order to check that the {product_name} Greeting application is up and running, you can try to perform a test HTTP call. First, you must expose the service:
+In order to check that the {product_name} Greeting workflow is up and running, you can try to perform a test HTTP call. First, you must get the service URL:
-.Exposing the workflow application
+.Exposing the Workflow
[source,bash,subs="attributes+"]
----
-kubectl patch svc greeting -n -p '{"spec": {"type": "NodePort"}}'
minikube service greeting -n <your_namespace> --url
http://127.0.0.1:57053
-# use the above output to get the current workflow URL in your environment
+# use the above output to get the current Workflow URL in your environment
----
-Alter exposing the workflow service, you can point your browser to the Swagger UI and start making requests with the REST interface.
+[TIP]
+====
+When running on Minikube, the service is already exposed for you via `NodePort`. On OpenShift, link:{openshift_route_url}[a Route is automatically created in devmode]. If you're running on Kubernetes you can link:{kubernetes_url}[expose your service using an Ingress].
+====
+
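+As an illustration only, a minimal Ingress sketch for the `greeting` workflow on plain Kubernetes follows; the ingress class and the Service port are assumptions, so adjust them to match your cluster and the Service created by the operator:
+
+.Example of an Ingress exposing the greeting workflow (sketch)
+[source,yaml,subs="attributes+"]
+----
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: greeting
+spec:
+  # Assumption: an NGINX ingress controller is installed in the cluster
+  ingressClassName: nginx
+  rules:
+    - http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: greeting
+                port:
+                  # Assumption: adjust to the port exposed by the greeting Service
+                  number: 80
+----
+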
+You can now point your browser to the Swagger UI and start making requests with the REST interface.
For example, using the above command execution you can access the Swagger UI via `http://127.0.0.1:57053/q/swagger-ui/`.
@@ -187,22 +195,58 @@ You should see a result similar to this:
}
----
-You can even make changes to your `KogitoServerlessWorkflow` YAML file and see the results using the Swagger UI.
+You can even make changes to your `SonataFlow` YAML file and see the results using the Swagger UI.
+
+== Remove the Workflow
+
+In order to remove the {product_name} Greeting, you can execute the following command:
+
+.Removing the Workflow
+[source,bash,subs="attributes+"]
+----
+kubectl delete -f <your_file> -n <your_namespace>
+----
+
+== Referencing resources in the Workflow
+
+See the xref:cloud/operator/referencing-resource-files.adoc[] guide for more complex scenarios where you may need to reference other resources in the workflow definition.
+
+== Using another Workflow base image
+
+If your scenario has strict policies for image usage, such as security or hardening constraints, you can replace the default image used by the operator. Alternatively, you might want to test a nightly build with a bug fix or a custom image containing your customizations.
+
+By default, the operator will use the image distributed upstream to run the workflows in the development profile. You can change this image by editing the `SonataFlowPlatform` custom resource in the namespace where you deployed your workflows:
+
+.Patching the current SonataFlowPlatform with the new image
+[source,bash,subs="attributes+"]
+----
+# use `kubectl get sonataflowplatform` to get the SonataFlowPlatform name
+kubectl patch sonataflowplatform <name> --patch 'spec:\n  devMode:\n    baseImage: <your new image full name with tag>' -n <your_namespace>
+----
+
+From now on, every deployment in the development profile will use this image to run the workflow.
+
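+If you prefer editing the resource instead of patching it, the relevant field is `spec.devMode.baseImage`. A minimal sketch, with the image name as a placeholder:
+
+.Example of a SonataFlowPlatform using a custom dev mode image
+[source,yaml,subs="attributes+"]
+----
+apiVersion: sonataflow.org/v1alpha08
+kind: SonataFlowPlatform
+metadata:
+  name: sonataflow-platform
+spec:
+  devMode:
+    # Placeholder: replace with the image used to run workflows in the development profile
+    baseImage: quay.io/acme/my-custom-swf-devmode:latest
+----
+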
+[IMPORTANT]
+====
+The default image was created to run a Quarkus Java application in dev mode. You can replace this image with another one as long as it follows the same concept. One way of doing this is to use the default image as the base for your custom one.
+====
[[troubleshooting]]
-== Troubleshooting the Workflow Service
+== Troubleshooting the Workflow
+
+As you make changes to your workflow during development, it's likely that you will need to troubleshoot it when something goes wrong.
-Since during development you are iterating over the deployed workflow service, it's likely that you will need to troubleshoot the application if something goes wrong.
+To ensure the Workflow is running in a healthy state, the operator deploys its Pod with health check probes.
+If the changes you make to your Workflow cause the health checks to fail, the Pod executing the Workflow will stop responding.
-To ensure the workflow is running in a healthy state, the operator deploys the application pod with health checks probes.
-So if your changes impact the application somehow, the pod will stop responding.
+The following will help you discover the reason for any failure during development.
[[basic-troubleshooting]]
=== Basic Troubleshooting
-1. Analyze the workflow status with:
+1. Analyze the Workflow status with:
+
-.Get the workflow status conditions
+.Get the Workflow status conditions
[source,shell,subs="attributes+"]
----
kubectl get workflow -o jsonpath={.status.conditions} | jq .
@@ -212,24 +256,24 @@ It can give you a clue about what might be happening. See xref:cloud/operator/wo
+
2. Fetch the logs and look for `ERROR` messages:
+
-.Watch the application logs
+.Watch the workflow logs
[source,shell,subs="attributes+"]
----
kubectl logs deployment/ -f
----
+
-If you are looking for opening an issue or ask in {product_name} communication channels, this logging information is always useful for the person who will try to help you.
+If you decide to open an issue or ask for help in {product_name} communication channels, this logging information is always useful for the person who will try to help you.
=== Possible Failure Scenarios
==== Feature Not Yet Supported
The {operator_name} is under active development. Sometimes a feature might not be available yet. Please see xref:cloud/operator/known-issues.adoc[] for a comprehensive list of available features.
-If you identify you're refering to a feature not yet available, please file a new issue so we can prioritize it for you or ask in {product_name} communication channels.
+If you identify that you are attempting to use a feature that is not available yet, you can file a new issue, so we can prioritize it. Alternatively, you can ask in {product_name} communication channels.
-==== Wrong Application Configuration
-A wrong configuration, or lack of one might impact your workflow to run correctly.
-The operator deploys a `ConfigMap` that holds the application properties for the workflow.
+==== Wrong Workflow Configuration
+A wrong configuration (or lack of one) might prevent your Workflow from running correctly.
+The operator deploys a `ConfigMap` that holds the properties for the Workflow.
.Get the properties ConfigMap
[source,shell,subs="attributes+"]
@@ -237,28 +281,28 @@ The operator deploys a `ConfigMap` that holds the application properties for the
kubectl get cm -props
----
-The `ConfigMap` name pattern is the workflow name followed by `-props`.
+The `ConfigMap` name pattern is the Workflow name followed by `-props`.
-Make sure that the configuration is correct and you're not lacking any required properties for a given feature to work.
-If so, you can make your changes to the configuration as you normally would to any `ConfigMap`.
+Make sure that the configuration is correct and you're not missing any required properties for a given feature to work.
+You can make any changes to the configuration by simply editing the content of the `ConfigMap`.
-The operator ensures that these properties are applied to the application.
+Once you have updated the configuration in the `ConfigMap`, the operator ensures that these properties are applied to the workflow.
See xref:cloud/operator/configuring-workflows.adoc[] for more information.
==== Wrong Workflow Definition
-The {operator_name} validates the workflow definition at the moment you create or edit the YAML file, avoiding to persist a workflow in an invalid state.
-Although, the operator is under active development, so errors during the validation might occur.
+The {operator_name} validates the Workflow definition at the moment you create or edit the YAML file, preventing you from creating an invalid workflow.
+As the operator is under active development, errors during the validation might occur.
-In this case, you might have to make a few modifications to the workflow definition to fix any structural error.
+In the case where validation has not prevented your error, you might have to make a few modifications to the Workflow definition to fix any problems.
-You can identify such problems by looking at the deployed workflow application logs as explained <>.
+You can identify such problems by looking at the deployed Workflow logs as explained <>.
-If you found an issue that a cause is not listed in this section, please link:{docs_issues_url}[let us know].
+If you find an issue whose cause is not listed in this section, please link:{docs_issues_url}[let us know].
== Additional resources
-* xref:cloud/operator/known-issues.adoc[]
+* xref:cloud/operator/referencing-resource-files.adoc[]
* xref:cloud/operator/build-and-deploy-workflows.adoc[]
include::../../../pages/_common-content/report-issue.adoc[]
diff --git a/modules/serverless-logic/pages/cloud/operator/install-serverless-operator.adoc b/modules/serverless-logic/pages/cloud/operator/install-serverless-operator.adoc
index e68744cb..c73051cf 100644
--- a/modules/serverless-logic/pages/cloud/operator/install-serverless-operator.adoc
+++ b/modules/serverless-logic/pages/cloud/operator/install-serverless-operator.adoc
@@ -2,135 +2,42 @@
:compat-mode!:
// Metadata:
:description: Install the operator on Kubernetes clusters
-:keywords: kogito, workflow, serverless, operator, kubernetes, minikube, openshift, containers
+:keywords: kogito, sonataflow, workflow, serverless, operator, kubernetes, minikube, openshift, containers
+// links
-This guide describes how to install the {operator_name} in a Kubernetes cluster. The operator is in an xref:/cloud/operator/known-issues.adoc[early development stage] (community only) and has been tested on Kubernetes 1.22+, and link:{minikube_url}[Minikube].
+:openshift_operator_install_url: https://docs.openshift.com/container-platform/4.13/operators/admin/olm-adding-operators-to-cluster.html
+:openshift_operator_uninstall_url: https://docs.openshift.com/container-platform/4.13/operators/admin/olm-deleting-operators-from-cluster.html
+:kubernetes_operator_install_url: https://operatorhub.io/how-to-install-an-operator
+:kubernetes_operator_uninstall_url: https://olm.operatorframework.io/docs/tasks/uninstall-operator/
+:operatorhub_url: https://operatorhub.io/
-.Prerequisites
-* A Kubernetes cluster with admin privileges. Alternatively, you can use Minikube or KIND.
-* `kubectl` command-line tool is installed. Otherwise, Minikube provides it.
-
-== Prepare a Minikube instance
-
-[WARNING]
-====
-You can safely skip this section if you're not using Minikube.
-====
+This guide describes how to install the {operator_name} in a Kubernetes or OpenShift cluster. The operator is in an xref:cloud/operator/known-issues.adoc[early development stage] (community only) and has been tested on OpenShift 4.11+, Kubernetes 1.22+, and link:{minikube_url}[Minikube].
.Prerequisites
-* A machine with at least 8GB memory and a link:https://en.wikipedia.org/wiki/Multi-core_processor[CPU with 8 cores]
-* Docker or Podman installed
-
-Run the following command to create a new instance capable of installing the operator and deploy workflows:
-
-[source,shell,subs="attributes+"]
-----
-minikube start --cpus 4 --memory 4096 --addons registry --addons metrics-server --insecure-registry "10.0.0.0/24" --insecure-registry "localhost:5000"
-----
-
-[NOTE]
-====
-To speed up the build time, you can increase CPUs and memory options so that your minikube instance will have more resources. For example, use `--cpus 12 --memory 16384`. In order to work, you will have to recreate your instance.
-====
-
-If it does not work with the default driver, also known as `docker`, you can try to start with the `podman` driver as follows:
-
-.Start minikube with podman driver
-[source,shell,subs="attributes+"]
-----
-minikube start [...] --driver podman
-----
-
-== Install the {product_name} Operator
-
-In order to have an up-and-running instance of the {product_name} Operator you can use the following command:
-
-// TODO: replace this URI with the GitHub's artifact release instead.
-.Install {product_name} Operator on Kubernetes
-[source,shell,subs="attributes+"]
-----
-kubectl create -f https://raw.githubusercontent.com/kiegroup/kogito-serverless-operator/{operator_version}/operator.yaml
-----
-
-You can follow the deployment of the {product_name} Operator:
-
-.Watch the {product_name} Operator pod
-[source,shell,subs="attributes+"]
-----
-kubectl get pod -n kogito-serverless-operator-system --watch
-----
-
-A successful installation should have an output like this:
-
-.Successful Installation Output
-[source]
-----
-NAME READY STATUS RESTARTS AGE
-kogito-serverless-operator-controller-manager-948547ffd-sr2j2 0/2 ContainerCreating 0 6s
-kogito-serverless-operator-controller-manager-948547ffd-sr2j2 1/2 Running 0 7s
-kogito-serverless-operator-controller-manager-948547ffd-sr2j2 2/2 Running 0 20s
-----
-
-You can also follow the operator’s log:
-
-.Watch the {product_name} Operator pod logs
-[source,shell,subs="attributes+"]
-----
-kubectl logs deployment/kogito-serverless-operator-controller-manager -n kogito-serverless-operator-system -f
-----
-
-Once the operator is running, it will watch for new custom resources (CR) so that you can prepare your environment to be ready to create a new {product_name} application based on the definitions you will send to the operator.
-
-To check if the definitions are correclty installed, try running:
-
-.Check if the CRDs are correctly installed
-[source,shell,subs="attributes+"]
-----
-kubectl get crds | grep kogito
-kogitoserverlessbuilds.sw.kogito.kie.org 2023-03-08T18:31:15Z
-kogitoserverlessplatforms.sw.kogito.kie.org 2023-03-08T18:31:15Z
-kogitoserverlessworkflows.sw.kogito.kie.org 2023-03-08T18:31:15Z
-----
+* A Kubernetes or OpenShift cluster with admin privileges. Alternatively, you can use Minikube or KIND.
+* `kubectl` command-line tool is installed. Otherwise, Minikube provides it.
-== Uninstall the Operator
-// TODO: this is super verbose for now because we don't have OLM/OperatorHub. This procedure should be replaced as soon as we have them. Or it should be renamed to "Uninstalling snapshot/local versions", so crazy users that don't rely on operatorhub/olm can also install/uninstall their instances.
+== {product_name} Operator OpenShift installation
-To uninstall the {operator_name}, first you should remove all the object instances managed by it. Then, you can delete every object created during the installation.
+To install the operator on OpenShift, refer to "link:{openshift_operator_install_url}[Adding Operators to a cluster]" in the OpenShift documentation.
-To delete every object instance managed by the workflow in your cluster, you can run these series of commands:
+When searching for the operator in the *Filter by keyword* field, use the word `{operator_openshift_keyword}`. If you're installing from the CLI, the operator's catalog name is `{operator_openshift_catalog}`.
-.Delete every {product_name} object instances
-[source,shell,subs="attributes+"]
-----
-kubectl delete --all workflow --all-namespaces
-kubectl delete --all kogitoserverlessbuild --all-namespaces
-kubectl delete --all kogitoserverlessplatform --all-namespaces
-----
+=== Uninstall
-Alternatively, if you created everything under the same namespace, deleting the given namespace has the same outcome.
+To remove the operator on OpenShift, refer to "link:{openshift_operator_uninstall_url}[Deleting Operators from a cluster]" in the OpenShift documentation.
-To uninstall the correct version of the operator, first you must get the current version by running:
+== {product_name} Operator Kubernetes installation
-.Getting the operator version
-[source,shell,subs="attributes+"]
-----
-kubectl get deployment kogito-serverless-operator-controller-manager -n kogito-serverless-operator-system -o jsonpath="{.spec.template.spec.containers[?(@.name=='manager')].image}"
+To install the operator on Kubernetes, refer to "link:{kubernetes_operator_install_url}[How to install an Operator from OperatorHub.io]" in the OperatorHub documentation.
-quay.io/kiegroup/kogito-serverless-operator-nightly:1.34.0
-----
+When link:{operatorhub_url}[searching for the operator in the *Search OperatorHub* field], use the word `{operator_k8s_keyword}`.
-The operator manager image reflects the current operator's version. Replace the major and minor version names in the command below. For example, if the image version is `1.34.0` use `1.34` in the placeholder:
+=== Uninstall
-.Uninstalling the operator
-[source,shell,subs="attributes+"]
-----
-kubectl delete -f https://raw.githubusercontent.com/kiegroup/kogito-serverless-operator/.x/operator.yaml
-----
+To remove the operator on Kubernetes, follow "link:{kubernetes_operator_uninstall_url}[Uninstall your operator]" in the OLM documentation.
-[TIP]
-====
-If you're running a snapshot version, use this URL instead `https://raw.githubusercontent.com/kiegroup/kogito-serverless-operator/main/operator.yaml`.
-====
+When searching for the subscription to remove, use the word `{operator_k8s_subscription}`.
== Additional resources
diff --git a/modules/serverless-logic/pages/cloud/operator/known-issues.adoc b/modules/serverless-logic/pages/cloud/operator/known-issues.adoc
index c33a1b47..d8f0e19b 100644
--- a/modules/serverless-logic/pages/cloud/operator/known-issues.adoc
+++ b/modules/serverless-logic/pages/cloud/operator/known-issues.adoc
@@ -2,67 +2,31 @@
:compat-mode!:
// Metadata:
:description: Known issues, features, and limitations of the operator
-:keywords: kogito, workflow, serverless, operator, kubernetes, minikube, roadmap
-:rest_example_url:
+:keywords: kogito, sonataflow, workflow, serverless, operator, kubernetes, minikube, roadmap
The link:{kogito_serverless_operator_url}[{operator_name}] is currently in Alpha version and is under active development.
-== Supported Features
-
-* Functions
- - xref:core/custom-functions-support.adoc#con-func-sysout[Sysout]
- - link:{kogito_sw_examples_url}/serverless-workflow-functions-quarkus/src/main/resources/restfunctions.sw.json[Rest Custom Functions]
- - xref:core/understanding-jq-expressions.adoc[Expression functions]
-* Events
- - xref:eventing/consume-produce-events-with-knative-eventing.adoc[Knative Eventing integration]. Every Knative Eventing object (brokers, sources, sinks) must be manually configured.
-* States
- - Switch including dataConditions
- - Inject including data with a transition
- - States with Operations including Actions containing functionRef with arguments
-* ExpressionsLang (jq or jsonpath)
-* xref:cloud/operator/configuring-workflows.adoc[Configuring the workflow application in development profile]
-
-== Not Supported Features
-
-Every other feature from the xref:getting-started/cncf-serverless-workflow-specification-support.adoc[CNCF Serverless Workflow Specification] not listed in the section above, is not supported or tested at the moment.
-
-Any feature requiring external file configuration such as OpenAPI or Camel Routes is not supported at the moment.
-
-== Known Bugs
-
-- link:https://issues.redhat.com/browse/KOGITO-8805[Workflow Dev Profile Builder Image lacking permissions to write on mvn dir on OpenShift]
+// == Known Bugs
== Roadmap
-The following issues is currently being prioritized.
+The following issues are currently being prioritized:
=== CNCF Specification v0.8 Alignment
-- link:https://issues.redhat.com/browse/KOGITO-8452[Alignment with Serverless Workflow v0.8 model]
- link:https://issues.redhat.com/browse/KOGITO-7840[Implement admission webhooks for workflow validation]
-=== Workflow Development Profile
-
-- link:https://issues.redhat.com/browse/KOGITO-8675[Make Workflow Dev Profile builder image configurable via Platform]
-- link:https://issues.redhat.com/browse/KOGITO-8517[Support external resources configuration for Workflow CR in devmode]
-- link:https://issues.redhat.com/browse/KOGITO-8643[Expose Workflow Dev Profile application endpoint externally]
-- link:https://issues.redhat.com/browse/KOGITO-8650[Ensure that Data Index is embedded in the workflow dev profile]
-- link:https://issues.redhat.com/browse/KOGITO-8651[Ensure that the Management Console is embedded in the workflow dev profile]
-- link:https://issues.redhat.com/browse/KOGITO-8866[Ensure that Jobs Service is embedded in the workflow dev profile]
+// === Workflow Development Profile
=== Workflow Productization Profile
-- link:https://issues.redhat.com/browse/KOGITO-8522[Map an external ConfigMap for application properties on Serverless Workflow services]
- link:https://issues.redhat.com/browse/KOGITO-7755[Manage the Functions included in a Workflow with Operator]
- link:https://issues.redhat.com/browse/KOGITO-8524[Enable toggle Workflow CR from devmode to production mode and vice-versa]
- link:https://issues.redhat.com/browse/KOGITO-8792[Review build failures and signal the reasoning in the Events API]
-- link:https://issues.redhat.com/browse/KOGITO-8794[Handle deployment failures in prod profile]
- link:https://issues.redhat.com/browse/KOGITO-8806[Evaluate internal registry integration on OpenShift, Kubernetes and Minikube]
=== Knative Integration
-- link:https://issues.redhat.com/browse/KOGITO-8648[Implement the Knative Addressable interface in dev profile]
-- link:https://issues.redhat.com/browse/KOGITO-8409[Add support to Cloud Events to Knative custom function]
- link:https://issues.redhat.com/browse/KOGITO-8410[Add support to GET method to Knative custom function]
-- link:https://issues.redhat.com/browse/KOGITO-8766[Adjust the Knative Function definition to use the same interface as defined by the extension]
- link:https://issues.redhat.com/browse/KOGITO-8646[Review the need of the knative/kubernetes addons inside the dev profile builder image]
+- link:https://issues.redhat.com/browse/KOGITO-9314[Knative eventing addon should not provide defaults for incoming/outgoing]
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/cloud/operator/referencing-resource-files.adoc b/modules/serverless-logic/pages/cloud/operator/referencing-resource-files.adoc
new file mode 100644
index 00000000..bdfcb483
--- /dev/null
+++ b/modules/serverless-logic/pages/cloud/operator/referencing-resource-files.adoc
@@ -0,0 +1,155 @@
+= Referencing Additional Files in the Workflow
+:compat-mode!:
+// Metadata:
+:description: Referencing external additional files in a workflow definition
+:keywords: sonataflow, workflow, serverless, operator, kubernetes, minikube, configmap
+// links
+:kubernetes_create_configmap_url: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#create-a-configmap
+:kubernetes_configmap_url: https://kubernetes.io/docs/concepts/configuration/configmap/
+
+This document describes how to reference additional files in the `SonataFlow` Custom Resource (CR).
+
+Most of the time, a workflow definition requires not only the flow definition itself, but also OpenAPI or AsyncAPI specification descriptors, schemas, subflow definitions, and so on.
+For example, when doing xref:service-orchestration/orchestration-of-openapi-based-services.adoc[service orchestration using OpenAPI descriptors], you need to tell the workflow where to find these descriptors in your context.
+
+If these files are not in a remote location that can be accessed via the HTTP protocol, you must describe in the `SonataFlow` CR where to find them within the cluster. This is done via link:{kubernetes_configmap_url}[`ConfigMaps`].
+
+== Creating ConfigMaps with Workflow Additional Files
+
+.Prerequisites
+* You have the files available in your file system
+* You have permissions to create `ConfigMaps` in the target namespace
+
+Given that you already have the file you want to add to your workflow definition, you link:{kubernetes_create_configmap_url}[can create a `ConfigMap`] as you normally would with the contents of the file.
+
+For example, given the following workflow:
+
+.Example of a workflow referencing additional files
+[source,yaml,subs="attributes+"]
+----
+apiVersion: sonataflow.org/v1alpha08
+kind: SonataFlow
+metadata:
+ name: service
+ annotations:
+ sonataflow.org/description: Hello Service!
+ sonataflow.org/version: 0.0.1
+ sonataflow.org/profile: dev
+spec:
+ flow:
+ start: Service
+ dataInputSchema: specs/workflow-service-schema.json <1>
+ functions:
+ - name: isWinner
+ operation: specs/workflow-service-openapi.json#isWinner <2>
+ type: rest
+ states:
+ - name: Service
+ type: operation
+ actions:
+ - name: CallService
+ functionRef:
+ refName: isWinner
+ end: true
+----
+
+<1> The workflow defines an input schema
+<2> The workflow requires an OpenAPI specification file to make a REST invocation
+
+For this example, you have two options: you can either create two `ConfigMaps` to have a clear separation of concerns, or a single one containing both files.
+
+From the operator perspective, it won't make any difference since both files will be available for the workflow application at runtime.
+
+To make it simple, you can create only one `ConfigMap`. Given that the files are available in the current directory:
+
+.Creating a ConfigMap from the current directory
+[source,bash,subs="attributes+"]
+----
+kubectl create configmap service-files --from-file=$(pwd) -n <namespace>
+----
+
+[IMPORTANT]
+====
+Replace `<namespace>` with the namespace where you are going to deploy the workflow. The operator won't access `ConfigMaps` in other namespaces.
+====
+
+You should have a `ConfigMap` with two data entries similar to this one:
+
+.Example of a ConfigMap containing the data for the workflow
+[source,yaml,subs="attributes+"]
+----
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: service-files
+data:
+ workflow-service-schema.json: # data was removed to save space
+ workflow-service-openapi.json: # data was removed to save space
+----
+
+Now you can reference this `ConfigMap` in your `SonataFlow` CR:
+
+.SonataFlow CR referencing a ConfigMap resource
+[source,yaml,subs="attributes+"]
+----
+apiVersion: sonataflow.org/v1alpha08
+kind: SonataFlow
+metadata:
+ name: service
+ annotations:
+ sonataflow.org/description: Hello Service!
+ sonataflow.org/version: 0.0.1
+ sonataflow.org/profile: dev
+spec:
+ resources: <1>
+ configMaps:
+ - configMap:
+ name: service-files <2>
+ workflowPath: specs <3>
+ flow:
+ start: Service
+ dataInputSchema: specs/workflow-service-schema.json
+ functions:
+ - name: isWinner
+ operation: specs/workflow-service-openapi.json#isWinner
+ type: rest
+ states:
+ - name: Service
+ type: operation
+ actions:
+ - name: CallService
+ functionRef:
+ refName: isWinner
+ end: true
+----
+
+<1> The new `.spec.resources` attribute, where you bind the `ConfigMap` to the `SonataFlow` CR
+<2> The name of the `ConfigMap` in the same namespace
+<3> The path from which the workflow definition references these files
+
+Note that the `workflowPath` is `specs`. This is the path where you want to reference the files within the `ConfigMap` in the workflow definition.
+
+[IMPORTANT]
+====
+Always create your `ConfigMaps` before the `SonataFlow` CR, since not having the files available during startup might break the deployment.
+====
+
+Any files you have to map to the flow definition can be added to the `SonataFlow` CR using this procedure.
+
+== Creating a Static Service Registry
+
+The `ConfigMaps` containing workflow files are not tied to a particular `SonataFlow` instance, only referenced by it. This means that the operator won't edit or delete them if the `SonataFlow` CR is updated or deleted. You have total control of the `ConfigMap` instances.
+
+You can organize your `ConfigMaps` in a way that other workflows could reuse them. In other words, one `ConfigMap` can be mapped to many `SonataFlow` Custom Resources.
+
+For example, if you have many xref:service-orchestration/orchestration-of-openapi-based-services.adoc[OpenAPI] or xref:eventing/orchestration-of-asyncapi-based-services.adoc[AsyncAPI] specification files that your workflows can access, you can group them by domain. This way you create a static Service Registry using `ConfigMaps`. Other developers within your company can reuse the same specification files when designing workflows.
+
+The same applies to data input and output schemas, subflow definitions, and so on.
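+
+For example, a minimal sketch of grouping the OpenAPI descriptors of a hypothetical `payments` domain into one reusable `ConfigMap` (the directory and names are illustrative):
+
+.Creating a domain-scoped ConfigMap of specification files
+[source,bash]
+----
+kubectl create configmap payments-domain-specs --from-file=payments-specs/ -n <namespace>
+----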
+
+== Additional resources
+
+* xref:cloud/operator/known-issues.adoc[]
+* xref:cloud/operator/developing-workflows.adoc[]
+* xref:cloud/operator/configuring-workflows.adoc[]
+
+include::../../../pages/_common-content/report-issue.adoc[]
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/cloud/operator/workflow-status-conditions.adoc b/modules/serverless-logic/pages/cloud/operator/workflow-status-conditions.adoc
index cbe0ac22..05df329c 100644
--- a/modules/serverless-logic/pages/cloud/operator/workflow-status-conditions.adoc
+++ b/modules/serverless-logic/pages/cloud/operator/workflow-status-conditions.adoc
@@ -1,43 +1,51 @@
= Understanding Workflow Services Status Conditions
:compat-mode!:
// Metadata:
-:description: Description of the status and conditions of a workflow deployed by the operator
-:keywords: kogito, workflow, serverless, operator, kubernetes, minikube, status, conditions
+:description: Description of the status and conditions of a Workflow deployed by the operator
+:keywords: sonataflow, workflow, serverless, operator, kubernetes, minikube, status, conditions
-This document describes the status and conditions of the `KogitoServerlessWorkflow` object deployed by the {operator_name}.
+This document describes the Status and Conditions of a `SonataFlow` object managed by the {operator_name}.
-link:https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties[Kubernetes Status] is an important property to observe in order to understand what is currently happening with the object. It can also help you troubleshoot or integrate with other objects in the cluster.
+link:https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties[Kubernetes Status] is an important property to observe in order to understand what is currently happening with the object. It can also help you troubleshoot or integrate with other objects in the cluster.
+
+You can inspect the Status of any Workflow object using the following command:
+
+.Checking the Workflow Status
+[source,bash,subs="attributes+"]
+----
+kubectl get workflow <workflow_name> -n <namespace> -o jsonpath={.status}
+----
== General Status
-The table below lists the general structure of a workflow status:
+The table below lists the general structure of a Workflow status:
-.Description of KogitoServerlessWorkflow status object
+.Description of SonataFlow Status object
[cols="1,2"]
|===
|Status | Description
| `ObservedGeneration`
-| Last object generation observed by the status
+| Last object generation observed by the operator
| `Conditions`
-| Current observed workflow conditions
+| Current observed Workflow Conditions
| `Address`
-| External or internal workflow access endpoint. Implements the link:https://github.com/knative/specs/blob/main/specs/eventing/overview.md#addressable[Knative Addressable Interface]
+| External or internal Workflow access endpoint. Implements the link:https://github.com/knative/specs/blob/main/specs/eventing/overview.md#addressable[Knative Addressable Interface]
| `RecoverFailureAttempts`
| How many attempts the operator tried to recover from a failure
|===
-The `Conditions` property might vary depending on the workflow profile. The next sections describe the current implementation.
+The `Conditions` property might vary depending on the Workflow profile. The next sections describe the current implementation.
== Development Profile Conditions
-When you deploy a workflow with the xref:cloud/operator/developing-workflows.adoc[development profile], the operator deploys a ready-to-use container with a running workflow instance.
+When you deploy a Workflow with the xref:cloud/operator/developing-workflows.adoc[development profile], the operator deploys a ready-to-use container with a running Workflow instance.
-The following table lists the possible conditions.
+The following table lists the possible Conditions.
.Conditions Scenarios in Development
[cols="0,0,1,2"]
@@ -52,35 +60,35 @@ The following table lists the possible conditions.
| Running
| True
|
-| The workflow is running and in healthy state
+| The Workflow is running and in healthy state
| Running
| False
| WaitingForDeployment
-| The workflow is waiting for the underlying deployment object to have a minimum availability
+| The Workflow is waiting for the underlying deployment object to have a minimum availability
| Running
| False
| DeploymentFailure
-| There was a problem with the underlying deployment object. Check the message in this condition and the workflow pod logs for more info
+| There was a problem with the underlying deployment object. Check the message in this Condition and the Workflow Pod logs for more info
| Running
| False
| DeploymentIsUnavailable
-| The underlying deployment object doesn't have the minimum availability for this workflow. Check the message in this condition and the workflow pod logs for more info
+| The underlying deployment object doesn't have the minimum availability for this Workflow. Check the message in this condition and the Workflow Pod logs for more info
| Running
| False
| AttemptToRedeployFailed
-| If the workflow deployment is not available, the operator will try to rollout the deployment three times before entering in this stage. Check the message in this condition and the workflow pod logs for more info
+| If the Workflow Deployment is not available, the operator will try to rollout the Deployment three times before entering this stage. Check the message in this Condition and the Workflow Pod logs for more info
|===
-In normal conditions, the workflow will transition from `Running`, `WaitingForDeployment` condition to `Running`. In case something wrong happens, consult the section xref:cloud/operator/developing-workflows.adoc#troubleshooting[Workflow Troubleshooting in Development].
+Under normal conditions, the Workflow transitions from the `Running` Condition with reason `WaitingForDeployment` to `Running`. If something goes wrong, consult the section xref:cloud/operator/developing-workflows.adoc#troubleshooting[Workflow Troubleshooting in Development].
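+
+For example, a minimal way to block until the Workflow reports a healthy state is to wait for the `Running` Condition; the timeout value is illustrative:
+
+.Waiting for the Running Condition
+[source,bash,subs="attributes+"]
+----
+kubectl wait workflow/<workflow_name> --for=condition=Running --timeout=2m -n <namespace>
+----
+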
== Production Profile Conditions
-Deploying the workflow in xref:cloud/operator/build-and-deploy-workflows.adoc[Production profile] makes the operator to build an immutable image for the workflow service. The build step can be followed by observing the workflow conditions.
+Deploying the Workflow in xref:cloud/operator/build-and-deploy-workflows.adoc[Production profile] makes the operator build an immutable image for the Workflow application. The progress of the immutable image build can be followed by observing the Workflow Conditions.
.Condition Scenarios in Production
[cols="0,0,1,2"]
@@ -105,41 +113,41 @@ Deploying the workflow in xref:cloud/operator/build-and-deploy-workflows.adoc[Pr
| Running
| True
|
-| The workflow is running and in healthy state
+| The Workflow is running and in healthy state
| Running
| False
| WaitingForPlatform
-| The workflow can't have a pod running until a `KogitoServerlessPlatform` is ready
+| The Workflow can't have a Pod running until a `SonataFlowPlatform` is ready
| Running
| False
| WaitingForBuildReason
-| The workflow is waiting for the build to finish to start running
+| The Workflow is waiting for the build to finish to start running
| Running
| False
| WaitingForDeployment
-| The workflow is waiting for the underlying deployment object to have a minimum availability
+| The Workflow is waiting for the underlying Deployment object to have a minimum availability
| Running
| False
| DeploymentFailure
-| There was a problem with the underlying deployment object. Check the message in this condition and the workflow pod logs for more info
+| There was a problem with the underlying Deployment object. Check the message in this Condition and the Workflow Pod logs for more info
| Running
| False
| DeploymentIsUnavailable
-| The underlying deployment object doesn't have the minimum availability for this workflow. Check the message in this condition and the workflow pod logs for more info
+| The underlying Deployment object doesn't have the minimum availability for this Workflow. Check the message in this Condition and the Workflow Pod logs for more info
| Running
| False
| AttemptToRedeployFailed
-| If the workflow deployment is not available, the operator will try to roll out the deployment three times before entering this stage. Check the message in this condition and the workflow pod logs for more info
+| If the Workflow Deployment is not available, the operator will try to roll out the Deployment three times before entering this stage. Check the message in this Condition and the Workflow Pod logs for more info
|===
-The normal conditions for the workflow resource are to place a `KogitoServerlessBuild` to run and wait for it to finish. As soon as the image is ready, the workflow transitions to the deployment phase, which is to provision a new workflow service pod to run with the built image.
+When building an image for a Workflow resource, a `SonataFlowBuild` is first created and the operator waits for the associated build to finish. Once the build is complete, the Workflow transitions to the deployment phase, which provisions a new Workflow service Pod to run with the built image.
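+
+As a sketch, you can follow the build progress by inspecting the `SonataFlowBuild` object, assuming it shares the Workflow's name:
+
+.Checking the build status
+[source,bash,subs="attributes+"]
+----
+kubectl get sonataflowbuild <workflow_name> -n <namespace> -o jsonpath={.status}
+----
+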
== Additional resources
diff --git a/modules/serverless-logic/pages/cloud/quarkus/build-workflow-image-with-quarkus-cli.adoc b/modules/serverless-logic/pages/cloud/quarkus/build-workflow-image-with-quarkus-cli.adoc
index 3e01cc2c..8274f0eb 100644
--- a/modules/serverless-logic/pages/cloud/quarkus/build-workflow-image-with-quarkus-cli.adoc
+++ b/modules/serverless-logic/pages/cloud/quarkus/build-workflow-image-with-quarkus-cli.adoc
@@ -18,7 +18,7 @@ include::../../../pages/_common-content/getting-started-requirement.adoc[]
Quarkus provides a few extensions to build container images, such as `Jib`, `docker`, `s2i`, and `buildpacks`. For more information about the Quarkus extensions, see the link:{quarkus_container_images_url}[Quarkus documentation].
-The examples in this document assume that you have the Quarkus tooling installed. For more information about the tooling, see xref:serverless-logic:getting-started/getting-familiar-with-our-tooling.adoc[Getting familiar with {product_name} tooling].
+The examples in this document assume that you have the Quarkus tooling installed. For more information about the tooling, see xref:getting-started/getting-familiar-with-our-tooling.adoc[Getting familiar with {product_name} tooling].
[[proc-using-example-application]]
== Using an example application
@@ -33,16 +33,23 @@ NOTE: You can skip the following procedure if you already have a workflow applic
.Clone an example application
[source,shell,subs="attributes+"]
----
-git clone --branch main {kogito_sw_examples_git_repo_url}
+git clone --branch main {kogito_sw_examples_git_repo_url}
cd kogito-examples/serverless-workflow-examples/serverless-workflow-greeting-quarkus
----
-. To run the example application, follow the instructions in xref:serverless-logic:getting-started/create-your-first-workflow-service.adoc[Creating your first workflow service].
+. To run the example application, follow the instructions in xref:getting-started/create-your-first-workflow-service.adoc[Creating your first workflow service].
. Install the Quarkus command line interface (CLI). For more information, see link:{quarkus_cli_url}[Installing the Quarkus CLI].
. Add the required Quarkus extension using Quarkus CLI:
+
--
.Add the Jib extension
+
+[NOTE]
+====
+The `kogito-examples` repository already has this extension added by default; it can be activated with the `container` Maven profile.
+====
+
+To add the extension to your Serverless Workflow application, run:
[source,shell]
----
quarkus extension add 'container-image-jib'
@@ -67,7 +74,7 @@ After installing the required tooling, you can start building your workflow appl
.Prerequisites
* You have created a Quarkus project.
-* Quarkus CLI is installed.
+* Quarkus CLI is installed.
For more information about installing the Quarkus CLI, see link:{quarkus_cli_url}#installing-the-cli[Installing the Quarkus CLI].
.Procedure
@@ -85,7 +92,7 @@ The previous command builds your image with name: `\{system_username\}/\{project
Optionally, you can set the following properties to specify the image:
-* `quarkus.container-image.registry`: To define the registry address of the image, such as `quay.io`.
+* `quarkus.container-image.registry`: To define the registry address of the image, such as `quay.io`. When deploying on OpenShift, use the registry provided by the cluster.
* `quarkus.container-image.group`: To define the registry namespace of the image. For example, `context/namespace` or in case of Kubernetes or OpenShift `namespace/project`.
* `quarkus.container-image.name`: To override the image name. By default, this property uses artifact ID.
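+
+The following is a minimal sketch that combines these properties in a single build command; the registry, group, and name values are placeholders that you should adapt:
+
+.Example of specifying the image coordinates during the build
+[source,shell]
+----
+quarkus build -Dquarkus.container-image.build=true \
+    -Dquarkus.container-image.registry=quay.io \
+    -Dquarkus.container-image.group=<your-namespace> \
+    -Dquarkus.container-image.name=serverless-workflow-greeting-quarkus
+----
+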
@@ -104,7 +111,6 @@ quarkus build -Dquarkus.container-image.build=true \
The added Jib extension caches the `target/lib` directory. Based on the size of your project, Jib speeds up the rebuilds.
====
--
-
. You can also push your workflow application to the defined registry. You need to set the `quarkus.container-image.push` property value to `true` (default value is `false`).
+
.Example of pushing the built workflow application to a registry
@@ -157,7 +163,7 @@ When it comes to workflows, a small startup footprint is expected, which can be
.Prerequisites
* You have created a Quarkus project.
-* Quarkus CLI is installed.
+* Quarkus CLI is installed.
For more information about installing the Quarkus CLI, see link:{quarkus_cli_url}[Installing the Quarkus CLI].
.Procedure
@@ -262,9 +268,8 @@ Example response::
== Additional resources
-* xref:serverless-logic:cloud/quarkus/deploying-on-minikube.adoc[Deploying your {product_name} application on Minikube]
-//* xref:serverless-logic:cloud/deploying-on-kubernetes-cluster.adoc[Deploying your Serverless Workflow application on Kubernetes] Guide not available in DP1
+* xref:cloud/quarkus/deploying-on-minikube.adoc[Deploying your {product_name} application on Minikube]
* xref:cloud/quarkus/deploying-on-kubernetes.adoc[Deploying your {product_name} application on Kubernetes]
-
+* xref:cloud/quarkus/deploying-on-openshift.adoc[Deploying your {product_name} application on OpenShift]
include::../../../pages/_common-content/report-issue.adoc[]
diff --git a/modules/serverless-logic/pages/cloud/quarkus/build-workflow-images-with-tekton.adoc b/modules/serverless-logic/pages/cloud/quarkus/build-workflow-images-with-tekton.adoc
deleted file mode 100644
index 65f5cd82..00000000
--- a/modules/serverless-logic/pages/cloud/quarkus/build-workflow-images-with-tekton.adoc
+++ /dev/null
@@ -1 +0,0 @@
-//= Building Workflow Images with Tekton Pipelines
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/cloud/quarkus/deploying-on-kubernetes.adoc b/modules/serverless-logic/pages/cloud/quarkus/deploying-on-kubernetes.adoc
index ac2e78e1..32b3bda4 100644
--- a/modules/serverless-logic/pages/cloud/quarkus/deploying-on-kubernetes.adoc
+++ b/modules/serverless-logic/pages/cloud/quarkus/deploying-on-kubernetes.adoc
@@ -1,6 +1,8 @@
-= Deploying your {product_name} application on Kubernetes
+= Deploying your Serverless Workflow application on Kubernetes
+:compat-mode!:
+// Metadata:
:description: Deploying Serverless Application on Kubernetes
-:keywords: kogito, workflow, quarkus, serverless, kn, kubectl
+:keywords: kogito, workflow, quarkus, serverless, kn, kubectl, kubernetes
:table-caption: Data Set
// envs for common content
:environment_prereq: Kubernetes Cluster is available
@@ -8,9 +10,15 @@
:cluster_kind: Kubernetes with Knative is ready
:knative_procedure: <>
:deploy_application: on Kubernetes
-:k8s_registry: quay.io/mynamespace
+:k8s_registry: quay.io
+:default_namespace: kiegroup
:registry: target
-:container_push: Property to whether or not push images
+:container_push: Property that defines whether to push the Container image to the given registry
+:quarkus-k8s-plugin: quarkus-kubernetes
+:container_push_prop: true
+:platform: Kubernetes
+:command_line_tool: kubectl
+:command_line_tool_name: Kubernetes CLI
// links
:knative_kubernetes_url: https://knative.dev/docs/install/
:verify_knative_url: https://knative.dev/docs/serving/knative-kubernetes-services/#before-you-begin
@@ -65,8 +73,7 @@ include::../common/_deploy_workflow_application_requisites.adoc[]
====
By default, Kubernetes does not have any registry installed. You can check with the administrator if a private registry is available. Otherwise, you can publish the Application Container image on the link:{quay_io_url}[Quay.io], or on any other registry of your preference.
-If the registry requires authentication you need to create a Pull Secret with the registry credentials, for more
-information please take a look in this link:{k8s_pull_secret_url}[link].
+If the registry requires authentication, you need to create a Pull Secret with the registry credentials. For more information, see link:{k8s_pull_secret_url}[this guide].
====
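+
+As a sketch, a pull secret for an external registry can be created as follows; all values are placeholders:
+
+.Creating a pull secret for a private registry
+[source,shell]
+----
+kubectl create secret docker-registry <pull-secret-name> \
+    --docker-server=quay.io \
+    --docker-username=<username> \
+    --docker-password=<password> \
+    -n <namespace>
+----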
.Procedure
@@ -74,7 +81,7 @@ information please take a look in this link:{k8s_pull_secret_url}[link].
+
include::../common/_create_namespace_and_deploy_info.adoc[]
-In the following procedures, you can find two examples of deploying your workflow application, including:
+In the following procedures, you can find different approaches to deploy your workflow application, such as:
* <>
* <>
@@ -93,74 +100,7 @@ include::../common/_proc_deploy_sw_kn_cli.adoc[]
// deploy with kubectl
include::../common/_proc_deploy_sw_kubectl.adoc[]
-[[proc-deploy-sw-application-quarkus-cli]]
-=== Deploying your workflow application using Quarkus CLI
+// deploy with quarkus-cli
+include::../common/_proc_deploy_sw_quarkus_cli.adoc[]
-
-.Prerequisites
-* Workflow application is installed.
-* Quarkus CLI is installed. +
-For more information, see link:{quarkus_cli_url}[Building Quarkus Apps with Quarkus command line interface (CLI)].
-
-
-.Procedure
-. Add the Quarkus extensions to deploy the `knative` service
-+
---
-You can add the Kubernetes and the Kogito Knative extensions to your project with the Quarkus CLI:
-
-.Add kubernetes and Kogito knative extensions to the project with Quarkus CLI
-[source,shell]
-----
-quarkus extension add kubernetes
-quarkus extension add kogito-addons-quarkus-knative-eventing
-----
---
-. To deploy your workflow application using Quarkus CLI, set the following system properties in `application.properties` file:
-+
---
-.Required system properties
-[source,properties]
-----
-quarkus.knative.name=greeting-quarkus-cli <1>
-quarkus.kubernetes.deployment-target=knative <2>
-quarkus.kubernetes.deploy=true <3>
-quarkus.container-image.push=true <4>
-quarkus.container-image.group=kiegroup <5>
-quarkus.container-image.registry=quay.io <6>
-quarkus.container-image.tag=1.0-SNAPSHOT <7>
-
-----
-
-<1> Property to set the Knative service name
-<2> Property to set the target deployment type
-<3> Property to set whether or not deploy on an active Kubernetes cluster
-<4> {container_push}
-<5> Property to define which registry group/namespace the built image belongs to
-<6> Property to define to which registry the built image will be pushed to
-<7> Sets the built Container image tag
-
-[IMPORTANT]
-====
-This feature works with Quarkus 2.10.2.Final or later. For more information, see
-link:{quarkus_k8s_deploy_url}[link].
-====
---
-
-.Build and Deploy your workflow application
-[source,shell]
-----
-quarkus build -- -Pcontainer -DskipTests \
- -Dquarkus.container-image.push=true \
- -Dquarkus.container-image.registry=quay.io \
- -Dquarkus.container-image.group=kiegroup \
- -Dquarkus.container-image.tag=1.0-SNAPSHOT
-----
-
-Note that the maven profile activated is named as `container`, which provides the needed system properties to build the
-target container image.
-
-// verify deployed swf
-include::../common/_verify_if_swf_is_deployed.adoc[]
-
-include::../../../pages/_common-content/report-issue.adoc[]
+include::../../../pages/_common-content/report-issue.adoc[]
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/cloud/quarkus/deploying-on-minikube.adoc b/modules/serverless-logic/pages/cloud/quarkus/deploying-on-minikube.adoc
index 36d54cab..f55a7b40 100644
--- a/modules/serverless-logic/pages/cloud/quarkus/deploying-on-minikube.adoc
+++ b/modules/serverless-logic/pages/cloud/quarkus/deploying-on-minikube.adoc
@@ -10,15 +10,20 @@
:cluster_kind: Minikube is installed
:knative_procedure: <>
:deploy_application: using the Minikube registry
-:k8s_registry: dev.local/kogito
+:k8s_registry: dev.local
+:default_namespace: kogito
+:container_push_prop: false
+:platform: Kubernetes
:registry: Minikube's
-:container_push: Property to whether or not push images. When using Minikube's remote Docker daemon to avoid image validation
-
+:container_push: Property that defines whether to push images. Set to false when using Minikube's remote Docker daemon to avoid image validation
+:quarkus-k8s-plugin: quarkus-kubernetes
+:command_line_tool: kubectl
+:command_line_tool_name: Kubernetes CLI
// links
:kn_cli_quickstart_plugin_url: https://knative.dev/docs/install/quickstart-install/#install-the-knative-cli
:knative_on_minikube_step_by_step_url: https://redhat-developer-demos.github.io/knative-tutorial/knative-tutorial/setup/minikube.html
:knative_issue_url: https://github.com/knative/serving/issues/6101
-:quarkus_k8s_deploy_url: https://github.com/quarkusio/quarkus/issues/26385
+
This document describes how to deploy your workflow application using a local Kubernetes cluster, such as link:{minikube_url}[Minikube], along with a procedure to run the Knative platform.
@@ -68,7 +73,7 @@ Once you verify the Minikube and Knative CLI installation, you can install Knati
.Prerequisites
* Minikube is installed.
* Knative CLI is installed. +
-For more information, see link:{kn_cli_url}[Install the Knative CLI].
+For more information, see link:{kn_cli_install_url}[Install the Knative CLI].
.Procedure
. Open a command terminal and install Knative on Minikube.
@@ -164,7 +169,7 @@ For more information, see link:{knative_issue_url}[How to use locally built dock
In that case, use the `-Dquarkus.container-image.registry=some_of_the_values_above` property to enable Knative fetch the container images from Minikube Docker Daemon.
-If you do not use the values, you might need to set the `imagePullPolicy` to `Never` or `IfNotPresent`, otherwise, Minikube pulls the images from a remote registry.
+If you do not use the values, you might need to set the `imagePullPolicy` to `Never` or `IfNotPresent`; otherwise, Minikube pulls the images from a remote registry.
This behavior can be avoided by tagging the image using previously listed domains.
====
--
@@ -200,10 +205,8 @@ Status:
+
include::../common/_create_namespace_and_deploy_info.adoc[]
-In the following procedures, you can find two examples of deploying your workflow application, including:
+In the following procedures, you can find different approaches to deploy your workflow application, such as:
-[[proc-deploy-sw-application-kubectl]]
-=== Deploying your workflow application using `kubectl`
* <>
* <>
* <>
@@ -215,66 +218,11 @@ include::../common/_proc_deploy_sw_kn_cli.adoc[]
// deploy with kubectl
include::../common/_proc_deploy_sw_kubectl.adoc[]
-[[proc-deploy-sw-application-quarkus-cli]]
-=== Deploying your workflow application using Quarkus CLI
-
-.Prerequisites
-* Workflow application is installed.
-* Quarkus CLI is installed. +
-For more information, see link:{quarkus_cli_url}[Building Quarkus Apps with Quarkus command line interface (CLI)].
-
-.Procedure
-. Add the Quarkus extensions to deploy the `knative` service
-+
---
-You can simply add the kubernetes and the Kogito knative extension to your project with the Quarkus CLI:
-
-.Add kubernetes and Kogito knative extensions to the project with Quarkus CLI
-[source,shell]
-----
-quarkus extension add kubernetes
-quarkus extension add kogito-addons-quarkus-knative-eventing
-----
---
-. To deploy your workflow application using Quarkus CLI, set the following system properties in `application.properties` file:
-+
---
-.Required system properties
-[source,properties]
-----
-quarkus.knative.name=greeting-quarkus-cli <1>
-quarkus.kubernetes.deployment-target=knative <2>
-quarkus.kubernetes.deploy=true <3>
-quarkus.container-image.push=false <4>
-----
-
-<1> Property to set the Knative service name
-<2> Property to set the target deployment type
-<3> Property to set whether or not deploy on an active Kubernetes cluster
-<4> {container_push}
-
-[IMPORTANT]
-====
-This functionality works with Quarkus 2.10.2.Final or later. For more information, see
-link:{quarkus_k8s_deploy_url}[link].
-====
---
-
-.Build and Deploy your workflow application
-[source,shell]
-----
-quarkus build -- -Pcontainer -DskipTests \
- -Dquarkus.container-image.push=false \
- -Dquarkus.container-image.registry=quay.io \
- -Dquarkus.container-image.group=kiegroup \
- -Dquarkus.container-image.tag=1.0-SNAPSHOT
-----
+// deploy with quarkus-cli
+include::../common/_proc_deploy_sw_quarkus_cli.adoc[]
-Note that the maven profile activated is named as `container`, which provides the needed system properties to build the
-target container image.
// verify deployed swf
include::../common/_verify_if_swf_is_deployed.adoc[]
-
include::../../../pages/_common-content/report-issue.adoc[]
diff --git a/modules/serverless-logic/pages/cloud/quarkus/deploying-on-openshift.adoc b/modules/serverless-logic/pages/cloud/quarkus/deploying-on-openshift.adoc
new file mode 100644
index 00000000..976e8717
--- /dev/null
+++ b/modules/serverless-logic/pages/cloud/quarkus/deploying-on-openshift.adoc
@@ -0,0 +1,179 @@
+= Deploying your {product_name} application on OpenShift
+:compat-mode!:
+// Metadata:
+:description: Deploying Serverless Application on OpenShift
+:keywords: kogito, workflow, quarkus, serverless, kn, oc, openshift
+:table-caption: Data Set
+// envs for common content
+:registry: OpenShift's
+:cluster_kind: OpenShift with Red Hat OpenShift Serverless is ready
+:k8s_registry: image-registry.openshift-image-registry.svc:5000
+:knative_procedure: link:{ocp_knative_serving_url}[Knative Serving]
+:default_namespace: kogito-serverless
+:command_line_tool: oc
+:command_line_tool_name: OpenShift CLI
+:quarkus-k8s-plugin: quarkus-openshift
+:container_push_prop: true
+:platform: OpenShift
+:container_push: This property should be removed if deploying on OpenShift Clusters
+// links
+:ocp_local_url_install: https://access.redhat.com/documentation/en-us/red_hat_openshift_local/2.17/html/getting_started_guide/installation_gsg
+:ocp_cli_url: https://docs.openshift.com/container-platform/4.12/cli_reference/openshift_cli/getting-started-cli.html#cli-about-cli_cli-developer-commands
+:ocp_swf_install_url: https://docs.openshift.com/serverless/1.29/install/install-serverless-operator.html
+:ocp_expose_registry_url: https://docs.openshift.com/container-platform/4.12/registry/securing-exposing-registry.html
+:knative_istio_issue_url: https://access.redhat.com/solutions/4791871
+
+
+This document describes how to deploy a {product_name} application using an OpenShift cluster, along with a procedure to run the OpenShift Serverless Operator, which is based on Knative.
+
+.Prerequisites
+* Your xref:cloud/quarkus/build-workflow-image-with-quarkus-cli.adoc[{product_name} application] is ready to use.
+* link:{ocp_cli_url}[OpenShift CLI] is installed.
+* link:{ocp_kn_cli_url}[Knative CLI] is installed.
+* xref:testing-and-troubleshooting/kn-plugin-workflow-overview.adoc[Knative CLI workflow plugin] is installed.
+* link:{podman_install_url}[Podman] +
+If you are using link:{docker_install_url}[Docker], you might need to update the commands accordingly.
+* (Optional) link:{quarkus_cli_url}[Quarkus CLI] is installed.
+
+The following steps use link:{ocp_local_url}[OpenShift Local]. However, the steps described here can be used on any OpenShift 4.x version that supports OpenShift Serverless.
+
+[[proc-install-openshift-local]]
+== Installing OpenShift Local
+
+_If you already have an OpenShift cluster available you can skip this section._
+
+Instructions to install OpenShift Local can be found link:{ocp_local_url_install}[here].
+
+Once you have OpenShift Local running, proceed to the next topic.
+
+[IMPORTANT]
+====
+If you are running OpenShift Local on Mac with M1 processors, you might not find the OpenShift Serverless Operator available.
+====
+
+Before proceeding further, make sure that you have access to the OpenShift cluster, that the OpenShift Serverless Operator is properly installed, and that `Knative Serving` is ready for use (see the verification sketch after the following list). For more information on each topic, refer to the following guides:
+
+* Installing link:{ocp_swf_install_url}[OpenShift Serverless Operator].
+* Installing link:{ocp_knative_serving_url}[Knative Serving].
+* Installing link:{ocp_knative_eventing_url}[Knative Eventing]. Knative Eventing is not required for this guide, but it is listed here in case your {product_name} application requires it.
+
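+As a sketch, you can verify that `Knative Serving` is ready with the following command; it assumes the default `knative-serving` namespace used by the OpenShift Serverless installation:
+
+.Verify that Knative Serving is ready
+[source,shell]
+----
+oc get knativeserving.operator.knative.dev/knative-serving -n knative-serving \
+  --template='{{range .status.conditions}}{{printf "%s=%s\n" .type .status}}{{end}}'
+----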
+
+[TIP]
+====
+If you get error messages related to `Istio`, this link:{knative_istio_issue_url}[article] might be helpful.
+====
+
+
+[[proc-deploy-sw-application-openshift]]
+== Deploying your workflow application on OpenShift
+
+Once `Knative Serving` is ready, you can initiate the process of deploying your {product_name} application on OpenShift.
+
+// shared app req
+include::../common/_deploy_workflow_application_requisites.adoc[]
+
+After checking the prerequisites, prepare the project that will be used to deploy your application:
+
+.Procedure
+. Create a project
++
+--
+[source,shell,subs="attributes+"]
+----
+{command_line_tool} new-project {default_namespace}
+----
+--
++
+. Set the Kubernetes context to the newly created namespace using the following command:
++
+--
+.Set OpenShift current project
+[source,shell,subs="attributes+"]
+----
+{command_line_tool} project {default_namespace}
+----
+--
+
+
+[TIP]
+====
+You can use the link:build-workflow-image-with-quarkus-cli.html#proc-building-serverless-workflow-application-using-native-image[native image] for a faster startup.
+====
+
+
+=== Preparing the OpenShift Registry
+
+OpenShift comes with an internal registry to which you can push locally built images so they can be deployed.
+
+First, check whether the registry is already exposed by verifying that its route exists:
+
+.Check whether the OpenShift registry is exposed
+[source,shell]
+----
+oc get route default-route -n openshift-image-registry --template='{{ .spec.host }}'
+----
+
+If no information is returned or an error occurs, it means that the registry is not exposed. To expose the registry, follow link:{ocp_expose_registry_url}[these steps].
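+
+The linked documentation is the authoritative reference. As a sketch, the default route can usually be enabled with a single patch, assuming cluster-admin permissions:
+
+.Enable the default registry route (sketch)
+[source,shell]
+----
+oc patch configs.imageregistry.operator.openshift.io/cluster --type=merge --patch '{"spec":{"defaultRoute":true}}'
+----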
+
+Now that the OpenShift registry is exposed, store its address in the `OPENSHIFT_REGISTRY_URL` environment variable so that it can be reused in the next steps:
+
+.Retrieve the OpenShift registry
+[source,shell]
+----
+OPENSHIFT_REGISTRY_URL=$(oc get route default-route -n openshift-image-registry --template='{{ .spec.host }}')
+----
+
+The OpenShift registry requires you to be logged in before you can push Container Images. If you are already logged in to the cluster, you can use your token to authenticate to the registry:
+
+.Log in to the OpenShift registry
+[source,shell]
+----
+podman login -u <username> -p $(oc whoami -t) --tls-verify=false $OPENSHIFT_REGISTRY_URL
+----
+If everything is working, you should get the *Login Succeeded!* message.
+
+.Pushing the {product_name} application to the OpenShift registry
+First, get the tag of the Container Image built previously:
+[source,shell]
+----
+podman images
+----
+
+With the Container Image tag, use the following command to tag the application image with the OpenShift registry and the namespace being used:
+[source,shell,subs="attributes+"]
+----
+podman tag <image> $OPENSHIFT_REGISTRY_URL/{default_namespace}/serverless-workflow-greeting-quarkus:1.0
+----
+
+Then, push:
+[source,shell,subs="attributes+"]
+----
+podman push $OPENSHIFT_REGISTRY_URL/{default_namespace}/serverless-workflow-greeting-quarkus:1.0
+----
+
+[IMPORTANT]
+====
+The {default_namespace} is the current project/namespace created earlier to deploy your {product_name} application.
+====
+
+The next step is to deploy your workflow application and execute it.
+The following sections explain different approaches to deploy your {product_name} application:
+
+[NOTE]
+====
+In the next steps you will notice the value **{k8s_registry}** being used. It is the internal OpenShift registry address from which the deployment images will be pulled. Note that the Container Image pushed in the previous step will be referenced as `{k8s_registry}/{default_namespace}/serverless-workflow-greeting-quarkus:1.0`.
+====
+
+* <>
+* <>
+* <>
+
+// deploy with kn-cli
+include::../common/_proc_deploy_sw_kn_cli.adoc[]
+
+// deploy with kubectl
+include::../common/_proc_deploy_sw_oc.adoc[]
+
+// deploy with quarkus-cli
+include::../common/_proc_deploy_sw_quarkus_cli.adoc[]
+
+include::../../../pages/_common-content/report-issue.adoc[]
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/cloud/quarkus/kubernetes-service-discovery.adoc b/modules/serverless-logic/pages/cloud/quarkus/kubernetes-service-discovery.adoc
index fd53f406..b044007a 100644
--- a/modules/serverless-logic/pages/cloud/quarkus/kubernetes-service-discovery.adoc
+++ b/modules/serverless-logic/pages/cloud/quarkus/kubernetes-service-discovery.adoc
@@ -5,19 +5,20 @@
:keywords: kogito, workflow, quarkus, serverless, service-discovery, enhanced-service-discovery
// links
:quarkus_issue_url: https://github.com/quarkusio/quarkus/issues/27457
+:kubernetes_sa_url: https://kubernetes.io/docs/concepts/security/service-accounts/
-The Kubernetes service discovery allows you to have a static URI, defining a Kubernetes resource, which is used to perform HTTP requests. The Kubernetes resource defined in the URI is queried in the current Kubernetes cluster and translated in a valid URL.
+The Kubernetes service discovery allows you to describe, using a custom URI, the Kubernetes resource you want to perform HTTP requests on. Under the hood, it discovers the network endpoint (URL) to which the request is made.
-The Kubernetes service discovery feature works during the workflow application startup, in which this feature scans all the Quarkus configuration in search of the URI pattern. Therefore, you must keep in mind that if application startup time matters, then consider to use a static URL instead.
+The Kubernetes service discovery feature works during workflow application startup, scanning all the Quarkus configuration properties in search of the URI pattern. Therefore, keep in mind that if application startup time matters, you should consider using a known static URL instead.
-Following is the URI pattern in Kubernetes service discovery:
+Following is the custom URI pattern in Kubernetes service discovery:
.URI pattern in Kubernetes service discovery
[source,shell]
----
-kubernetes:<group>/<version>/<kind>/<namespace>/<resourceName>?<attributeName>=<attributeValue>
-\________/ \_____/ \_______/ \____/ \_________/ \____________/ \______________________________/
-  scheme    group   version   kind   namespace   resourceName   additional resource attributes
+kubernetes:<kind>.<version>.<group>/<namespace>/<resourceName>?<attributeName>=<attributeValue>
+\________/ \____/ \_______/ \_____/ \_________/ \____________/ \______________________________/
+  scheme    kind   version   group   namespace   resourceName   additional resource attributes
\____________________/ \__________________________/
GVK Supported values:
- port-name={PORT_NAME}
@@ -32,14 +33,14 @@ The following scheme values are supported in the URI pattern:
The following resources are supported for the Kubernetes GVK (Group, Version, and Kind):
-* `v1/service`
-* `serving.knative.dev/v1/service`
-* `v1/pod`
-* `apps/v1/deployment`
-* `apps.openshift.io/v1/deploymentconfig`
-* `apps/v1/statefulset`
-* `route.openshift.io/v1/route`
-* `networking.k8s.io/v1/ingress`
+* `services.v1`
+* `services.v1.serving.knative.dev`
+* `pods.v1`
+* `deployments.v1.apps`
+* `deploymentconfigs.v1.apps.openshift.io`
+* `statefulsets.v1.apps`
+* `routes.v1.route.openshift.io`
+* `ingresses.v1.networking.k8s.io`
[NOTE]
====
@@ -50,7 +51,7 @@ When using `knative`, you can also use a simplified URI like:
knative:<namespace>/<serviceName>
----
-The above URI looks directly for serving.knative.dev/v1/service resource.
+The above URI looks directly for the `services.v1.serving.knative.dev` resource.
====
[IMPORTANT]
@@ -65,7 +66,7 @@ Also known as query string. The query parameters are defined the similar way wit
The following query parameters help the engine to be more precise when querying for a given Kubernetes resource:
-* *Custom labels*: The custom labels are used to filter services in case there are more than one service with the same label selector but exposing different ports. In this case, you can instruct the engine that if more than one service is found, then the engine must use the service containing the provided label.
+* *Custom labels*: The custom labels are used to filter services in case there is more than one service with the same label selector but exposing different ports. In this case, you can instruct the engine that, if more than one service is found, it must use the service containing the provided label.
+
The label is defined with the following expression and in case of multiple labels, you can use semicolon (;):
+
@@ -74,7 +75,7 @@ The label is defined with the following expression and in case of multiple label
.Example label definition in URI
[source,shell]
----
-kubernetes:v1/pod/<namespace>/<podName>?labels=label-name=test-label
+kubernetes:pods.v1/<namespace>/<podName>?labels=label-name=test-label
----
+
Using the previous URI example, if there is more than one service exposing the given pod, the `label-name=test-label` label is used to filter the service. If the label does not exist, the first service found is used.
@@ -84,6 +85,103 @@ Using the previous URI example, if there are more than one service exposing the
`port-name=<PORT_NAME>`
--
+[[required-kubernetes-roles]]
+== Required Kubernetes roles
+
+The service discovery engine requires that the link:{kubernetes_sa_url}[Kubernetes Service Account] running the application has read permissions for the discovered objects.
+
+The following Kubernetes Role resource has all the required permissions for the service discovery to work:
+
+.Required permissions for the Service Discovery on Kubernetes
+[source,yaml]
+----
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: service-discovery-role
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ - services
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - apps
+ resources:
+ - deployments
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingresses
+ verbs:
+ - get
+ - list
+ # Knative
+ - apiGroups:
+ - serving.knative.dev
+ resources:
+ - services
+ verbs:
+ - get
+ - list
+----
+
+If running on OpenShift, you must also add the following rules:
+
+.Additional rules to run on OpenShift
+[source,yaml]
+----
+rules:
+ - apiGroups:
+ - route.openshift.io
+ resources:
+ - routes
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - apps.openshift.io
+ resources:
+ - deploymentconfigs
+ verbs:
+ - get
+ - list
+----
+
+You must then bind the Service Account with the `Role` via a `RoleBinding`:
+
+.RoleBinding for the required service discovery role
+[source,yaml]
+----
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: service-discovery-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: service-discovery-role
+subjects:
+ - kind: ServiceAccount
+ name: default
+----
+
+[IMPORTANT]
+====
+Usually, pods run using the `default` Service Account. If your cluster has a different configuration, you should adapt this `RoleBinding` to your requirements.
+====
+
+You must create these two objects in every namespace that you wish to deploy Quarkus Workflow applications with Service Discovery enabled.
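+
+For example, assuming you saved the manifests above as `service-discovery-role.yaml` and `service-discovery-rolebinding.yaml` (hypothetical file names), a sketch of applying them to a given namespace:
+
+.Applying the service discovery RBAC objects
+[source,shell]
+----
+kubectl apply -f service-discovery-role.yaml -n <namespace>
+kubectl apply -f service-discovery-rolebinding.yaml -n <namespace>
+----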
+
+As an alternative, you can create a `ClusterRole` instead of a `Role` in every namespace. In this case, the role applies globally across your cluster. For security reasons, you should avoid this approach.
+
[[con-kubernetes-service-doscovery-configuration]]
== Configuration in Kubernetes service discovery
@@ -114,7 +212,15 @@ For example, consider an application that consumes a resource running on Kuberne
.Example URI
[source,shell]
----
-org.kie.kogito.sw.knative.service=knative:v1/Service/serverless-workflow-greeting-quarkus/greeting-quarkus-cli
+org.kie.kogito.sw.knative.service=${knative:services.v1/serverless-workflow-greeting-quarkus/greeting-quarkus-cli}
+----
+
+Or you can reference a specific endpoint of the Knative service using the following URI:
+
+.Example endpoint URI
+[source,shell]
+----
+org.kie.kogito.sw.knative.service=${knative:services.v1/serverless-workflow-greeting-quarkus/greeting-quarkus-cli}/endpoint
----
[NOTE]
@@ -147,7 +253,7 @@ __ ____ __ _____ ___ __ ____ ______
[NOTE]
====
-In the previous example, the URI is translated to `http://greeting-quarkus-cli.serverless-workflow-greeting-quarkus.10.99.154.147.sslip.io` when the application started. The translated URI is used at runtime, when needed.
+In the previous example, the URI is translated to `http://greeting-quarkus-cli.serverless-workflow-greeting-quarkus.10.99.154.147.sslip.io` when the application starts. The translated URL is used at runtime when needed.
====
The Kubernetes service discovery scans the Quarkus configuration during startup, which can also cause a small delay, as follows:
@@ -168,39 +274,56 @@ Kubernetes service discovery is disabled::
----
====
-[IMPORTANT]
-====
+[[ref-enabling-kubernetes-service-discovery]]
+== Enabling Kubernetes service discovery
+
When using the Kubernetes service discovery feature, you need to consider whether your application can afford the delayed startup time.
If the URI pattern is not found in the application properties, discovery is not triggered. However, the scanning is performed anyway, so a short amount of time is still spent on the startup scan.
-You can disable the Kubernetes service discovery by removing the `kogito-addons-quarkus-kubernetes` library from the application's dependencies as shown in the following Maven exclusions:
+=== Available service discovery implementations
-.Example Maven exclusions
-[source,shell]
+Currently, there are two implementations available.
+
+You can enable the Kubernetes service discovery by adding one of the service discovery implementations to the application's dependencies, as shown in the following sections:
+
+==== Fabric8 Kubernetes Service Catalog
+
+When activated, it leverages the Kubernetes Java API for service discovery, making it ideally suited for self-managed scenarios where users independently construct and deploy workflows.
+
+.Example Maven dependencies
+[source,xml]
----
-<dependency>
-  <groupId>org.kie.kogito</groupId>
-  <artifactId>kogito-quarkus-serverless-workflow</artifactId>
-  <exclusions>
-    <exclusion>
-      <groupId>org.kie.kogito</groupId>
-      <artifactId>kogito-addons-quarkus-kubernetes</artifactId>
-    </exclusion>
-    <exclusion>
-      <groupId>org.kie.kogito</groupId>
-      <artifactId>kogito-addons-quarkus-kubernetes-deployment</artifactId>
-    </exclusion>
-  </exclusions>
-</dependency>
+<dependency>
+  <groupId>org.kie.kogito</groupId>
+  <artifactId>kogito-addons-quarkus-kubernetes</artifactId>
+</dependency>
+<dependency>
+  <groupId>org.kie.kogito</groupId>
+  <artifactId>kogito-addons-quarkus-fabric8-kubernetes-service-catalog</artifactId>
+</dependency>
----
-====
+==== MicroProfile Config Kubernetes Service Catalog
+This implementation retrieves information from the application's configuration, making it valuable in scenarios where users rely on the operator to provide the necessary information from the hosting platform.
+
+.Example Maven dependencies
+[source,xml]
+----
+<dependency>
+  <groupId>org.kie.kogito</groupId>
+  <artifactId>kogito-addons-quarkus-kubernetes</artifactId>
+</dependency>
+<dependency>
+  <groupId>org.kie.kogito</groupId>
+  <artifactId>kogito-addons-quarkus-microprofile-config-service-catalog</artifactId>
+</dependency>
+----
== Additional resources
-* xref:serverless-logic:service-orchestration/configuring-openapi-services-endpoints.adoc[Configuring the OpenAPI services endpoints]
+* xref:service-orchestration/configuring-openapi-services-endpoints.adoc[Configuring the OpenAPI services endpoints]
include::../../../pages/_common-content/report-issue.adoc[]
diff --git a/modules/serverless-logic/pages/cloud/quarkus/versioning-workflows-in-knative.adoc b/modules/serverless-logic/pages/cloud/quarkus/versioning-workflows-in-knative.adoc
deleted file mode 100644
index 36e8a1f9..00000000
--- a/modules/serverless-logic/pages/cloud/quarkus/versioning-workflows-in-knative.adoc
+++ /dev/null
@@ -1 +0,0 @@
-//= Versioning workflows in Knative
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/core/accessing-workflow-metainformation-in-runtime.adoc b/modules/serverless-logic/pages/core/accessing-workflow-metainformation-in-runtime.adoc
deleted file mode 100644
index c756f7c3..00000000
--- a/modules/serverless-logic/pages/core/accessing-workflow-metainformation-in-runtime.adoc
+++ /dev/null
@@ -1 +0,0 @@
-//= Accessing workflow metainformation in runtime
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/getting-started/cncf-serverless-workflow-specification-support.adoc b/modules/serverless-logic/pages/core/cncf-serverless-workflow-specification-support.adoc
similarity index 84%
rename from modules/serverless-logic/pages/getting-started/cncf-serverless-workflow-specification-support.adoc
rename to modules/serverless-logic/pages/core/cncf-serverless-workflow-specification-support.adoc
index 1ae89f9e..b5565606 100644
--- a/modules/serverless-logic/pages/getting-started/cncf-serverless-workflow-specification-support.adoc
+++ b/modules/serverless-logic/pages/core/cncf-serverless-workflow-specification-support.adoc
@@ -1,4 +1,4 @@
-= CNCF Serverless Workflow specification
+= Serverless Workflow Specification
:compat-mode!:
// Metadata:
:description: CNCF Serverless Workflow Specification Support
@@ -88,6 +88,8 @@ specification.
The link:{spec_doc_url}#parallel-state[Parallel State] of the workflow states feature works in a single thread. This means that a Parallel State does not create one thread per branch, simulating an actual parallel behavior.
+ If the `exclusive` property is set to `false`, you should not use the link:{spec_doc_url}#event-state[Event State] of the workflow states feature as the starting state. If it is specified that way, it behaves as if the `exclusive` property were set to `true`.
+
[NOTE]
====
{product_name} does not support the link:{spec_doc_url}#sleep-state[Sleep State] feature. However, this feature will be supported in a future release.
@@ -101,7 +103,7 @@ The following table shows all the workflow states that {product_name} supports i
| State | Status | Reference
| Event
-| emoji:full_moon[]
+| emoji:last_quarter_moon[]
| link:{spec_doc_url}#Event-State[Event State]
| Operation
@@ -172,15 +174,15 @@ The following table shows the status of the workflow functions that {product_nam
| link:{spec_doc_url}#defining-custom-function-types[Defining custom function types]
|===
-For additional functions, the Serverless Workflow specification support the `custom` function type, such as `sysout` and `java`. For more information about these custom function types, see xref:serverless-logic:core/custom-functions-support.adoc[Custom functions for your {product_name} service].
+For additional functions, the Serverless Workflow specification supports the `custom` function type, such as `sysout` and `java`. For more information about these custom function types, see xref:core/custom-functions-support.adoc[Custom functions for your {product_name} service].
[[events]]
== Events
{product_name} supports events of the workflow model as defined in the link:{spec_doc_url}#Event-Definition[Serverless Workflow specification definition], except the following:
-* `resultEventRef` property in link:{spec_doc_url}#eventref-definition[`EventRefDefinition`] is not implemented and, if specified, this property is ignored. Same functionality can be achieved by using xref:serverless-logic:eventing/working-with-callbacks.adoc[Callback] state.
-* link:{spec_doc_url}#correlation-definition[Correlation] has limited support, that evaluates correlation rules matching a single event consumed per time with a workflow instance. The correlation among `N` different events to be consumed and matched with a workflow instance is not supported in {product_name}. For more information about event correlation, see xref:serverless-logic:eventing/event-correlation-with-workflows.adoc[Event correlation in {product_name}].
+* `resultEventRef` property in link:{spec_doc_url}#eventref-definition[`EventRefDefinition`] is not implemented and, if specified, this property is ignored. The same functionality can be achieved by using xref:core/working-with-callbacks.adoc[Callback] state.
+* link:{spec_doc_url}#correlation-definition[Correlation] has limited support: correlation rules are evaluated by matching a single consumed event at a time with a workflow instance. The correlation among `N` different events to be consumed and matched with a workflow instance is not supported in {product_name}. For more information about event correlation, see xref:eventing/event-correlation-with-workflows.adoc[Event correlation in {product_name}].
[[workflow_data]]
== Serverless Workflow data
@@ -219,14 +221,14 @@ For additional functions, the Serverless Workflow specification support the `cus
{product_name} supports the error handling feature as described in the link:{spec_doc_url}#workflow-error-handling[Serverless Workflow specification definition].
-For more information about error handling, see xref:serverless-logic:core/understanding-workflow-error-handling.adoc[Error handling in {product_name}].
+For more information about error handling, see xref:core/understanding-workflow-error-handling.adoc[Error handling in {product_name}].
[[retries]]
== Retries
{product_name} does not support Retries feature, however, it will be implemented in a future release.
-Alternatively, you can use xref:serverless-logic:core/understanding-workflow-error-handling.adoc[Error handling in {product_name}].
+Alternatively, you can use xref:core/understanding-workflow-error-handling.adoc[Error handling in {product_name}].
[[timeouts]]
== Timeouts
@@ -235,14 +237,14 @@ Alternatively, you can use xref:serverless-logic:core/understanding-workflow-err
For start event state the `exclusive` property is not supported if set to `false`, therefore the timeout is not supported for the event state when starting a workflow.
-For more information about timeouts, see xref:serverless-logic:core/timeouts-support.adoc[Timeouts on events for {product_name}].
+For more information about timeouts, see xref:core/timeouts-support.adoc[Timeouts on events for {product_name}].
[[compensation]]
== Compensation
{product_name} supports workflow compensation as described in the link:{spec_doc_url}#Workflow-Compensation[Serverless Workflow specification definition].
-For more information about compensations, see xref:serverless-logic:use-cases/orchestration-based-saga-pattern.adoc[Saga orchestration example in {product_name}].
+For more information about compensations, see xref:use-cases/orchestration-based-saga-pattern.adoc[Saga orchestration example in {product_name}].
[[constants]]
== Constants
@@ -260,11 +262,11 @@ Secrets are associated with the link:{quarkus_config_guide_url}[Quarkus Configur
====
//Guide not available currently (DP1)
-//For more information, see xref:serverless-logic:core/accessing-workflow-metainformation-in-runtime.adoc[Accessing Serverless Workflow Metainformation].
+//For more information, see xref:core/accessing-workflow-metainformation-in-runtime.adoc[Accessing Serverless Workflow Metainformation].
== Additional resources
-* xref:serverless-logic:getting-started/create-your-first-workflow-service.adoc[Creating your first workflow service]
-* xref:serverless-logic:getting-started/getting-familiar-with-our-tooling.adoc[Getting familiar with {product_name} tooling]
+* xref:getting-started/create-your-first-workflow-service.adoc[Creating your first workflow service]
+* xref:getting-started/getting-familiar-with-our-tooling.adoc[Getting familiar with {product_name} tooling]
include::../../pages/_common-content/report-issue.adoc[]
diff --git a/modules/serverless-logic/pages/core/configuration-properties.adoc b/modules/serverless-logic/pages/core/configuration-properties.adoc
index 98c57884..1c7eebac 100644
--- a/modules/serverless-logic/pages/core/configuration-properties.adoc
+++ b/modules/serverless-logic/pages/core/configuration-properties.adoc
@@ -5,7 +5,7 @@
:description: Configuration Properties
:keywords: kogito, workflow, serverless, configuration, properties
-The following table serves as a quick reference for commonly used configuration properties supported in {product_name}. You can define the following properties in the `src/main/resources/application.properties` file of your project.
+The following table serves as a quick reference for commonly used configuration properties supported in {product_name}. You can define the following properties in the `src/main/resources/application.properties` file of your project.
.Common configuration properties
[cols="20%,30%,20%,15%,15%", options="header"]
@@ -65,7 +65,7 @@ a|Defines the type of persistence database. The possible values of this property
|No
|`kogito.addon.messaging.outgoing.defaultName`
-|Defines the default name of the outgoing channel.
+|Defines the default name of the outgoing channel.
|string
|`kogito_outgoing_stream`
|No
diff --git a/modules/serverless-logic/pages/core/custom-functions-support.adoc b/modules/serverless-logic/pages/core/custom-functions-support.adoc
index 4b383753..1fc1f4b4 100644
--- a/modules/serverless-logic/pages/core/custom-functions-support.adoc
+++ b/modules/serverless-logic/pages/core/custom-functions-support.adoc
@@ -164,11 +164,11 @@ If your method returns Java collections, it is converted to a JSON array and add
=== Function accessing contextual data
-If you need access to process contextual information (for example, Kogito process instance ID) inside your Java service, you can add a `KogitoProcessContext` parameter as the last one in the method signature.
+If you need access to process contextual information (for example, {product_name} workflow instance ID) inside your Java service, you can add a `KogitoProcessContext` parameter as the last one in the method signature.
Therefore, if you need to do so, you can update the signature of methods from previous sections.
-.Example of a function accessing contextual data
+.Example of a function accessing {product_name} context
[source,java]
----
public class MyInterfaceOrClass {
@@ -181,7 +181,7 @@ public JsonNode myMethod(JsonNode workflowData, KogitoProcessContext context ) {
}
----
-.Example of a function accessing contextual data
+.Example of a function accessing {product_name} context
[source,java]
----
public class MyInterfaceOrClass {
@@ -204,7 +204,7 @@ Avoid using `java` functions to call the external services, instead, you can use
[[con-func-camel]]
== Camel custom function
-Kogito supports the link:{camel_url}[Camel Routes] functions within an Apache Maven project, in which you define your workflow service.
+{product_name} supports the link:{camel_url}[Camel Routes] functions within an Apache Maven project, in which you define your workflow service.
[NOTE]
====
@@ -329,65 +329,42 @@ You can declare a {product_name} custom function using the Knative service name,
{
"name": "greet", <1>
"type": "custom", <2>
- "operation": "knative:custom-function-knative-service" <3>
+ "operation": "knative:services.v1.serving.knative.dev/custom-function-knative-service?path=/plainJsonFunction", <3>
}
]
----
<1> `greet` is the function name
<2> `custom` is the function type
-<3> `knative:custom-function-knative-service` is the custom operation definition. In this definition, `knative` is the reserved keyword followed by the `custom-function-knative-service` Knative service.
+<3> In the `operation` property, you set the coordinates of the Knative service
-The above function will send a `POST` request to the http://custom-function-knative-service.default.10.109.169.193.sslip.io URL.
+The above function sends a `POST` request to the http://custom-function-knative-service.default.10.109.169.193.sslip.io/plainJsonFunction URL. If you do not specify a path, {product_name} uses the root path (/).
[NOTE]
====
-`GET` requests are not yet supported.
+You can also send `GET` requests by setting `method=GET` in the `operation`. In this case, the function arguments are forwarded as a query string.
====
-[[about-namespaces]]
-==== About namespaces
-
-Note that in the above example, you declared only the name of the service you wanted to call, but not a namespace. In this case, {product_name} will look for a Knative service in the same namespace the workflow service is running.
-
-In case you need to call a Knative service in a different namespace, you can declare the function as:
-
+.Example of a `GET` request
[source,json]
----
"functions": [
{
"name": "greet",
"type": "custom",
- "operation": "knative:my_different_namespace/custom-function-knative-service"
+ "operation": "knative:services.v1.serving.knative.dev/custom-function-knative-service?path=/plainJsonFunction&method=GET", <1>
}
]
----
-In the above example, {product_name} will look for the `custom-function-knative-service` in the `my_different_namespace` namespace.
-
-=== Function metadata
-
-==== HTTP resource path
-
-You can specify the HTTP resource path by adding the `path` property to the function's metadata.
+<1> `GET` request
-For instance, given the `/path/to/my/resource` HTTP resource path, you can define your function as:
+[[about-namespaces]]
+==== About namespaces
-[source,json]
-----
- "functions": [
- {
- "name": "greet",
- "type": "custom",
- "operation": "knative:custom-function-knative-service",
- "metadata": {
- "path": "/path/to/my/resource"
- }
- }
- ]
-----
+Note that in the above example, you declared only the name of the service that you want to call, but not a namespace. In this case, {product_name} looks for the Knative service in the same namespace in which the workflow service is running.
-In case you don't specify any path, the root path will be used. For instance, the following function:
+In case you need to call a Knative service in a different namespace, you can declare the function as:
[source,json]
----
@@ -395,26 +372,12 @@ In case you don't specify any path, the root path will be used. For instance, th
{
"name": "greet",
"type": "custom",
- "operation": "knative:custom-function-knative-service"
+ "operation": "knative:services.v1.serving.knative.dev/my_different_namespace/custom-function-knative-service?path=/plainJsonFunction"
}
]
----
-Is exactly the same as:
-
-[source,json]
-----
- "functions": [
- {
- "name": "greet",
- "type": "custom",
- "operation": "knative:custom-function-knative-service",
- "metadata": {
- "path": "/"
- }
- }
- ]
-----
+In the above example, {product_name} will look for the `custom-function-knative-service` in the `my_different_namespace` namespace.
=== Function arguments
@@ -470,11 +433,7 @@ To tell {product_name} you want to send the payload as a CloudEvent, you must de
{
"name": "greet",
"type": "custom",
- "operation": "knative:custom-function-knative-service",
- "metadata": {
- "path": "/",
- "asCloudEvent": true <1>
- }
+ "operation": "knative:services.v1.serving.knative.dev/custom-function-knative-service?path=/plainJsonFunction&asCloudEvent=true" <1>
}
]
----
@@ -537,28 +496,46 @@ You must declare a `functionRef` like the following: (Do not forget to set `asCl
[NOTE]
====
-{product_name} generates a CloudEvent ID based on the `source` and the workflow instance ID. In case an ID is set, {product_name} will ignore it and use a generated one.
+{product_name} generates a CloudEvent ID based on the `source` and the workflow instance ID. If you decide to set an ID, {product_name} uses it, and you must ensure that it is unique. Refer to the following example of how to set a CloudEvent ID:
====
+.Setting a CloudEvent ID
+[source,json]
+----
+"arguments": {
+ "specversion" : "1.0",
+ "id": "a_unique_id_42", <1>
+ "type" : "com.github.pull_request.opened",
+ "source" : "https://github.com/cloudevents/spec/pull",
+ "subject" : "123",
+ "time" : "2018-04-05T17:31:00Z",
+ "comexampleextension1" : "value",
+ "comexampleothervalue" : 5,
+ "datacontenttype" : "text/xml",
+ "data" : ""
+}
+----
+<1> The CloudEvent ID.
+
=== Configurations
==== Request timeout
-By default, the Knative service must respond within 10 seconds. You can use the `kogito.addon.knative-serving.request-timeout` property to configure this value.
+By default, the Knative service must respond within 10 seconds. You can use the `kogito.sw.functions.<function name>.timeout` property to configure this value.
For instance, if you want to reduce the request timeout to 5 seconds, you must add the following to your `application.properties` file:
[source,properties]
----
-kogito.addon.knative-serving.request-timeout=5000 <1>
+kogito.sw.functions.greet.timeout=5000 <1>
----
<1> Time in milliseconds
== Custom function types
-You can add your custom types by using the Kogito add-on mechanism. As predefined custom types like xref:core/custom-functions-support.adoc#con-func-sysout[`sysout`] or xref:core/custom-functions-support.adoc#con-func-java[`java`], the custom type identifier is the prefix of the operation field of the function definition.
+You can add your own custom types by using the {product_name} add-on mechanism. As with the predefined custom types, such as xref:core/custom-functions-support.adoc#con-func-sysout[`sysout`] or xref:core/custom-functions-support.adoc#con-func-java[`java`], the custom type identifier is the prefix of the `operation` field of the function definition.
-Kogito add-ons relies on the link:{quarkus_guides_base_url}/writing-extensions[Quarkus extensions] mechanism. And the add-on consists of at least two Maven projects:
+{product_name} add-ons rely on the link:{quarkus_guides_base_url}/writing-extensions[Quarkus extensions] mechanism. An add-on consists of at least two Maven projects:
- The deployment module, which is responsible for generating the code required for the extension to work.
- The runtime module, which includes the non-generated classes that are required for the extension to work.
@@ -568,7 +545,7 @@ In the case of a Serverless Workflow custom type, following are the roles of the
- *The deployment project*
+
The deployment project is expected to configure the work item handler used during runtime to perform the logic associated with the custom type.
-It must contain a Java class that inherits from `WorkItemTypeHandler`. Its responsibilities are to indicate the custom type identifier (the operation prefix, as indicated earlier) and to set up the `WorkItemNodeFactory` instance passed as a parameter of the `fillWorkItemHandler` method. That instance is included in the Kogito process definition for that Workflow. As a part of this setup, you must indicate the name of the `WorkItemNodeFactory`. You might also provide any relevant metadata for that handler if needed.
+It must contain a Java class that inherits from `WorkItemTypeHandler`. Its responsibilities are to indicate the custom type identifier (the operation prefix, as indicated earlier) and to set up the `WorkItemNodeFactory` instance passed as a parameter of the `fillWorkItemHandler` method. That instance is included in the {product_name} process definition for that Workflow. As a part of this setup, you must indicate the name of the `WorkItemNodeFactory`. You might also provide any relevant metadata for that handler if needed.
- *The runtime project*
+
@@ -599,7 +576,7 @@ Using the recent approach, you can write a link:{kogito_sw_examples_url}/serverl
The `operation` starts with `rpc`, which is the custom type identifier, and continues with `division`, which denotes the operation that will be executed in the legacy server.
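+
+A minimal sketch of such a function definition is shown below; the function name is illustrative, and only the `rpc:division` operation string is taken from the example referenced above:
+
+.Sketch of a function definition that uses the `rpc` custom type
+[source,json]
+----
+"functions": [
+  {
+    "name": "division",
+    "type": "custom",
+    "operation": "rpc:division"
+  }
+]
+----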
-A Kogito addon that defines the `rpc` custom type must be developed for this function definition to be identified. It is consist of a link:{kogito_sw_examples_url}/serverless-workflow-custom-type/serverless-workflow-custom-rpc-deployment[deployment project] and a link:{kogito_sw_examples_url}/serverless-workflow-custom-type/serverless-workflow-custom-rpc[runtime project].
+A {product_name} add-on that defines the `rpc` custom type must be developed for this function definition to be identified. It consists of a link:{kogito_sw_examples_url}/serverless-workflow-custom-type/serverless-workflow-custom-rpc-deployment[deployment project] and a link:{kogito_sw_examples_url}/serverless-workflow-custom-type/serverless-workflow-custom-rpc[runtime project].
The deployment project is responsible for extending the link:{kogito_sw_examples_url}/serverless-workflow-custom-type/serverless-workflow-custom-rpc-deployment/src/main/java/org/kie/kogito/examples/sw/services/RPCCustomTypeHandler.java[`WorkItemTypeHandler`] and setup the `WorkItemNodeFactory` as follows:
@@ -695,7 +672,7 @@ void init () {
== Additional resources
-* xref:getting-started/cncf-serverless-workflow-specification-support.adoc[CNCF Serverless Workflow specification]
+* xref:core/cncf-serverless-workflow-specification-support.adoc[CNCF Serverless Workflow specification]
* xref:core/understanding-jq-expressions.adoc[jq expressions in {product_name}]
include::../../pages/_common-content/report-issue.adoc[]
diff --git a/modules/serverless-logic/pages/core/defining-an-input-schema-for-workflows.adoc b/modules/serverless-logic/pages/core/defining-an-input-schema-for-workflows.adoc
index acbdaef5..b2bca86a 100644
--- a/modules/serverless-logic/pages/core/defining-an-input-schema-for-workflows.adoc
+++ b/modules/serverless-logic/pages/core/defining-an-input-schema-for-workflows.adoc
@@ -1,9 +1,11 @@
-= Input schema definition for {product_name}
+= Input and Output schema definition for {product_name}
:compat-mode!:
// Metadata:
:description: Defining input schema for Serverless Workflow
:keywords: kogito, workflow, serverless, dataInputSchema
+== Input schema
+
The `dataInputSchema` in the link:{spec_doc_url}#workflow-definition-structure[Serverless Workflow specification] is a parameter used in the workflow definition. The `dataInputSchema` parameter validates the workflow data input against a defined JSON Schema. It is important to provide `dataInputSchema`, as it is used to verify if the provided workflow data input is correct before any workflow states are executed.
You can define a `dataInputSchema` as follows:
@@ -17,11 +19,37 @@ You can define a `dataInputSchema` as follows:
}
----
-In the previous definition, the `schema` property is a URI, which holds the path to the JSON schema used to validate the workflow data input. The URI can be a classpath URI, a file, or an HTTP URL. If a classpath URI is specified, then the JSON schema file must be placed in the resources section of the project.
+In the previous definition, the `schema` property is a URI, which holds the path to the JSON schema used to validate the workflow data input. The URI can be a classpath URI, a file, or an HTTP URL. If a classpath URI is specified, then the JSON schema file must be placed in the resources section of the project or any other directory included in the classpath.
+
+`failOnValidationErrors` is an optional flag that indicates the behavior adopted when the input data does not match the specified JSON schema. If not specified or set to `true`, an exception is thrown and the workflow execution fails. If set to `false`, the workflow is executed and a WARN-level log containing the validation errors is printed.
+
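+For example, the following minimal sketch (the schema file name is illustrative) relaxes the validation so that invalid input only produces a warning:
+
+.Sketch of `dataInputSchema` with `failOnValidationErrors` disabled
+[source,json]
+----
+"dataInputSchema": {
+   "schema" : "schemas/input-schema.json",
+   "failOnValidationErrors": false
+}
+----
+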
+== Output schema
+
+The Serverless Workflow specification does not support a JSON output schema until version 0.9. Therefore, {product_name} implements it as a link:{spec_doc_url}#extensions[Serverless Workflow specification extension]. The output schema is applied after workflow execution to verify that the output model has the expected format. It is also useful for Swagger generation purposes.
+
+Similar to the input schema, you must specify the URL to the JSON schema, by using `outputSchema` as follows:
+
+.`outputSchema` definition
+[source,json]
+----
+"extensions" : [ {
+ "extensionid": "workflow-output-schema",
+ "outputSchema": {
+ "schema" : "URL_to_json_schema",
+ "failOnValidationErrors": false
+  }
+} ]
+----
+
+The same rules described for `dataInputSchema` apply for `schema` and `failOnValidationErrors`. The difference is that the latter flag is applied after workflow execution.
+
+== Example with `dataInputSchema` and `outputSchema`
+
+You can see the link:{kogito_sw_examples_url}/serverless-workflow-expression-quarkus[`serverless-workflow-expression-quarkus`] example application for a workflow definition that uses `dataInputSchema` and `outputSchema`.
-You can see the link:{kogito_sw_examples_url}/serverless-workflow-expression-quarkus[`serverless-workflow-expression-quarkus`] example application of a workflow definition with `dataInputSchema`. See the link:{kogito_sw_examples_url}/serverless-workflow-expression-quarkus/src/main/resources/expression.sw.json[Serverless Workflow JSON file] for more details.
+== Swagger documentation
-When a workflow definition contains a `dataInputSchema` attribute, the workflow application generates an OpenAPI file, such as `http://localhost/q/openapi`. The generated OpenAPI file references the schema file, which helps in defining the input data for the workflows. For more information about the OpenAPI file, see link:{open_api_spec_url}[OpenAPI specification].
+When a workflow definition contains a `dataInputSchema` and/or an `outputSchema` attribute, the workflow application generates an OpenAPI file, available for example at `http://localhost:8080/q/openapi`. The generated OpenAPI file references the schema files, which helps in defining the input data and checking the output data of the workflows. For more information about the OpenAPI file, see link:{open_api_spec_url}[OpenAPI specification].
If you want to generate an OpenAPI file for a workflow, then you must add the link:{quarkus_swagger_url}#expose-openapi-specifications[Quarkus dependency] in the project.
@@ -42,7 +70,7 @@ components:
== Additional resources
-* xref:serverless-logic:service-orchestration/orchestration-of-openapi-based-services.adoc[Orchestrating the OpenAPI services]
-* xref:serverless-logic:service-orchestration/configuring-openapi-services-endpoints.adoc[Configuring the OpenAPI services endpoints]
+* xref:service-orchestration/orchestration-of-openapi-based-services.adoc[Orchestrating the OpenAPI services]
+* xref:service-orchestration/configuring-openapi-services-endpoints.adoc[Configuring the OpenAPI services endpoints]
include::../../pages/_common-content/report-issue.adoc[]
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/eventing/handling-events-on-workflows.adoc b/modules/serverless-logic/pages/core/handling-events-on-workflows.adoc
similarity index 84%
rename from modules/serverless-logic/pages/eventing/handling-events-on-workflows.adoc
rename to modules/serverless-logic/pages/core/handling-events-on-workflows.adoc
index 5cb0bf7c..336058ee 100644
--- a/modules/serverless-logic/pages/eventing/handling-events-on-workflows.adoc
+++ b/modules/serverless-logic/pages/core/handling-events-on-workflows.adoc
@@ -1,4 +1,4 @@
-= Event state in {product_name}
+= Events in {product_name}
:compat-mode!:
// Metadata:
@@ -132,13 +132,13 @@ To start a new workflow instance, set the `start` property to the event state na
An event state can also be used to pause an existing workflow instance. When the workflow execution reaches an event state, which is not starting, then the execution is paused until there is an event match for that workflow instance.
-Similar to the callback state in a workflow, the workflow instance to be resumed is identified by `kogitoprocrefid` CloudEvent attribute or calculated according to the xref:serverless-logic:eventing/event-correlation-with-workflows.adoc[event correlation] functionality. While callback state is used for _fire&wait_ scenarios, event state covers _wait&fire_ scenarios. For more information about the callback state, see xref:serverless-logic:eventing/working-with-callbacks.adoc[Callback state in {product_name}].
+Similar to the callback state in a workflow, the workflow instance to be resumed is identified by the `kogitoprocrefid` CloudEvent attribute or calculated according to the xref:eventing/event-correlation-with-workflows.adoc[event correlation] functionality. While the callback state is used for _fire&wait_ scenarios, the event state covers _wait&fire_ scenarios. For more information about the callback state, see xref:core/working-with-callbacks.adoc[Callback state in {product_name}].
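+
+The following minimal sketch (the event type, attribute values, and data payload are illustrative) shows a CloudEvent that carries the `kogitoprocrefid` attribute, so that it resumes a specific workflow instance waiting in an event state:
+
+.Sketch of a CloudEvent that targets a paused workflow instance
+[source,json]
+----
+{
+  "specversion": "1.0",
+  "id": "db2cba2e-8a2f-4bd5-a338-9a9a48375f1a",
+  "source": "/my/external/service",
+  "type": "quoteReceivedEvent",
+  "kogitoprocrefid": "0e8c5a37-3a4b-4f0e-9f3c-2b1d6a9f5e77",
+  "data": {
+    "result": "approved"
+  }
+}
+----
+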
== Additional resources
-* xref:serverless-logic:getting-started/create-your-first-workflow-service.adoc[Creating your first workflow service]
-* xref:serverless-logic:eventing/consume-producing-events-with-kafka.adoc[Consuming and producing events using Apache Kafka]
-* xref:serverless-logic:eventing/consume-produce-events-with-knative-eventing.adoc[Consuming and producing events on Knative Eventing]
+* xref:getting-started/create-your-first-workflow-service.adoc[Creating your first workflow service]
+* xref:eventing/consume-producing-events-with-kafka.adoc[Consuming and producing events using Apache Kafka]
+* xref:eventing/consume-produce-events-with-knative-eventing.adoc[Consuming and producing events on Knative Eventing]
include::../../pages/_common-content/report-issue.adoc[]
diff --git a/modules/serverless-logic/pages/core/timeouts-support.adoc b/modules/serverless-logic/pages/core/timeouts-support.adoc
index 752cedb0..9761574c 100644
--- a/modules/serverless-logic/pages/core/timeouts-support.adoc
+++ b/modules/serverless-logic/pages/core/timeouts-support.adoc
@@ -3,53 +3,56 @@
// Metadata:
:description: Using timeouts in {product_name}
:keywords: kogito, workflow, serverless, timeout, timer, expiration
-// links
-:jobs_service_image_url: https://quay.io/repository/kiegroup/kogito-jobs-service-allinone
-:jobs_service_image_usage_url: https://github.com/kiegroup/kogito-images#jobs-services-all-in-one
+The Serverless Workflow specification defines several link:{spec_doc_url}#workflow-timeouts[timeout configurations] that you can use to set the maximum time for the workflow execution in different scenarios.
+For example, you can configure how long a workflow can wait for an event to arrive when it is in a given state. Currently, {product_name} supports only the link:{spec_doc_url}#workflow-timeout-definition[workflow execution timeout] and the link:{spec_doc_url}#event-timeout-definition[event timeouts].
-Serverless Workflow specification defines a wide amount of possible link:{spec_doc_url}#workflow-timeout-definition[timeout configurations].
-Among them, {product_name} currently supports workflow and event timeouts.
-
-Regardless of its application scope (workflow or state), you can not define the timeouts as a specific point in time, but they should be an amount of time, a duration, which is considered to start when the referred scope becomes active. Timeouts use the link:https://en.wikipedia.org/wiki/ISO_8601[`ISO 8601` data and time standard] to specify a duration of time.
-It follows the format `PnDTnHnMn.nS` with days considered to be exactly 24 hours.
+Regardless of its application scope (workflow or state), the timeouts must be configured as an amount of time (a duration), which is considered to start when the referred scope becomes active. Timeouts use the link:https://en.wikipedia.org/wiki/ISO_8601[`ISO 8601` date and time standard] to specify a duration of time and follow the format `PnDTnHnMn.nS`, with days considered to be exactly 24 hours.
For instance, `PT15M` configures 15 minutes, and `P2DT3H4M` defines 2 days, 3 hours and 4 minutes.
-== Workflow timeout
-
-You can set up the maximum amount of time a workflow might be running before being canceled. This is configured in the header section of the flow, by using the link:{spec_doc_url}#workflowexectimeout-definition[WorkflowExecTimeout definition]. Only the `duration` property is currently implemented.
-Once canceled, the workflow is considered to be finished and will not be accessible through a GET request anymore. So it behaves as if the interrupt was `true` by default.
+[#workflow-timeout]
+== Workflow timeout
+To configure the maximum amount of time a workflow can be running before being canceled, you can use the workflow timeout.
+This is configured in the header section of the workflow, by using the link:{spec_doc_url}#workflowexectimeout-definition[WorkflowExecTimeout definition]. Only the `duration` property is currently implemented.
+Once canceled, the workflow is considered to be finished and is no longer accessible through a GET request. Therefore, it behaves as if `interrupt` was set to `true` by default.
-For example, in order to cancel the workflow after an hour of execution, you might add the following snippet.
+For example, to cancel the workflow after an hour of execution, you must use the following configuration.
.Example of workflow timeout
[source,json]
-[source,json]
----
-"timeouts": {
+{
+ "id": "workflow_timeouts",
+ "version": "1.0",
+ "name": "Workflow Timeouts",
+ "description": "Simple workflow to show the workflowExecTimeout working",
+ "start": "PrintStartMessage",
+ "timeouts": {
"workflowExecTimeout": "PT1H"
+ } ...
}
----
== Event timeout
-When you define a state in a serverless workflow, you can use the `timeouts` property to configure the maximum time to complete this state.
+When you define a state in a workflow, you can use the `timeouts` property to configure the maximum time to complete this state.
When that time is overdue, the state is considered timed-out, and the engine continues the execution from this state. The execution flow depends on the state type, for instance,
-a transition to a next state.
-All the properties you can use to configure state timeouts are described in the link:{spec_doc_url}#event-timeout-definition[Serverless Workflow specification].
+a transition to a next state might be executed.
+All the properties that you can use to configure state timeouts are described in the link:{spec_doc_url}#states-timeout-definition[Serverless Workflow specification].
Event-based states can use the sub-property `eventTimeout` to configure the maximum time to wait for an event to arrive.
+This is the only property that is supported in the current implementation.
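+
+For example, a minimal sketch of a `timeouts` configuration inside a state definition looks as follows (the duration value is illustrative):
+
+.Sketch of an `eventTimeout` configuration
+[source,json]
+----
+"timeouts": {
+  "eventTimeout": "PT30M"
+}
+----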
-
+[#callback-state-timeout]
=== Callback state timeout
-Callback state can be used when you need to execute an action, in general to call an external service, and wait for an asynchronous response in form of an event, the callback.
+The Callback state can be used when you need to execute an action, in general a call to an external service, and wait for an asynchronous response in the form of an event, known as the callback.
-Once the response event is consumed, the workflow continues the execution, in general moving to the next state defined in the `transition` property. See more on xref:eventing/working-with-callbacks.adoc[Callback state in {product_name}].
+Once the response event is consumed, the workflow continues the execution, in general moving to the next state defined in the `transition` property. See more on xref:core/working-with-callbacks.adoc[Callback state in {product_name}].
-Since the callback state halts the execution util the event is consumed, you can define an `eventTimeout` for it, and in case the event does not arrive in the defined duration time, the workflow continues the execution moving to the next state defined in the transition, see the <>.
+Since the Callback state halts the execution until the event is consumed, you can configure an `eventTimeout` for it. If the event does not arrive within the configured duration, the workflow continues the execution and moves to the next state defined in the transition. See the <<callback-state>> example.
[#callback-state]
-.Example of callback state with timeout
+.Example of Callback state with timeout
[source,json]
----
{
@@ -78,17 +81,20 @@ Since the callback state halts the execution util the event is consumed, you can
}
----
+[#switch-state-timeout]
=== Switch state timeout
+The Switch state can be used when you need to take an action depending on certain conditions. These conditions can be based on the workflow data, link:{spec_doc_url}#Switch-state-Data-Conditions[dataConditions], or on events, link:{spec_doc_url}#Switch-State-Event-Conditions[eventConditions].
-The switch state can be used when you need to take an action based on conditions, defined with the link:{spec_doc_url}#switch-state-event-conditions[eventConditions] property, where the workflow execution waits to make a decision depending on the events to be consumed and matched, defined through link:{spec_doc_url}#event-definition[event definition].
+When you use the link:{spec_doc_url}#Switch-State-Event-Conditions[eventConditions], the workflow execution waits to make a decision until any of the configured events arrives and matches a condition.
+In this situation, you can configure an event timeout, which controls the maximum time to wait for an event to match the conditions.
-In this situation, you can define an event timeout, that controls the maximum time to wait for an event to match the conditions, if this time is expired, the workflow moves to the state defined in the `defaultCondition` property of the switch state, as you can see in the <>.
+If this time expires, the workflow moves to the state defined in the `defaultCondition` property of the Switch state, as you can see in the <<switch-state>> example.
-See more details about this state on the link:{spec_doc_url}#switch-date[Serverless Workflow specification].
+See more details about this state on the link:{spec_doc_url}#switch-state[Serverless Workflow specification - Switch State].
[#switch-state]
-.Example of switch state with timeout
+.Example of Switch state with timeout
[source,json]
----
{
@@ -113,26 +119,27 @@ See more details about this state on the link:{spec_doc_url}#switch-date[Serverl
}
----
+[#event-state-timeout]
=== Event state timeout
-The `event` state is used to wait for one or more events to be received by the workflow and then continue the execution.
-If the event state is a starting state, a new workflow instance is created.
+The Event state is used to wait for one or more events to be received by the workflow, execute a set of actions, and then continue the execution.
+If the Event state is a starting state, a new workflow instance is created.
[NOTE]
====
-The event state is not supported as a starting state if the `exclusive` flag is set to `false`, therefore timeout is not supported in this case.
+The Event state is not supported as a starting state if the `exclusive` flag is set to `false`, therefore timeout is not supported in this case.
====
-The `timeouts` property is used for this state to configure the maximum time the workflow should wait for the defined events to arrive.
+The `timeouts` property is used for this state to configure the maximum time the workflow should wait for the configured events to arrive.
-If this time is exceeded and the events are not received, the workflow moves to the state defined in the `transition` property or ends the workflow instance without performing any actions in case of an end state.
+If this time is exceeded and the events are not received, the workflow moves to the state defined in the `transition` property or ends the workflow instance (in case of an end state), without performing any actions.
You can see this in the <<event-state>> example.
-For more information about event state timeout, see link:{spec_doc_url}#event-timeout-definition[Serverless Workflow specification].
+For more information about Event state timeout, see link:{spec_doc_url}#event-timeout-definition[Serverless Workflow specification - Event State].
[#event-state]
-.Example of event state with timeout
+.Example of Event state with timeout
[source,json]
----
{
@@ -187,55 +194,35 @@ For more information about event state timeout, see link:{spec_doc_url}#event-ti
}
----
-=== Deploying a timed-based workflow
+== Deploying a timer-based workflow
-In order to deploy a workflow that contains timeouts or any other timer-based action, it is necessary to have a job service running in your environment, which is an external service responsible to control the workflows timers, see the xref:supporting-services/jobs-service.adoc#job-service[job service guide] for more information.
-In the <> you can see the details of how set up a Knative infrastructure with the workflow and job service running.
+To deploy a workflow that uses timeouts, or any other timer-based action, a {job_service_xref}[job service] must be running in your environment. The job service is an independent service responsible for managing the workflow timers. See the {job_service_xref}#integration-with-the-workflows[job service] guide for more information.
+The provisioning of this service depends on the working mode that you are using.
-[[job-addon-configuration]]
-==== Addon configuration in the workflow runtime
+=== {operator_name} Dev Profile
-The communication from the workflow application with Job Service is done through an addon, which is responsible for publishing and consuming events related to timers.
-When you run the workflow as a Knative service, you must add the `kogito-addons-quarkus-jobs-knative-eventing` to your project and provide the proper configuration.
+When you work with the {operator_name} Dev Profile, the operator will automatically provide an execution environment that contains an embedded {job_service_xref}[job service] instance, as well as an instance of the {data_index_xref}[data index service].
+Therefore, no additional configuration is needed.
-* Dependency in the `pom.xml`:
+In the <<timeouts-showcase>>, you can see the details of how to work with timeouts and the {operator_name} Dev Profile.
-.Callback state example with timeout
-[source, xml]
-----
-<dependency>
-  <groupId>org.kie.kogito</groupId>
-  <artifactId>kogito-addons-quarkus-jobs-knative-eventing</artifactId>
-</dependency>
-----
+=== Quarkus Workflow Project
-* Configuration parameters:
+When you work with a standalone Quarkus Workflow Project, you must:
-[[workflow-application-configuration-parameters]]
-.Callback state example with timeout
-[source, properties]
-----
-# Events produced by kogito-addons-quarkus-jobs-knative-eventing to program the timers on the Job Service.
-mp.messaging.outgoing.kogito-job-service-job-request-events.connector=quarkus-http
-mp.messaging.outgoing.kogito-job-service-job-request-events.url=${K_SINK:http://localhost:8280/jobs/events}
-mp.messaging.outgoing.kogito-job-service-job-request-events.method=POST
-----
+1. Configure the project to include the required add-on for the workflows to connect with that service.
-[NOTE]
-====
-The `K_SINK` variable used in the URL configuration for the outgoing channel in the
-<>, is injected by Knative Eventing, more information on
-xref:eventing/consume-produce-events-with-knative-eventing.adoc[Consuming and producing events on Knative Eventing].
-====
+2. Ensure that a job service instance is available in your environment.
+
+In the {job_service_xref}[job service] guide you can see all the available add-ons and configuration alternatives for this case.
+
+[#timeouts-showcase]
+== Timeouts showcase
-[#timeout-example]
-== Timeout showcase example
-You can check xref:use-cases/timeout-showcase-example.adoc[Timeout example in {product_name}]
-to see how to use and configure workflows with timeouts.
+To see examples of how to configure the timeouts and execute them in different deployment scenarios, you can use the xref:use-cases/timeout-showcase-example.adoc[Timeouts showcase in {product_name}].
== Additional resources
-* xref:eventing/working-with-callbacks.adoc[Callback state in {product_name}]
-* xref:use-cases/timeout-showcase-example.adoc[Timeout example in {product_name}]
+* xref:core/working-with-callbacks.adoc[Callback state in {product_name}]
-include::../../pages/_common-content/report-issue.adoc[]
+include::../../pages/_common-content/report-issue.adoc[]
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/core/understanding-jq-expressions.adoc b/modules/serverless-logic/pages/core/understanding-jq-expressions.adoc
index 76f36b72..675c53c8 100644
--- a/modules/serverless-logic/pages/core/understanding-jq-expressions.adoc
+++ b/modules/serverless-logic/pages/core/understanding-jq-expressions.adoc
@@ -1,18 +1,21 @@
-= jq expressions in {product_name}
+= jq Expressions
:compat-mode!:
// Metadata:
-:description: JQ expressions in Serverless Workflow
+:description: jq expressions in Serverless Workflow
:keywords: kogito, workflow, serverless, jq, expression
// links:
:jq_url: https://stedolan.github.io/jq/manual/
:jsonpath_url: https://github.com/json-path/JsonPath/
:json_data_types_url: https://www.w3schools.com/js/js_json_datatypes.asp
+:jq_play: https://jiehong.gitlab.io/jq_offline
Each workflow instance is associated with a data model. A data model consists of a JSON object regardless of whether the workflow file contains YAML or JSON. The initial content of the JSON object depends on how the workflow is started. If the workflow is created using the link:{cloud_events_url}[Cloud Event], then the workflow content is taken from the `data` property. However, if the workflow is started through an HTTP POST request, then the workflow content is taken from the request body.
-The workflow expressions in the link:{spec_doc_url}#workflow-expressions[Serverless Workflow specification] are used to interact with the data model. The supported expression languages include link:{jsonpath_url}[JsonPath] and link:{jq_url}[jq]. jq expression language is the default language. However, you can change the expression language to JsonPath using the `expressionLang` property.
+The workflow expressions in the link:{spec_doc_url}#workflow-expressions[Serverless Workflow specification] are used to interact with the data model. The supported expression languages include link:{jsonpath_url}[JsonPath] and link:{jq_url}[jq]. jq expression language is the default language. However, you can change the expression language to JsonPath using the `expressionLang` property.
-This document describes the usage of jq expressions in switch state conditions, action function arguments, and data filtering.
+This document describes the usage of jq expressions in switch state conditions, action function arguments, and data filtering.
+
+jq expressions might be tricky to master. For non-trivial cases, it is recommended to use helper tools, such as link:{jq_play}[jq play], to validate the expression before including it in the workflow file.
[[ref-example-jq-expression-switch-conditions]]
== Example of jq expressions in switch conditions
@@ -21,7 +24,7 @@ The conditions occurring in a switch state enable the workflow designer to selec
A condition in a switch state is an expression, which returns a boolean value when evaluated against the data model. If a condition associated with a state transition returns true, then the workflow must follow that transition.
-For example, in the link:{kogito_sw_examples_url}/serverless-workflow-greeting-quarkus[`serverless-workflow-greeting-quarkus`] example application, a message is displayed depending on the selected language, that is English or Spanish.
+For example, in the link:{kogito_sw_examples_url}/serverless-workflow-greeting-quarkus[`serverless-workflow-greeting-quarkus`] example application, a message is displayed depending on the selected language, that is English or Spanish.
If the value of the `language` property is English, the constant literal injected on the `message` property is _Hello from_, otherwise the constant value injected on the `message` property is _Saludos desde…_.
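+
+A minimal sketch of such a condition is shown below; the state and transition names are illustrative, and the actual example application may differ slightly:
+
+.Sketch of jq expressions used as switch state data conditions
+[source,json]
+----
+{
+  "name": "ChooseOnLanguage",
+  "type": "switch",
+  "dataConditions": [
+    {
+      "condition": ".language == \"English\"",
+      "transition": "GreetInEnglish"
+    },
+    {
+      "condition": ".language == \"Spanish\"",
+      "transition": "GreetInSpanish"
+    }
+  ],
+  "defaultCondition": {
+    "transition": "GreetInEnglish"
+  }
+}
+----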
@@ -54,7 +57,7 @@ The Serverless Workflow specification requires all the expressions to be embedde
In the Serverless Workflow specification, you can define link:{spec_doc_url}#workflow-functions[workflow functions], which can be invoked several times by the workflow states. Each workflow function call might contain different arguments, which are specified using the function arguments.
-For example, you can see the link:{kogito_sw_examples_url}/serverless-workflow-temperature-conversion/conversion-workflow/src/main/resources/fahrenheit-to-celsius.sw.json[temperature conversion] function definition in `serverless-workflow-temperature-conversion` example application. The temperature conversion function performs OpenAPI invocations to convert Fahrenheit to Celsius. For more information about OpenAPI, see xref:serverless-logic:service-orchestration/orchestration-of-openapi-based-services.adoc[Orchestrating the OpenAPI services].
+For example, you can see the link:{kogito_sw_examples_url}/serverless-workflow-temperature-conversion/conversion-workflow/src/main/resources/fahrenheit-to-celsius.sw.json[temperature conversion] function definition in `serverless-workflow-temperature-conversion` example application. The temperature conversion function performs OpenAPI invocations to convert Fahrenheit to Celsius. For more information about OpenAPI, see xref:service-orchestration/orchestration-of-openapi-based-services.adoc[Orchestrating the OpenAPI services].
Following is the `subtraction` function in `serverless-workflow-temperature-conversion` example application:
@@ -68,15 +71,15 @@ Following is the `subtraction` function in `serverless-workflow-temperature-conv
}]
----
-The arguments in `subtraction` function are expressed as a JSON object, and the property values of the JSON object are either a string containing an expression or a link:{json_data_types_url}[JSON data type], such as string, number, or boolean.
+The arguments in `subtraction` function are expressed as a JSON object, and the property values of the JSON object are either a string containing an expression or a link:{json_data_types_url}[JSON data type], such as string, number, or boolean.
.Example arguments in `subtraction` function
[source,json]
----
-"functionRef":
+"functionRef":
{
"refName": "subtraction",
- "arguments":
+ "arguments":
{
"leftElement": ".fahrenheit",
"rightElement": ".subtractValue"
@@ -84,7 +87,7 @@ The arguments in `subtraction` function are expressed as a JSON object, and the
}
----
-In the previous example, the left number is equal to the `fahrenheit` property (an input number that invokes the workflow), and the right number is equal to the `subtractValue` property (a constant number that is injected to the workflow model by `SetConstants` state). Once the expression evaluation is resolved for all properties that contain an expression, the resulting object is passed in the OpenAPI request. Based on the OpenAPI definition, the properties in the JSON object are used as body, path, query, or header of the upcoming REST invocation.
+In the previous example, the left number is equal to the `fahrenheit` property (an input number that invokes the workflow), and the right number is equal to the `subtractValue` property (a constant number that is injected to the workflow model by `SetConstants` state). Once the expression evaluation is resolved for all properties that contain an expression, the resulting object is passed in the OpenAPI request. Based on the OpenAPI definition, the properties in the JSON object are used as body, path, query, or header of the upcoming REST invocation.
Following is an example of function arguments defined as string that contains an expression, returning a JSON object:
@@ -105,7 +108,7 @@ In the previous example, the result of the expression evaluation is the same JSO
The Serverless Workflow specification defines the following filtering mechanisms to select which information must be part of the workflow data model:
-* link:{spec_doc_url}#action-data-filters[Action data filters]: Select the part of the action result that is merged into the data model, which overrides the properties that share the name with the selected action result.
+* link:{spec_doc_url}#action-data-filters[Action data filters]: Select the part of the action result that is merged into the data model, which overrides the properties that share the name with the selected action result.
* link:{spec_doc_url}#event-data-filters[Event data filters]: Similar to the action data filters, but apply to the events instead of actions.
* link:{spec_doc_url}#state-data-filters[State data filters]: Define the workflow model to the JSON object, which is returned by the expression and discards an existing property.
@@ -130,9 +133,9 @@ Following is an expression function in `serverless-workflow-expression-quarkus`
]
----
-In the previous example, an array of complex numbers (`x` is real coordinate and `y` is imaginary coordinate) is accepted and an expression function is defined to calculate the maximum value of `x` and minimum value of `y` for the `numbers` array.
+In the previous example, an array of complex numbers (`x` is real coordinate and `y` is imaginary coordinate) is accepted and an expression function is defined to calculate the maximum value of `x` and minimum value of `y` for the `numbers` array.
-Also, the `serverless-workflow-expression-quarkus` example application contains an action data filter defined inside `squareState` action and a state data filter defined inside `finish` state. The action data filter selects the maximum value of `x` to be merged to the workflow model, and the state data filter defines the maximum value as the entire workflow model that is returned as the workflow response.
+Also, the `serverless-workflow-expression-quarkus` example application contains an action data filter defined inside `squareState` action and a state data filter defined inside `finish` state. The action data filter selects the maximum value of `x` to be merged to the workflow model, and the state data filter defines the maximum value as the entire workflow model that is returned as the workflow response.
The previous example expression also contains a `max` function of type expression and an `operation` property containing a string of jq expression. This jq expression returns a JSON object, in which the `max` property is the maximum value of the `x` coordinate and the `min` property is the minimum value of the `y` coordinate.
@@ -172,7 +175,7 @@ Therefore, after executing the action, the workflow model consists of a `number`
}
----
-The original `numbers` array should not be returned as a result of the workflow execution, therefore the final stage consists of a state data filter defining the content of the output model. The output model should contain a `result` property and the value of `result` property should be the maximum number that is stored by the previous state in the `number` property.
+The original `numbers` array should not be returned as a result of the workflow execution, therefore the final stage consists of a state data filter defining the content of the output model. The output model should contain a `result` property and the value of `result` property should be the maximum number that is stored by the previous state in the `number` property.
In the previous example, the workflow model is changed by the `input` property of the filter, which means that the output model is updated before the state is executed. As a final result, the output model consists of a `result` property, containing the maximum value of `x`.
--
@@ -181,7 +184,7 @@ Event data filter example::
+
--
-You can find an example of event data filtering in the link:{kogito_sw_examples_url}/serverless-workflow-callback-quarkus[`serverless-workflow-callback-quarkus`] example application.
+You can find an example of event data filtering in the link:{kogito_sw_examples_url}/serverless-workflow-callback-quarkus[`serverless-workflow-callback-quarkus`] example application.
.Example event filter
[source,json]
@@ -198,6 +201,7 @@ The previous example of the event filter copies the content of CloudEvent data `
== Additional resources
-* xref:serverless-logic:service-orchestration/configuring-openapi-services-endpoints.adoc[Configuring the OpenAPI services endpoints]
+* link:{jq_play}[jq play offline]
+* xref:service-orchestration/configuring-openapi-services-endpoints.adoc[Configuring the OpenAPI services endpoints]
include::../../pages/_common-content/report-issue.adoc[]
diff --git a/modules/serverless-logic/pages/core/understanding-workflow-error-handling.adoc b/modules/serverless-logic/pages/core/understanding-workflow-error-handling.adoc
index ad8bf3de..f413313c 100644
--- a/modules/serverless-logic/pages/core/understanding-workflow-error-handling.adoc
+++ b/modules/serverless-logic/pages/core/understanding-workflow-error-handling.adoc
@@ -8,7 +8,7 @@
:java_regex_url: https://docs.oracle.com/javase/tutorial/essential/regex/index.html
:java_regex_pattern_url: https://docs.oracle.com/javase/tutorial/essential/regex/pattern.html
-This document describes how you can handle the errors that might occur in {product_name}.
+This document describes how you can handle the errors that might occur in {product_name}.
The Serverless Workflow specification provides an link:{spec_doc_url}#workflow-error-handling[error handling] mechanism, enabling you to handle the errors that might happen during the interactions between the workflow and external systems.
@@ -185,7 +185,7 @@ The `finish` state in the `serverless-workflow-error-quarkus` example applicatio
== Additional resources
-* xref:serverless-logic:getting-started/create-your-first-workflow-service.adoc[Creating your first workflow service]
+* xref:getting-started/create-your-first-workflow-service.adoc[Creating your first workflow service]
include::../../pages/_common-content/report-issue.adoc[]
diff --git a/modules/serverless-logic/pages/eventing/working-with-callbacks.adoc b/modules/serverless-logic/pages/core/working-with-callbacks.adoc
similarity index 91%
rename from modules/serverless-logic/pages/eventing/working-with-callbacks.adoc
rename to modules/serverless-logic/pages/core/working-with-callbacks.adoc
index 71fc1074..57766b5c 100644
--- a/modules/serverless-logic/pages/eventing/working-with-callbacks.adoc
+++ b/modules/serverless-logic/pages/core/working-with-callbacks.adoc
@@ -1,4 +1,4 @@
-= Callback state in {product_name}
+= Callbacks in {product_name}
:compat-mode!:
// Metadata:
@@ -9,7 +9,7 @@ This document describes the link:{spec_doc_url}#Callback-State[Callback state] a
From a workflow perspective, asynchronous service indicates that the control is returned to the caller immediately without waiting for the action to be completed. Once the action is completed, a link:{cloud_events_url}[CloudEvent] is published to resume the workflow.
-For the workflow to identify the published CloudEvent it is waiting for, the external service developer includes the workflow instance ID in the CloudEvent header or uses the xref:serverless-logic:eventing/event-correlation-with-workflows.adoc[Event correlation]. The following figure displays the process:
+For the workflow to identify the published CloudEvent it is waiting for, the external service developer includes the workflow instance ID in the CloudEvent header or uses the xref:eventing/event-correlation-with-workflows.adoc[Event correlation]. The following figure displays the process:
image::eventing/callbacks-explained.png[]
@@ -97,7 +97,7 @@ An link:{kogito_sw_examples_url}/serverless-workflow-callback-quarkus/src/main/j
After that, the workflow application consumes the event published by the listener and sets the result field. The consumed CloudEvent contains an attribute named `kogitoprocrefid`, which holds the workflow instance ID of the workflow.
-The `kogitoprocrefid` attribute is crucial because when the correlation is not used, then this attribute is the only way for the Callback state to identify that the related CloudEvent needs to be used to resume the workflow. For more information about correlation, see xref:serverless-logic:eventing/event-correlation-with-workflows.adoc[Event correlation in {product_name}].
+The `kogitoprocrefid` attribute is crucial because when the correlation is not used, then this attribute is the only way for the Callback state to identify that the related CloudEvent needs to be used to resume the workflow. For more information about correlation, see xref:eventing/event-correlation-with-workflows.adoc[Event correlation in {product_name}].
Note that each workflow is identified by a unique instance ID, which is automatically included in any published CloudEvent, as `kogitoprocinstanceid` CloudEvent extension.
@@ -119,7 +119,7 @@ The following example shows that the event listener takes the workflow instance
Apache Kafka configuration in `serverless-workflow-callback-quarkus`::
+
--
-The `serverless-workflow-callback-quarkus` example application requires an external broker to manage the associated CloudEvents. The default setup in the `serverless-workflow-callback-quarkus` example application uses link:{kafka_doc_url}[Apache Kafka]. However, you can also use xref:serverless-logic:eventing/consume-produce-events-with-knative-eventing.adoc[Knative Eventing].
+The `serverless-workflow-callback-quarkus` example application requires an external broker to manage the associated CloudEvents. The default setup in the `serverless-workflow-callback-quarkus` example application uses link:{kafka_doc_url}[Apache Kafka]. However, you can also use xref:eventing/consume-produce-events-with-knative-eventing.adoc[Knative Eventing].
Apache Kafka uses topics to publish or consume messages. In the `serverless-workflow-callback-quarkus` example application, two topics are used, matching the name of the CloudEvent types that are defined in the workflow, such as `resume` and `wait`. The `resume` and `wait` CloudEvent types are configured in the link:{kogito_sw_examples_url}/serverless-workflow-callback-quarkus/src/main/resources/application.properties[`application.properties`] file.
@@ -128,8 +128,8 @@ For more information about using Apache Kafka with events, see link:xref:consume
== Additional resources
-* xref:serverless-logic:getting-started/create-your-first-workflow-service.adoc[Creating your first workflow service]
-* xref:serverless-logic:eventing/event-correlation-with-workflows.adoc[Event correlation in {product_name}]
+* xref:getting-started/create-your-first-workflow-service.adoc[Creating your first workflow service]
+* xref:eventing/event-correlation-with-workflows.adoc[Event correlation in {product_name}]
include::../../pages/_common-content/report-issue.adoc[]
diff --git a/modules/serverless-logic/pages/core/working-with-parallelism.adoc b/modules/serverless-logic/pages/core/working-with-parallelism.adoc
index c0108279..7ff1fd3a 100644
--- a/modules/serverless-logic/pages/core/working-with-parallelism.adoc
+++ b/modules/serverless-logic/pages/core/working-with-parallelism.adoc
@@ -18,7 +18,7 @@ The `serverless-workflow-service-calls-quarkus` example application is a workflo
[[proc-parallel-creating-the-workflow]]
== Creating a parallel workflow
-You can create a workflow, which performs a series of parallel tasks.
+You can create a workflow, which performs a series of parallel tasks.
.Prerequisites
@@ -114,7 +114,7 @@ After you create a workflow that performs a series of parallel tasks, you can ru
.Prerequisites
-* A parallel workflow is created.
+* A parallel workflow is created.
+
For more information, see <>.
@@ -163,7 +163,7 @@ You can define the `"completionType": "atLeast"` to run only some branches in pa
.Prerequisites
-* A parallel workflow is created.
+* A parallel workflow is created.
+
For more information, see <>.
@@ -270,6 +270,6 @@ The parallel workflow data shows the concatenated string as result, but in this
== Additional resources
-* xref:serverless-logic:getting-started/create-your-first-workflow-service.adoc[Creating your first workflow service]
+* xref:getting-started/create-your-first-workflow-service.adoc[Creating your first workflow service]
-include::../../pages/_common-content/report-issue.adoc[]
+include::../../pages/_common-content/report-issue.adoc[]
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/eventing/consume-produce-events-with-knative-eventing.adoc b/modules/serverless-logic/pages/eventing/consume-produce-events-with-knative-eventing.adoc
index 827e5db6..53701920 100644
--- a/modules/serverless-logic/pages/eventing/consume-produce-events-with-knative-eventing.adoc
+++ b/modules/serverless-logic/pages/eventing/consume-produce-events-with-knative-eventing.adoc
@@ -1,4 +1,4 @@
-= Consuming and producing events on Knative Eventing
+= Consuming and producing events on Knative Eventing in Quarkus
:compat-mode!:
// Metadata:
:description: Consuming and producing events on Knative Eventing
@@ -43,10 +43,10 @@ Manually::
[TIP]
====
-If you have used the Knative workflow CLI to create your project, then the Kogito Knative Eventing extension is already present. For more information about creating a project using Knative workflow CLI, see xref:serverless-logic:tooling/kn-plugin-workflow-overview.adoc[{product_name} plug-in for Knative CLI].
+If you have used the Knative workflow CLI to create your project, then the {product_name} Knative Eventing extension is already present. For more information about creating a project using Knative workflow CLI, see xref:testing-and-troubleshooting/kn-plugin-workflow-overview.adoc[{product_name} plug-in for Knative CLI].
====
-The Kogito Knative Eventing add-on takes care of the required dependencies and additional configuration that the workflow application needs, to interact with the Knative Eventing platform.
+The {product_name} Knative Eventing add-on takes care of the required dependencies and additional configuration that the workflow application needs, to interact with the Knative Eventing platform.
Although the default configuration that the Quarkus Knative Eventing add-on provides ought to be enough for most of the use cases, sometimes you might need to do additional configuration to serve a specific scenario.
@@ -95,7 +95,7 @@ The following table lists the health check probe configuration property:
|===
|Property|Default value|Description
-|`org.kie.kogito.addons.knative.health_enabled`
+|`org.kie.kogito.addons.knative.eventing.health_enabled`
|`true`
|This property indicates if the health check is enabled to verify that the `K_SINK` variable is injected into the environment.
@@ -105,7 +105,7 @@ The following table lists the health check probe configuration property:
Knative target sink generation configuration::
+
--
-The Kogito Knative Eventing add-on generates a few Knative objects during build time. By default, the add-on generates a link:{knative_eventing_doc_url}/broker[Knative Broker] named `default` if the workflow application is acting as an event source.
+The {product_name} Knative Eventing add-on generates a few Knative objects during build time. By default, the add-on generates a link:{knative_eventing_doc_url}/broker[Knative Broker] named `default` if the workflow application is acting as an event source.
The following table lists the configuration properties related to Knative sink generation:
@@ -114,23 +114,23 @@ The following table lists the configuration properties related to Knative sink g
|===
|Property|Default value|Description
-|`org.kie.kogito.addons.knative.auto_generate_broker`
+|`org.kie.kogito.addons.knative.eventing.auto_generate_broker`
|true
-|This property indicates if the Kogito Knative Eventing add-on generates a default Knative Broker in memory to sink and dispatch the messages. Set this property to `false` in case a broker is already installed in your namespace. Note that you can use `org.kie.kogito.addons.knative.eventing.sink.*` property to configure your custom sink. If this property is not set, then the auto-generated broker works as a sink.
+|This property indicates if the {product_name} Knative Eventing add-on generates a default Knative Broker in memory to sink and dispatch the messages. Set this property to `false` in case a broker is already installed in your namespace. Note that you can use `org.kie.kogito.addons.knative.eventing.sink.*` property to configure your custom sink. If this property is not set, then the auto-generated broker works as a sink.
-|`org.kie.kogito.addons.knative.sink.namespace`
+|`org.kie.kogito.addons.knative.eventing.sink.namespace`
|
|This property indicates the namespace where the generated Knative sink is deployed.
-|`org.kie.kogito.addons.knative.sink.api_version`
+|`org.kie.kogito.addons.knative.eventing.sink.api_version`
|`eventing.knative.dev/v1`
|This property indicates the API group and version of the generated Knative sink.
-|`org.kie.kogito.addons.knative.sink.name`
+|`org.kie.kogito.addons.knative.eventing.sink.name`
|`default`
|This property indicates the name of the generated Knative sink.
-|`org.kie.kogito.addons.knative.sink.kind`
+|`org.kie.kogito.addons.knative.eventing.sink.kind`
|`Broker`
|This property indicates the Kubernetes kind of the generated Knative sink.
@@ -150,7 +150,7 @@ The following table lists the configuration property related to Knative sink gen
|===
|Property|Default value|Description
-|`org.kie.kogito.addons.knative.broker`
+|`org.kie.kogito.addons.knative.eventing.broker`
|`default`
|This property indicates the name of the default Knative broker that is deployed in the Kubernetes namespace. This broker is used as the reference to create the Knative triggers, which are responsible to delegate the events that the workflow service consumes.
@@ -296,7 +296,7 @@ kn workflow deploy
----
====
-For more information about building and deploying the workflow application, see xref:serverless-logic:cloud/quarkus/build-workflow-image-with-quarkus-cli.adoc[Building workflow images using Quarkus CLI].
+For more information about building and deploying the workflow application, see xref:cloud/quarkus/build-workflow-image-with-quarkus-cli.adoc[Building workflow images using Quarkus CLI].
--
[[ref-example-sw-event-definition-knative]]
@@ -375,9 +375,9 @@ For each consumed event definition, the Knative Eventing add-on generates one Kn
== Additional resources
-* xref:serverless-logic:testing-and-troubleshooting/mocking-http-cloudevents-with-wiremock.adoc[Mocking HTTP CloudEvents sink using WireMock]
-* xref:serverless-logic:eventing/consume-producing-events-with-kafka.adoc[Consuming and producing events using Apache Kafka]
-* xref:serverless-logic:eventing/event-correlation-with-workflows.adoc[Event correlation in {product_name}]
-* xref:serverless-logic:eventing/working-with-callbacks.adoc[Callback state in {product_name}]
+* xref:testing-and-troubleshooting/mocking-http-cloudevents-with-wiremock.adoc[Mocking HTTP CloudEvents sink using WireMock]
+* xref:eventing/consume-producing-events-with-kafka.adoc[Consuming and producing events using Apache Kafka]
+* xref:eventing/event-correlation-with-workflows.adoc[Event correlation in {product_name}]
+* xref:core/working-with-callbacks.adoc[Callback state in {product_name}]
include::../../pages/_common-content/report-issue.adoc[]
diff --git a/modules/serverless-logic/pages/eventing/consume-producing-events-with-kafka.adoc b/modules/serverless-logic/pages/eventing/consume-producing-events-with-kafka.adoc
index 96937b4f..b9633e12 100644
--- a/modules/serverless-logic/pages/eventing/consume-producing-events-with-kafka.adoc
+++ b/modules/serverless-logic/pages/eventing/consume-producing-events-with-kafka.adoc
@@ -1,4 +1,4 @@
-= Consuming and producing events using Apache Kafka
+= Consuming and producing events using Apache Kafka in Quarkus
:compat-mode!:
// Metadata:
:description: Consuming and producing events with Kafka
@@ -115,7 +115,7 @@ mp.messaging.outgoing.kogito_outgoing_stream.value.serializer=org.apache.kafka.c
Smallrye provide means to link:{overflow_management_url}[manage emitter overflow] through link:{overflow_annotation_url}[OnOverflow] annotation
-Kogito will annotate generated smallrye `emitters` for a particular channel using the information provided by a property of the form `kogito.addon.messaging.emitter..overflow-strategy`. The possible values are `BUFFER`, `NONE`, `UNBOUNDED`, `FAIL`, and `DROP`. If the value of the strategy is `BUFFER`, then you must specify the buffer size by using the property of the form `kogito.addon.messaging.emitter..buffer-size`
+{product_name} will annotate generated smallrye `emitters` for a particular channel using the information provided by a property of the form `kogito.addon.messaging.emitter..overflow-strategy`. The possible values are `BUFFER`, `NONE`, `UNBOUNDED`, `FAIL`, and `DROP`. If the value of the strategy is `BUFFER`, then you must specify the buffer size by using the property of the form `kogito.addon.messaging.emitter..buffer-size`
Therefore, for the `wait` channel, in the previous example, if we want to buffer as many as 100 events we will add these two properties
@@ -131,9 +131,9 @@ If all your channels use the same strategy and this strategy differs from the `B
== Additional resources
-* xref:serverless-logic:eventing/consume-produce-events-with-knative-eventing.adoc[Consuming and producing events on Knative Eventing]
-* xref:serverless-logic:eventing/event-correlation-with-workflows.adoc[Event correlation in {product_name}]
-* xref:serverless-logic:eventing/working-with-callbacks.adoc[Callback state in {product_name}]
+* xref:eventing/consume-produce-events-with-knative-eventing.adoc[Consuming and producing events on Knative Eventing]
+* xref:eventing/event-correlation-with-workflows.adoc[Event correlation in {product_name}]
+* xref:core/working-with-callbacks.adoc[Callback state in {product_name}]
include::../../pages/_common-content/report-issue.adoc[]
diff --git a/modules/serverless-logic/pages/eventing/event-correlation-with-workflows.adoc b/modules/serverless-logic/pages/eventing/event-correlation-with-workflows.adoc
index 73ecfdfe..48ecf5f4 100644
--- a/modules/serverless-logic/pages/eventing/event-correlation-with-workflows.adoc
+++ b/modules/serverless-logic/pages/eventing/event-correlation-with-workflows.adoc
@@ -15,7 +15,7 @@ You can optionally set the `contextAttributeValue` property, which matches the v
The incoming events consumed by the engine must contain the correlation attributes, set in the definition as extension context attributes. The correlation attributes are compliant with the link:{cloud_events_url}[CloudEvent] format, therefore, the attributes are not part of the event payload.
====
-A new workflow instance must be created using an event, which must be declared in the workflow definition file, containing correlation attributes in the event definition section. For more information about events, see xref:serverless-logic:eventing/handling-events-on-workflows.adoc[Event state in {product_name}]. Once the event is consumed, the engine extracts the correlation attributes and associates the attributes with the created workflow instance.
+A new workflow instance must be created using an event, which must be declared in the workflow definition file, containing correlation attributes in the event definition section. For more information about events, see xref:core/handling-events-on-workflows.adoc[Event state in {product_name}]. Once the event is consumed, the engine extracts the correlation attributes and associates the attributes with the created workflow instance.
A start event does not trigger a correlation evaluation, but acts as a moment in which correlation attributes and values are set. The correlation attributes and values are evaluated against other incoming events that might trigger the given instance. Therefore, when a non-start event is consumed and correlation attributes are evaluated, then the engine continues the execution of the matched instances (if any).
@@ -129,7 +129,7 @@ In {product_name}, correlating multiple events together is not supported, theref
Using the link:{kogito_sw_examples_url}/serverless-workflow-correlation-quarkus/src/main/resources/correlation.sw.json[workflow definition] in `serverless-workflow-correlation-quarkus` example application, you can define other events that are published and consumed by the workflow.
-The `serverless-workflow-correlation-quarkus` example application uses Callback states, such as <>. This means that once the workflow execution reaches the Callback state, the workflow publishes an event of <> type and waits to receive an event of <> type. For more information about callback state, see xref:serverless-logic:eventing/working-with-callbacks.adoc[Callback state in {product_name}].
+The `serverless-workflow-correlation-quarkus` example application uses callback states, such as <>. This means that once the workflow execution reaches the callback state, the workflow publishes an event of <> type and waits to receive an event of <> type. For more information about the callback state, see xref:core/working-with-callbacks.adoc[Callback state in {product_name}].
[[ref-validade-user-email-state]]
.Example Callback state definition
@@ -194,7 +194,7 @@ All consumed events must contain the same correlation attributes since the consu
}
----
-The engine stores the correlation information in the same persistence mechanism that is configured in the workflow application. If a persistence add-on is not configured, then the correlation information is stored in memory. This means that entire correlation information is lost when the workflow application restarts, therefore this process must be used for testing purposes. For more information about the persistence configuration, see xref:serverless-logic:persistence/persistence-with-postgresql.adoc[Running a workflow service using PostgreSQL].
+The engine stores the correlation information in the same persistence mechanism that is configured in the workflow application. If a persistence add-on is not configured, then the correlation information is stored in memory. This means that entire correlation information is lost when the workflow application restarts, therefore this process must be used for testing purposes. For more information about the persistence configuration, see xref:persistence/persistence-with-postgresql.adoc[Running a workflow service using PostgreSQL].
[NOTE]
====
@@ -203,7 +203,7 @@ Currently, only `kogito-addons-quarkus-persistence-jdbc` persistence add-on supp
== Additional resources
-* xref:serverless-logic:eventing/handling-events-on-workflows.adoc[Event state in {product_name}]
-* xref:serverless-logic:eventing/working-with-callbacks.adoc[Callback state in {product_name}]
+* xref:core/handling-events-on-workflows.adoc[Event state in {product_name}]
+* xref:core/working-with-callbacks.adoc[Callback state in {product_name}]
include::../../pages/_common-content/report-issue.adoc[]
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/service-orchestration/orchestration-of-asyncapi-based-services.adoc b/modules/serverless-logic/pages/eventing/orchestration-of-asyncapi-based-services.adoc
similarity index 97%
rename from modules/serverless-logic/pages/service-orchestration/orchestration-of-asyncapi-based-services.adoc
rename to modules/serverless-logic/pages/eventing/orchestration-of-asyncapi-based-services.adoc
index 809fe1a7..6802a0be 100644
--- a/modules/serverless-logic/pages/service-orchestration/orchestration-of-asyncapi-based-services.adoc
+++ b/modules/serverless-logic/pages/eventing/orchestration-of-asyncapi-based-services.adoc
@@ -1,4 +1,4 @@
-= Orchestrating the AsyncAPI services
+= Orchestrating AsyncAPI Services
:compat-mode!:
// Metadata:
:description: Orchestration of AsyncAPI based services
@@ -9,7 +9,7 @@
// Referenced documentation pages.
:kafka_config: xref:eventing/consume-producing-events-with-kafka.adoc
:knative_config: xref:eventing/consume-produce-events-with-knative-eventing.adoc
-:event_states: xref:eventing/handling-events-on-workflows.adoc
+:event_states: xref:core/handling-events-on-workflows.adoc
This document describes how to trigger and publish events using an link:{async_api_spec_url}[AsyncAPI] specification file.
Underneath, {product_name} uses the link:{async_quarkiverse_url}[AsyncAPI Quarkiverse extension].
diff --git a/modules/serverless-logic/pages/getting-started/create-your-first-workflow-service-with-kn-cli-and-vscode.adoc b/modules/serverless-logic/pages/getting-started/create-your-first-workflow-service-with-kn-cli-and-vscode.adoc
new file mode 100644
index 00000000..c45f3bf2
--- /dev/null
+++ b/modules/serverless-logic/pages/getting-started/create-your-first-workflow-service-with-kn-cli-and-vscode.adoc
@@ -0,0 +1,120 @@
+= Creating and running workflow projects using KN CLI and Visual Studio Code
+
+This guide showcases using the Knative Workflow CLI plug-in and Visual Studio Code to create and run {product_name} projects.
+
+.Prerequisites
+* xref:testing-and-troubleshooting/kn-plugin-workflow-overview.adoc[Knative Workflow CLI] {kn_cli_version} is installed.
+* Visual Studio Code with https://marketplace.visualstudio.com/items?itemName=redhat.vscode-extension-serverless-workflow-editor[Red Hat Serverless Workflow Editor] is installed to edit your workflows.
+
+.Preparing your environment
+* Install https://docs.docker.com/engine/install/[Docker] or https://podman.io/docs/installation[Podman].
+* Install https://minikube.sigs.k8s.io/docs/start/[minikube].
+* Install https://kubernetes.io/docs/tasks/tools/[kubectl].
+* Start minikube. Depending on your environment, set the `--driver` flag to `podman` or `docker`:
+[source,bash]
+----
+minikube start --cpus 4 --memory 8096 --addons registry --addons metrics-server --insecure-registry "10.0.0.0/24" --insecure-registry "localhost:5000" --driver=docker
+----
+* (Optional) Install the https://k9scli.io/[k9s CLI].
+* xref:cloud/operator/install-serverless-operator.adoc[]
+* Install https://github.com/kiegroup/kie-tools/releases/tag/0.30.3[KN Workflow CLI] by downloading the correct distribution for your development environment and adding it to your `PATH`. You can verify the installation as shown below.
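+
+A minimal check, assuming the plug-in binary is on your `PATH` (a sketch; the exact help output depends on the version you downloaded):
+[source,bash]
+----
+# Print the available kn workflow commands to confirm the plug-in is installed
+kn workflow --help
+----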
+
+[[proc-creating-app-with-kn-cli]]
+== Creating a workflow project with Visual Studio Code and KN CLI
+
+Use the `kn workflow create` command to scaffold a new SonataFlow project.
+
+* Navigate to your development directory and create your project:
+[source,bash]
+----
+kn workflow create -n my-sonataflow-project
+----
+* This command creates a folder named `my-sonataflow-project` that contains a sample workflow, `workflow.sw.json`. Navigate into the new folder:
+[source,bash]
+----
+cd ./my-sonataflow-project
+----
+* Open the folder in Visual Studio Code and examine the created `workflow.sw.json` using the Serverless Workflow editor extension. You can also open the folder from the terminal, as shown below.
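+
+If the `code` command-line launcher is available on your `PATH` (an optional convenience; not required for this guide), you can open the project folder directly from the terminal:
+[source,bash]
+----
+# Open the current project folder in Visual Studio Code
+code .
+----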
+
+Now you can run the project and execute the workflow.
+
+[[proc-running-app-with-kn-cli]]
+== Running a Workflow project with Visual Studio Code and KN CLI
+
+Use the `kn workflow run` command to build and run the {product_name} project in local development mode.
+
+* Run the project.
+[source,bash]
+----
+kn workflow run
+----
+* The Development UI will be accessible at `localhost:8080/q/dev`.
+* You can now work on your project. Any changes are picked up by the hot reload feature. While the development environment is running, you can also invoke the workflow over REST, as shown after this list.
+* See the xref:testing-and-troubleshooting/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-instances-page.adoc[Workflow instances] guide for how to run workflows via the Development UI.
+* Once you are done developing your project, navigate to the terminal that is running the `kn workflow run` command and press `Ctrl+C` to stop the development environment.
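+
+While the development environment is running, the workflow can be invoked directly over REST. The following sketch assumes the scaffolded `hello` workflow and the default `localhost:8080` dev-mode address:
+[source,bash]
+----
+# Start one instance of the sample workflow with an empty payload
+curl -X POST 'http://localhost:8080/hello' \
+  -H 'accept: */*' \
+  -H 'Content-Type: application/json' \
+  -d '{ "workflowdata": {} }'
+----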
+
+To deploy the finished project to a local cluster, proceed to the next section.
+
+[[proc-deploying-app-with-kn-cli]]
+== Deploying a workflow project with Visual Studio Code and KN CLI
+
+Use the `kn workflow deploy` command to deploy the {product_name} project to your local cluster.
+
+* Deploy the project to minikube:
+[source,bash]
+----
+kn workflow deploy
+----
+* (Optional) Use the k9s CLI to examine your deployment.
+* In a separate terminal, create a port mapping:
+[source,bash]
+----
+minikube service hello --namespace default --url
+----
+* Use the returned URL to access your workflow instances using the Developer UI:
+** `/q/dev/org.kie.kogito.kogito-quarkus-serverless-workflow-devui/workflowInstances`
+* To update the image, run the `deploy` command again. Note that this may take some time.
+
+* To stop the deployment, use the `undeploy` command:
+[source,bash]
+----
+kn workflow undeploy
+----
+* You can validate that your pod is terminating using the k9s CLI, or with `kubectl` as shown below.
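+
+A minimal `kubectl` check, assuming the default namespace used in this guide:
+[source,bash]
+----
+# Watch the workflow pods until the terminating pod disappears
+kubectl get pods --namespace default --watch
+----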
+
+[[proc-testing-application]]
+== Testing your workflow application
+
+To test your workflow application, you can use any REST client. All that is needed is the URL of your deployed workflow project.
+
+.Prerequisites
+* Your workflow project is deployed using <> and you have the URL where it is deployed at hand.
+
+.Testing your workflow application
+* To test your workflow project, access the Swagger UI at `/q/swagger-ui` to examine the available endpoints.
+* To execute the workflow once, run the following command, prefixing the `/hello` path with the URL of your deployed workflow project:
+[source,bash]
+----
+curl -X 'POST' \
+ '/hello' \
+ -H 'accept: */*' \
+ -H 'Content-Type: application/json' \
+ -d '{
+ "workflowdata": {}
+}'
+----
+* To examine executed instances, you can use the GraphQL UI by navigating to `/q/graphql-ui`. A sample query is shown below.
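+
+For example, you can list workflow instances with a query such as the one below, either pasted into the GraphQL UI or sent with `curl`. This is a sketch that assumes the project exposes workflow instance data over GraphQL (for example, through a Data Index add-on) on the default `/graphql` path; replace `<DEPLOYMENT_URL>` with the URL of your deployed workflow project:
+[source,bash]
+----
+# Query workflow instances through the GraphQL endpoint (illustrative setup, see the note above)
+curl -X POST '<DEPLOYMENT_URL>/graphql' \
+  -H 'Content-Type: application/json' \
+  -d '{ "query": "{ ProcessInstances { id processName state } }" }'
+----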
+
+
+== Additional resources
+
+* xref:testing-and-troubleshooting/quarkus-dev-ui-extension/quarkus-dev-ui-overview.adoc[]
+* xref:getting-started/getting-familiar-with-our-tooling.adoc[]
+* xref:service-orchestration/orchestration-of-openapi-based-services.adoc[]
+
+include::../../pages/_common-content/report-issue.adoc[]
+
+ifeval::["{kogito_version_redhat}" != ""]
+include::../../pages/_common-content/downstream-project-setup-instructions.adoc[]
+endif::[]
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/getting-started/create-your-first-workflow-service.adoc b/modules/serverless-logic/pages/getting-started/create-your-first-workflow-service.adoc
index 1137f1cc..b11f51a6 100644
--- a/modules/serverless-logic/pages/getting-started/create-your-first-workflow-service.adoc
+++ b/modules/serverless-logic/pages/getting-started/create-your-first-workflow-service.adoc
@@ -1,4 +1,4 @@
-= Creating your first workflow service
+= Creating a Quarkus Workflow Project
As a developer, you can use {product_name} and create a `Hello World` application, which includes the following procedures:
@@ -20,11 +20,11 @@ image::getting-started/hello-world-workflow.png[]
.Prerequisites
* Java {java_min_version} is installed with `JAVA_HOME` configured appropriately.
* Apache Maven {maven_min_version} is installed.
-* {quarkus_cli_url}[Quarkus CLI] or xref:serverless-logic:tooling/kn-plugin-workflow-overview.adoc[Knative Workflow CLI] {kn_cli_version} is installed.
+* {quarkus_cli_url}[Quarkus CLI] or xref:testing-and-troubleshooting/kn-plugin-workflow-overview.adoc[Knative Workflow CLI] {kn_cli_version} is installed.
* Visual Studio Code with https://marketplace.visualstudio.com/items?itemName=redhat.java[Red Hat Java Extension]
and https://marketplace.visualstudio.com/items?itemName=redhat.vscode-extension-serverless-workflow-editor[Red Hat Serverless Workflow Editor] is installed to edit your workflows.
-For more information about the tooling and the required dependencies, see xref:serverless-logic:getting-started/getting-familiar-with-our-tooling.adoc[Getting familiar with {product_name} tooling].
+For more information about the tooling and the required dependencies, see xref:getting-started/getting-familiar-with-our-tooling.adoc[Getting familiar with {product_name} tooling].
ifeval::["{kogito_version_redhat}" != ""]
include::../../pages/_common-content/downstream-project-setup-instructions.adoc[]
@@ -93,7 +93,7 @@ kn workflow create \
--quarkus-version={quarkus_platform_version}
----
-For more information about Knative workflow CLI, see xref:serverless-logic:tooling/kn-plugin-workflow-overview.adoc[{product_name} plug-in for Knative CLI].
+For more information about Knative workflow CLI, see xref:testing-and-troubleshooting/kn-plugin-workflow-overview.adoc[{product_name} plug-in for Knative CLI].
--
====
@@ -166,7 +166,7 @@ In the previous example:
[NOTE]
====
The workflow definition follows the CNCF Serverless Workflow specification. For more information, see
-xref:serverless-logic:getting-started/cncf-serverless-workflow-specification-support.adoc[CNCF Serverless Workflow specification].
+xref:core/cncf-serverless-workflow-specification-support.adoc[CNCF Serverless Workflow specification].
====
--
@@ -209,7 +209,7 @@ kn workflow build --image dev.local/serverless-workflow-hello-world --verbose
----
The `--verbose` flag is used to display the output of the build command. This flag is optional.
-For more information about Knative workflow CLI, see xref:serverless-logic:tooling/kn-plugin-workflow-overview.adoc[{product_name} plug-in for Knative CLI].
+For more information about Knative workflow CLI, see xref:testing-and-troubleshooting/kn-plugin-workflow-overview.adoc[{product_name} plug-in for Knative CLI].
--
====
@@ -259,9 +259,9 @@ mvn clean quarkus:dev
quarkus dev
----
-For more information about Knative workflow CLI, see xref:serverless-logic:tooling/kn-plugin-workflow-overview.adoc[{product_name} plug-in for Knative CLI].
+For more information about Knative workflow CLI, see xref:testing-and-troubleshooting/kn-plugin-workflow-overview.adoc[{product_name} plug-in for Knative CLI].
-Also, to deploy and run your workflow application, see xref:serverless-logic:cloud/quarkus/deploying-on-minikube.adoc[Deploying workflow application on Minikube]
+Also, to deploy and run your workflow application, see xref:cloud/quarkus/deploying-on-minikube.adoc[Deploying workflow application on Minikube]
--
====
+
@@ -360,11 +360,11 @@ Note that the `mantra` value is updated without restarting the application, beca
== Testing your workflow application
To test your workflow application, you can follow the instructions in the
-xref:serverless-logic:testing-and-troubleshooting/basic-integration-tests-with-restassured.adoc[Testing your workflow application using REST Assured].
+xref:testing-and-troubleshooting/basic-integration-tests-with-restassured.adoc[Testing your workflow application using REST Assured].
== Additional resources
-* xref:serverless-logic:getting-started/getting-familiar-with-our-tooling.adoc[Getting familiar with {product_name} tooling]
-* xref:serverless-logic:service-orchestration/orchestration-of-openapi-based-services.adoc[Orchestrating the OpenAPI services]
+* xref:getting-started/getting-familiar-with-our-tooling.adoc[Getting familiar with {product_name} tooling]
+* xref:service-orchestration/orchestration-of-openapi-based-services.adoc[Orchestrating the OpenAPI services]
include::../../pages/_common-content/report-issue.adoc[]
diff --git a/modules/serverless-logic/pages/getting-started/getting-familiar-with-our-tooling.adoc b/modules/serverless-logic/pages/getting-started/getting-familiar-with-our-tooling.adoc
index 6269e64d..3044dea3 100644
--- a/modules/serverless-logic/pages/getting-started/getting-familiar-with-our-tooling.adoc
+++ b/modules/serverless-logic/pages/getting-started/getting-familiar-with-our-tooling.adoc
@@ -9,10 +9,10 @@
The tooling in {product_name} provides the best developer experience for the workflow ecosystem. The following tools are provided that you can use to author your workflow assets:
-* xref:serverless-logic:tooling/serverless-workflow-editor/swf-editor-vscode-extension.adoc[*VS Code extension*]: Use the Serverless Workflow editor and edit the link:{spec_website_url}[CNCF Serverless Workflow specification] files in Visual Studio Code.
-* xref:serverless-logic:tooling/serverless-workflow-editor/swf-editor-chrome-extension.adoc[*Chrome GitHub extension*]: View and edit the CNCF Serverless Workflow specification files in GitHub.
-* xref:serverless-logic:tooling/quarkus-dev-ui-extension/quarkus-dev-ui-overview.adoc[*Kogito Serverless Workflow Tools extension in Quarkus Dev UI*]: View, manage, and start the workflow instances.
-* xref:serverless-logic:tooling/kn-plugin-workflow-overview.adoc[*{product_name} plug-in for Knative CLI*]: Set up a local workflow project using the command line.
+* xref:tooling/serverless-workflow-editor/swf-editor-vscode-extension.adoc[*VS Code extension*]: Use the Serverless Workflow editor and edit the link:{spec_website_url}[CNCF Serverless Workflow specification] files in Visual Studio Code.
+* xref:tooling/serverless-workflow-editor/swf-editor-chrome-extension.adoc[*Chrome GitHub extension*]: View and edit the CNCF Serverless Workflow specification files in GitHub.
+* xref:testing-and-troubleshooting/quarkus-dev-ui-extension/quarkus-dev-ui-overview.adoc[*Kogito Serverless Workflow Tools extension in Quarkus Dev UI*]: View, manage, and start the workflow instances.
+* xref:testing-and-troubleshooting/kn-plugin-workflow-overview.adoc[*{product_name} plug-in for Knative CLI*]: Set up a local workflow project using the command line.
* link:{kubesmarts_url}[*Serverless Logic online tooling*]: Try and run the Serverless Workflow example applications in a web environment.
include::../../pages/_common-content/report-issue.adoc[]
diff --git a/modules/serverless-logic/pages/integrations/camel-routes-integration.adoc b/modules/serverless-logic/pages/integrations/camel-routes-integration.adoc
index 009cc550..018e8b0c 100644
--- a/modules/serverless-logic/pages/integrations/camel-routes-integration.adoc
+++ b/modules/serverless-logic/pages/integrations/camel-routes-integration.adoc
@@ -1,6 +1,6 @@
= Integrating with Camel routes
-{product_name} can integrate with link:{camel_url}[Apache Camel Routes] by adding the Kogito Quarkus Camel Add-on to your project. It enables the workflow engine to identify and call Camel routes declared in YAML or XML in the same workflow project context.
+{product_name} can integrate with link:{camel_url}[Apache Camel Routes] by adding the {product_name} Quarkus Camel Add-on to your project. It enables the workflow engine to identify and call Camel routes declared in YAML or XML in the same workflow project context.
[[proc-enable-quarkus-camel]]
== Enabling Quarkus Camel in {product_name}
diff --git a/modules/serverless-logic/pages/integrations/custom-functions-knative.adoc b/modules/serverless-logic/pages/integrations/custom-functions-knative.adoc
index 118ece2d..94cf4012 100644
--- a/modules/serverless-logic/pages/integrations/custom-functions-knative.adoc
+++ b/modules/serverless-logic/pages/integrations/custom-functions-knative.adoc
@@ -33,6 +33,8 @@ include::../cloud/common/_prerequisites.adoc[]
--
+. xref:cloud/quarkus/kubernetes-service-discovery.adoc#ref-enabling-kubernetes-service-discovery[Enable the Service Discovery feature].
+
. Discover the name of the Knative service that your workflow will invoke. In a terminal window, run the following command:
+
--
@@ -64,17 +66,13 @@ Save the Knative service name (`custom-function-knative-service`) to use it in t
{
"name": "greet", <1>
"type": "custom", <2>
- "operation": "knative:custom-function-knative-service", <3>
- "metadata": {
- "path": "/function" <4>
- }
+   "operation": "knative:services.v1.serving.knative.dev/custom-function-knative-service?path=/function" <3>
}
----
<1> The name of the {product_name} function
<2> Indicates that this function is a custom one
-<3> Indicates that your custom function is of type `knative` and it will invoke the `custom-function-knative-service` service.
-<4> The resource path you want to access
+<3> The coordinates of the service you want to invoke
--
@@ -137,4 +135,4 @@ Knative functions support https://github.com/knative/func/blob/main/docs/functio
* xref:cloud/quarkus/deploying-on-minikube.adoc[Deploying your Serverless Workflow application on Minikube]
* xref:cloud/quarkus/deploying-on-kubernetes.adoc[Deploying your Serverless Workflow application on Kubernetes]
-include::../_common-content/report-issue.adoc[]
+include::../_common-content/report-issue.adoc[]
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/integrations/expose-metrics-to-prometheus.adoc b/modules/serverless-logic/pages/integrations/expose-metrics-to-prometheus.adoc
index 342af09d..a4d5c35d 100644
--- a/modules/serverless-logic/pages/integrations/expose-metrics-to-prometheus.adoc
+++ b/modules/serverless-logic/pages/integrations/expose-metrics-to-prometheus.adoc
@@ -1,4 +1,4 @@
-= Exposing workflow base metrics to Prometheus
+= Exposing Workflow base metrics to Prometheus
:compat-mode!:
// Metadata:
:description: Exposing the workflow base metrics to Prometheus
@@ -9,7 +9,7 @@
:grafana_url: https://grafana.com/
:quarkus_micrometer_url: https://quarkus.io/guides/micrometer
:openshift_monitoring_url: https://docs.openshift.com/container-platform/4.11/monitoring/enabling-monitoring-for-user-defined-projects.html
-:prometheus_operator_url: https://access.redhat.com/documentation/en-us/openshift_container_platform/3.11/html/configuring_clusters/prometheus-cluster-monitoring
+:prometheus_operator_url: https://prometheus-operator.dev/
:prometheus_operator_getting_started_guide: https://prometheus.io/docs/prometheus/latest/getting_started/#configure-prometheus-to-monitor-the-sample-targets
{product_name} generates metrics that can be consumed by Prometheus and visualized by dashboard tools, such as link:{openshift_micrometer_url}[OpenShift], link:{dashbuilder_url}[Dashbuilder], and link:{grafana_url}[Grafana].
@@ -22,9 +22,9 @@ This document describes how you can enable and expose the generated metrics to P
You can enable the metrics in your workflow application.
.Prerequisites
-* A workflow application is created.
+* A workflow application is created.
+
-For more information about creating a workflow, see xref:serverless-logic:getting-started/create-your-first-workflow-service.adoc[Creating your first workflow service].
+For more information about creating a workflow, see xref:getting-started/create-your-first-workflow-service.adoc[Creating your first workflow service].
.Procedure
. To add the metrics to your workflow application, add the `org.kie.kogito:kogito-addons-quarkus-monitoring-prometheus` dependency to the `pom.xml` file of your project:
@@ -60,12 +60,12 @@ If your workflow server is running on OpenShift, then you can use the server to
For more information, see <>.
.Procedure
-. To consume metrics from OpenShift, enable monitoring for user-defined projects.
+. To consume metrics from OpenShift, enable monitoring for user-defined projects.
+
--
For more information, see link:{openshift_monitoring_url}[Enabling monitoring for user-defined projects] in OpenShift documentation.
-When you enable monitoring for user-defined projects, the Prometheus Operator is installed automatically.
+When you enable monitoring for user-defined projects, the Prometheus Operator is installed automatically.
--
. Create a service monitor as shown in the following configuration:
@@ -109,7 +109,7 @@ After that, Prometheus sends request to the `/q/metrics` endpoint for all the se
[IMPORTANT]
====
-Consuming metrics from Kubernetes is similar to OpenShift. However, you need to install the Prometheus Operator project manually.
+Consuming metrics from Kubernetes is similar to OpenShift. However, you need to install the Prometheus Operator project manually.
For more information about installing Prometheus Operator, see link:{prometheus_operator_url}[Prometheus Operator] website.
====
@@ -152,7 +152,7 @@ In {product_name}, you can check the following example metrics:
* `kogito_process_instance_completed_total`: Completed workflows
* `kogito_process_instance_started_total`: Started workflows
* `kogito_process_instance_running_total`: Running workflows
-* `kogito_process_instance_duration_seconds_sum`: Workflows total duration
+* `kogito_process_instance_duration_seconds_sum`: Workflows total duration
[NOTE]
====
@@ -176,4 +176,4 @@ kogito_process_instance_completed_total{app_id="default-process-monitoring-liste
Internally, {product_name} uses Quarkus Micrometer extension, which also exposes built-in metrics. You can disable the Micrometer metrics in {product_name}. For more information, see link:{quarkus_micrometer_url}[Quarkus - Micrometer Metrics].
====
-include::../../pages/_common-content/report-issue.adoc[]
+include::../../pages/_common-content/report-issue.adoc[]
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/integrations/serverless-dashboard-with-runtime-data.adoc b/modules/serverless-logic/pages/integrations/serverless-dashboard-with-runtime-data.adoc
index 67d4d376..e0733a24 100644
--- a/modules/serverless-logic/pages/integrations/serverless-dashboard-with-runtime-data.adoc
+++ b/modules/serverless-logic/pages/integrations/serverless-dashboard-with-runtime-data.adoc
@@ -1,4 +1,4 @@
-= Displaying workflow data in dashboards
+= Displaying Workflow Data in Dashboards
:compat-mode!:
// Metadata:
@@ -36,7 +36,7 @@ You can build dashboards to monitor the data of your workflows using metrics.
.Prerequisites
* A workflow application is created.
+
-For more information about creating a workflow, see xref:serverless-logic:getting-started/create-your-first-workflow-service.adoc[Creating your first workflow service].
+For more information about creating a workflow, see xref:getting-started/create-your-first-workflow-service.adoc[Creating your first workflow service].
.Procedure
. To enable metrics for your workflows application add `org.kie.kogito:kogito-addons-quarkus-monitoring-prometheus` dependency in `pom.xml` file of your application:
@@ -245,7 +245,7 @@ The Data Index service uses GraphQL, so that dashbuilder can connect with the se
.Prerequisites
* A workflow application is created.
+
-For more information about creating a workflow, see xref:serverless-logic:getting-started/create-your-first-workflow-service.adoc[Creating your first workflow service].
+For more information about creating a workflow, see xref:getting-started/create-your-first-workflow-service.adoc[Creating your first workflow service].
.Procedure
. Go to the Data Index GraphQL interface (default to `http://localhost:8180/graphiql`) and test your query.
@@ -380,7 +380,7 @@ For more examples of dashboards, see link:{dashbuilder_url}[Dashbuilder] website
== Additional resources
-xref:serverless-logic:getting-started/create-your-first-workflow-service.adoc[Creating your first workflow service]
+xref:getting-started/create-your-first-workflow-service.adoc[Creating your first workflow service]
include::../../pages/_common-content/report-issue.adoc[]
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/persistence/data-consistency.adoc b/modules/serverless-logic/pages/persistence/data-consistency.adoc
deleted file mode 100644
index 485ba90e..00000000
--- a/modules/serverless-logic/pages/persistence/data-consistency.adoc
+++ /dev/null
@@ -1 +0,0 @@
-//= Data consistency
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/testing-and-troubleshooting/integration-tests-with-postgresql.adoc b/modules/serverless-logic/pages/persistence/integration-tests-with-postgresql.adoc
similarity index 95%
rename from modules/serverless-logic/pages/testing-and-troubleshooting/integration-tests-with-postgresql.adoc
rename to modules/serverless-logic/pages/persistence/integration-tests-with-postgresql.adoc
index 6e827c69..e04766fa 100644
--- a/modules/serverless-logic/pages/testing-and-troubleshooting/integration-tests-with-postgresql.adoc
+++ b/modules/serverless-logic/pages/persistence/integration-tests-with-postgresql.adoc
@@ -4,9 +4,9 @@
:description: Serverless Workflow integration test with PostgreSQL
:keywords: kogito, workflow, quarkus, serverless, quarkus-cli, test, integration, postgresql, persistence
// Referenced documentation pages
-:basic_integration_test_with_restassured_guide: xref:serverless-logic:testing-and-troubleshooting/basic-integration-tests-with-restassured.adoc
-:getting_started_create_first_workflow_guide: xref:serverless-logic:getting-started/create-your-first-workflow-service.adoc
-:persistence_with_postgresql_guide: xref:serverless-logic:persistence/persistence-with-postgresql.adoc
+:basic_integration_test_with_restassured_guide: xref:testing-and-troubleshooting/basic-integration-tests-with-restassured.adoc
+:getting_started_create_first_workflow_guide: xref:getting-started/create-your-first-workflow-service.adoc
+:persistence_with_postgresql_guide: xref:persistence/persistence-with-postgresql.adoc
// External pages
:quarkus_testing_guide_url: {quarkus_guides_base_url}/getting-started-testing
:quarkus_testing_guide_integration_test_url: {quarkus_testing_guide_url}#quarkus-integration-test
diff --git a/modules/serverless-logic/pages/persistence/persistence-with-postgresql.adoc b/modules/serverless-logic/pages/persistence/persistence-with-postgresql.adoc
index 8ee0fc1e..6fe0e151 100644
--- a/modules/serverless-logic/pages/persistence/persistence-with-postgresql.adoc
+++ b/modules/serverless-logic/pages/persistence/persistence-with-postgresql.adoc
@@ -1,11 +1,11 @@
-= Running a workflow service using PostgreSQL
+= Running a Quarkus Workflow Application using PostgreSQL
:compat-mode!:
// Metadata:
-:description: Running Serverless Workflow service using PostgresSQL
+:description: Running Quarkus Workflow Application using PostgresSQL
:keywords: kogito, workflow, quarkus, serverless, quarkus-cli, persistence, postgresql
// Referenced documentation pages.
-:getting_started_create_first_workflow_guide: xref:serverless-logic:getting-started/create-your-first-workflow-service.adoc
-:persistence_with_postgresql_guide: xref:serverless-logic:persistence/persistence-with-postgresql.adoc
+:getting_started_create_first_workflow_guide: xref:getting-started/create-your-first-workflow-service.adoc
+:persistence_with_postgresql_guide: xref:persistence/persistence-with-postgresql.adoc
// External pages
:quarkus_datasource_guide: https://quarkus.io/guides/datasource
:quarkus_datasource_devservices_guide: https://quarkus.io/guides/databases-dev-services
@@ -16,15 +16,15 @@
This document describes how you can run your workflow application using PostgreSQL persistence.
-When your workflow execution requires `wait` states, then running your workflow application with persistence enabled is a recommended approach.
+When your workflow execution requires `wait` states, then running your workflow application with persistence enabled is a recommended approach.
-For example, when a process reaches a `callback` or needs to wait for an event, then the execution of the process is paused and the engine takes a snapshot of the workflow data. The snapshot is persisted in the database as a binary format along with process metadata information. The process metadata information includes process ID, process instance ID, and process version.
+For example, when a process reaches a `callback` or needs to wait for an event, then the execution of the process is paused and the engine takes a snapshot of the workflow data. The snapshot is persisted in the database as a binary format along with process metadata information. The process metadata information includes process ID, process instance ID, and process version.
Runtime persistence is used for storing data, which is required to resume the workflow execution of a process instance. Once a process is completed, the related data is removed from the database. This means that only required data to resume the execution is persisted.
-In {product_name}, you can enable persistence using add-ons. This document describes the use of the `kogito-addons-quarkus-persistence-jdbc` add-on, which is based on Java Database Connectivity (JDBC) along with PostgreSQL.
+In {product_name}, you can enable persistence using add-ons. This document describes the use of the `kogito-addons-quarkus-persistence-jdbc` add-on, which is based on Java Database Connectivity (JDBC) along with PostgreSQL.
-The `kogito-addons-quarkus-persistence-jdbc` add-on also extends on the Quarkus capabilities and you can use the available features directly from Quarkus JDBC support. For more information about Quarkus and JDBC, see link:{quarkus_datasource_guide}[Quarkus Datasources].
+The `kogito-addons-quarkus-persistence-jdbc` add-on also builds on the Quarkus capabilities, and you can use the available features directly from Quarkus JDBC support. For more information about Quarkus and JDBC, see link:{quarkus_datasource_guide}[Quarkus Datasources].
You can also see the `serverless-workflow-callback-quarkus` example application in GitHub repository. To execute the `serverless-workflow-callback-quarkus` example application, you can follow the instructions mentioned in the `README` file. To clone the `kogito-example` repository, use the following command:
@@ -109,7 +109,7 @@ quarkus.datasource.db-kind=postgresql
You can find more details regarding the PostgreSQL database schema migration in the xref:persistence/postgresql-flyway-migration.adoc[Flyway migration guide].
--
-. Optional: To handle the concurrent requests to shared workflow instances, enable the persistence-enabled optimistic locking for concurrency control using the version field in the database.
+. Optional: To handle the concurrent requests to shared workflow instances, enable the persistence-enabled optimistic locking for concurrency control using the version field in the database.
+
--
Add `kogito.persistence.optimistic.lock=true` property in the `application.properties` file of your project to enable the optimistic locking.
@@ -127,11 +127,11 @@ Add `kogito.persistence.optimistic.lock=true` property in the `application.prope
"version": "1.0"
}
----
-The versioning strategy is used to allow different workflow applications to run different versions of a process at the same time. The different versions of a process share the same database. This is useful when you migrate a process from one version to another. When allowing workflow instances to finish executing, a new version can be deployed using a new workflow application setup.
+The versioning strategy is used to allow different workflow applications to run different versions of a process at the same time. The different versions of a process share the same database. This is useful when you migrate a process from one version to another. When allowing workflow instances to finish executing, a new version can be deployed using a new workflow application setup.
By default, the engine considers the `version` specified in the workflow file as the current version of the asset. Therefore, you need to manually change the `version` in the workflow file, making the engine consider the specified version as a new version.
-As an alternative, you can set the `kogito.workflow.version-strategy=project` property in the `application.properties` file of your project. This enables the engine to consider the Maven or Gradle project version as the version of all workflows in the project. For instance, when you release a new version of your Maven project, the version in the workflow file is automatically updated.
+As an alternative, you can set the `kogito.workflow.version-strategy=project` property in the `application.properties` file of your project. This enables the engine to consider the Maven or Gradle project version as the version of all workflows in the project. For instance, when you release a new version of your Maven project, the version in the workflow file is automatically updated.
--
[[ref-postgresql-persistence-configuration]]
@@ -166,7 +166,7 @@ The following table serves as a quick reference of commonly used persistence con
[[postgresql-persistence-additional-resources]]
== Additional resources
-* xref:serverless-logic:testing-and-troubleshooting/integration-tests-with-postgresql.adoc[{product_name} integration test using PostgreSQL]
-* xref:persistence/postgresql-flyway-migration.adoc[Migrating Your PostgreSQL Database with a Kogito upgrade]
+* xref:persistence/integration-tests-with-postgresql.adoc[{product_name} integration test using PostgreSQL]
+* xref:persistence/postgresql-flyway-migration.adoc[Migrating Your PostgreSQL Database with a {product_name} upgrade]
include::../../pages/_common-content/report-issue.adoc[]
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/persistence/postgresql-flyway-migration.adoc b/modules/serverless-logic/pages/persistence/postgresql-flyway-migration.adoc
index eda71252..5e1785a0 100644
--- a/modules/serverless-logic/pages/persistence/postgresql-flyway-migration.adoc
+++ b/modules/serverless-logic/pages/persistence/postgresql-flyway-migration.adoc
@@ -1,4 +1,5 @@
-= Migrating Your PostgreSQL Database with a {product_name} upgrade
+= PostgreSQL Database Migration
+
:flyway_install_url: https://flywaydb.org/download/community
:flyway_migrate_existing_url: https://flywaydb.org/documentation/learnmore/existing
:kogito_ddl_script_url: https://repo1.maven.org/maven2/org/kie/kogito/kogito-ddl
@@ -13,7 +14,7 @@ When you upgrade your {product_name} version, by default it **won't** pick up th
=== Migrate using Flyway Config
-* {product_name} provides a mechanism for migrating your database while updating the {product_name} version using the following `Flyway` properties (default value is `false`, not upgrade):
+* {product_name} provides a mechanism for migrating your database while updating the {product_name} version using the following `Flyway` properties (the default value is `false`, that is, no upgrade):
+
--
[source,properties]
@@ -75,7 +76,7 @@ You can use the provided SQL scripts in the zip file to migrate the database by
--
== Baseline migration
-In order to apply migrations to an existing schema. It is necessary to establish the baseline from where Flyway should start executing new migrations. That means, in case your schema already contains a structure and from now on, new changes provided by {product_name} should be applied via Flyway integration. In Flyway terms, only migrations above `baselineVersion` will then be applied.
+To apply migrations to an existing schema, it is necessary to establish the baseline from which Flyway should start executing new migrations. That is, if your schema already contains a structure, new changes provided by {product_name} should, from now on, be applied via the Flyway integration. In Flyway terms, only migrations above the `baselineVersion` will then be applied.
Using below properties you can initialize flyway schema table.
=== Baseline using Quarkus Configuration:
diff --git a/modules/serverless-logic/pages/release-notes.adoc b/modules/serverless-logic/pages/release-notes.adoc
index 768fcc20..763845bb 100644
--- a/modules/serverless-logic/pages/release-notes.adoc
+++ b/modules/serverless-logic/pages/release-notes.adoc
@@ -1,11 +1,16 @@
-= New features on OpenShift Serverless - Developer Preview Releases
+= New features on OpenShift Serverless Logic - Technology Preview 2 Release
:compat-mode!:
== Notable changes
-* link:https://issues.redhat.com/browse/SRVLOGIC-136[SRVLOGIC-136] - Recollection of DP6 small features
-* link:https://issues.redhat.com/browse/SRVLOGIC-68[SRVLOGIC-68] - YAML Serverless Workflow parsing on Stunner
-* link:https://issues.redhat.com/browse/SRVLOGIC-133[SRVLOGIC-133] - Support validation of output schema
-* link:https://issues.redhat.com/browse/SRVLOGIC-145[SRVLOGIC-145] - Serverless Workflow Visualization Improvements - DP6
+* link:https://issues.redhat.com/browse/SRVLOGIC-160[SRVLOGIC-160] - [operator use case] Multiple Workflows pushing to a single Data Index
+* link:https://issues.redhat.com/browse/SRVLOGIC-163[SRVLOGIC-163] - [operator use case] Singleton Data Index per Namespace
+* link:https://issues.redhat.com/browse/SRVLOGIC-144[SRVLOGIC-144] - Productize OpenShift Serverless Logic Operator
+* link:https://issues.redhat.com/browse/SRVLOGIC-156[SRVLOGIC-156] - Example to showcase the operator dev mode use case with timers
+* link:https://issues.redhat.com/browse/SRVLOGIC-84[SRVLOGIC-84] - Create addons to persist audit data at runtimes and a common graphql query service
== Other changes and Bug fixes
+
+* link:https://issues.redhat.com/browse/SRVLOGIC-158[SRVLOGIC-158] - Dynamic resources script is reading wrong container sys files on cgroupsv2
+* link:https://issues.redhat.com/browse/SRVLOGIC-168[SRVLOGIC-168] - Missing commonmark and hibernate-types sources JARs in the maven repository
+* link:https://issues.redhat.com/browse/SRVLOGIC-198[SRVLOGIC-198] - SonataFlow builder image is not preserving resources path
diff --git a/modules/serverless-logic/pages/security/authention-support-for-openapi-services.adoc b/modules/serverless-logic/pages/security/authention-support-for-openapi-services.adoc
index f693f12c..c69f7563 100644
--- a/modules/serverless-logic/pages/security/authention-support-for-openapi-services.adoc
+++ b/modules/serverless-logic/pages/security/authention-support-for-openapi-services.adoc
@@ -4,9 +4,9 @@
:description: Authentication support for OpenAPI services
:keywords: kogito, workflow, serverless, authentication
// Referenced documentation pages.
-:orchestration-of-openapi-based-services: xref:serverless-logic:service-orchestration/orchestration-of-openapi-based-services.adoc
-:configuring-openapi-services-endpoints: xref:serverless-logic:service-orchestration/configuring-openapi-services-endpoints.adoc
-:orchestrating-third-party-services-with-oauth2: xref:serverless-logic:security/orchestrating-third-party-services-with-oauth2.adoc
+:orchestration-of-openapi-based-services: xref:service-orchestration/orchestration-of-openapi-based-services.adoc
+:configuring-openapi-services-endpoints: xref:service-orchestration/configuring-openapi-services-endpoints.adoc
+:orchestrating-third-party-services-with-oauth2: xref:security/orchestrating-third-party-services-with-oauth2.adoc
This document describes the supported authentication types and how to configure them to access the OpenAPI service operations that are used in workflows.
diff --git a/modules/serverless-logic/pages/security/orchestrating-third-party-services-with-oauth2.adoc b/modules/serverless-logic/pages/security/orchestrating-third-party-services-with-oauth2.adoc
index 788464ee..5d4e9077 100644
--- a/modules/serverless-logic/pages/security/orchestrating-third-party-services-with-oauth2.adoc
+++ b/modules/serverless-logic/pages/security/orchestrating-third-party-services-with-oauth2.adoc
@@ -1,14 +1,14 @@
-= Orchestration of third-party services using OAuth 2.0 authentication in {product_name}
+= Orchestration of third-party services using OAuth 2.0 authentication
:compat-mode!:
// Metadata:
:description: Orchestration of third-party services using OAuth 2.0 authentication
:keywords: kogito, workflow, serverless, authentication, oauth2
// Referenced documentation pages.
-:orchestration-of-openapi-based-services: xref:serverless-logic:service-orchestration/orchestration-of-openapi-based-services.adoc
-:configuring-openapi-services-endpoints: xref:serverless-logic:service-orchestration/configuring-openapi-services-endpoints.adoc
-:authentication-support-for-openapi-services: xref:serverless-logic:security/authention-support-for-openapi-services.adoc
-:authentication-support-for-openapi-services-oauth-example: xref:serverless-logic:security/authention-support-for-openapi-services.adoc#ref-example-oauth-authentication
+:orchestration-of-openapi-based-services: xref:service-orchestration/orchestration-of-openapi-based-services.adoc
+:configuring-openapi-services-endpoints: xref:service-orchestration/configuring-openapi-services-endpoints.adoc
+:authentication-support-for-openapi-services: xref:security/authention-support-for-openapi-services.adoc
+:authentication-support-for-openapi-services-oauth-example: xref:security/authention-support-for-openapi-services.adoc#ref-example-oauth-authentication
// Other links
:kogito_sw_examples_serverless_workflow_oauth2_orchestration_url: {kogito_sw_examples_url}/serverless-workflow-oauth2-orchestration-quarkus
:kogito_sw_examples_serverless_workflow_oauth2_orchestration_workflow_definition_url: {kogito_sw_examples_url}/serverless-workflow-oauth2-orchestration-quarkus/currency-exchange-workflow/src/main/resources/currency-exchange-workflow.sw.json
@@ -664,6 +664,6 @@ In this example the error indicates that it was not possible to contact the `acm
== Additional resources
-* xref:serverless-logic:service-orchestration/orchestration-of-openapi-based-services.adoc[Orchestrating the OpenAPI services]
+* xref:service-orchestration/orchestration-of-openapi-based-services.adoc[Orchestrating the OpenAPI services]
-include::../../pages/_common-content/report-issue.adoc[]
+include::../../pages/_common-content/report-issue.adoc[]
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/service-orchestration/configuring-openapi-services-endpoints.adoc b/modules/serverless-logic/pages/service-orchestration/configuring-openapi-services-endpoints.adoc
index 6be68557..51332f5f 100644
--- a/modules/serverless-logic/pages/service-orchestration/configuring-openapi-services-endpoints.adoc
+++ b/modules/serverless-logic/pages/service-orchestration/configuring-openapi-services-endpoints.adoc
@@ -1,4 +1,4 @@
-= Configuring the OpenAPI services endpoints
+= Configuring OpenAPI Services Endpoints
:compat-mode!:
// Metadata:
@@ -10,7 +10,7 @@
This document describes how you can configure OpenAPI service endpoints in {product_name}.
[[con-config-openapi-services-endpoints]]
-== Overview
+== Overview
{product_name} leverages MicroProfile REST Client to invoke OpenAPI services. Therefore, you can configure the OpenAPI services by following the MicroProfile Config specification. For the list of properties to configure in the MicroProfile REST Client specification, see link:https://download.eclipse.org/microprofile/microprofile-rest-client-2.0/microprofile-rest-client-spec-2.0.html#mpconfig[Support for MicroProfile Config] in MicroProfile REST Client documentation.
@@ -115,7 +115,7 @@ A Kubernetes service endpoint can be used as a service URL if the target service
=== Using URI alias
-As an alternative to `kogito.sw.operationIdStrategy`, you can assign an alias name to an URI by using `workflow-uri-definitions` custom link:{spec_doc_url}#extensions[extension]. Then you can use that alias as configuration key and in function definitions.
+As an alternative to `kogito.sw.operationIdStrategy`, you can assign an alias name to an URI by using `workflow-uri-definitions` custom link:{spec_doc_url}#extensions[extension]. Then you can use that alias as configuration key and in function definitions.
.Example workflow
[source,json]
@@ -126,12 +126,12 @@ As an alternative to `kogito.sw.operationIdStrategy`, you can assign an alias na
"remoteCatalog": "https://my.remote.host/apicatalog/apis/123/document",
}
}
- ],
+ ],
"functions": [
{
"name": "operation1",
"operation": "remoteCatalog#operation1"
- },
+ },
{
"name": "operation2",
"operation": "remoteCatalog#operation2"
@@ -322,20 +322,20 @@ You can access the `real-stock-service` service at `http://localhost:8383/`.
. In a separate command terminal window, run the `fake-stock-service` service:
+
--
-.Run `fake-stock-service` service
+.Run `fake-stock-service` service
[source,shell]
----
cd fake-stock-service
mvn quarkus:dev -Ddebug=false
----
-You can access the`fake-stock-service` service at `http://localhost:8181/`.
+You can access the `fake-stock-service` service at `http://localhost:8181/`.
--
[[proc-config-openapi-services-running-sw-application-in-development-mode]]
=== Running workflow application in development mode
-When you define `%dev.quarkus.rest-client.stock_svc_yaml.url=http://localhost:8181/`, the `fake-stock-service` service is used in the development mode and you get the same result every time you run the workflow. Using this example, you can run the workflow application in development mode.
+When you define `%dev.quarkus.rest-client.stock_svc_yaml.url=http://localhost:8181/`, the `fake-stock-service` service is used in the development mode and you get the same result every time you run the workflow. Using this example, you can run the workflow application in development mode.
.Prerequisites
* Services that the workflow application sends requests to are started.
@@ -479,7 +479,7 @@ Note that, in the previous example, you overwrote the property defined in the `a
== Additional resources
-* xref:serverless-logic:service-orchestration/orchestration-of-openapi-based-services.adoc[Orchestrating the OpenAPI services]
+* xref:service-orchestration/orchestration-of-openapi-based-services.adoc[Orchestrating the OpenAPI services]
* link:{quarkus-profiles-url}[Quarkus configuration guide]
-include::../../pages/_common-content/report-issue.adoc[]
+include::../../pages/_common-content/report-issue.adoc[]
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/service-orchestration/orchestration-of-grpc-services.adoc b/modules/serverless-logic/pages/service-orchestration/orchestration-of-grpc-services.adoc
index 21136dc4..aec73107 100644
--- a/modules/serverless-logic/pages/service-orchestration/orchestration-of-grpc-services.adoc
+++ b/modules/serverless-logic/pages/service-orchestration/orchestration-of-grpc-services.adoc
@@ -1,4 +1,4 @@
-= Orchestrating the gRPC based services
+= Orchestrating gRPC based Services
:compat-mode!:
// Metadata:
:description: Understanding JQ expressions
@@ -161,7 +161,7 @@ If, for any reason, your default value is semantically valid and you want the va
mvn compile exec:java -Dexec.mainClass="org.kie.kogito.examples.sw.greeting.GreeterService"
----
-. Once the server is running, you must navigate to the `serverless-workflow-greeting-client-rpc-quarkus` directory in a separate command terminal and run the workflow application by entering the following command:
+. Once the server is running, you must navigate to the `serverless-workflow-greeting-client-rpc-quarkus` directory in a separate command terminal and run the workflow application by entering the following command:
+
[source,shell]
----
diff --git a/modules/serverless-logic/pages/service-orchestration/orchestration-of-openapi-based-services.adoc b/modules/serverless-logic/pages/service-orchestration/orchestration-of-openapi-based-services.adoc
index 600fbebb..8d10ea4e 100644
--- a/modules/serverless-logic/pages/service-orchestration/orchestration-of-openapi-based-services.adoc
+++ b/modules/serverless-logic/pages/service-orchestration/orchestration-of-openapi-based-services.adoc
@@ -1,4 +1,4 @@
-= Orchestrating the OpenAPI services
+= Orchestrating OpenAPI Services
:compat-mode!:
// Metadata:
:description: Orchestration of OpenAPI based services
@@ -15,13 +15,13 @@
:quarkus_rest_client_url: https://quarkus.io/guides/rest-client
:mp_config_env_vars_url: https://github.com/eclipse/microprofile-config/blob/master/spec/src/main/asciidoc/configsources.asciidoc#environment-variables-mapping-rules
// Referenced documentation pages.
-:getting-familiar-with-our-tooling: xref:serverless-logic:getting-started/getting-familiar-with-our-tooling.adoc
-:create-your-first-workflow-service: xref:serverless-logic:getting-started/create-your-first-workflow-service.adoc
-:build-workflow-image-with-quarkus-cli: xref:serverless-logic:cloud/quarkus/build-workflow-image-with-quarkus-cli.adoc
-:understanding-jq-expressions: xref:serverless-logic:core/understanding-jq-expressions.adoc
-:configuring-openapi-services-endpoints: xref:serverless-logic:service-orchestration/configuring-openapi-services-endpoints.adoc
-:camel-k-integration: xref:serverless-logic:integrations/camel-k-integration.adoc
-:authention-support-for-openapi-services: xref:serverless-logic:security/authention-support-for-openapi-services.adoc
+:getting-familiar-with-our-tooling: xref:getting-started/getting-familiar-with-our-tooling.adoc
+:create-your-first-workflow-service: xref:getting-started/create-your-first-workflow-service.adoc
+:build-workflow-image-with-quarkus-cli: xref:cloud/quarkus/build-workflow-image-with-quarkus-cli.adoc
+:understanding-jq-expressions: xref:core/understanding-jq-expressions.adoc
+:configuring-openapi-services-endpoints: xref:service-orchestration/configuring-openapi-services-endpoints.adoc
+:camel-k-integration: xref:integrations/camel-k-integration.adoc
+:authention-support-for-openapi-services: xref:security/authention-support-for-openapi-services.adoc
This document describes how to call REST services using an link:{open_api_spec_url}[OpenAPI] specification file.
diff --git a/modules/serverless-logic/pages/eventing/working-with-openapi-callbacks.adoc b/modules/serverless-logic/pages/service-orchestration/working-with-openapi-callbacks.adoc
similarity index 100%
rename from modules/serverless-logic/pages/eventing/working-with-openapi-callbacks.adoc
rename to modules/serverless-logic/pages/service-orchestration/working-with-openapi-callbacks.adoc
diff --git a/modules/serverless-logic/pages/supporting-services/data-index/common/_data_index_deployment_operator.adoc b/modules/serverless-logic/pages/supporting-services/data-index/common/_data_index_deployment_operator.adoc
new file mode 100644
index 00000000..7b3724e4
--- /dev/null
+++ b/modules/serverless-logic/pages/supporting-services/data-index/common/_data_index_deployment_operator.adoc
@@ -0,0 +1,220 @@
+
+link:{flow_examples_operator_url}/tree/main/infra/dataindex[Here] you can find the infrastructure kustomization required to deploy the {data_index_ref} service and the PostgreSQL database described in this use case.
+
+That folder contains four files:
+
+* kustomization.yaml
+* 01-postgres.yaml
+* 02-dataindex.yaml
+* application.properties
+
+.`kustomization.yaml` resources that deploy {data_index_ref} deployment with persistence to a postgresql database
+[source,yaml,subs="attributes+"]
+----
+resources:
+- 01-postgres.yaml <1>
+- 02-dataindex.yaml <2>
+
+secretGenerator:
+ - name: postgres-secrets
+ literals:
+ - POSTGRES_USER=sonataflow
+ - POSTGRES_PASSWORD=sonataflow
+ - POSTGRES_DB=sonataflow
+ - PGDATA=/var/lib/postgresql/data/mydata
+
+configMapGenerator:
+ - name: dataindex-properties
+ files:
+ - application.properties
+----
+<1> Postgres database deployment
+<2> {data_index_ref} deployment
+
+.`01-postgres.yaml` that deploys the PostgreSQL database
+[source,yaml,subs="attributes+"]
+----
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ labels:
+ app.kubernetes.io/name: postgres
+ name: postgres-pvc
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app.kubernetes.io/name: postgres
+ name: postgres
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: postgres
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: postgres
+ spec:
+ containers:
+ - name: postgres
+ image: postgres:13.2-alpine
+ imagePullPolicy: 'IfNotPresent'
+ ports:
+ - containerPort: 5432
+ volumeMounts:
+ - name: storage
+ mountPath: /var/lib/postgresql/data
+ envFrom:
+ - secretRef:
+ name: postgres-secrets
+ readinessProbe:
+ exec:
+ command: ["pg_isready"]
+ initialDelaySeconds: 15
+ timeoutSeconds: 2
+ livenessProbe:
+ exec:
+ command: ["pg_isready"]
+ initialDelaySeconds: 15
+ timeoutSeconds: 2
+ resources:
+ limits:
+ memory: "256Mi"
+ cpu: "500m"
+ volumes:
+ - name: storage
+ persistentVolumeClaim:
+ claimName: postgres-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app.kubernetes.io/name: postgres
+ name: postgres
+spec:
+ selector:
+ app.kubernetes.io/name: postgres
+ ports:
+ - port: 5432
+----
+
+.`02-dataindex.yaml` that deploys {data_index_ref} with persistence to the previously defined PostgreSQL database
+[source,yaml,subs="attributes+"]
+----
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app.kubernetes.io/name: data-index-service-postgresql
+ name: data-index-service-postgresql
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: data-index-service-postgresql
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: data-index-service-postgresql
+ spec:
+ containers:
+ - name: data-index-service-postgresql
+ image: quay.io/kiegroup/kogito-data-index-postgresql:latest
+ imagePullPolicy: Always
+ resources:
+ limits:
+ memory: "256Mi"
+ cpu: "500m"
+ ports:
+ - containerPort: 8080
+ name: http
+ protocol: TCP
+ env:
+ - name: KOGITO_DATA_INDEX_QUARKUS_PROFILE
+ value: http-events-support
+ - name: KUBERNETES_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: QUARKUS_DATASOURCE_USERNAME
+ valueFrom:
+ secretKeyRef:
+ key: POSTGRES_USER
+ name: postgres-secrets
+ - name: QUARKUS_DATASOURCE_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ key: POSTGRES_PASSWORD
+ name: postgres-secrets
+ volumeMounts:
+ - name: application-config
+ mountPath: "/home/kogito/config"
+ livenessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /q/health/live
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: 0
+ periodSeconds: 30
+ successThreshold: 1
+ timeoutSeconds: 10
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /q/health/ready
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: 0
+ periodSeconds: 30
+ successThreshold: 1
+ timeoutSeconds: 10
+ volumes:
+ - name: application-config
+ configMap:
+ name: dataindex-properties
+ initContainers:
+ - name: init-postgres
+ image: registry.access.redhat.com/ubi9/ubi-minimal:latest
+ imagePullPolicy: IfNotPresent
+ command: ['sh', '-c', 'until (echo 1 > /dev/tcp/postgres.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local/5432) >/dev/null 2>&1; do echo "Waiting for postgres server"; sleep 3; done;']
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app.kubernetes.io/name: data-index-service-postgresql
+ name: data-index-service-postgresql
+spec:
+ ports:
+ - name: http
+ port: 80
+ targetPort: 8080
+ selector:
+ app.kubernetes.io/name: data-index-service-postgresql
+ type: NodePort
+----
+
+.`application.properties` referenced by `kustomization.yaml`
+[source,properties]
+----
+quarkus.http.port=8080
+quarkus.http.cors=true
+quarkus.http.cors.origins=/.*/
+
+quarkus.datasource.jdbc.url=jdbc:postgresql://postgres:5432/sonataflow?currentSchema=data-index-service
+quarkus.hibernate-orm.database.generation=update
+quarkus.flyway.migrate-at-start=true
+
+# Disable kafka client health check since the quarkus-http connector is being used instead.
+quarkus.smallrye-health.check."io.quarkus.kafka.client.health.KafkaHealthCheck".enabled=false
+----
diff --git a/modules/serverless-logic/pages/supporting-services/data-index/common/_prerequisites.adoc b/modules/serverless-logic/pages/supporting-services/data-index/common/_prerequisites.adoc
new file mode 100644
index 00000000..b2df8db1
--- /dev/null
+++ b/modules/serverless-logic/pages/supporting-services/data-index/common/_prerequisites.adoc
@@ -0,0 +1,27 @@
+.Prerequisites
+* Minikube installed with `registry` addon enabled
+* `kubectl` {kubectl_prereq}
+* {product_name} operator installed if workflows are deployed. To install the operator, see xref:serverless-logic:cloud/operator/install-serverless-operator.adoc[].
+
+[NOTE]
+====
+We recommend that you start Minikube with the following parameters. Note that the `registry` addon must be enabled.
+
+[source,shell]
+----
+minikube start --cpus 4 --memory 10240 --addons registry --addons metrics-server --insecure-registry "10.0.0.0/24" --insecure-registry "localhost:5000"
+----
+
+To verify that the registry addon was properly enabled, you can execute this command:
+
+[source,shell]
+----
+minikube addons list | grep registry
+----
+
+----
+| registry | minikube | enabled ✅ | Google |
+| registry-aliases | minikube | disabled | 3rd party (unknown) |
+| registry-creds | minikube | disabled | 3rd party (UPMC Enterprises) |
+----
+====
diff --git a/modules/serverless-logic/pages/supporting-services/data-index/common/_querying_data_index.adoc b/modules/serverless-logic/pages/supporting-services/data-index/common/_querying_data_index.adoc
new file mode 100644
index 00000000..7effdb6f
--- /dev/null
+++ b/modules/serverless-logic/pages/supporting-services/data-index/common/_querying_data_index.adoc
@@ -0,0 +1,105 @@
+[[querying-dataindex-minikube]]
+== Querying Data Index service on Minikube
+
+You can use the public Data Index endpoint to play around with the GraphiQL interface.
+
+.Procedure
+This procedure applies to all use cases that deploy the Data Index service.
+
+* Get the Data Index URL:
+[source,shell]
+----
+minikube service data-index-service-postgresql --url -n my_usecase
+----
+
+* Open the GraphiQL UI
+
+Using the returned URL, open the GraphiQL interface in a browser window, for example http://192.168.49.2:32409/graphiql/.
+
+[NOTE]
+====
+The IP and port will be different in your installation. Do not forget to add the trailing slash "/" to the URL, otherwise the GraphiQL UI will not open.
+====
+
+
+To see the process instances information you can execute this query:
+
+[source]
+----
+{
+ ProcessInstances {
+ id,
+ processId,
+ processName,
+ variables,
+ state,
+ endpoint,
+ serviceUrl,
+ start,
+ end
+ }
+}
+----
+
+The results should be something like:
+
+[source]
+----
+{
+ "data": {
+ "ProcessInstances": [
+ {
+ "id": "3ed8bf63-85c9-425d-9099-49bfb63608cb",
+ "processId": "greeting",
+ "processName": "workflow",
+ "variables": "{\"workflowdata\":{\"name\":\"John\",\"greeting\":\"Hello from JSON Workflow, \",\"language\":\"English\"}}",
+ "state": "COMPLETED",
+ "endpoint": "/greeting",
+ "serviceUrl": "http://greeting",
+ "start": "2023-09-13T06:59:24.319Z",
+ "end": "2023-09-13T06:59:24.400Z"
+ }
+ ]
+ }
+}
+----
+
+To see the job instances information, if any, you can execute this query:
+
+[source]
+----
+{
+ Jobs {
+ id,
+ processId,
+ processInstanceId,
+ status,
+ expirationTime,
+ retries,
+ endpoint,
+ callbackEndpoint
+ }
+}
+----
+
+The results should be something like:
+
+[source]
+----
+{
+ "data": {
+ "Jobs": [
+ {
+ "id": "55c7aadb-3dff-4b97-af8e-cc45014b1c0d",
+ "processId": "callbackstatetimeouts",
+ "processInstanceId": "299886b7-2b78-4965-a701-16783c4162d8",
+ "status": "EXECUTED",
+ "expirationTime": null,
+ "retries": 0,
+ "endpoint": "http://jobs-service-postgresql/jobs",
+ "callbackEndpoint": "http://callbackstatetimeouts:80/management/jobs/callbackstatetimeouts/instances/299886b7-2b78-4965-a701-16783c4162d8/timers/-1"
+ }
+ ]
+ }
+}
+----
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/supporting-services/data-index/data-index-core-concepts.adoc b/modules/serverless-logic/pages/supporting-services/data-index/data-index-core-concepts.adoc
new file mode 100644
index 00000000..9b99f3ea
--- /dev/null
+++ b/modules/serverless-logic/pages/supporting-services/data-index/data-index-core-concepts.adoc
@@ -0,0 +1,636 @@
+= Data Index Core Concepts
+:compat-mode!:
+// Metadata:
+:description: Data Index Service to allow to index and query audit data in {product_name}
+:keywords: workflow, serverless, data, dataindex, data-index, index, service
+// External pages
+:cloud_events_url: https://cloudevents.io/
+:graphql_url: https://graphql.org
+:vertx_url: https://vertx.io/
+:infinispan_url: https://infinispan.org/
+:mongo_url: https://www.mongodb.com/
+:postgresql_url: https://www.postgresql.org/
+:dev_services_url: https://quarkus.io/guides/dev-services
+:flyway_quarkus_url: https://quarkus.io/guides/flyway
+
+// Referenced documentation pages
+:path_resolution_url: https://quarkus.io/blog/path-resolution-in-quarkus/#defaults
+
+//Common constants
+:data_index_ref: Data Index
+:workflow_instance: workflow instance
+:workflow_instances: {workflow_instance}s
+
+In {product_name} platform there is a dedicated supporting service that stores the data related to the {workflow_instances} and their associated jobs called *{data_index_ref}* service.
+This service also provides a GraphQL endpoint allowing users to query that data and perform operations, also known as mutations in GraphQL terms.
+
+The data processed by the {data_index_ref} service is usually received via events. The events consumed can be generated by any workflow or by the xref:job-services/core-concepts.adoc[Job service] itself.
+This event communication can be configured in different ways, as described in the <<data-index-service-communication>> section.
+
+The {data_index_ref} service uses Apache Kafka or Knative eventing to consume link:{cloud_events_url}[CloudEvents] messages from workflows.
+The event data is indexed and stored in the database for querying via GraphQL. These events contain information about units of work executed for a workflow.
+The {data_index_ref} service is at the core of all {product_name} search, insight, and management capabilities.
+
+The {product_name} Data Index Service has the following key attributes:
+
+* Flexible data structure
+* Distributable and cloud-ready format
+* Message-based communication with workflows (Apache Kafka, Knative, CloudEvents)
+* Powerful querying API using GraphQL
+* Management capabilities using the Gateway API, to perform actions using GraphQL syntax on remote runtimes with a single entrypoint
+
+== {data_index_ref} service in {product_name}
+
+The {product_name} {data_index_ref} Service is a Quarkus application, based on link:{vertx_url}[Vert.x] with link:{smallrye_messaging_url}[Reactive Messaging], that exposes a link:{graphql_url}[GraphQL] endpoint that client applications use to access indexed data and perform management operations related to running workflow instances.
+
+[NOTE]
+====
+The indexing functionality in the {data_index_ref} service is provided by choosing one of the following persistence providers:
+
+* link:{postgresql_url}[PostgreSQL]
+* link:{infinispan_url}[Infinispan]
+* link:{mongo_url}[MongoDB]
+====
+
+The {data_index_ref} Service is designed as an application to store and query the existing workflow data. The data arrives contained in events. The service allows multiple connection options, as described in the <<data-index-service-communication>> section.
+
+[#data-index-deployments]
+== {data_index_ref} scenarios
+
+{data_index_ref} is distributed in different ways to allow deployment in different topologies, and depending on how the data is indexed.
+
+The following sections describe the different options of {data_index_ref} deployments.
+
+=== {data_index_ref} as a standalone service
+
+It can be deployed by explicitly referencing the *image*, starting a separate service inside a container. See xref:data-index/data-index-service.adoc[{data_index_ref} standalone].
+
+image::data-index/data-index-standalone-service.png[Image of data-index deployed as an external service]
+
+This type of deployment requires choosing the right image depending on the persistence type, specifying the database connection properties, and providing the event consumption configuration.
+
+[#data-index-dev-service]
+=== {data_index_ref} service as Quarkus Development service
+It can also be deployed transparently as a *Quarkus Development Service* when Quarkus Dev mode is used in the {product_name} application.
+When you use the {product_name} Process Quarkus extension, a temporary {data_index_ref} Service is automatically provisioned while the Quarkus application is running in development mode and the Dev Service is set up for immediate use.
+
+image::data-index/data-index-dev-service.png[Image of data-index deployed as a Quarkus Dev Service]
+
+More details are provided in the xref:data-index/data-index-service.adoc#data-index-dev-service-details[{data_index_ref} as a Quarkus Development service] section.
+
+The {product_name} Process Quarkus extension sets up your Quarkus application to automatically replicate any {product_name} messaging events related to {workflow_instances} or jobs into the provisioned Data Index instance.
+
+For more information about Quarkus Dev Services, see link:{dev_services_url}[Dev Services guide].
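+
+The Dev Service behavior is controlled from the workflow's `application.properties`. The following is a minimal sketch using the `quarkus.kogito.devservices.enabled` property, which is described again later in this guide:
+
+.Example `application.properties` sketch for the {data_index_ref} Dev Service
+[source,properties]
+----
+# The Data Index Dev Service is provisioned automatically in dev mode.
+# Set this property to false if you deploy or embed Data Index yourself.
+quarkus.kogito.devservices.enabled=true
+----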
+
+=== {data_index_ref} service as Quarkus extension
+It can be included as part of the same {product_name} application using the *{data_index_ref} extension*, through the provided addons.
+
+This scenario is specific to adding the {data_index_ref} data indexing features and the GraphQL endpoint exposure inside a workflow application.
+
+Communication with the workflow where the extension is added is internal to the application, which simplifies the communication between services and avoids extra configuration for that purpose.
+
+In this case, the indexing has a limitation: it is only able to index data from the workflows deployed in the same application.
+
+
+image::data-index/data-index-addon.png[Image of data-index as a Quarkus Extension]
+
+More details are available in the xref:data-index/data-index-quarkus-extension.adoc[{data_index_ref} Quarkus Extension] section.
+
+[#data-index-storage]
+== {data_index_ref} storage
+
+In order to store the indexed data, {data_index_ref} needs some specific tables to be created. {data_index_ref} is ready to use link:{flyway_quarkus_url}[Quarkus Flyway] for that purpose.
+
+It is necessary to activate the `migrate-at-start` option to migrate the {data_index_ref} schema automatically.
+
+For more details about Flyway migrations, see the xref:persistence/postgresql-flyway-migration.adoc[] section.
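+
+For example, a minimal `application.properties` sketch enabling the automatic migration (the same setting appears in the deployment examples later in this document):
+
+.Example `application.properties` sketch enabling Flyway migration at startup
+[source,properties]
+----
+# Run the Flyway migrations at startup so that the tables
+# required to store the indexed data are created or updated.
+quarkus.flyway.migrate-at-start=true
+----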
+
+[#data-index-graphql]
+== {data_index_ref} GraphQL endpoint
+
+{data_index_ref} provides a GraphQL endpoint that allows users to interact with the stored data.
+For more information about GraphQL, see link:{graphql_url}[GraphQL].
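+
+As an illustration, assuming the standalone service is exposed locally on port 8180 and serves its GraphQL API on the `/graphql` path, a query can be sent over plain HTTP:
+
+.Example of querying the {data_index_ref} GraphQL endpoint over HTTP (sketch; host, port, and path are assumptions)
+[source,shell]
+----
+# Query a few ProcessInstances fields from the Data Index GraphQL endpoint
+curl -s -X POST http://localhost:8180/graphql \
+  -H 'Content-Type: application/json' \
+  -d '{"query": "{ ProcessInstances { id processId state } }"}'
+----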
+
+[#data-index-ext-queries]
+=== GraphQL queries for {workflow_instances} and jobs
+
+This guide provides, as examples, some GraphQL queries that allow you to retrieve data about {workflow_instances} and jobs.
+
+Retrieve data from {workflow_instances}::
++
+--
+You can retrieve data about a specified instance from your workflow definition.
+
+.Example query
+[source]
+----
+{
+ ProcessInstances {
+ id
+ processId
+ state
+ parentProcessInstanceId
+ rootProcessId
+ rootProcessInstanceId
+ variables
+ nodes {
+ id
+ name
+ type
+ }
+ }
+}
+----
+--
+
+Retrieve data from jobs::
++
+--
+You can retrieve data from a specified job instance.
+
+.Example query
+[source]
+----
+{
+ Jobs {
+ id
+ status
+ priority
+ processId
+ processInstanceId
+ executionCounter
+ }
+}
+----
+--
+
+Filter query results using the `where` parameter::
++
+--
+You can use the `where` parameter with multiple combinations to filter query results based on workflow attributes.
+
+.Example query
+[source]
+----
+{
+ ProcessInstances(where: {state: {equal: ACTIVE}}) {
+ id
+ processId
+ processName
+ start
+ state
+ variables
+ }
+}
+----
+
+.Example query
+[source]
+----
+{
+ ProcessInstances(where: {id: {equal: "d43a56b6-fb11-4066-b689-d70386b9a375"}}) {
+ id
+ processId
+ processName
+ start
+ state
+ variables
+ }
+}
+----
+
+By default, all filtered attributes are executed as `AND` operations in queries. You can modify this behavior by combining filters with an `AND` or `OR` operator.
+
+.Example query
+[source]
+----
+{
+ ProcessInstances(where: {or: {state: {equal: ACTIVE}, rootProcessId: {isNull: false}}}) {
+ id
+ processId
+ processName
+ start
+ end
+ state
+ }
+}
+----
+
+.Example query
+[source]
+----
+{
+ ProcessInstances(where: {and: {processId: {equal: "travels"}, or: {state: {equal: ACTIVE}, rootProcessId: {isNull: false}}}}) {
+ id
+ processId
+ processName
+ start
+ end
+ state
+ }
+}
+----
+
+Depending on the attribute type, the following operators are also available:
+
+* String array argument:
+** `contains` : String
+** `containsAll`: Array of strings
+** `containsAny`: Array of strings
+** `isNull`: Boolean (`true` or `false`)
+
+* String argument:
+** `in`: Array of strings
+** `like`: String
+** `isNull`: Boolean (`true` or `false`)
+** `equal`: String
+
+* ID argument:
+** `in`: Array of strings
+** `equal`: String
+** `isNull`: Boolean (`true` or `false`)
+
+* Boolean argument:
+** `isNull`: Boolean (`true` or `false`)
+** `equal`: Boolean (`true` or `false`)
+
+* Numeric argument:
+** `in`: Array of integers
+** `isNull`: Boolean
+** `equal`: Integer
+** `greaterThan`: Integer
+** `greaterThanEqual`: Integer
+** `lessThan`: Integer
+** `lessThanEqual`: Integer
+** `between`: Numeric range
+** `from`: Integer
+** `to`: Integer
+
+* Date argument:
+** `isNull`: Boolean (`true` or `false`)
+** `equal`: Date time
+** `greaterThan`: Date time
+** `greaterThanEqual`: Date time
+** `lessThan`: Date time
+** `lessThanEqual`: Date time
+** `between`: Date range
+** `from`: Date time
+** `to`: Date time
+--
+
+Sort query results using the `orderBy` parameter::
++
+--
+You can use the `orderBy` parameter to sort query results based on workflow attributes. You can also specify the direction of sorting in ascending `ASC` order or descending `DESC` order. Multiple attributes are applied to the database query in the order they are specified in the query filter.
+
+.Example query
+[source]
+----
+{
+ ProcessInstances(where: {state: {equal: ACTIVE}}, orderBy: {start: ASC}) {
+ id
+ processId
+ processName
+ start
+ end
+ state
+ }
+}
+----
+--
+
+Limit and offset query results using the `pagination` parameter::
++
+--
+You can use the `pagination` parameter to specify a `limit` and `offset` for query results.
+
+.Example query
+[source]
+----
+{
+ ProcessInstances(where: {state: {equal: ACTIVE}}, orderBy: {start: ASC}, pagination: {limit: 10, offset: 0}) {
+ id
+ processId
+ processName
+ start
+ end
+ state
+ }
+}
+----
+--
+
+[#data-index-gateway-api]
+=== Data Index service Gateway API
+
+Data Index incorporates a set of queries or mutations that allow firing operations on workflow endpoints using GraphQL notation.
+
+The Data Index Gateway API enables you to perform the following operations:
+
+Abort a {workflow_instance}::
++
+--
+Retrieves a {workflow_instance} with the ID passed as a parameter and launches the abort operation on related {product_name} service.
+
+.Example mutation for abort operation
+[source]
+----
+mutation {
+ ProcessInstanceAbort (id:"66e05e9c-eaab-47af-a83e-156498b7096d")
+}
+----
+--
+
+Retry a {workflow_instance}::
++
+--
+Retrieves a {workflow_instance} with the id passed as a parameter and launches the retry operation on related {product_name} service.
+
+.Example mutation for retry operation
+[source]
+----
+mutation {
+ ProcessInstanceRetry (id:"66e05e9c-eaab-47af-a83e-156498b7096d")
+}
+----
+--
+
+Skip a {workflow_instance}::
++
+--
+Retrieves a {workflow_instance} with the ID passed as a parameter and launches the skip operation on related {product_name} service.
+
+.Example mutation for skip operation
+[source]
+----
+mutation {
+ ProcessInstanceSkip (id:"66e05e9c-eaab-47af-a83e-156498b7096d")
+}
+----
+--
+
+
+Retrieve {workflow_instance} nodes::
++
+--
+Retrieves the nodes of a {workflow_instance} that are coming from the process definition. When the `nodeDefinitions` field of a {workflow_instance} is queried, a call to a specific {product_name} service is generated to retrieve the requested list of available nodes.
+
+.Example query to retrieve {workflow_instance} nodes
+[source]
+----
+{ProcessInstances(where: { id: {equal: "1017afb1-5749-440e-8b9b-6b876bb5894d"}}){
+ diagram
+}}
+----
+--
+
+Update {workflow_instance} variables::
++
+--
+Updates the variables of a {workflow_instance} using the `id` passed as a parameter. Retrieves a {workflow_instance} using the `id` passed as a parameter and launches the update operation on related {product_name} service with the new values passed in `variables` parameter.
+
+.Example mutation to update {workflow_instance} variables
+[source]
+----
+mutation {
+ ProcessInstanceUpdateVariables
+ (id:"23147fcc-da82-43a2-a577-7a36b26094bd",
+ variables:"{\"it_approval\":true,\"candidate\":{\"name\":\"Joe\",\"email\":\"jdoe@ts.com\",\"salary\":30000,\"skills\":\"java\"},\"hr_approval\":true}")
+}
+----
+--
+
+Trigger a node instance::
++
+--
+Triggers a node instance using the node definition `nodeId`. The `nodeId` is included in the `nodeInstances` of a {workflow_instance} using the `id` passed as parameter.
+
+.Example mutation to trigger a node instance
+[source]
+----
+mutation{
+ NodeInstanceTrigger(
+ id: "9674e3ed-8c13-4c3f-870e-7465d9ca7ca7",
+ nodeId:"_B8C4F63C-81AD-4291-9C1B-84967277EEF6")
+}
+----
+--
+
+Retrigger a node instance::
++
+--
+Retriggers a node instance using the `id`, which is similar to `nodeInstanceId` related to a {workflow_instance}. The `id` of the {workflow_instance} is passed as a parameter.
+
+.Example mutation to retrigger a node instance
+[source]
+----
+mutation{
+ NodeInstanceRetrigger(
+ id: "9674e3ed-8c13-4c3f-870e-7465d9ca7ca7",
+ nodeInstanceId:"01756ba2-ac16-4cf1-9d74-154ae8f2df21")
+}
+----
+--
+
+Cancel a node instance::
++
+--
+Cancels a node instance with the `id`, which is similar to `nodeInstanceId` related to a {workflow_instance}. The `id` of the {workflow_instance} is passed as a parameter.
+
+.Example mutation to cancel a node instance
+[source]
+----
+mutation{
+ NodeInstanceCancel(
+ id: "9674e3ed-8c13-4c3f-870e-7465d9ca7ca7",
+ nodeInstanceId:"01756ba2-ac16-4cf1-9d74-154ae8f2df21")
+}
+----
+
+
+[NOTE]
+====
+To enable the described management operations on workflow instances, make sure your project is configured with the `kogito-addons-quarkus-process-management` dependency in its `pom.xml` file:
+[source,xml]
+----
+<dependency>
+  <groupId>org.kie.kogito</groupId>
+  <artifactId>kogito-addons-quarkus-process-management</artifactId>
+</dependency>
+----
+====
+--
+
+Retrieve the {workflow_instance} source file content::
++
+--
+Retrieves the {workflow_instance} source file. When the `source` field of a {workflow_instance} is queried, a call to a specific {product_name} service is generated to retrieve the requested {workflow_instance} source file content.
+
+.Example query to retrieve a {workflow_instance} source file content
+[source]
+----
+{ProcessInstances(where: { id: {equal: "1017afb1-5749-440e-8b9b-6b876bb5894d"}}){
+ source
+}}
+----
+
+[NOTE]
+====
+The workflow instance `source` field will only be available when the `kogito-addons-quarkus-source-files` dependency is added to the {product_name} runtime service `pom.xml` file.
+[source,xml]
+----
+<dependency>
+  <groupId>org.kie.kogito</groupId>
+  <artifactId>kogito-addons-quarkus-source-files</artifactId>
+</dependency>
+----
+====
+--
+
+Reschedule a job::
++
+--
+Reschedules a job using the `id`. The job `id` and other information are passed in the `data` parameter.
+
+.Example mutation to reschedule a job
+[source]
+----
+mutation{
+ JobReschedule(
+ id: "9674e3ed-8c13-4c3f-870e-7465d9ca7ca7",
+ data:"{\"expirationTime\": \"2033-06-27T04:35:54.631Z\",\"retries\": 2}")
+}
+----
+--
+
+Cancel a job::
++
+--
+Cancels a job using the `id` passed as a parameter.
+
+.Example mutation to cancel a job
+[source]
+----
+mutation{
+ JobCancel(
+ id: "9674e3ed-8c13-4c3f-870e-7465d9ca7ca7")
+}
+----
+--
+
+[#data-index-graphql-ui]
+=== {data_index_ref} GraphQL UI
+
+The {data_index_ref} GraphQL UI is provided to interact with the GraphQL endpoint.
+
+image::data-index/data-index-graphql-ui.png[Image of data-index GraphQL UI]
+
+When the {data_index_ref} is deployed as a standalone service, this UI is available at the `/graphiql/` endpoint (for example, at http://localhost:8180/graphiql/).
+
+To have the GraphQL UI available when the {data_index_ref} extension is deployed, the `quarkus.kogito.data-index.graphql.ui.always-include` property needs to be enabled.
+
+It is then accessible at `/graphql-ui/` (for example, http://localhost:8080/q/graphql-ui/).
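+
+A minimal `application.properties` sketch for a workflow application that embeds the extension (the same property is shown again in the extension guide):
+
+.Example `application.properties` sketch exposing the GraphQL UI
+[source,properties]
+----
+# Expose the GraphQL UI provided by the Data Index extension
+quarkus.kogito.data-index.graphql.ui.always-include=true
+----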
+
+[NOTE]
+====
+The `quarkus.http.root-path` and `quarkus.http.non-application-root-path` properties belong to the workflow application where the {data_index_ref} extension has been added. See link:{path_resolution_url}[Path Resolution in Quarkus] for more details about those properties and their default values.
+====
+
+[#data-index-service-communication]
+== {data_index_ref} service communication configuration
+
+In order to index the data, {data_index_ref} supports multiple connection options for consuming the information provided by the different workflows.
+
+The final goal is to receive the application-generated data related to the {workflow_instances} and jobs. The information that comes inside the events is indexed and stored in the database, allowing it to be consumed through the provided GraphQL endpoint.
+
+=== Knative Eventing
+
+In order to interact with the standalone {data_index_ref} service, use Knative Eventing:
+
+* Add the {data_index_ref} service and deployment, defining the Database connection properties and setting the `KOGITO_DATA_INDEX_QUARKUS_PROFILE` to `http-events-support`.
+* Specify the Knative Triggers to filter the {data_index_ref} events.
+
+.Example `DataIndex` resource with triggers definition (requires Knative):
+[source,yaml]
+----
+apiVersion: eventing.knative.dev/v1
+kind: Trigger
+metadata:
+ name: data-index-service-postgresql-processes-trigger
+spec:
+ broker: default
+ filter:
+ attributes:
+ type: ProcessInstanceEvent
+ subscriber:
+ ref:
+ apiVersion: v1
+ kind: Service
+ name: data-index-service-postgresql
+ uri: /processes
+---
+apiVersion: eventing.knative.dev/v1
+kind: Trigger
+metadata:
+ name: data-index-service-postgresql-jobs-trigger
+spec:
+ broker: default
+ filter:
+ attributes:
+ type: JobEvent
+ subscriber:
+ ref:
+ apiVersion: v1
+ kind: Service
+ name: data-index-service-postgresql
+ uri: /jobs
+----
+
+
+* Configure the workflow to use `K_SINK` as the outgoing connection URL.
+
+.Example of configuration in {product_name} application `application.properties` file to communicate with Knative
+[source,properties]
+----
+mp.messaging.outgoing.kogito-processinstances-events.connector=quarkus-http
+mp.messaging.outgoing.kogito-processinstances-events.url=${K_SINK}
+mp.messaging.outgoing.kogito-processinstances-events.method=POST
+----
+
+[NOTE]
+====
+*Job service* also needs to be configured to send its events to the Knative `K_SINK` to make them available for the {data_index_ref} related triggers.
+====
+
+=== Kafka eventing
+
+To configure the communication between the {data_index_ref} Service and the workflow through Kafka, you must provide a set of configurations.
+
+* Add the {data_index_ref} service and deployment, defining the Database connection properties and setting the `KOGITO_DATA_INDEX_QUARKUS_PROFILE` to `kafka-events-support` (this value is set by default).
+
+* Configure the {product_name} application to use the smallrye-kafka connector and the expected topic.
+
+.Example of configuration in {product_name} application `application.properties` file to communicate with Kafka
+[source,properties]
+----
+mp.messaging.outgoing.kogito-processinstances-events.connector=smallrye-kafka
+mp.messaging.outgoing.kogito-processinstances-events.topic=kogito-processinstances-events
+mp.messaging.outgoing.kogito-processinstances-events.value.serializer=org.apache.kafka.common.serialization.StringSerializer
+----
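+
+On the {data_index_ref} side, the Kafka profile is selected through the `KOGITO_DATA_INDEX_QUARKUS_PROFILE` environment variable. The following is a minimal sketch of the corresponding container environment fragment; the surrounding deployment definition is omitted here:
+
+.Example container environment fragment selecting the Kafka events profile (sketch)
+[source,yaml]
+----
+# Fragment of the Data Index container spec selecting the Kafka events profile;
+# the rest of the Deployment definition is intentionally omitted.
+env:
+  - name: KOGITO_DATA_INDEX_QUARKUS_PROFILE
+    value: kafka-events-support
+----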
+
+[NOTE]
+====
+*Job service* is configured to send the `JobEvent` messages to the Kafka topic `kogito-jobs-events` to make them available for {data_index_ref} consumption.
+====
+
+=== {data_index_ref} Quarkus extension and Jobs embedded addon
+When the {data_index_ref} functionality is added as a Quarkus extension to the workflow, no event configuration is required.
+In this case, the data indexing is done internally, and all interactions are performed through the {data_index_ref} Gateway API.
+
+== Additional resources
+
+* xref:eventing/consume-producing-events-with-kafka.adoc[]
+* xref:eventing/consume-produce-events-with-knative-eventing.adoc[]
+* xref:use-cases/timeout-showcase-example.adoc[]
+
+include::../_common-content/report-issue.adoc[]
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/supporting-services/data-index/data-index-quarkus-extension.adoc b/modules/serverless-logic/pages/supporting-services/data-index/data-index-quarkus-extension.adoc
new file mode 100644
index 00000000..0304013b
--- /dev/null
+++ b/modules/serverless-logic/pages/supporting-services/data-index/data-index-quarkus-extension.adoc
@@ -0,0 +1,204 @@
+= Data Index Quarkus extensions
+:compat-mode!:
+// Metadata:
+:description: Data Index Service to allow to index and query audit data in {product_name}
+:keywords: workflow, serverless, data, dataindex, data-index, index, service, extension, quarkus
+// Referenced documentation pages
+:persistence_with_postgresql_guide: xref:persistence/persistence-with-postgresql.adoc
+:data-index-core-concepts_guide: xref:data-index/data-index-core-concepts.adoc
+:getting_started_create_first_workflow_guide: xref:getting-started/create-your-first-workflow-service.adoc
+// External pages
+:kogito_sw_timeouts_showcase_embedded_example_url: {kogito_sw_examples_url}/serverless-workflow-timeouts-showcase-embedded
+:kogito_sw_timeouts_showcase_embedded_example_application_properties_url: {kogito_sw_timeouts_showcase_embedded_example_url}/src/main/resources/application.properties
+:kogito_sw_dataindex_persistence_example_url: {kogito_sw_examples_url}/serverless-workflow-data-index-persistence-addon-quarkus
+
+:infinispan_url: https://infinispan.org/
+:mongo_url: https://www.mongodb.com/
+:postgresql_url: https://www.postgresql.org/
+
+//Common constants
+:data_index_ref: Data Index
+:workflow_instance: process instance
+:workflow_instances: {workflow_instance}s
+
+
+This document describes how you can add the {data_index_ref} features to your workflow. You simply need to add the {data_index_ref} extension to the workflow, and
+the related data will be collected and stored in the database, enabling the GraphQL endpoint to execute queries and perform management operations over the {workflow_instances}.
+The example described in this document is based on the link:{kogito_sw_timeouts_showcase_embedded_example_url}[`serverless-workflow-timeouts-showcase-embedded`] example application.
+
+[[data-index-ext-overview]]
+== Overview
+
+The {data_index_ref} service has been designed to store and manage data from different workflow instances.
+Communication with the service is through events that contain the workflow-related data, and the service is responsible for storing that data and exposing a GraphQL endpoint that allows queries and maintenance operations on the different workflow instances.
+
+image::data-index/data-index-addon.png[Image of data-index as a Quarkus Extension]
+
+In specific use cases, to avoid deploying the service separately, it could be useful to have the indexing functionality and the query capabilities embedded in the same application.
+For this purpose, the Quarkus {data_index_ref} extension can be added to any workflow application and incorporates the full {data_index_ref} functionality into the same application without needing an external {data_index_ref} service.
+These extensions are distributed as addons ready to work with different types of persistence:
+
+* kogito-addons-quarkus-data-index-inmemory (inmemory PostgreSQL)
+* kogito-addons-quarkus-data-index-postgresql
+* kogito-addons-quarkus-data-index-infinispan
+* kogito-addons-quarkus-data-index-mongodb
+
+With the same purpose, the Quarkus {data_index_ref} persistence extension can be added to any workflow application and incorporates only the {data_index_ref} indexation and data persistence functionality into the same application without needing an external {data_index_ref} service to do that.
+These extensions are distributed as addons ready to work with different types of persistence:
+
+* kogito-addons-quarkus-data-index-persistence-postgresql
+* kogito-addons-quarkus-data-index-persistence-infinispan
+* kogito-addons-quarkus-data-index-persistence-mongodb
+
+In this case, to interact with that data and the related runtimes using GraphQL, you need an external {data_index_ref} service that makes that endpoint available.
+
+[NOTE]
+====
+The {data_index_ref} extensions are provided as addons for each kind of supported persistence relying on the link:{quarkus_guides_base_url}/writing-extensions[Quarkus extensions] mechanism.
+====
+
+Once one of these `kogito-addons-quarkus-data-index` or `kogito-addons-quarkus-data-index-persistence` addons is added to a workflow, it incorporates the functionality to index and store the workflow data. The `kogito-addons-quarkus-data-index` addons additionally incorporate the GraphQL endpoint to perform queries and management operations.
+
+In the same way as for the {data_index_ref} service, there is a specific addon for each type of persistence you want to work with. Currently, you can find {data_index_ref} addons for link:{postgresql_url}[PostgreSQL], link:{infinispan_url}[Infinispan], and link:{mongo_url}[MongoDB].
+
+[IMPORTANT]
+====
+The {data_index_ref} addon distribution added to the workflow must match the workflow's persistence type, so the indexed data can be stored in the same database.
+The addon will share the data source used by the workflow where it is added, and it will create separate tables for that purpose.
+====
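+
+Because the addon shares the workflow's data source, the connection is configured with the standard Quarkus datasource properties. The following `application.properties` sketch is illustrative only; the host, credentials, and database name are assumptions:
+
+.Example shared datasource configuration sketch (PostgreSQL)
+[source,properties]
+----
+# Shared datasource used by both the workflow runtime and the Data Index addon
+quarkus.datasource.db-kind=postgresql
+quarkus.datasource.username=sonataflow
+quarkus.datasource.password=sonataflow
+quarkus.datasource.jdbc.url=jdbc:postgresql://localhost:5432/sonataflow
+# Let Flyway create the Data Index tables at startup
+quarkus.flyway.migrate-at-start=true
+----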
+
+When any of the `kogito-addons-quarkus-data-index` or `kogito-addons-quarkus-data-index-persistence` addons is added:
+
+* The communication with the workflow is direct: the workflow data is *not* transmitted or consumed through events, it is stored directly in the configured database.
+There is no need to configure the events connection for this purpose.
+
+Only when any of the `kogito-addons-quarkus-data-index` addons is added:
+
+* A new GraphQL endpoint is added to perform queries and management operations.
+
+[[data-index-ext-use]]
+== Adding {data_index_ref} extension to a workflow application
+
+You can add the {data_index_ref} Quarkus extension as an addon:
+
+.Prerequisites
+* Your workflow is running and has persistence enabled.
+* {data_index_ref} is using the same datasource to store indexed data
++
+For more information about creating a workflow, see {getting_started_create_first_workflow_guide}[Creating your first workflow]. You can also find more details about enabling persistence in {persistence_with_postgresql_guide}[Running a workflow using PostgreSQL].
+
+.Procedure
+. Add the `kogito-addons-quarkus-data-index` extension to your Quarkus Workflow Project using any of the following alternatives:
++
+--
+
+[tabs]
+====
+Manually to the POM.xml::
++
+[source,xml]
+----
+<dependency>
+  <groupId>org.kie.kogito</groupId>
+  <artifactId>kogito-addons-quarkus-data-index-postgresql</artifactId>
+</dependency>
+----
+Apache Maven::
++
+[source,shell]
+----
+mvn quarkus:add-extension -Dextensions="kogito-addons-quarkus-data-index-postgresql"
+----
+Quarkus CLI::
++
+[source,shell]
+----
+quarkus extension add kogito-addons-quarkus-data-index-postgresql
+----
+====
+
+--
+
+. Configure the Data Index Addon properties.
++
+--
+Prepare the workflow to run in dev mode, avoid starting the {data_index_ref} Quarkus Dev service, and specify if the GraphQL UI needs to be available.
+
+.Example adding Data Index addon properties in `application.properties` file
+[source,properties]
+----
+quarkus.kogito.devservices.enabled=false <1>
+quarkus.kogito.data-index.graphql.ui.always-include=true <2>
+----
+<1> By default, when a workflow is running in dev mode, a Data Index Dev Service is started automatically and a temporary dev service database is created.
+When adding the addon to the workflow, you need to disable the Dev Service by setting `quarkus.kogito.devservices.enabled` to `false` in the `application.properties` file.
+<2> The addon can expose a simple UI to interact with the GraphQL endpoint. The UI is only available when this property is set to `true`, and it is served at `/graphql-ui/`.
+
+For more information, see the `application.properties` file of the link:{kogito_sw_timeouts_showcase_embedded_example_application_properties_url}[`serverless-workflow-timeouts-showcase-embedded`] example application.
+--
+
+
+[#kogito-addons-quarkus-dataindex-persistence-extension]
+== Adding {data_index_ref} persistence extension to a workflow application
+
+You can add the {data_index_ref} persistence Quarkus extension as an addon:
+
+.Prerequisites
+* Your workflow is running and has persistence enabled
+* {data_index_ref} is using the same datasource to store indexed data
++
+For more information about creating a workflow, see {getting_started_create_first_workflow_guide}[Creating your first workflow]. You can also find more details about enabling persistence in {persistence_with_postgresql_guide}[Running a workflow using PostgreSQL].
+
+.Procedure
+. Add the `kogito-addons-quarkus-data-index-persistence` extension to your Quarkus Workflow Project using any of the following alternatives:
++
+--
+
+[tabs]
+====
+Manually to the POM.xml::
++
+[source,xml]
+----
+<dependency>
+  <groupId>org.kie.kogito</groupId>
+  <artifactId>kogito-addons-quarkus-data-index-persistence-postgresql</artifactId>
+</dependency>
+----
+Apache Maven::
++
+[source,shell]
+----
+mvn quarkus:add-extension -Dextensions="kogito-addons-quarkus-data-index-persistence-postgresql"
+----
+Quarkus CLI::
++
+[source,shell]
+----
+quarkus extension add kogito-addons-quarkus-data-index-persistence-postgresql
+----
+====
+
+--
+
+. Add the following configurations to the `application.properties` file of your project.
+
+.Example adding Data Index addon properties in `application.properties` file
+[source,properties]
+----
+quarkus.kogito.devservices.enabled=false <1>
+----
+<1> By default, when a workflow is running in dev mode, a Data Index Dev Service is started automatically and a temporary dev service database is created.
+When adding the addon to the workflow, you need to disable the Dev Service by setting `quarkus.kogito.devservices.enabled` to `false` in the `application.properties` file.
+
+
+For more information, see the link:{kogito_sw_dataindex_persistence_example_url}[`serverless-workflow-data-index-persistence-addon-quarkus`] example application.
+
+== Additional resources
+
+* xref:getting-started/create-your-first-workflow-service.adoc[]
+* xref:persistence/persistence-with-postgresql.adoc[]
+* xref:data-index/data-index-core-concepts.adoc[]
+
+include::../../pages/_common-content/report-issue.adoc[]
+
diff --git a/modules/serverless-logic/pages/supporting-services/data-index/data-index-service.adoc b/modules/serverless-logic/pages/supporting-services/data-index/data-index-service.adoc
new file mode 100644
index 00000000..dd63bf51
--- /dev/null
+++ b/modules/serverless-logic/pages/supporting-services/data-index/data-index-service.adoc
@@ -0,0 +1,399 @@
+= Data Index standalone service
+:compat-mode!:
+// Metadata:
+:description: Data Index Service use as an independent service that allow to index and query audit data in {product_name}
+:keywords: workflow, serverless, data, dataindex, data-index, index, service, standalone
+// External pages
+:distributions_url: https://quay.io/organization/kiegroup
+:dev_services_url: https://quarkus.io/guides/dev-services
+:test_containers_url: https://www.testcontainers.org/
+:kubernetes_configmap_url: https://kubernetes.io/docs/concepts/configuration/configmap/
+:quarkus_container_image_customizing_url: https://quarkus.io/guides/container-image#customizing
+//Common constants
+:data_index_ref: Data Index
+:workflow_instance: process instance
+:workflow_instances: {workflow_instance}s
+
+[#data-index-service]
+== {data_index_ref} service deployment
+
+{data_index_ref} service can be deployed by directly referencing a distributed {data_index_ref} image. Different images are provided, taking into account which persistence layer is required in each case.
+In each distribution, there are properties to configure things like the connection with the database or the communication with other services.
+The goal is to configure the container to process the ProcessInstance and Job *events* that carry their related data, to index and store that data in the database, and finally, to provide the xref:data-index/data-index-core-concepts.adoc#data-index-graphql[{data_index_ref} GraphQL] endpoint to consume it.
+
+[#data-index-service-distributions]
+=== {data_index_ref} distributions
+{data_index_ref} service can be deployed by directly referencing a distributed {data_index_ref} image. These are the different {data_index_ref} image distributions that can be found in link:{distributions_url}[Quay.io/kiegroup]:
+
+image::data-index/data-index-distributions.png[Image of data-index different available distributions]
+
+=== {data_index_ref} standalone service deployment
+
+There are several ways to deploy the {data_index_ref} service, but all deployments share some common points:
+
+. Reference the right {data_index_ref} image to match the type of database that will store the indexed data.
+. Provide the database connection properties to allow {data_index_ref} to store the indexed data. The {data_index_ref} service does not initialize its database schema automatically. To initialize the database schema, you need to enable Flyway migration by setting `QUARKUS_FLYWAY_MIGRATE_AT_START=true`.
+. Define the `KOGITO_DATA_INDEX_QUARKUS_PROFILE` to set the way that the events will be consumed (by default: `kafka-events-support`).
+
+[NOTE]
+====
+You must prepare the {product_name} workflow to support the full communication with an external {data_index_ref} service.
+For this purpose, it is important to make sure the following addons are included:
+
+.{product_name} workflow addon dependencies to support the connection with external {data_index_ref}
+[source,xml]
+----
+<dependency>
+  <groupId>org.kie.kogito</groupId>
+  <artifactId>kogito-addons-quarkus-events-process</artifactId> <1>
+</dependency>
+<dependency>
+  <groupId>org.kie.kogito</groupId>
+  <artifactId>kogito-addons-quarkus-process-management</artifactId> <2>
+</dependency>
+----
+
+<1> Allows the workflow to send the events that can be consumed by the {data_index_ref} service.
+<2> Creates the endpoints that allow management operations on {workflow_instances}.
+
+====
+
+==== {data_index_ref} deployment resource example using Kafka eventing:
+
+The following example shows how the {data_index_ref} resource definition can be deployed as part of a docker-compose definition.
+
+.Example of `DataIndex` resource in a docker-compose deployment using Kafka eventing:
+[source,yaml]
+----
+ data-index:
+ container_name: data-index
+ image: quay.io/kiegroup/kogito-data-index-postgresql:latest <1>
+ ports:
+ - "8180:8080"
+ depends_on:
+ postgres:
+ condition: service_healthy
+ volumes:
+ - ./../target/classes/META-INF/resources/persistence/protobuf:/home/kogito/data/protobufs/
+ environment:
+ QUARKUS_DATASOURCE_JDBC_URL: "jdbc:postgresql://postgres:5432/kogito" <2>
+ QUARKUS_DATASOURCE_USERNAME: kogito-user
+ QUARKUS_DATASOURCE_PASSWORD: kogito-pass
+ QUARKUS_HTTP_CORS_ORIGINS: "/.*/"
+ KOGITO_DATA_INDEX_QUARKUS_PROFILE: kafka-events-support <3>
+ QUARKUS_FLYWAY_MIGRATE_AT_START: "true" <4>
+ QUARKUS_HIBERNATE_ORM_DATABASE_GENERATION: update
+----
+
+<1> Reference the {data_index_ref} image that matches the database type, in this case `quay.io/kiegroup/kogito-data-index-postgresql:latest`.
+<2> Provide the database connection properties.
+<3> Set the profile to `kafka-events-support`. This is also the default when `KOGITO_DATA_INDEX_QUARKUS_PROFILE` is not present, so the {data_index_ref} uses Kafka eventing.
+<4> Initialize the database schema at startup using Flyway.
+
+When Kafka eventing is used, workflow applications must be configured to send their events to the Kafka topic `kogito-processinstances-events`, so that the {data_index_ref} service can consume the generated events.
+
+In this case, the {data_index_ref} service is ready to consume the events sent to the topics `kogito-processinstances-events` and `kogito-jobs-events`.
+
+[IMPORTANT]
+====
+It is important to configure the workflow application to send the events to the topic `kogito-processinstances-events` using the `smallrye-kafka` connector. More information about how to configure Kafka eventing for workflows is available in the xref:eventing/consume-producing-events-with-kafka.adoc[] guide.
+
+To explore the specific configuration to add to the workflow to connect with {data_index_ref} using Kafka eventing, see xref:data-index/data-index-core-concepts.adoc#_kafka_eventing[{data_index_ref} Kafka eventing].
+
+.Example of configuration in {product_name} application passed in application.properties to configure connection with {data_index_ref} using Kafka connector:
+[source,properties]
+----
+mp.messaging.outgoing.kogito-processinstances-events.connector=smallrye-kafka
+mp.messaging.outgoing.kogito-processinstances-events.topic=kogito-processinstances-events
+mp.messaging.outgoing.kogito-processinstances-events.value.serializer=org.apache.kafka.common.serialization.StringSerializer
+----
+====
+
+[NOTE]
+====
+Usually, when using docker-compose, the workflow application container image that is added to docker-compose is generated beforehand. If the Kafka eventing configuration values were not present when the container image was created, they need to be passed as environment variables, as shown in the example below.
+
+More details about customizing Quarkus generated images can be found in {quarkus_container_image_customizing_url}[Quarkus Container Images Customizing] guide.
+====
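+
+For instance, a minimal sketch of how those values could be passed to an already built workflow container in the same docker-compose file is shown below. The `workflow-app` service name, its image, and the `kafka:9092` broker address are hypothetical placeholders for your own setup.
+
+.Example of passing the Kafka eventing configuration as environment variables in docker-compose
+[source,yaml]
+----
+  workflow-app:
+    container_name: workflow-app
+    image: quay.io/myorg/my-workflow-app:latest # replace with your workflow application image
+    ports:
+      - "8080:8080"
+    environment:
+      # Equivalent to the application.properties entries shown above, using the MicroProfile Config naming convention.
+      MP_MESSAGING_OUTGOING_KOGITO_PROCESSINSTANCES_EVENTS_CONNECTOR: smallrye-kafka
+      MP_MESSAGING_OUTGOING_KOGITO_PROCESSINSTANCES_EVENTS_TOPIC: kogito-processinstances-events
+      MP_MESSAGING_OUTGOING_KOGITO_PROCESSINSTANCES_EVENTS_VALUE_SERIALIZER: org.apache.kafka.common.serialization.StringSerializer
+      KAFKA_BOOTSTRAP_SERVERS: kafka:9092 # address of your Kafka broker
+----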
+
+==== {data_index_ref} deployment resource example using Knative eventing
+
+The following deployment definition shows how the {data_index_ref} service can be configured and deployed to connect with an existing PostgreSQL database and to consume Knative events.
+
+.Example `DataIndex` resource with PostgreSQL persistence and Knative eventing in a Kubernetes environment:
+[source,yaml]
+----
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app.kubernetes.io/name: data-index-service-postgresql
+ app.kubernetes.io/version: 2.0.0-SNAPSHOT
+ name: data-index-service-postgresql
+spec:
+ ports:
+ - name: http
+ port: 80
+ targetPort: 8080
+ selector:
+ app.kubernetes.io/name: data-index-service-postgresql
+ app.kubernetes.io/version: 2.0.0-SNAPSHOT
+ type: ClusterIP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app.kubernetes.io/name: data-index-service-postgresql
+ app.kubernetes.io/version: 2.0.0-SNAPSHOT
+ name: data-index-service-postgresql
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: data-index-service-postgresql
+ app.kubernetes.io/version: 2.0.0-SNAPSHOT
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: data-index-service-postgresql
+ app.kubernetes.io/version: 2.0.0-SNAPSHOT
+ spec:
+ containers:
+ - name: data-index-service-postgresql
+ image: quay.io/kiegroup/kogito-data-index-postgresql:latest <1>
+ imagePullPolicy: Always
+ ports:
+ - containerPort: 8080
+ name: http
+ protocol: TCP
+ env:
+ - name: KUBERNETES_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: QUARKUS_DATASOURCE_USERNAME <2>
+ value: postgres
+ - name: QUARKUS_DATASOURCE_PASSWORD
+ value: pass
+ - name: QUARKUS_DATASOURCE_JDBC_URL
+ value: jdbc:postgresql://newsletter-postgres:5432/postgres?currentSchema=data-index-service
+ - name: QUARKUS_DATASOURCE_DB_KIND
+ value: postgresql
+ - name: QUARKUS_HIBERNATE_ORM_DATABASE_GENERATION
+ value: update
+ - name: QUARKUS_KAFKA_HEALTH_ENABLE
+ value: "false"
+ - name: QUARKUS_HTTP_CORS
+ value: "true"
+ - name: QUARKUS_HTTP_CORS_ORIGINS
+ value: /.*/
+ - name: QUARKUS_FLYWAY_MIGRATE_AT_START <4>
+ value: "true"
+ - name: KOGITO_DATA_INDEX_QUARKUS_PROFILE <3>
+ value: "http-events-support"
+ - name: QUARKUS_HTTP_PORT
+ value: "8080"
+---
+apiVersion: eventing.knative.dev/v1
+kind: Trigger <5>
+metadata:
+ name: data-index-service-postgresql-processes-trigger
+spec:
+ broker: default
+ filter:
+ attributes:
+ type: ProcessInstanceEvent <6>
+ subscriber:
+ ref:
+ apiVersion: v1
+ kind: Service
+ name: data-index-service-postgresql
+ uri: /processes <7>
+---
+apiVersion: eventing.knative.dev/v1
+kind: Trigger <5>
+metadata:
+ name: data-index-service-postgresql-jobs-trigger
+spec:
+ broker: default
+ filter:
+ attributes:
+ type: JobEvent <6>
+ subscriber:
+ ref:
+ apiVersion: v1
+ kind: Service
+ name: data-index-service-postgresql
+ uri: /jobs <7>
+----
+<1> Reference the {data_index_ref} image that matches the database type, in this case `quay.io/kiegroup/kogito-data-index-postgresql:latest`.
+<2> Provide the database connection properties.
+<3> Set `KOGITO_DATA_INDEX_QUARKUS_PROFILE` to `http-events-support` to use the HTTP connector with Knative eventing.
+<4> Initialize the database schema at startup using Flyway.
+<5> Trigger definition that filters the events that arrive at the Sink and passes them to the {data_index_ref} service.
+<6> Type of event to filter.
+<7> The URI where the {data_index_ref} service expects to consume those types of events.
+
+This deployment uses `KOGITO_DATA_INDEX_QUARKUS_PROFILE: http-events-support`. Workflow applications need to configure the `quarkus-http` connector and send the events to the Knative `K_SINK`.
+You can find more information about Knative eventing and the `K_SINK` environment variable in xref:eventing/consume-produce-events-with-knative-eventing.adoc[].
+
+To explore the specific configuration to add to the workflow to connect with {data_index_ref} using Knative eventing, see xref:data-index/data-index-core-concepts.adoc#_knative_eventing[{data_index_ref} Knative eventing].
+
+.Example of configuration in {product_name} application `application.properties` file to send events to {data_index_ref} using Knative eventing
+[source,properties]
+----
+mp.messaging.outgoing.kogito-processinstances-events.connector=quarkus-http
+mp.messaging.outgoing.kogito-processinstances-events.url=${K_SINK}
+mp.messaging.outgoing.kogito-processinstances-events.method=POST
+----
+
+[NOTE]
+====
+If those configuration values were not present when the container image was created, they need to be passed as environment variables, as illustrated in the example below. More details about customizing Quarkus generated images can be found in the {quarkus_container_image_customizing_url}[Quarkus Container Images Customizing] guide.
+====
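+
+As an illustrative sketch, those values could be injected into the workflow container of a Kubernetes `Deployment` as follows. The container name and image are hypothetical placeholders; `K_SINK` is injected by the Knative `SinkBinding`, so it does not need to be set manually.
+
+.Example of passing the Knative eventing configuration as environment variables in a workflow Deployment
+[source,yaml]
+----
+      containers:
+        - name: my-workflow-app # hypothetical workflow application container
+          image: quay.io/myorg/my-workflow-app:latest
+          env:
+            # Equivalent to the application.properties entries shown above.
+            - name: MP_MESSAGING_OUTGOING_KOGITO_PROCESSINSTANCES_EVENTS_CONNECTOR
+              value: quarkus-http
+            - name: MP_MESSAGING_OUTGOING_KOGITO_PROCESSINSTANCES_EVENTS_URL
+              value: "${K_SINK}" # resolved at runtime from the K_SINK variable injected by the SinkBinding
+            - name: MP_MESSAGING_OUTGOING_KOGITO_PROCESSINSTANCES_EVENTS_METHOD
+              value: POST
+----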
+
+A full example where the standalone {data_index_ref} service is deployed using Knative eventing can be found in the xref:use-cases/timeout-showcase-example.adoc#execute-quarkus-project-standalone-services[Quarkus Workflow Project with standalone services] guide.
+
+---
+
+[#data-index-dev-service-details]
+== {data_index_ref} as a Quarkus Development service
+When you use the {product_name} Process Quarkus extension, a temporary {data_index_ref} service is automatically provisioned while the Quarkus application is running in development mode. When you use one of the following Quarkus extensions, the Dev Service is set up for immediate use:
+
+.{product_name} main Quarkus extension
+[source,xml]
+----
+<dependency>
+  <groupId>org.kie.kogito</groupId>
+  <artifactId>kogito-quarkus</artifactId>
+</dependency>
+----
+
+.{product_name} Quarkus extension
+[source,xml]
+----
+<dependency>
+  <groupId>org.kie.kogito</groupId>
+  <artifactId>kogito-quarkus-serverless-workflow</artifactId>
+</dependency>
+----
+
+When you start your Quarkus project in development mode, an in-memory instance of the {data_index_ref} service is automatically started in the background. This feature is enabled by link:{dev_services_url}[Quarkus Dev Services], and leverages link:{test_containers_url}[Testcontainers] to start an image of the {data_index_ref} service.
+
+The {product_name} Process Quarkus extension sets up your Quarkus application to automatically replicate any {product_name} messaging events related to {workflow_instances} or jobs into the provisioned {data_index_ref} instance.
+
+Once the service is up and running, you can query the GraphQL interface directly using `http://localhost:8180/graphql` or using the Quarkus Dev UI console `http://localhost:8080/q/dev`.
+
+The {data_index_ref} GraphQL endpoint can query for `ProcessInstances` and `Jobs`. For more information about operations and attributes to query, see xref:data-index/data-index-core-concepts.adoc#data-index-graphql[GraphQL endpoint provided by {data_index_ref}] section.
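+
+For instance, assuming the Dev Service is listening on the default port `8180`, a simple query for the indexed {workflow_instances} could look like the following sketch. The selected fields (`id`, `processId`, `state`) are a small subset of the attributes described in that section.
+
+.Example query against the {data_index_ref} Dev Service GraphQL endpoint
+[source,shell]
+----
+curl -X POST -H 'Content-Type: application/json' \
+  -d '{"query": "{ ProcessInstances { id, processId, state } }"}' \
+  http://localhost:8180/graphql
+----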
+
+You can share the same {data_index_ref} instance across multiple {product_name} services during development. Sharing {data_index_ref} instances is enabled by default, therefore, only one {data_index_ref} instance is started. This behavior can be adjusted to start multiple instances using the `quarkus.kogito.devservices.shared` property.
+
+The Quarkus Dev Service also allows further configuration options, as shown in the example after this list:
+
+* To disable the {data_index_ref} Dev Service, use the `quarkus.kogito.devservices.enabled=false` property.
+* To change the port where the {data_index_ref} Dev Service runs, use the `quarkus.kogito.devservices.port=8180` property.
+* To adjust the provisioned image, use the `quarkus.kogito.devservices.imageName=quay.io/kiegroup/kogito-data-index-ephemeral` property.
+* To disable sharing the {data_index_ref} instance across multiple Quarkus applications, use the `quarkus.kogito.devservices.shared=false` property.
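+
+For example, a development-time tuning of the Dev Service in `application.properties` might look like the following sketch; adjust the values to your environment.
+
+.Example {data_index_ref} Dev Service configuration in `application.properties`
+[source,properties]
+----
+# Keep the Data Index Dev Service enabled and pin it to a fixed port
+quarkus.kogito.devservices.enabled=true
+quarkus.kogito.devservices.port=8180
+# Use the ephemeral Data Index image and share the instance across local services
+quarkus.kogito.devservices.imageName=quay.io/kiegroup/kogito-data-index-ephemeral
+quarkus.kogito.devservices.shared=true
+----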
+
+For more information about Quarkus Dev Services, see link:{dev_services_url}[Dev Services guide].
+
+[#data-index-service-configuration-properties]
+== {data_index_ref} service configuration properties
+The following table serves as a quick reference for the most commonly used {data_index_ref} configuration properties.
+
+.Common configuration properties
+[cols="40%,35%,10%,10%,5%", options="header"]
+|===
+|Property|Description|Type|Default value|Override at runtime
+
+|`QUARKUS_DATASOURCE_JDBC_URL`
+| The datasource URL
+| string
+|
+| Yes
+
+|`QUARKUS_DATASOURCE_USERNAME`
+| The datasource username
+| string
+|
+| Yes
+
+|`QUARKUS_DATASOURCE_PASSWORD`
+| The datasource password
+| string
+|
+| Yes
+
+|`QUARKUS_DATASOURCE_DB_KIND`
+a|The kind of database to connect to, for example `postgresql`.
+|string
+|
+|Yes
+
+|`QUARKUS_FLYWAY_MIGRATE_AT_START`
+| `true` to execute the Flyway migration automatically when the application starts, `false` otherwise.
+| boolean
+| false
+| Yes
+
+|`KOGITO_DATA_INDEX_QUARKUS_PROFILE`
+a| (Only when referencing an image distribution)
+
+Allows changing the event connection type. The possible values are:
+
+* `kafka-events-support`
+* `http-events-support`
+
+| string
+| `kafka-events-support`
+| Yes
+
+|`kogito.persistence.query.timeout.millis`
+|Defines timeout for a query execution.
+|long
+|`10000`
+|Yes
+
+|`quarkus.kogito.devservices.enabled`
+|Enables or disables the Dev Services for workflows. By default, the Dev Services are enabled, unless an existing configuration is present.
+|boolean
+|`true`
+|No
+
+|`quarkus.kogito.devservices.port`
+|Defines the optional fixed port that the Dev Services listen to.
+|int
+|`8180`
+|No
+
+|`quarkus.kogito.devservices.image-name`
+|Defines the {data_index_ref} image to use in Dev Service.
+|string
+|`quay.io/kiegroup/kogito-data-index-ephemeral:{page-component-version}`
+|No
+
+|`quarkus.kogito.devservices.shared`
+|Indicates if the {data_index_ref} instance, which is managed by Quarkus Dev Services, is shared.
+|boolean
+|`true`
+|No
+
+|`quarkus.kogito.devservices.service-name`
+|Defines the value of the label that is attached to the started container. This property is used when `shared` is set to `true`.
+|string
+|`kogito-data-index`
+|No
+
+
+|===
+
+== Additional resources
+
+* xref:data-index/data-index-core-concepts.adoc[]
+* xref:cloud/quarkus/deploying-on-minikube.adoc[]
+* xref:eventing/consume-producing-events-with-kafka.adoc[]
+* xref:eventing/consume-produce-events-with-knative-eventing.adoc[]
+* xref:use-cases/timeout-showcase-example.adoc[Timeout example in {product_name}]
+
+include::../_common-content/report-issue.adoc[]
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/supporting-services/data-index/data-index-usecase-multi.adoc b/modules/serverless-logic/pages/supporting-services/data-index/data-index-usecase-multi.adoc
new file mode 100644
index 00000000..c66f28ab
--- /dev/null
+++ b/modules/serverless-logic/pages/supporting-services/data-index/data-index-usecase-multi.adoc
@@ -0,0 +1,207 @@
+= Deploying Data Index and multiple {product_name} applications on Minikube
+:compat-mode!:
+// Metadata:
+:description: Deploying multiple {product_name} applications pushing to a single Data Index on Minikube
+:keywords: kogito, workflow, quarkus, serverless, kubectl, minikube, operator, dataindex
+:table-caption: Data Set
+// envs for common content
+:kubectl_prereq: command-line tool is installed. Otherwise, Minikube handles it.
+//Common constants
+:data_index_ref: Data Index
+:flow_examples_operator_url: {kogito_sw_operator_examples_url}/serverless-workflow-dataindex-use-cases
+
+
+This document describes how to deploy multiple {product_name} workflow applications and the {data_index_ref} service using a local Kubernetes cluster, such as link:{minikube_url}[Minikube], and the link:{kogito_serverless_operator_url}[{operator_name}].
+
+For more information about Minikube and related system requirements, see link:{minikube_url}/docs/start/[Getting started with Minikube] documentation.
+
+This use case is intended to represent an installation with:
+
+* A singleton Data Index Service with PostgreSQL persistence.
+* The `greeting` workflow (no persistence), which is configured to register events to the Data Index Service.
+* The `helloworld` workflow (no persistence), which is configured to register events to the Data Index Service.
+* Both workflows are configured to register their process events on the {data_index_ref} Service.
+
+You can directly access the UseCase2 example application we are going to follow at link:{flow_examples_operator_url}[{product_name} Data Index Use Cases with operator].
+
+include::common/_prerequisites.adoc[]
+
+You can check the Minikube installation by entering the following commands in a command terminal:
+
+.Verify Minikube version
+[source,shell]
+----
+minikube version
+----
+
+.Verify `kubectl` CLI version
+[source,shell]
+----
+kubectl version
+----
+
+[NOTE]
+====
+If `kubectl` is not installed, then Minikube handles it when you execute the following command:
+
+.`kubectl` is available using Minikube
+[source,shell]
+----
+alias kubectl="minikube kubectl --"
+----
+====
+
+.Procedure
+. After cloning the link:{kogito_examples_url}[{product_name} examples repository], open a terminal and run the following commands:
+
++
+--
+[source,shell]
+----
+cd serverless-operator-examples/serverless-workflow-dataindex-use-cases/
+----
+--
+
+. Create the namespace:
++
+--
+[source,shell]
+----
+kubectl create namespace usecase2
+----
+--
+
+. Deploy the {data_index_ref} Service and postgresql database:
++
+--
+include::common/_dataindex_deployment_operator.adoc[]
+
+Perform the deployments by executing:
+[source,shell]
+----
+kubectl kustomize infra/dataindex | kubectl apply -f - -n usecase2
+----
+
+----
+configmap/dataindex-properties-hg9ff8bff5 created
+secret/postgres-secrets-22tkgc2dt7 created
+service/data-index-service-postgresql created
+service/postgres created
+persistentvolumeclaim/postgres-pvc created
+deployment.apps/data-index-service-postgresql created
+deployment.apps/postgres created
+----
+
+Give the {data_index_ref} service some time to start. You can check that it is running by executing:
+
+[source,shell]
+----
+kubectl get pod -n usecase2
+----
+
+----
+NAME READY STATUS RESTARTS AGE
+data-index-service-postgresql-5d76dc4468-lb259 1/1 Running 0 2m11s
+postgres-7f78499688-lc8n6 1/1 Running 0 2m11s
+----
+--
+. Deploy the workflow:
++
+--
+
+You can find the use case kustomization required to deploy the workflows link:{flow_examples_operator_url}/tree/main/usecases/usecase2[here].
+
+.Use case kustomization.yaml resources that deploy the workflows
+[source,yaml,subs="attributes+"]
+----
+resources:
+- ../../infra/service_discovery
+- ../../workflows/sonataflow-greeting
+- ../../workflows/sonataflow-helloworld
+----
+
+For more details, see xref:serverless-logic:cloud/operator/build-and-deploy-workflows.adoc[].
+
+
+Perform the deployment by executing:
+[source,shell]
+----
+ kubectl kustomize usecases/usecase2 | kubectl apply -f - -n usecase2
+----
+
+----
+configmap/greeting-props created
+configmap/helloworld-props created
+sonataflow.sonataflow.org/greeting created
+sonataflow.sonataflow.org/helloworld created
+----
+
+Give the operator some time to build and deploy the workflows.
+To check that the workflows are ready, use the following command:
+
+[source,shell]
+----
+kubectl get workflow -n usecase2
+----
+
+----
+NAME PROFILE VERSION URL READY REASON
+greeting 0.0.1 True
+helloworld 0.0.1 True
+----
+--
+
+. Expose the workflows and get the URLs:
++
+--
+[source,shell]
+----
+kubectl patch svc greeting helloworld -p '{"spec": {"type": "NodePort"}}' -n usecase2
+----
+
+[source,shell]
+----
+minikube service greeting --url -n usecase2
+----
+
+[source,shell]
+----
+minikube service helloworld --url -n usecase2
+----
+--
+
+. Create a workflow instance:
++
+--
+You must use the URLs calculated in step 5.
+
+[source,shell]
+----
+curl -X POST -H 'Content-Type:application/json' -H 'Accept:application/json' -d '{"name": "John", "language": "English"}' http://192.168.49.2:32407/greeting
+----
+
+[source,shell]
+----
+curl -X POST -H 'Content-Type:application/json' -H 'Accept:application/json' -d '{}' http://192.168.49.2:32327/helloworld
+----
+--
+
+. Clean the use case:
++
+--
+[source,shell]
+----
+kubectl delete namespace usecase2
+----
+--
+
+include::common/_querying_dataindex.adoc[]
+
+== Additional resources
+
+* xref:serverless-logic:supporting-services/data-index/data-index-concepts.adoc[]
+* xref:serverless-logic:supporting-services/data-index/operator/data-index-usecase-singleton.adoc[]
+* xref:serverless-logic:cloud/quarkus/deploying-on-minikube.adoc[]
+* xref:serverless-logic:cloud/operator/install-serverless-operator.adoc[]
+
+include::../../pages/_common-content/report-issue.adoc[]
diff --git a/modules/serverless-logic/pages/supporting-services/data-index/data-index-usecase-singleton.adoc b/modules/serverless-logic/pages/supporting-services/data-index/data-index-usecase-singleton.adoc
new file mode 100644
index 00000000..6b6bb0fc
--- /dev/null
+++ b/modules/serverless-logic/pages/supporting-services/data-index/data-index-usecase-singleton.adoc
@@ -0,0 +1,198 @@
+= Deploying Data Index and {product_name} application on Minikube
+:compat-mode!:
+// Metadata:
+:description: Deploying {product_name} application and Data Index on Minikube with operator
+:keywords: kogito, workflow, quarkus, serverless, kn, kubectl, minikube, operator, dataindex
+:table-caption: Data Set
+// envs for common content
+:kubectl_prereq: command-line tool is installed. Otherwise, Minikube handles it.
+//Common constants
+:data_index_ref: Data Index
+:flow_examples_operator_url: {kogito_sw_operator_examples_url}/serverless-workflow-dataindex-use-cases
+
+
+This document describes how to deploy a workflow application and the {data_index_ref} service using a local Kubernetes cluster, such as link:{minikube_url}[Minikube], and the link:{kogito_serverless_operator_url}[{operator_name}].
+
+For more information about Minikube and related system requirements, see link:{minikube_url}/docs/start/[Getting started with Minikube] documentation.
+
+This use case is intended to represent an installation with:
+
+* A singleton Data Index Service with PostgreSQL persistence.
+* The `greeting` workflow (no persistence), which is configured to register events to the Data Index Service.
+
+You can directly access the UseCase1 example application we are going to follow at link:{flow_examples_operator_url}[{product_name} Data Index Use Cases with operator].
+
+// shared pre req
+include::common/_prerequisites.adoc[]
+
+You can check the Minikube installation by entering the following commands in a command terminal:
+
+.Verify Minikube version
+[source,shell]
+----
+minikube version
+----
+
+.Verify `kubectl` CLI version
+[source,shell]
+----
+kubectl version
+----
+
+[NOTE]
+====
+If `kubectl` is not installed, then Minikube handles it when you execute the following command:
+
+.`kubectl` is available using Minikube
+[source,shell]
+----
+alias kubectl="minikube kubectl --"
+----
+====
+
+.Procedure
+
+. After cloning the link:{kogito_examples_url}[{product_name} examples repository], open a terminal and run the following commands:
+
++
+--
+[source,shell]
+----
+cd serverless-operator-examples/serverless-workflow-dataindex-use-cases/
+----
+--
+
+. Create the namespace:
++
+--
+[source,shell]
+----
+kubectl create namespace usecase1
+----
+--
+
+. Deploy the {data_index_ref} Service and postgresql database:
++
+--
+include::common/_dataindex_deployment_operator.adoc[]
+
+Perform the deployments by executing:
+[source,shell]
+----
+kubectl kustomize infra/dataindex | kubectl apply -f - -n usecase1
+----
+
+----
+configmap/dataindex-properties-hg9ff8bff5 created
+secret/postgres-secrets-22tkgc2dt7 created
+service/data-index-service-postgresql created
+service/postgres created
+persistentvolumeclaim/postgres-pvc created
+deployment.apps/data-index-service-postgresql created
+deployment.apps/postgres created
+----
+
+Give the {data_index_ref} service some time to start. You can check that it is running by executing:
+
+[source,shell]
+----
+kubectl get pod -n usecase1
+----
+
+----
+NAME READY STATUS RESTARTS AGE
+data-index-service-postgresql-5d76dc4468-lb259 1/1 Running 0 2m11s
+postgres-7f78499688-lc8n6 1/1 Running 0 2m11s
+----
+--
+. Deploy the workflow:
++
+--
+
+You can find the use case kustomization required to deploy the workflow link:{flow_examples_operator_url}/tree/main/usecases/usecase1[here].
+
+.Use case kustomization.yaml resources that deploy the workflow
+[source,yaml,subs="attributes+"]
+----
+resources:
+- ../../infra/service_discovery
+- ../../workflows/sonataflow-greeting
+----
+
+For more details about how to deploy the workflow, see xref:cloud/operator/build-and-deploy-workflows.adoc[].
+
+Perform the deployment by executing:
+
+[source,shell]
+----
+ kubectl kustomize usecases/usecase1 | kubectl apply -f - -n usecase1
+----
+
+----
+configmap/greeting-props created
+sonataflow.sonataflow.org/greeting created
+----
+
+For more details about how to generate these resources, see xref:cloud/operator/build-and-deploy-workflows.adoc[].
+
+Give the operator some time to build and deploy the workflow.
+To check that the workflow is ready, use the following command:
+
+[source,shell]
+----
+kubectl get workflow -n usecase1
+----
+
+----
+NAME PROFILE VERSION URL READY REASON
+greeting 0.0.1 True
+----
+--
+
+. Expose the workflow and get the URL:
++
+--
+[source,shell]
+----
+kubectl patch svc greeting -p '{"spec": {"type": "NodePort"}}' -n usecase1
+----
+
+[source,shell]
+----
+minikube service greeting --url -n usecase1
+----
+--
+
+. Create a workflow instance:
++
+--
+You must use the URL calculated in step 5.
+
+[source,shell]
+----
+curl -X POST -H 'Content-Type:application/json' -H 'Accept:application/json' -d '{"name": "John", "language": "English"}' http://192.168.49.2:32407/greeting
+----
+
+--
+
+. Clean the use case:
++
+--
+[source,shell]
+----
+kubectl delete namespace usecase1
+----
+--
+
+include::common/_querying_dataindex.adoc[]
+
+== Additional resources
+
+
+* xref:serverless-logic:supporting-services/data-index/data-index-concepts.adoc[]
+* xref:serverless-logic:supporting-services/data-index/operator/data-index-usecase-multi.adoc[]
+* xref:serverless-logic:cloud/quarkus/deploying-on-minikube.adoc[]
+* xref:serverless-logic:cloud/operator/install-serverless-operator.adoc[]
+
+
+include::../../pages/_common-content/report-issue.adoc[]
diff --git a/modules/serverless-logic/pages/supporting-services/job-service/core-concepts.adoc b/modules/serverless-logic/pages/supporting-services/job-service/core-concepts.adoc
new file mode 100644
index 00000000..96229a71
--- /dev/null
+++ b/modules/serverless-logic/pages/supporting-services/job-service/core-concepts.adoc
@@ -0,0 +1,624 @@
+= Introduction
+:compat-mode!:
+// Metadata:
+:description: Job Service to control timeouts in {product_name}
+:keywords: sonataflow, workflow, serverless, timeout, timer, expiration, job service
+// links
+:jobs_service_image_allinone_url: https://quay.io/repository/kiegroup/kogito-jobs-service-allinone
+:jobs_service_image_ephemeral_url: https://quay.io/repository/kiegroup/kogito-jobs-service-ephemeral
+:jobs_service_image_postgresql_url: https://quay.io/repository/kiegroup/kogito-jobs-service-postgresql
+:jobs_service_image_infinispan_url: https://quay.io/repository/kiegroup/kogito-jobs-service-infinispan
+:jobs_service_image_usage_url: https://github.com/kiegroup/kogito-images#jobs-services-all-in-one
+:knative_eventing_url: https://knative.dev/docs/eventing/
+:knative_eventing_trigger_url: https://knative.dev/docs/eventing/triggers/
+:knative_eventing_sink_binding_url: https://knative.dev/docs/eventing/sinks/#sink-parameter-example
+
+The Job Service facilitates the scheduled execution of tasks in a cloud environment. These tasks are implemented by independent services, and can be started by using any of the Job Service supported interaction modes, based on HTTP calls or Knative Events delivery.
+
+To schedule the execution of a task you must create a Job, which is configured with the following information:
+
+* `Schedule`: the job triggering periodicity.
+* `Recipient`: the entity that is called on the job execution for the given interaction mode, and receives the execution parameters.
+
+image::job-services/Job-Service-Generic-Diagram.png[]
+
+[#integration-with-the-workflows]
+== Integration with the Workflows
+
+In the context of {product_name} workflows, the Job Service is responsible for controlling the execution of the time-triggered actions. Thus, all the time-based states that you can use in a workflow are handled by the interaction between the workflow and the Job Service.
+
+For example, every time the workflow execution reaches a state with a configured timeout, a corresponding job is created in the Job Service, and when the timeout is met, an HTTP callback is executed to notify the workflow.
+
+image::job-services/Time-Based-States-And-Job-Service-Interaction.png[]
+
+To set up this integration you can use different xref:job-services/quarkus-extensions.adoc#job-service-quarkus-extensions[communication alternatives], that must be configured by combining the Job Service and the Quarkus Workflow Project configurations.
+
+[NOTE]
+====
+If the project is not configured to use the Job Service, all time-based actions will use an in-memory implementation of that service.
+However, this setup must not be used in production, since every time the application is restarted, all the timers are lost.
+This is not suitable for serverless architectures, where applications might scale to zero at any time.
+====
+
+== Jobs life-span
+
+Since the main goal of the Job Service is to work with the active jobs, such as the scheduled jobs that need to be executed, a job is removed from the Job Service when it reaches a final state.
+However, if you want to keep the information about the jobs in a permanent repository, you can configure the Job Service to produce status change events, which can be collected by the {data_index_xref}[Data Index Service], where they are indexed and made available through GraphQL queries.
+
+[#executing]
+== Executing
+
+To execute the Job Service in your Docker or Kubernetes environment, you must use the following image:
+
+* link:{jobs_service_image_allinone_url}[kogito-jobs-service-allinone]
+
+In the next topics you can see the different configuration parameters that you must use, for example, to configure the <<job-service-persistence, persistence mechanism>>, the <<job-service-eventing-api, eventing API>>, etc.
+More information on this image can be found link:{jobs_service_image_usage_url}[here].
+
+We recommend that you follow this procedure:
+
+1. Identify the <<job-service-persistence, persistence mechanism>> to use and see the required configuration parameters.
+2. Identify if the <<job-service-eventing-api, eventing API>> is required for your needs and see the required configuration parameters.
+3. Identify if the project containing your workflows is configured with the appropriate xref:job-services/quarkus-extensions.adoc#job-service-quarkus-extensions[Job Service Quarkus Extension].
+
+Finally, to run the image, you must use the <<exposed-environment-variables, exposed environment variables>>, and other configurations that you can set <<using-environent-variables, using environment variables>> or <<using-java-like-system-properties, using system properties with java like names>>.
+
+[#exposed-environment-variables]
+=== Exposed environment variables
+
+[cols="1,2"]
+|===
+|Variable | Description
+
+|`SCRIPT_DEBUG`
+|Enable debug level of the image and its operations.
+
+|`JOBS_SERVICE_PERSISTENCE`
+|Any of the following values: `postgresql`, `ephemeral`, or `infinispan` to select the persistence mechanism to use, see <<job-service-persistence, Persistence>>.
+
+|===
+
+[NOTE]
+====
+If used, these values must always be passed as environment variables.
+====
+
+[#using-environent-variables]
+=== Using environment variables
+
+To configure the image by using environment variables you must pass one environment variable per parameter.
+
+.Job Service image configuration for docker execution example
+[source, bash]
+----
+docker run -it -e JOBS_SERVICE_PERSISTENCE=postgresql -e VARIABLE_NAME=value quay.io/kiegroup/kogito-jobs-service-allinone:latest
+----
+
+.Job Service image configuration for Kubernetes execution example
+[source, yaml]
+----
+spec:
+ containers:
+ - name: jobs-service-postgresql
+ image: quay.io/kiegroup/kogito-jobs-service-allinone-nightly:latest
+ imagePullPolicy: Always
+ ports:
+ - containerPort: 8080
+ name: http
+ protocol: TCP
+ env:
+ # Set the image parameters as environment variables in the container definition.
+ - name: KUBERNETES_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: JOBS_SERVICE_PERSISTENCE
+ value: "postgresql"
+ - name: QUARKUS_DATASOURCE_USERNAME
+ value: postgres
+ - name: QUARKUS_DATASOURCE_PASSWORD
+ value: pass
+ - name: QUARKUS_DATASOURCE_JDBC_URL
+ value: jdbc:postgresql://timeouts-showcase-database:5432/postgres?currentSchema=jobs-service
+ - name: QUARKUS_DATASOURCE_REACTIVE_URL
+ value: postgresql://timeouts-showcase-database:5432/postgres?search_path=jobs-service
+----
+
+[NOTE]
+====
+This is the recommended approach when you execute the Job Service in kubernetes.
+The timeouts showcase example xref:use-cases/timeout-showcase-example.adoc#execute-quarkus-project-standalone-services[Quarkus Workflow Project with standalone services] contains an example of this configuration, see https://github.com/kiegroup/kogito-examples/blob/main/serverless-workflow-examples/serverless-workflow-timeouts-showcase-extended/kubernetes/jobs-service-postgresql.yml#L65[this file].
+====
+
+[#using-java-like-system-properties]
+=== Using system properties with java like names
+
+To configure the image by using system properties, you must pass one property per parameter. However, in this case, all these properties are passed as part of a single environment variable with the name `JAVA_OPTIONS`.
+
+.Job Service image configuration for docker execution example
+[source, bash]
+----
+docker run -it -e JOBS_SERVICE_PERSISTENCE=postgresql -e JAVA_OPTIONS='-Dmy.sys.prop1=value1 -Dmy.sys.prop2=value2' \
+quay.io/kiegroup/kogito-jobs-service-allinone:latest
+----
+
+[NOTE]
+====
+In case you need to convert a java-like property name to the corresponding environment variable name, to use the environment variables configuration alternative, you must apply the naming convention defined in the link:{quarkus_guides_config_reference_url}#environment-variables[Quarkus Configuration Reference].
+For example, the name `quarkus.datasource.jdbc.url` must be converted to `QUARKUS_DATASOURCE_JDBC_URL`.
+====
+
+[#job-service-global-configurations]
+== Global configurations
+
+Global configurations that affect the job execution retries, startup procedure, etc.
+
+[tabs]
+====
+Using environment variables::
++
+
+[cols="2,1,1"]
+|===
+|Name |Description |Default
+
+|`KOGITO_JOBS_SERVICE_BACKOFFRETRYMILLIS`
+|A long value that defines the retry back-off time in milliseconds between job execution attempts, in case the execution fails.
+|`1000`
+
+|`KOGITO_JOBS_SERVICE_MAXINTERVALLIMITTORETRYMILLIS`
+|A long value that defines the maximum interval in milliseconds when retrying to execute jobs, in case the execution fails.
+|`60000`
+
+|===
+
+Using system properties with java like names::
++
+
+[cols="2,1,1"]
+|===
+|Name |Description |Default
+
+|`kogito.jobs-service.backoffRetryMillis`
+|A long value that defines the retry back-off time in milliseconds between job execution attempts, in case the execution fails.
+|`1000`
+
+|`kogito.jobs-service.maxIntervalLimitToRetryMillis`
+|A long value that defines the maximum interval in milliseconds when retrying to execute jobs, in case the execution fails.
+|`60000`
+
+|===
+
+====
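+
+For example, a sketch of how these values could be overridden for a docker execution follows; the retry values shown are only illustrative, and the persistence defaults to `ephemeral` because no mechanism is selected.
+
+.Job Service global configuration example for docker execution
+[source, bash]
+----
+docker run -it \
+  -e KOGITO_JOBS_SERVICE_BACKOFFRETRYMILLIS=2000 \
+  -e KOGITO_JOBS_SERVICE_MAXINTERVALLIMITTORETRYMILLIS=120000 \
+  quay.io/kiegroup/kogito-jobs-service-allinone:latest
+----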
+
+[#job-service-persistence]
+== Persistence
+
+An important configuration aspect of the Job Service is the persistence mechanism. It is where all the job information is stored, and it guarantees that no information is lost upon service restarts.
+
+The Job Service image is shipped with the <<job-service-postgresql, PostgreSQL>>, <<job-service-ephemeral, Ephemeral>>, and <<job-service-infinispan, Infinispan>> persistence mechanisms, which can be switched by setting the `JOBS_SERVICE_PERSISTENCE` environment variable to any of the values `postgresql`, `ephemeral`, or `infinispan`. If not set, it defaults to the `ephemeral` option.
+
+[NOTE]
+====
+The link:{jobs_service_image_allinone_url}[kogito-jobs-service-allinone] image is a composite packaging that includes one image per persistence mechanism, which makes it considerably bigger than the individual ones. If that size is an issue in your installation, you can use the individual images instead.
+Finally, if you use this alternative, the `JOBS_SERVICE_PERSISTENCE` variable must not be used, since the persistence mechanism is auto-determined.
+
+These are the individual images: link:{jobs_service_image_postgresql_url}[kogito-jobs-service-postgresql], link:{jobs_service_image_ephemeral_url}[kogito-jobs-service-ephemeral], and link:{jobs_service_image_infinispan_url}[kogito-jobs-service-infinispan].
+====
+
+[#job-service-postgresql]
+=== PostgreSQL
+
+PostgreSQL is the recommended database to use with the Job Service.
+Additionally, it provides an initialization procedure that integrates Flyway for the database initialization, which automatically controls the database schema. In this way, the tables are created or updated by the service when required.
+
+In case you need to control the database schema externally, you can check and apply the DDL scripts for the Job Service in the same way as described in the
+xref:persistence/postgresql-flyway-migration.adoc#manually-executing-scripts[Manually executing scripts] guide.
+
+To configure the PostgreSQL persistence you must provide these configurations:
+
+[tabs]
+====
+Using environment variables::
++
+
+[cols="2,1,1"]
+|===
+|Variable | Description| Example value
+
+|`JOBS_SERVICE_PERSISTENCE`
+|Configure the persistence mechanism that must be used.
+|`postgresql`
+
+|`QUARKUS_DATASOURCE_USERNAME`
+|Username to connect to the database.
+|`postgres`
+
+|`QUARKUS_DATASOURCE_PASSWORD`
+|Password to connect to the database
+|`pass`
+
+|`QUARKUS_DATASOURCE_JDBC_URL`
+| JDBC datasource url used by Flyway to connect to the database.
+|`jdbc:postgresql://timeouts-showcase-database:5432/postgres?currentSchema=jobs-service`
+
+|`QUARKUS_DATASOURCE_REACTIVE_URL`
+|Reactive datasource url used by the Job Service to connect to the database.
+|`postgresql://timeouts-showcase-database:5432/postgres?search_path=jobs-service`
+
+|===
+
+Using system properties with java like names::
++
+
+[cols="2,1,1"]
+|===
+|Variable | Description| Example value
+
+|`JOBS_SERVICE_PERSISTENCE`
+|**Always an environment variable**
+|`postgresql`
+
+|`quarkus.datasource.username`
+|Username to connect to the database.
+|`postgres`
+
+|`quarkus.datasource.password`
+|Password to connect to the database
+|`pass`
+
+|`quarkus.datasource.jdbc.url`
+| JDBC datasource url used by Flyway to connect to the database.
+|`jdbc:postgresql://timeouts-showcase-database:5432/postgres?currentSchema=jobs-service`
+
+|`quarkus.datasource.reactive.url`
+|Reactive datasource url used by the Job Service to connect to the database.
+|`postgresql://timeouts-showcase-database:5432/postgres?search_path=jobs-service`
+
+|===
+====
+
+The timeouts showcase example xref:use-cases/timeout-showcase-example.adoc#execute-quarkus-project-standalone-services[Quarkus Workflow Project with standalone services], shows how to run a PostgreSQL based Job Service as a Kubernetes deployment.
+In your local environment you might have to change some of these values to point to your own PostgreSQL database.
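+
+For a plain docker execution, a minimal sketch combining these parameters could look like the following; the `my-postgres` host name and the credentials are placeholders for your own database.
+
+.Job Service image configuration with PostgreSQL persistence for docker execution example
+[source, bash]
+----
+docker run -it -e JOBS_SERVICE_PERSISTENCE=postgresql \
+  -e QUARKUS_DATASOURCE_USERNAME=postgres \
+  -e QUARKUS_DATASOURCE_PASSWORD=pass \
+  -e QUARKUS_DATASOURCE_JDBC_URL='jdbc:postgresql://my-postgres:5432/postgres?currentSchema=jobs-service' \
+  -e QUARKUS_DATASOURCE_REACTIVE_URL='postgresql://my-postgres:5432/postgres?search_path=jobs-service' \
+  quay.io/kiegroup/kogito-jobs-service-allinone:latest
+----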
+
+[#job-service-ephemeral]
+=== Ephemeral
+
+The Ephemeral persistence mechanism is based on an embedded PostgreSQL database and does not require any external configuration. However, the database is recreated on each service restart, and thus, it must be used only for testing purposes.
+
+[cols="2,1,1"]
+|===
+|Variable | Description| Example value
+
+|`JOBS_SERVICE_PERSISTENCE`
+|Configure the persistence mechanism that must be used.
+|`ephemeral`
+
+|===
+
+[NOTE]
+====
+If the image is started without configuring any persistence mechanism, the Ephemeral option is used by default.
+====
+
+[#job-service-infinispan]
+=== Infinispan
+
+To configure the Infinispan persistence you must provide these configurations:
+
+[tabs]
+====
+Using environment variables::
++
+
+[cols="2,1,1"]
+|===
+|Variable | Description| Example value
+
+|`JOBS_SERVICE_PERSISTENCE`
+|Configure the persistence mechanism that must be used.
+|`infinispan`
+
+|`QUARKUS_INFINISPAN_CLIENT_HOSTS`
+|Sets the host name/port to connect to. Each one is separated by a semicolon.
+|`host1:11222;host2:11222`
+
+|`QUARKUS_INFINISPAN_CLIENT_USE_AUTH`
+|Enables or disables authentication. Set it to `false` when connecting to an Infinispan Server without authentication.
+|The enablement of this parameter depends on your local infinispan installation. If not set, the default value is `true`.
+
+|`QUARKUS_INFINISPAN_CLIENT_SASL_MECHANISM`
+|Sets SASL mechanism used by authentication. For more information about this parameter, see link:{quarkus_guides_infinispan_client_reference_url}#quarkus-infinispan-client_quarkus.infinispan-client.sasl-mechanism[Quarkus Infinispan Client Reference].
+|When the authentication is enabled the default value is `DIGEST-MD5`.
+
+|`QUARKUS_INFINISPAN_CLIENT_AUTH_REALM`
+|Sets realm used by authentication.
+|When the authentication is enabled the default value is `default`.
+
+|`QUARKUS_INFINISPAN_CLIENT_USERNAME`
+|Sets username used by authentication.
+|Use this property if the authentication is enabled.
+
+|`QUARKUS_INFINISPAN_CLIENT_PASSWORD`
+|Sets password used by authentication.
+|Use this property if the authentication is enabled.
+
+|===
+
+Using system properties with java like names::
++
+
+[cols="2,1,1"]
+|===
+|Variable | Description| Example value
+
+|`JOBS_SERVICE_PERSISTENCE`
+|**Always an environment variable**
+|`infinispan`
+
+|`quarkus.infinispan-client.hosts`
+|Sets the host name/port to connect to. Each one is separated by a semicolon.
+|`host1:11222;host2:11222`
+
+|`quarkus.infinispan-client.use-auth`
+|Enables or disables authentication. Set it to `false` when connecting to an Infinispan Server without authentication.
+|The enablement of this parameter depends on your local infinispan installation. If not set, the default value is `true`.
+
+|`quarkus.infinispan-client.sasl-mechanism`
+|Sets SASL mechanism used by authentication. For more information about this parameter, see link:{quarkus_guides_infinispan_client_reference_url}#quarkus-infinispan-client_quarkus.infinispan-client.sasl-mechanism[Quarkus Infinispan Client Reference].
+|When the authentication is enabled the default value is `DIGEST-MD5`.
+
+|`quarkus.infinispan-client.auth-realm`
+|Sets realm used by authentication.
+|When the authentication is enabled the default value is `default`.
+
+|`quarkus.infinispan-client.username`
+|Sets username used by authentication.
+|Use this property if the authentication is enabled.
+
+|`quarkus.infinispan-client.password`
+|Sets password used by authentication.
+|Use this property if the authentication is enabled.
+
+|===
+
+====
+
+[#infinispan-client-config-note]
+[NOTE]
+====
+The Infinispan client configuration parameters that you must configure depend on your local Infinispan service.
+Thus, the table above shows only a subset of all the available options. To see the list of all the options supported by the Quarkus Infinispan client, you must read the link:{quarkus_guides_infinispan_client_reference_url}[Quarkus Infinispan Client Reference].
+====
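+
+For a plain docker execution, a minimal sketch for an Infinispan setup with authentication enabled could look like the following; the host name and credentials are placeholders for your own Infinispan installation.
+
+.Job Service image configuration with Infinispan persistence for docker execution example
+[source, bash]
+----
+docker run -it -e JOBS_SERVICE_PERSISTENCE=infinispan \
+  -e QUARKUS_INFINISPAN_CLIENT_HOSTS=my-infinispan:11222 \
+  -e QUARKUS_INFINISPAN_CLIENT_USERNAME=admin \
+  -e QUARKUS_INFINISPAN_CLIENT_PASSWORD=admin-password \
+  quay.io/kiegroup/kogito-jobs-service-allinone:latest
+----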
+
+[#job-service-eventing-api]
+== Eventing API
+
+The Job Service provides a Cloud Event based API that can be used to create and delete jobs.
+This API is useful in deployment scenarios where you want to use event-based communication from the workflow runtime to the Job Service. For the transport of these events you can use the <<knative-eventing, Knative eventing>> system or the <<kafka-messaging, Kafka messaging>> system.
+
+[#knative-eventing]
+=== Knative eventing
+
+By default, the Job Service Eventing API is prepared to work in a link:{knative_eventing_url}[Knative eventing] system. This means that, without adding any additional configuration parameters, it is able to receive cloud events via the link:{knative_eventing_url}[Knative eventing] system to manage the jobs.
+However, you must still prepare your link:{knative_eventing_url}[Knative eventing] environment to ensure these events are properly delivered to the Job Service, see <<knative-eventing-supporting-resources, Knative eventing supporting resources>>.
+
+Finally, the only configuration parameter that you must set, when needed, is to enable the propagation of the Job Status Change events, for example, if you want to register these events in the {data_index_xref}[Data Index Service].
+
+[tabs]
+====
+Using environment variables::
++
+
+[cols="2,1,1"]
+|===
+|Variable | Description| Default value
+
+|`KOGITO_JOBS_SERVICE_HTTP_JOB_STATUS_CHANGE_EVENTS`
+| `true` to establish if the Job Status Change events must be propagated. If you set this value to `true` you must be sure that the <<knative-eventing-supporting-resources-sink-binding, sink binding>> was created.
+| `false`
+
+|===
+
+Using system properties with java like names::
++
+
+[cols="2,1,1"]
+|===
+|Variable | Description| Default value
+
+|`kogito.jobs-service.http.job-status-change-events`
+| `true` to establish if the Job Status Change events must be propagated. If you set this value to `true` you must be sure that the <<knative-eventing-supporting-resources-sink-binding, sink binding>> was created.
+| `false`
+
+|===
+
+====
+
+
+[#knative-eventing-supporting-resources]
+==== Knative eventing supporting resources
+
+To ensure the Job Service receives the Knative events to manage the jobs, you must create the <<knative-eventing-supporting-resources-trigger-create, create>> and <<knative-eventing-supporting-resources-trigger-delete, delete>> job event triggers shown in the diagram below.
+Additionally, if you have enabled the Job Status Change events propagation, you must create the <<knative-eventing-supporting-resources-sink-binding, sink binding>>.
+
+.Knative eventing supporting resources
+image::job-services/Knative-Eventing-API-Resources.png[]
+
+The following snippets show an example of how you can configure these resources. Consider that these configurations might need to be adjusted to your local Kubernetes cluster.
+
+[NOTE]
+====
+We recommend that you visit this example xref:use-cases/timeout-showcase-example.adoc#execute-quarkus-project-standalone-services[Quarkus Workflow Project with standalone services] to see a full setup of all these configurations.
+====
+
+[#knative-eventing-supporting-resources-trigger-create]
+.Create Job event trigger configuration example
+[source,yaml]
+----
+apiVersion: eventing.knative.dev/v1
+kind: Trigger
+metadata:
+ name: jobs-service-postgresql-create-job-trigger
+spec:
+ broker: default
+ filter:
+ attributes:
+ type: job.create
+ subscriber:
+ ref:
+ apiVersion: v1
+ kind: Service
+ name: jobs-service-postgresql
+ uri: /v2/jobs/events
+----
+
+[#knative-eventing-supporting-resources-trigger-delete]
+.Delete Job event trigger configuration example
+[source,yaml]
+----
+apiVersion: eventing.knative.dev/v1
+kind: Trigger
+metadata:
+ name: jobs-service-postgresql-delete-job-trigger
+spec:
+ broker: default
+ filter:
+ attributes:
+ type: job.delete
+ subscriber:
+ ref:
+ apiVersion: v1
+ kind: Service
+ name: jobs-service-postgresql
+ uri: /v2/jobs/events
+----
+
+For more information about triggers, see link:{knative_eventing_trigger_url}[Knative Triggers].
+
+[#knative-eventing-supporting-resources-sink-binding]
+.Job Service sink binding configuration example
+[source, yaml]
+----
+apiVersion: sources.knative.dev/v1
+kind: SinkBinding
+metadata:
+ name: jobs-service-postgresql-sb
+spec:
+ sink:
+ ref:
+ apiVersion: eventing.knative.dev/v1
+ kind: Broker
+ name: default
+ subject:
+ apiVersion: apps/v1
+ kind: Deployment
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: jobs-service-postgresql
+ app.kubernetes.io/version: 2.0.0-SNAPSHOT
+----
+
+For more information about sink bindings, see link:{knative_eventing_sink_binding_url}[Knative Sink Bindings].
+
+[#kafka-messaging]
+=== Kafka messaging
+
+To enable the Job Service Eventing API via the Kafka messaging system you must provide these configurations:
+
+[tabs]
+====
+Using environment variables::
++
+
+[cols="2,1,1"]
+|===
+|Variable | Description| Default value
+
+|`QUARKUS_PROFILE`
+|Set the Quarkus profile to the value `kafka-events-support` to enable the Kafka messaging based Job Service Eventing API.
+|By default the Kafka eventing API is disabled.
+
+|`KOGITO_JOBS_SERVICE_KAFKA_JOB_STATUS_CHANGE_EVENTS`
+|`true` to establish if the Job Status Change events must be propagated.
+|`true` when the `kafka-events-support` profile is set.
+
+|`KAFKA_BOOTSTRAP_SERVERS`
+|A comma-separated list of host:port to use for establishing the initial connection to the Kafka cluster.
+|`localhost:9092` when the `kafka-events-support` profile is set.
+
+|`MP_MESSAGING_INCOMING_KOGITO_JOB_SERVICE_JOB_REQUEST_EVENTS_V2_TOPIC`
+|Kafka topic for events API incoming events. In general you don't need to change this value.
+|`kogito-job-service-job-request-events-v2` when the `kafka-events-support` profile is set.
+
+|`MP_MESSAGING_OUTGOING_KOGITO_JOB_SERVICE_JOB_STATUS_EVENTS_TOPIC`
+|Kafka topic for job status change outgoing events. In general you don't need to change this value.
+|`kogito-jobs-events` when the `kafka-events-support` profile is set.
+
+|===
+
+Using system properties with java like names::
++
+
+[cols="2,1,1"]
+|===
+|Variable | Description| Default value
+
+|`quarkus.profile`
+|Set the Quarkus profile to the value `kafka-events-support` to enable the Kafka messaging based Job Service Eventing API.
+|By default the Kafka eventing API is disabled.
+
+|`kogito.jobs-service.kafka.job-status-change-events`
+|`true` to establish if the Job Status Change events must be propagated.
+|`true` when the `kafka-events-support` profile is set.
+
+|`kafka.bootstrap.servers`
+|A comma-separated list of host:port to use for establishing the initial connection to the Kafka cluster.
+|`localhost:9092` when the `kafka-events-support` profile is set.
+
+|`mp.messaging.incoming.kogito-job-service-job-request-events-v2.topic`
+|Kafka topic for events API incoming events. In general you don't need to change this value.
+|`kogito-job-service-job-request-events-v2` when the `kafka-events-support` profile is set.
+
+|`mp.messaging.outgoing.kogito-job-service-job-status-events.topic`
+|Kafka topic for job status change outgoing events. In general you don't need to change this value.
+|`kogito-jobs-events` when the `kafka-events-support` profile is set.
+
+|===
+
+====
+
+[NOTE]
+====
+Depending on your Kafka messaging system configuration you might need to apply additional Kafka configurations to connect to the Kafka broker, etc.
+To see the list of all the supported configurations you must read the link:{quarkus_guides_kafka_url}[Quarkus Apache Kafka Reference Guide].
+====
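+
+For example, a minimal sketch of enabling the Kafka based Eventing API for a docker execution could look like the following; the broker address is a placeholder for your own Kafka cluster and the persistence defaults to `ephemeral`.
+
+.Job Service image configuration with the Kafka Eventing API enabled for docker execution example
+[source, bash]
+----
+docker run -it \
+  -e QUARKUS_PROFILE=kafka-events-support \
+  -e KAFKA_BOOTSTRAP_SERVERS=my-kafka-broker:9092 \
+  quay.io/kiegroup/kogito-jobs-service-allinone:latest
+----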
+
+
+
+== Leader election
+
+Currently, the Job Service is a singleton service, and thus, just one active instance of the service can be scheduling and executing the jobs.
+
+To avoid issues when it is deployed in the cloud, where it is common to eventually have more than one instance deployed, the Job Service supports a leader instance election process.
+Only the instance that becomes the leader activates the external communication to receive and schedule jobs.
+
+All the instances that are not leaders stay inactive in a wait state and continuously try to become the leader.
+
+When a new instance of the service is started, it is not set as the leader at startup time; instead, it starts the process to become one.
+
+When the instance that is the leader becomes unresponsive or is shut down, one of the other running instances becomes the leader.
+
+.Job Service leader election
+image::job-services/job-service-leader.png[]
+
+[NOTE]
+====
+This leader election mechanism uses the underlying persistence backend, which currently is only supported in the PostgreSQL implementation.
+====
+
+There is no need for any configuration to support this feature. The only requirement is to have the supported database with the data schema up-to-date, as described in the <<job-service-postgresql, PostgreSQL>> section.
+
+In case the underlying persistence does not support this feature, you must guarantee that just one single instance of the Job Service is running at the same time.
+
+include::../../pages/_common-content/report-issue.adoc[]
diff --git a/modules/serverless-logic/pages/supporting-services/job-service/quarkus-extensions.adoc b/modules/serverless-logic/pages/supporting-services/job-service/quarkus-extensions.adoc
new file mode 100644
index 00000000..2bfaac7e
--- /dev/null
+++ b/modules/serverless-logic/pages/supporting-services/job-service/quarkus-extensions.adoc
@@ -0,0 +1,238 @@
+[#job-service-quarkus-extensions]
+= Job Service Quarkus Extensions
+:compat-mode!:
+// Metadata:
+:description: Job Service Quarkus extensions in {product_name}
+:keywords: sonataflow, workflow, serverless, job service, quarkus extensions
+
+The interaction xref:job-services/core-concepts.adoc#integration-with-the-workflows[between the workflows and the Job Service] is handled by the different Job Service Quarkus Extensions. Each extension is designed to work with a different communication alternative.
+
+For example, you can select whether your workflows interact with the Job Service by sending cloud events over the <<kogito-addons-quarkus-jobs-knative-eventing, Knative eventing>> system or the <<kogito-addons-quarkus-jobs-messaging, Kafka messaging>> system, or simply by executing direct <<kogito-addons-quarkus-jobs-management, REST>> calls.
+
+Finally, for the interaction to work, you must configure your Quarkus Workflow Project with the extension of your choice.
+
+image::job-services/Quarkus-Workflow-Project-And-Extension.png[]
+
+We recommend that you follow this procedure:
+
+1. Identify the communication alternative that best fits your scenario.
+2. Be sure that the Job Service is properly configured to support that alternative. This is very important if you want to use xref:job-services/core-concepts.adoc#knative-eventing[knative events] or xref:job-services/core-concepts.adoc#kafka-messaging[kafka messages] to communicate with it.
+3. Configure your Quarkus Workflow Project with the corresponding extension.
+
+[NOTE]
+====
+If your workflows are not using timer-based actions, like timeouts, there is no need to add such an extension.
+====
+
+[#kogito-addons-quarkus-jobs-knative-eventing]
+== Knative eventing interaction
+
+To interact with the Job Service by sending cloud events over the knative eventing system you must follow these steps:
+
+. Be sure that you have read the xref:eventing/consume-produce-events-with-knative-eventing.adoc[Consuming and producing events on Knative Eventing] guide, and that you have configured the project accordingly.
+
+. Add the `kogito-addons-quarkus-jobs-knative-eventing` extension to your Quarkus Workflow Project using any of the following alternatives:
+
+[tabs]
+====
+Manually::
++
+[source,xml]
+----
+<dependency>
+  <groupId>org.kie.kogito</groupId>
+  <artifactId>kogito-addons-quarkus-jobs-knative-eventing</artifactId>
+</dependency>
+----
+Apache Maven::
++
+[source,shell]
+----
+mvn quarkus:add-extension -Dextensions="kogito-addons-quarkus-jobs-knative-eventing"
+----
+Quarkus CLI::
++
+[source,shell]
+----
+quarkus extension add kogito-addons-quarkus-jobs-knative-eventing
+----
+====
+
+[start=3]
+. Add the following configurations to the `application.properties` file of your project.
+
+[source,properties]
+----
+mp.messaging.outgoing.kogito-job-service-job-request-events.connector=quarkus-http
+mp.messaging.outgoing.kogito-job-service-job-request-events.url=${K_SINK:http://localhost:8280/v2/jobs/events}
+mp.messaging.outgoing.kogito-job-service-job-request-events.method=POST
+----
+
+[NOTE]
+====
+The `K_SINK` environment variable is automatically generated by the combination of the knative ecosystem and the SinkBinding definition that will be automatically generated in the `kogito.yml` file.
+
+If this variable is not present, the default value `http://localhost:8280/v2/jobs/events` is used instead, this can be useful in development environments if you are executing the Job Service as a standalone service.
+====
+
+[start=4]
+. Build your project and locate the automatically generated `kogito.yml` and `knative.yml` files in the `/target/kubernetes` directory of your project, see xref:eventing/consume-produce-events-with-knative-eventing.adoc#proc-generating-kn-objects-build-time[this section].
+
+[source,shell]
+----
+mvn clean install
+----
+
+[start=5]
+. Use the generated files to deploy your workflow application in the Kubernetes cluster using the following commands:
+
+[source, bash]
+----
+kubectl apply -f target/kogito.yml
+
+kubectl apply -f target/knative.yml
+----
+
+You can see a full example of this interaction mode configuration in the xref:use-cases/timeout-showcase-example.adoc#execute-quarkus-project-standalone-services[Quarkus Workflow Project with standalone services] example project.
+
+[#kogito-addons-quarkus-jobs-messaging]
+== Kafka messaging interaction
+
+To interact with the Job Service by sending cloud events over the kafka messaging system you must follow these steps:
+
+. Be sure that you have read the xref:eventing/consume-producing-events-with-kafka.adoc[Consuming and producing events with Kafka] guide, and you have configured the project accordingly.
+
+. Add the `quarkus-smallrye-reactive-messaging-kafka` and `kogito-addons-quarkus-jobs-messaging` extensions to your Quarkus Workflow Project using any of the following alternatives.
+
+[tabs]
+====
+Manually::
++
+[source,xml]
+----
+<dependency>
+  <groupId>io.quarkus</groupId>
+  <artifactId>quarkus-smallrye-reactive-messaging-kafka</artifactId>
+</dependency>
+<dependency>
+  <groupId>org.kie.kogito</groupId>
+  <artifactId>kogito-addons-quarkus-jobs-messaging</artifactId>
+</dependency>
+----
+
+Apache Maven::
++
+[source,shell]
+----
+mvn quarkus:add-extension -Dextensions="quarkus-smallrye-reactive-messaging-kafka,kogito-addons-quarkus-jobs-messaging"
+----
+
+Quarkus CLI::
++
+[source,shell]
+----
+quarkus extension add quarkus-smallrye-reactive-messaging-kafka kogito-addons-quarkus-jobs-messaging
+----
+====
+
+[start=3]
+. Add the following configurations to the `application.properties` file of your project.
+
+[source,properties]
+----
+mp.messaging.outgoing.kogito-job-service-job-request-events.connector=smallrye-kafka
+mp.messaging.outgoing.kogito-job-service-job-request-events.topic=kogito-job-service-job-request-events-v2
+mp.messaging.outgoing.kogito-job-service-job-request-events.value.serializer=org.apache.kafka.common.serialization.StringSerializer
+----
+
+[start=4]
+. Build and deploy your workflow application using any of the available procedures.
+
+[#kogito-addons-quarkus-jobs-management]
+== REST call interaction
+
+To interact with the Job Service by executing direct REST calls, you must follow these steps:
+
+. Add the `kogito-addons-quarkus-jobs-management` extension to your Quarkus Workflow Project using any of the following alternatives.
+
+[tabs]
+====
+Manually::
++
+[source,xml]
+----
+<dependency>
+    <groupId>org.kie.kogito</groupId>
+    <artifactId>kogito-addons-quarkus-jobs-management</artifactId>
+</dependency>
+----
+Apache Maven::
++
+[source,shell]
+----
+mvn quarkus:add-extension -Dextensions="kogito-addons-quarkus-jobs-management"
+----
+Quarkus CLI::
++
+[source,shell]
+----
+quarkus extension add kogito-addons-quarkus-jobs-management
+----
+====
+
+[start=2]
+. Add the following configuration to the `application.properties` file of your project.
+
+[source,properties]
+----
+kogito.jobs-service.url=http://localhost:8280
+----
+
+[NOTE]
+====
+When you deploy your project in a Kubernetes cluster, you must configure the `kogito.jobs-service.url` property with the cloud URL of the Job Service.
+In this case, you can also use an environment variable with the name `KOGITO_JOBS_SERVICE_URL` and pass it to the corresponding container.
+====
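+
+For example, when deploying to Kubernetes you can pass the URL to the workflow container through that environment variable. The following sketch assumes the Job Service is exposed by a Kubernetes `Service` named `jobs-service` in the same namespace; adjust the names, image, and port to your installation:
+
+[source,yaml]
+----
+containers:
+  - name: workflow-app                        # hypothetical container name
+    image: quay.io/acme/workflow-app:latest   # hypothetical image
+    env:
+      - name: KOGITO_JOBS_SERVICE_URL
+        value: http://jobs-service:8280
+----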
+
+[start=3]
+. Build and deploy your workflow application using any of the available procedures.
+
+== Job Service Embedded
+
+To facilitate the development and testing stages of your workflows, this extension provides an embedded Job Service instance that runs in the same runtime as your workflows and thus requires no additional configuration. Note that it must not be used in production installations.
+
+To use this extension you must:
+
+. Add the `kogito-addons-quarkus-jobs-service-embedded` extension to your Quarkus Workflow Project using any of the following alternatives.
+
+[tabs]
+====
+Manually::
++
+[source,xml]
+----
+<dependency>
+    <groupId>org.kie.kogito</groupId>
+    <artifactId>kogito-addons-quarkus-jobs-service-embedded</artifactId>
+</dependency>
+----
+Apache Maven::
++
+[source,shell]
+----
+mvn quarkus:add-extension -Dextensions="kogito-addons-quarkus-jobs-service-embedded"
+----
+Quarkus CLI::
++
+[source,shell]
+----
+quarkus extension add kogito-addons-quarkus-jobs-service-embedded
+----
+====
+
+[start=2]
+. Build and deploy your workflow application using any of the available procedures.
+
+You can see a full example of Job Service embedded usage in the xref:use-cases/timeout-showcase-example.adoc#execute-quarkus-project-embedded-services[Quarkus Workflow Project with embedded services] example project.
+
+include::../../pages/_common-content/report-issue.adoc[]
diff --git a/modules/serverless-logic/pages/supporting-services/jobs-service.adoc b/modules/serverless-logic/pages/supporting-services/jobs-service.adoc
deleted file mode 100644
index 1e48798c..00000000
--- a/modules/serverless-logic/pages/supporting-services/jobs-service.adoc
+++ /dev/null
@@ -1,107 +0,0 @@
-= Job Service
-:compat-mode!:
-// Metadata:
-:description: Job Service to control timeouts in {product_name}
-:keywords: kogito, workflow, serverless, timeout, timer, expiration, job service
-// links
-:jobs_service_image_url: https://quay.io/repository/kiegroup/kogito-jobs-service-allinone
-:jobs_service_image_usage_url: https://github.com/kiegroup/kogito-images#jobs-services-all-in-one
-
-In {product_name} architecture there is a dedicated supporting service that is responsible for controlling the execution of
-time-triggered actions, also known as jobs.
-
-The job execution is a callback to the {product_name} runtime application. This callback can be configured in different ways as described in the <> section.
-
-[#job-service]
-== Job Service configuration
-
-All timer nodes that might be declared in a workflow, are handled by the job service, which is responsible for managing, scheduling, and firing all actions (jobs) to be executed in the workflows.
-
-[NOTE]
-====
-Suppose the workflow service is not configured to use the Job Service or there is no such service running.
-In that case, all timer-related actions use an embedded in-memory implementation of the Job Service, which should not be used in production, since when the application shutdown, all timers are lost, which in a serverless architecture is a very common behavior with the scale to zero approach.
-That said, the usage of in-memory Job Service can only be done for testing or development, but not for production.
-====
-
-The main goal of the Job Service is to work with only active jobs. The Job Service tracks only the jobs that are scheduled and that need to be executed. When a job reaches a final state, the job is removed from the Job Service.
-
-When configured in your environment, all the jobs information and status changes are sent to the {product_name} `Data
-Index Service`, where they can be indexed and made available by GraphQL queries.
-
-[NOTE]
-====
-Data index service and the support for jobs information will be available in future releases.
-====
-
-=== Job Service persistence
-
-An important configuration aspect of the Job Service is the persistence mechanism, where all job information is stored in a database that makes this information durable upon service restarts and guarantees no information is lost.
-
-[#job-service-postgresql]
-==== PostgreSQL
-
-PostgreSQL is the recommended database to use with the Job Service.
-Additionally, it provides an initialization procedure that integrates link:https://flywaydb.org[Flyway] for the database initialization. It automatically controls the database schema, in this way all tables are created by the service.
-
-In case you need to externally control the database schema, you can check and apply the DDL scripts for the Job Service in the same way as described in
-xref:persistence/postgresql-flyway-migration.adoc#manually-executing-scripts[Manually executing scripts] guide.
-
-You need to set the proper configuration parameters when starting the Job Service.
-The xref:use-cases/timeout-showcase-example.adoc[timeout showcase example] shows how to run PostgreSQL as a Kubernetes deployment, but you can run it the way it fits in your environment, the important part is to set all the configuration parameters points to your running instance of PostgreSQL.
-
-==== Ephemeral
-Alternatively, there is an in-memory database support that does not require any external database configuration. It can be used for testing and development purposes, but it is not recommended for production, since all jobs are lost upon a service restart or failure.
-
-[NOTE]
-====
-The Job Service link:{jobs_service_image_url}[image] is equipped with the PostgreSQL and Ephemeral (InMemory) options that can be switched using the `JOBS_SERVICE_PERSISTENCE`. If not set, it defaults to the `ephemeral` option. For more information about the Job Service, the container image can be found link:{jobs_service_image_usage_url}[here].
-====
-
-== Job service leader election
-
-Currently, the Job Service works in a single instance manner where there should be just one active instance of the service.
-
-To avoid issues when deploying the service in the cloud, where it is common to eventually have more than one instance deployed, the Job Service supports a leader instance election process. Only the instance that becomes the leader activates the external communication to receive and schedule jobs.
-
-In all instances who are not leaders, stay inactive in a kind of wait state and try to become the leader continuously.
-
-When a new instance of the service is started, it is not set as a leader at startup time but instead, it starts the process to become one.
-
-When an instance that is the leader for any issue stays unresponsive or is shut down, one of the other running instances becomes the leader.
-
-.Job Service leader election
-image::core/job-service-leader.png[]
-
-[NOTE]
-====
-This leader election mechanism uses the underlying persistence backend, which currently is only supported in the PostgreSQL implementation.
-====
-
-There is no need for any configuration to support this feature, the only requirement is to have the supported database with the data schema up-to-date as described in the <> section.
-
-In case the underlying persistence does not support this feature, you must guarantee that just one single instance of the Job Service is running at the same time.
-that just one single instance of the Job Service is running at the same time.
-
-[#job-service-communication]
-== Job Service communication
-
-[NOTE]
-====
-The Job Service does not execute a job but triggers a callback that might be an HTTP request or a Cloud Event that is
-managed by the configured xref:core/timeouts-support.adoc#job-addon-configuration[jobs addon] in the workflow application.
-====
-
-=== Knative Eventing
-
-To configure the communication between the Job Service and the workflow runtime through the Knative eventing system, you must provide a set of configurations.
-
-The Job Service configuration is accomplished through the deployment descriptor shown in the xref:use-cases/timeout-showcase-example.adoc#job-service-deploy[example].
-
-
-== Additional resources
-
-* xref:core/timeouts-support.adoc[Timeouts in {product_name}]
-* xref:use-cases/timeout-showcase-example.adoc[Timeout example in {product_name}]
-
-include::../../pages/_common-content/report-issue.adoc[]
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/testing-and-troubleshooting/basic-integration-tests-with-restassured.adoc b/modules/serverless-logic/pages/testing-and-troubleshooting/basic-integration-tests-with-restassured.adoc
index aa12e440..b9a72dca 100644
--- a/modules/serverless-logic/pages/testing-and-troubleshooting/basic-integration-tests-with-restassured.adoc
+++ b/modules/serverless-logic/pages/testing-and-troubleshooting/basic-integration-tests-with-restassured.adoc
@@ -1,10 +1,10 @@
-= Testing your workflow application using REST Assured
+= Testing your Quarkus Workflow Application using REST Assured
:compat-mode!:
// Metadata:
:description: Basic Integration Test with REST Assured
:keywords: kogito, workflow, quarkus, serverless, quarkus-cli, test, restassured
-This document describes how to test your workflow application using REST Assured.
+This document describes how to test your workflow application using REST Assured.
REST Assured enables you to test REST APIs using Java libraries and integrates with Apache Maven. For more information about REST Assured, see link:https://rest-assured.io/[REST Assured page].
@@ -156,10 +156,10 @@ If you see a similar output, this means your test is executed successfully.
== Additional resources
-* xref:serverless-logic:testing-and-troubleshooting/mocking-openapi-services-with-wiremock.adoc[Mocking OpenAPI services using WireMock]
-* xref:serverless-logic:testing-and-troubleshooting/mocking-http-cloudevents-with-wiremock.adoc[Mocking HTTP CloudEvents sink using WireMock]
-* xref:serverless-logic:testing-and-troubleshooting/integration-tests-with-postgresql.adoc[{product_name} integration test using PostgreSQL]
-* link:https://access.redhat.com/documentation/en-us/red_hat_build_of_quarkus/quarkus-2-7/guide/f93c45bd-4feb-4f74-a70a-022e9fb41957#proc_quarkus-junit-testing_quarkus-getting-started[Testing a Quarkus application]
+* xref:testing-and-troubleshooting/mocking-openapi-services-with-wiremock.adoc[Mocking OpenAPI services using WireMock]
+* xref:testing-and-troubleshooting/mocking-http-cloudevents-with-wiremock.adoc[Mocking HTTP CloudEvents sink using WireMock]
+* xref:persistence/integration-tests-with-postgresql.adoc[{product_name} integration test using PostgreSQL]
+* link:https://quarkus.io/guides/getting-started-testing[Testing a Quarkus application]
* link:https://rest-assured.io/#docs[REST Assured Documentation]
-include::../../pages/_common-content/report-issue.adoc[]
+include::../../pages/_common-content/report-issue.adoc[]
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/tooling/kn-plugin-workflow-overview.adoc b/modules/serverless-logic/pages/testing-and-troubleshooting/kn-plugin-workflow-overview.adoc
similarity index 57%
rename from modules/serverless-logic/pages/tooling/kn-plugin-workflow-overview.adoc
rename to modules/serverless-logic/pages/testing-and-troubleshooting/kn-plugin-workflow-overview.adoc
index 6ae03f91..809ee887 100644
--- a/modules/serverless-logic/pages/tooling/kn-plugin-workflow-overview.adoc
+++ b/modules/serverless-logic/pages/testing-and-troubleshooting/kn-plugin-workflow-overview.adoc
@@ -5,12 +5,6 @@
:description: Overview of the Workflow plugin for the Knative CLI
:keywords: kogito, workflow, serverless, knative, plugin
// links
-:java_install_url: https://www.java.com/en/download/help/download_options.html
-:maven_install_url: https://maven.apache.org/install.html
-:docker_install_url: https://docs.docker.com/engine/install/
-:podman_install_url: https://docs.podman.io/en/latest/
-:kubectl_install_url: https://kubernetes.io/docs/tasks/tools/install-kubectl
-:kn_cli_install_url: https://github.com/knative/client/blob/main/docs/README.md#installing-kn
:apple_support_url: https://support.apple.com/guide/mac-help/mh40616/mac
{product_name} provides a plug-in named `kn-plugin-workflow` for Knative CLI, which enables you to set up a local workflow project quickly using the command line.
@@ -20,7 +14,7 @@ This document describes how you can install and use the `kn-plugin-workflow` plu
[[proc-install-sw-plugin-kn-cli]]
== Installing the {product_name} plug-in for Knative CLI
-You can use the {product_name} plug-in to set up your local workflow project quickly using Knative CLI.
+You can use the {product_name} plug-in to set up your local workflow project quickly using Knative CLI.
.Prerequisites
* link:{java_install_url}[Java] {java_min_version} is installed.
@@ -36,14 +30,14 @@ You can use the {product_name} plug-in to set up your local workflow project qui
--
* Add `kn workflow` command in your system path and ensure that it is executable.
* Install `kn workflow` command as a plug-in of the Knative CLI using the following steps:
-.. Install the Knative CLI. For installation instructions, see link:{kn_cli_install_url}[Installing kn] documentation in GitHub.
+.. Install the Knative CLI. For installation instructions, see link:{kn_cli_install_url}[Installing kn] documentation.
.. Copy the `kn-workflow` binary to a directory in your `PATH`, such as `/usr/local/bin` and ensure that the file name is `kn-workflow`.
.. On Mac, add execution permission as follows:
+
`chmod +x /usr/local/bin/kn-workflow`
+
[WARNING]
-====
+====
Some systems might block the application to run due to Apple enforcing policies. To fix this problem, check the *Security & Privacy* section in the *System Preferences* -> *General* tab to approve the application to run. For more information, see link:{apple_support_url}[Apple support article: Open a Mac app from an unidentified developer].
====
.. Run the following command to verify that `kn-workflow` plug-in is installed successfully:
@@ -66,39 +60,42 @@ kn-workflow
.Example output
[source,text]
----
-Manage Kogito Serverless Workflow projects
+Manage SonataFlow projects
Usage:
kn workflow [command]
Available Commands:
- build Build a Kogito Serverless Workflow project and generate a container image
completion Generate the autocompletion script for the specified shell
- create Create a Kogito Serverless Workflow project
- deploy Deploy a Kogito Serverless Workflow project
+ create Creates a new SonataFlow project
+ deploy Deploy a SonataFlow project on Kubernetes via SonataFlow Operator
help Help about any command
+ quarkus Manage SonataFlow projects built in Quarkus
+ run Run a SonataFlow project in development mode
+ undeploy Undeploy a SonataFlow project on Kubernetes via SonataFlow Operator
+ version Show the version
Flags:
- -h, --help help for kn-workflow
- -v, --verbose Print verbose logs
+ -h, --help help for kn
+ -v, --version version for kn
-Use "kn workflow [command] --help" for more information about a command.
+Use "kn [command] --help" for more information about a command.
----
--
[[proc-create-sw-project-kn-cli]]
== Creating a workflow project using Knative CLI
-After installing the {product_name} plug-in, you can use the `create` command with `kn workflow` to scaffold a new workflow project in your current directory.
+After installing the {product_name} plug-in, you can use the `create` command with `kn workflow` to scaffold a new {product_name} project in your current directory.
-The `create` command sets up Quarkus project containing minimal extensions to build a workflow project. Also, the generated workflow project contains a "hello world" `workflow.sw.json` file in your `.//src/main/resources` directory.
+The `create` command sets up a {product_name} project containing a minimal "hello world" `workflow.sw.json` file in your `./` directory.
.Prerequisites
* {product_name} plug-in for Knative CLI is installed.
+
For more information about installing the plug-in, see <<proc-install-sw-plugin-kn-cli,Installing the {product_name} plug-in for Knative CLI>>.
ifeval::["{kogito_version_redhat}" != ""]
-* You followed the steps in xref:serverless-logic:getting-started/create-your-first-workflow-service.adoc#proc-configuring-maven-rhbq[Configuring your Maven project to Red Hat build of Quarkus and OpenShift Serverless Logic]
+* You followed the steps in xref:getting-started/create-your-first-workflow-service.adoc#proc-configuring-maven-rhbq[Configuring your Maven project to Red Hat build of Quarkus and OpenShift Serverless Logic]
endif::[]
.Procedure
@@ -120,27 +117,123 @@ kn workflow create --name my-project
----
--
-. Add more extensions to the Quarkus project during its creation by using the `[-e|--extension]` flag as follows:
+[[proc-build-sw-project-kn-cli]]
+== Running a workflow project using Knative CLI
+
+After creating your workflow project, you can use the `run` command with `kn workflow` to build and run your workflow project in your current directory.
+
+This command starts a {product_name} Docker image and maps your local folder to this image.
+
+.Prerequisites
+* {product_name} plug-in for Knative CLI is installed.
++
+For more information about installing the plug-in, see <<proc-install-sw-plugin-kn-cli,Installing the {product_name} plug-in for Knative CLI>>.
+
+* A workflow project is created.
++
+For more information about creating a workflow project, see <<proc-create-sw-project-kn-cli,Creating a workflow project using Knative CLI>>.
+* Minikube cluster is running locally.
+
+
+.Procedure
+. In Knative CLI, enter the following command to build and run your workflow project:
+
--
-.Create a project with `quarkus-jsonp` and `quarkus-smallrye-openapi` extensions
+.Run the project and start a local development image.
[source,shell]
----
-kn workflow create --extension quarkus-jsonp,quarkus-smallrye-openapi
+kn workflow run
----
+--
+. Once the project is ready, the Development UI opens automatically in a browser (at `localhost:8080/q/dev`).
-You can add multiple extensions using the comma-separated names of the extensions in the previous command.
+[[proc-deploy-sw-project-kn-cli]]
+== Deploying a workflow project using Knative CLI
+
+You can use the `deploy` command combined with `kn workflow` to deploy your workflow project in your current directory.
+
+.Prerequisites
+* {product_name} plug-in for Knative CLI is installed.
++
+For more information about installing the plug-in, see <<proc-install-sw-plugin-kn-cli,Installing the {product_name} plug-in for Knative CLI>>.
+
+* A workflow project is created.
++
+For more information about creating a workflow project, see <<proc-create-sw-project-kn-cli,Creating a workflow project using Knative CLI>>.
+
+* A minikube cluster is running locally.
+
+.Procedure
+. In Knative CLI, enter the following command to deploy your workflow project:
++
+--
+.Deploy a workflow project
+[source,shell]
+----
+kn workflow deploy
+----
+
+Also, ensure that you have access to your cluster and that your cluster can access the generated container image.
+For more options with the `deploy` command, use `[-h|--help]`.
+
+[NOTE]
+====
+You can use the `kubectl` command line if you want to use a complex deployment setup for your workflow project.
+====
+--
+
+[[proc-create-quarkus-sw-project-kn-cli]]
+== Creating a Quarkus Workflow project using Knative CLI
+
+After installing the {product_name} plug-in, you can use the `quarkus create` command with `kn workflow` to scaffold a new Quarkus Workflow project in your current directory.
+
+The `quarkus create` command sets up a {product_name} Quarkus project containing minimal extensions to build a workflow project. Also, the generated workflow project contains a "hello world" `workflow.sw.json` file in your `.//src/main/resources` directory.
+
+.Prerequisites
+* {product_name} plug-in for Knative CLI is installed.
+For more information about installing the plug-in, see <<proc-install-sw-plugin-kn-cli,Installing the {product_name} plug-in for Knative CLI>>.
+ifeval::["{kogito_version_redhat}" != ""]
+* You followed the steps in xref:getting-started/create-your-first-workflow-service.adoc#proc-configuring-maven-rhbq[Configuring your Maven project to Red Hat build of Quarkus and OpenShift Serverless Logic]
+endif::[]
+
+.Procedure
+. In Knative CLI, enter the following command to create a new project:
++
+--
+.Create a project named `new-project`
+[source,shell]
+----
+kn workflow quarkus create
+----
+
+By default, the generated project is named `new-project`. You can override the project name by using the `[-n|--name]` flag as follows:
+
+.Create a project named `my-project`
+[source,shell]
+----
+kn workflow quarkus create --name my-project
+----
+--
+
+. Add more extensions to the Quarkus project during its creation by using the `[-e|--extension]` flag as follows:
++
+--
+.Create a project with `quarkus-jsonp` and `quarkus-smallrye-openapi` extensions
+[source,shell]
+----
+kn workflow quarkus create --extension quarkus-jsonp,quarkus-smallrye-openapi
+----
+You can add multiple extensions using the comma-separated names of the extensions in the previous command.
[NOTE]
====
When you run the `create` command for the first time, it might take a while due to the necessity of downloading the required dependencies for the Quarkus project.
====
--
-[[proc-build-sw-project-kn-cli]]
-== Building a workflow project using Knative CLI
+[[proc-build-quarkus-sw-project-kn-cli]]
+== Building a Quarkus workflow project using Knative CLI
-After creating your workflow project, you can use the `build` command with `kn workflow` to build your workflow project in your current directory and to generate a container image.
+After creating your workflow project, you can use the `quarkus build` command with `kn workflow` to build your workflow project in your current directory and to generate a container image.
The process of building your workflow project produces a `knative.yml` file in the `./target/kubernetes` folder. If your workflow contains events, then the building process also generates a `kogito.yml` file.
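+
+A minimal sketch of the build invocation is shown below. The `--image` flag follows the behavior of previous plug-in releases and might differ in your version, so check `kn workflow quarkus build --help` for the exact options:
+
+[source,shell]
+----
+kn workflow quarkus build --image dev.local/my-project
+----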
@@ -160,7 +253,7 @@ For more information about creating a workflow project, see <
 <groupId>org.kie.kogito</groupId>
 <artifactId>kogito-quarkus-serverless-workflow-devui</artifactId>
- <version>${VERSION}</version>
----
--
@@ -44,7 +43,7 @@ Executing the previous command adds the following dependency to `pom.xml` file o
. Enter the following command to add the `kogito-addons-quarkus-source-files` extension that provides the source code to generate the Serverless Workflow diagram in the consoles:
+
--
-.Install Kogito source files add-on extension
+.Install {product_name} source files add-on extension
[source,shell]
----
quarkus ext add org.kie.kogito:kogito-addons-quarkus-source-files
@@ -76,15 +75,15 @@ quarkus dev
+
--
-.Kogito Serverless Workflow Tools extension in Quarkus Dev UI
-image::tooling/quarkus-dev-ui-extension/kogito-swf-tools-quarkus-dev-ui.png[]
+.{product_name} extension in Quarkus Dev UI
+image::testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-quarkus-dev-ui.png[]
-The Kogito Serverless Workflow Tools extension contains the following pages:
-
-* xref:serverless-logic:tooling/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-instances-page.adoc[Workflow Instances]
-* xref:serverless-logic:tooling/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-definition-page.adoc[Workflow Definitions]
-* xref:tooling/quarkus-dev-ui-extension/quarkus-dev-ui-custom-dashboard-page.adoc[Dashboards]
+The {product_name} Dev UI extension contains the following pages:
+* xref:testing-and-troubleshooting/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-instances-page.adoc[Workflow Instances]
+* xref:testing-and-troubleshooting/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-definition-page.adoc[Workflow Definitions]
+* xref:testing-and-troubleshooting/quarkus-dev-ui-extension/quarkus-dev-ui-monitoring-page.adoc[Monitoring]
+* xref:testing-and-troubleshooting/quarkus-dev-ui-extension/quarkus-dev-ui-custom-dashboard-page.adoc[Dashboards]
--
diff --git a/modules/serverless-logic/pages/testing-and-troubleshooting/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-definition-page.adoc b/modules/serverless-logic/pages/testing-and-troubleshooting/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-definition-page.adoc
new file mode 100644
index 00000000..9eb36203
--- /dev/null
+++ b/modules/serverless-logic/pages/testing-and-troubleshooting/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-definition-page.adoc
@@ -0,0 +1,109 @@
+= Workflow Definitions in {product_name} Dev UI extension
+:compat-mode!:
+// Metadata:
+:description: Workflow Definitions in {product_name} Dev UI extension
+:keywords: kogito, workflow, serverless, Quarkus, Dev UI, definitions
+
+In the {product_name} Dev UI extension, the *Workflow Definitions* page displays the list of available workflows in
+the {product_name} project and helps you to start new workflow instances either via REST or by triggering cloud events.
+
+.Workflow Definitions page
+image::testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-definitions.png[]
+
+In the *Workflow Definitions* table you will find the list of workflows that shows the following details:
+
+* *Workflow Name*: Displays the name of a workflow definition.
+* *Endpoint*: Displays the URL to a workflow definition.
+* *Actions*: Provides the *Start new Workflow* button to start workflow instances via REST.
+
+The toolbar on the table provides the ability to apply filters to the table or to trigger cloud events that can start workflow instances.
+
+== Applying filters to the Workflow Definitions table
+. To search for a specific workflow definition, enter the workflow's name in the *Filter by workflow name* field.
+. Click the *Apply Filter* button. The matching workflow definitions appear as a chip below the search field.
+. Click the *Reset to default* button to clear the applied filters.
+. To fetch newly added workflow definitions, click on the refresh icon next to the *Apply Filter* button.
+
+.Workflow Definitions filters
+image::testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-definitions-filter.png[]
+
+[[con-start-workflow-instances]]
+== Starting new Workflow instances
+Workflows can be started either by using the specific workflow REST endpoints or by triggering Cloud Events.
+The {product_name} Dev UI extension allows you to use both mechanisms.
+
+[[con-start-workflow-instances-rest]]
+=== Starting new Workflow instances using REST
+To start a new workflow instance using the workflow REST endpoint, click the *Start new Workflow*
+button of any of the workflows in the *Workflow Definitions* table. You are then redirected to the *Start New Workflow*
+page, where you can set up the data and business key that are used to start the new workflow instance.
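+
+Behind the scenes, the page issues a plain REST call to the workflow endpoint. A roughly equivalent request is sketched below, assuming a hypothetical `greeting` workflow running locally and that the business key is passed as the `businessKey` query parameter:
+
+[source,shell]
+----
+curl -X POST 'http://localhost:8080/greeting?businessKey=my-key' \
+  -H 'Content-Type: application/json' \
+  -d '{"name": "John"}'
+----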
+
+=== Filling up the Workflow data
+Depending on your workflow configuration, the page provides different mechanisms to fill in the workflow data.
+If your workflow is configured with a valid _Data Input Schema_, the page displays a custom form to help
+you fill in the data.
+
+.Start Workflow Page with a Form defined in the _Data Input Schema_
+image::testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-start-workflow-form.png[]
+
+If your workflow does not provide a _Data Input Schema_, the page displays a Code Editor that enables you to manually fill in the workflow data in JSON format.
+
+.Start Workflow Page with a Code Editor.
+image::testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-start-workflow-no-form.png[]
+
+[NOTE]
+====
+For more information about how to set up the input schema definition for your workflows, see the
+xref:core/defining-an-input-schema-for-workflows.adoc[Input Schema for {product_name}] section.
+====
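+
+For reference, a workflow declares its input schema through the `dataInputSchema` property of its Serverless Workflow definition. A minimal sketch, assuming the schema file is stored at `schemas/input.json` in the application resources:
+
+[source,json]
+----
+"dataInputSchema": "schemas/input.json"
+----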
+
+=== Setting up the Business Key
+You can use the *Business key* text box to define a custom business key value for the workflow instance.
+If the *Business Key* field is blank, then an auto-generated business key is defined for the workflow instance.
+
+=== Starting the new Workflow instance
+Clicking the *Start* button POSTs the workflow data and the business key to the workflow REST endpoint. If the
+workflow instance starts successfully, a success alert appears on the top of the screen, which contains the
+*Go to workflow list* link to navigate to the xref:testing-and-troubleshooting/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-instances-page.adoc[Workflow Instances page].
+
+.Example of workflow successful starting notification
+image::testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-start-workflow-success-alert.png[]
+
+If there is an issue while starting a workflow, then a failure alert appears on the top of the screen, containing the *View Details* and *Go to workflow list* options. The *View Details* option enables you to view the error message.
+
+.Example of workflow starting failure notification
+image::testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-start-workflow-fail-alert.png[]
+
+[[con-trigger-cloud-event-page]]
+== Starting Workflows using Cloud Events
+On the *Workflow Definitions* page, the *Trigger Cloud Event* button enables you to start new workflow instances by using
+HTTP Cloud Events. Clicking the button leads you to the *Trigger Cloud Event* page.
+
+Once there, you will have to fill out the form with the Cloud Event information:
+
+- *Endpoint*: Defines the Path and the HTTP method where the cloud event should be triggered.
+- *Event Type*: Sets the Cloud Event type header.
+- *Event Source*: Sets the Cloud Event Source header. Defaults to `/local/quarkus-devUi`.
+- *Business Key*: Sets the business key of the new workflow instance. It is added in the `kogitobusinesskey` event header.
+- *Event Custom Headers*: Allows you to define custom headers to be added to the event.
+- *Event Data*: The data to be sent in the event, in JSON format.
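+
+For reference, a binary-mode HTTP cloud event equivalent to what this page sends could look like the following. The endpoint path, event type, and data are illustrative and depend on your workflow definition:
+
+[source,shell]
+----
+curl -X POST http://localhost:8080/ \
+  -H 'Content-Type: application/json' \
+  -H 'ce-specversion: 1.0' \
+  -H 'ce-id: 1' \
+  -H 'ce-type: start-event' \
+  -H 'ce-source: /local/quarkus-devUi' \
+  -H 'ce-kogitobusinesskey: my-key' \
+  -d '{"name": "John"}'
+----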
+
+.Starting a workflow using a cloud event
+image::testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-trigger-cloud-events.png[]
+
+Click the *Trigger* button to trigger the cloud event. If the workflow instance starts successfully, a success alert appears on the top of the screen, which contains the
+*Go to workflow list* link to navigate to the xref:testing-and-troubleshooting/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-instances-page.adoc[Workflow Instances page].
+
+.Example of workflow successful starting notification
+image::testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-trigger-cloud-event-start-success-alert.png[]
+
+If there is an issue while starting a workflow, then a failure alert appears on the top of the screen, containing *View Details* and *Go to workflow list* options. The *View Details* enables you to view the error message.
+
+.Example of trigger workflow failure alert
+image::testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-trigger-cloud-event-start-error-alert.png[]
+
+include::../../../pages/_common-content/report-issue.adoc[]
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/testing-and-troubleshooting/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-instances-page.adoc b/modules/serverless-logic/pages/testing-and-troubleshooting/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-instances-page.adoc
new file mode 100644
index 00000000..5716dbe4
--- /dev/null
+++ b/modules/serverless-logic/pages/testing-and-troubleshooting/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-instances-page.adoc
@@ -0,0 +1,155 @@
+= Workflow Instances in {product_name} Dev UI extension
+:compat-mode!:
+// Metadata:
+:description: Workflow Instances page in {product_name} Dev UI extension
+:keywords: kogito, workflow, serverless, Quarkus, Dev UI, instances
+
+In the {product_name} Dev UI extension, the Workflow Instances page is used to monitor and manage the available instances. The Workflow Instances page displays a list of available instances and enables you to add filters to the list.
+
+.Workflow Instances page with no instances
+image::testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instances-empty.png[]
+
+To create a workflow instance, you can use the Workflow Definitions page. For more information, see xref:testing-and-troubleshooting/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-definition-page.adoc[Workflow Definitions in {product_name} Dev UI extension].
+
+When you create a workflow instance, the instance appears in the workflow instances table on the page, containing the following details:
+
+* *Id*: Displays the name and business key of an instance. Also, when you hover on the name of the workflow instance, a unique ID is displayed for the instance. You can click on the ID to navigate to the *Workflow Details* page.
+* *Status*: Displays the current state of the instance. This column can contain *Active*, *Completed*, *Aborted*, *Error*, and *Suspended* as values.
+* *Created*: Displays the time reference indicating when the instance is created.
+* *Last Update*: Displays the time reference indicating when the instance is last updated.
+
+.Workflow Instances page with an instance
+image::testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instance-completed.png[]
+
+You can also sort the columns of the workflow instances table.
+
+On the Workflow Instances page, you can filter the list of workflow instances based on the states and business keys. By default, *Active*, *Completed*, *Aborted*, *Error*, and *Suspended* are applied on the list. You can use the *Status* drop-down to see the available states. To filter the workflow instances based on the states, select or unselect the states in the *Status* drop-down and click *Apply filter* button.
+
+To filter the workflow instances based on business keys, enter a business key in the *Filter by business key* field and click *Apply filter* button.
+
+.Example filters applied on the workflow instances
+image::testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instances-filters.png[]
+
+To fetch the newly added workflow instances, click on the refresh icon next to the *Apply Filter* button. Also, to clear the applied filters and reset to the default state, click *Reset to default* button.
+
+== Opening the Workflow instance details
+
+In the workflow instances table, the *Id* column contains a clickable link, which enables you to navigate to the *Workflow Details* page. The *Workflow Details* page contains various panels, providing detailed information about a workflow instance.
+
+.Example Workflow Details page
+image::testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-details-page.png[]
+
+The Workflow Details page consists of the following panels:
+
+* Serverless Workflow Diagram panel
+* Timeline panel
+* Details panel
+* Variables panel
+
+Serverless Workflow Diagram panel::
++
+--
+The Serverless Workflow Diagram panel enables you to explore the workflow diagram and execution path of the workflow instance. The workflow diagram and execution path are displayed by consuming the source code, which is exposed through the `kogito-addons-quarkus-source-files` add-on.
+
+To add the source files add-on configuration, add the following dependency to the `pom.xml` file of your project:
+
+.source-files add-on dependency in `pom.xml` file
+[source,xml]
+----
+<dependency>
+    <groupId>org.kie.kogito</groupId>
+    <artifactId>kogito-addons-quarkus-source-files</artifactId>
+</dependency>
+----
+
+There is a slider available in the diagram panel, which when dragged to the right displays the source code in read-only mode.
+
+.Stunner based Diagram panel
+image::testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-stunner-diagram-panel.png[]
+--
+
+Timeline panel::
++
+--
+The Timeline panel displays the list of nodes that are related to a workflow instance. Each node in the Timeline panel consists of an icon, indicating the state of the node, such as *Active*, *Completed*, or *Error*.
+
+.Timeline panel
+image::testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-timeline-panel.png[]
+--
+
+Details panel::
++
+--
+The Details panel displays the basic information related to a workflow instance, including:
+
+* *Name*: Name of the workflow instance.
+* *Business key*: Business key related to the workflow instance.
+* *State*: Current state of the workflow instance.
+* *Id*: Unique ID of the workflow instance.
+* *Start*: Time reference indicating when the workflow instance is started.
+* *Last Updated*: Time reference indicating when the workflow instance is last updated.
+* *End*: Time reference indicating when the workflow instance is completed.
+
+.Details panel
+image::testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-details-panel.png[]
+--
+
+Variables panel::
++
+--
+The Variables panel displays the data of a workflow in the form of JSON.
+
+.Variables panel
+image::testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-variables-panel.png[]
+--
+
+== Sending Cloud Events to active Workflow Instances
+
+The *Workflow Instances* page also allows you to send HTTP Cloud Events to the active workflow instances that are waiting for
+an event to advance.
+
+To do so, click the *Trigger Cloud Event* button, which leads you to the *Trigger Cloud Event* page.
+
+Once there, you will have to fill out the form with the Cloud Event information:
+
+- *Endpoint*: Defines the Path and the HTTP method where the cloud event should be triggered.
+- *Event Type*: Sets the Cloud Event type header.
+- *Event Source*: Sets the Cloud Event Source header. Defaults to `/local/quarkus-devUi`.
+- *Instance Id*: Sets the ID of the workflow instance that must receive the event. It is added in the `kogitoprocrefid` event header.
+- *Event Data*: The data to be sent in the event, in JSON format.
+
+.Sending a Cloud Event to an active workflow instance.
+image::testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instances-cloud-event.png[]
+
+Additionally, you can use the *Send Cloud Event* action available in the instance actions kebab menu. By using it, you
+are led to the *Trigger Cloud Event* page, but in this case the *Instance Id* field is already filled with
+the ID of the selected workflow instance.
+
+.*Send Cloud Event* button in the actions kebab.
+image::testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instances-kebab.png[]
+
+[NOTE]
+====
+To enable the actions kebab, ensure that your project is configured with the `kogito-addons-quarkus-process-management`
+dependency in its `pom.xml` file:
+[source,xml]
+----
+<dependency>
+    <groupId>org.kie.kogito</groupId>
+    <artifactId>kogito-addons-quarkus-process-management</artifactId>
+</dependency>
+----
+====
+
+Click the *Trigger* button to trigger the cloud event. If the event is successfully triggered, a success alert appears
+at the top of the screen, which contains the *Go to workflow list* link to navigate to the xref:testing-and-troubleshooting/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-instances-page.adoc[Workflow Instances page].
+
+.Example of cloud event successfully sent notification.
+image::testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instances-cloud-event-success.png[]
+
+If there is an issue while sending the event, then a failure alert appears at the top of the screen, containing *View Details* and *Go to workflow list* options. The *View Details* enables you to view the error message.
+
+.Example cloud event unsuccessfully sent notification.
+image::testing-and-troubleshooting/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instances-cloud-event-failure.png[]
+
+include::../../../pages/_common-content/report-issue.adoc[]
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/tooling/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-definition-page.adoc b/modules/serverless-logic/pages/tooling/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-definition-page.adoc
deleted file mode 100644
index 22c3593a..00000000
--- a/modules/serverless-logic/pages/tooling/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-definition-page.adoc
+++ /dev/null
@@ -1,54 +0,0 @@
-= Workflow Definitions in Kogito Serverless Workflow Tools extension
-:compat-mode!:
-// Metadata:
-:description: Workflow Definitions in Kogito Serverless Workflow Tools extension
-:keywords: kogito, workflow, serverless, Quarkus, Dev UI, definitions
-
-In Kogito Serverless Workflow Tools extension, the Workflow Definitions page displays the list of available workflow definitions, which are used to trigger the cloud events and start the workflow instances. Also, the Workflow Definitions page enables you to add filters to the workflow definitions list.
-
-.Workflow Definitions page
-image::tooling/quarkus-dev-ui-extension/kogito-swf-tools-workflow-definitions.png[]
-
-The table on the Workflow Definitions page displays the following details:
-
-* *Workflow Name*: Displays the name of a workflow definition.
-* *Endpoint*: Displays the URL to a workflow definition.
-* *Actions*: Provides a button to start a new workflow.
-
-The filters on the Workflow Definitions page enables you to add filters to the table. To search for a specific workflow definition, enter the name of the workflow in the *Filter by workflow name* field and click *Apply Filter* button. The matching workflow definitions appear as a chip below the search field.
-
-To clear the applied filters, you can click the *Reset to default* button. Also, to fetch newly added workflow definitions, click on the refresh icon next to the *Apply Filter* button.
-
-.Workflow Definitions filters
-image::tooling/quarkus-dev-ui-extension/kogito-swf-tools-workflow-definitions-filter.png[]
-
-[[con-trigger-cloud-event-page]]
-== Start New Workflow page
-
-The *Start New Workflow* page is used to start a workflow instance through a cloud event or a custom form, depending on the workflow configuration. To navigate to the *Start New Workflow* page, click the play button in the *Actions* column of the workflow definitions table.
-
-*If there is no JSON schema for the workflow, then the workflow is started by triggering a cloud event.
-To trigger a cloud event, you can use the *Cloud Event Type* and *Cloud Event Data* input fields to set the type of a cloud event and event payload in JSON format respectively and click the *Start* button to start a workflow. The *Reset* button on the page resets the values entered in the given fields.
-
-.Starting a workflow using a cloud event
-image::tooling/quarkus-dev-ui-extension/kogito-swf-tools-trigger-cloud-events.png[]
-
-If there is a JSON schema for the workflow configured in the `dataInputSchema` property, then a form is displayed to start a new workflow instance. You can fill in the required form details and click the *Start* button to trigger the workflow.
-The *Reset* button is used to clear the form data.
-
-.Starting a workflow using the form
-image::tooling/quarkus-dev-ui-extension/kogito-swf-tools-trigger-cloud-events-custom-form.png[]
-
-You can also use the *Business key* text box to define a custom business key value to the workflow instance. If the *Business Key* field is blank, then an auto-generated business key is defined for the workflow instance.
-
-When a workflow instance starts successfully, a success alert appears on the top of the screen, which contains a *Go to workflow list* option. The *Go to workflow list* option enables you to navigate to the xref:serverless-logic:tooling/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-instances-page.adoc[Workflow Instances page].
-
-.Example of trigger workflow success alert
-image::tooling/quarkus-dev-ui-extension/kogito-swf-tools-start-workflow-success-alert.png[]
-
-If there is an issue while starting a workflow, then a failure alert appears on the top of the screen, containing *View Details* and *Go to workflow list* options. The *View Details* enables you to view the error message.
-
-.Example of trigger workflow failure alert
-image::tooling/quarkus-dev-ui-extension/kogito-swf-tools-start-workflow-fail-alert.png[]
-
-include::../../../pages/_common-content/report-issue.adoc[]
diff --git a/modules/serverless-logic/pages/tooling/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-instances-page.adoc b/modules/serverless-logic/pages/tooling/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-instances-page.adoc
deleted file mode 100644
index bf74e1d1..00000000
--- a/modules/serverless-logic/pages/tooling/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-instances-page.adoc
+++ /dev/null
@@ -1,119 +0,0 @@
-= Workflow Instances in Kogito Serverless Workflow Tools extension
-:compat-mode!:
-// Metadata:
-:description: Workflow Instances page in Kogito Serverless Workflow Tools extension
-:keywords: kogito, workflow, serverless, Quarkus, Dev UI, instances
-
-In Kogito Serverless Workflow Tools, the Workflow Instances page is used to monitor and manage the available instances. The Workflow Instances page displays a list of available instances and add filters to the list.
-
-.Workflow Instances page with no instances
-image::tooling/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instances-empty.png[]
-
-To create a workflow instance, you can use the Workflow Definitions page. For more information, see xref:serverless-logic:tooling/quarkus-dev-ui-extension/quarkus-dev-ui-workflow-definition-page.adoc[Workflow Definitions in Kogito Serverless Workflow Tools extension].
-
-When you create a workflow instance, the instance appears in the workflow instances table on the page, containing the following details:
-
-* *Id*: Displays the name and business key of an instance. Also, when you hover on the name of the workflow instance, a unique ID is displayed for the instance. You can click on the ID to navigate to the *Workflow Details* page.
-* *Status*: Displays the current state of the instance. This column can contain *Active*, *Completed*, *Aborted*, *Error*, and *Suspended* as values.
-* *Created*: Displays the time reference indicating when the instance is created.
-* *Last Update*: Displays the time reference indicating when the instance is last updated.
-
-.Workflow Instances page with an instance
-image::tooling/quarkus-dev-ui-extension/kogito-swf-tools-workflow-instance-completed.png[]
-
-You can also sort the columns of the workflow instances table.
-
-On the Workflow Instances page, you can filter the list of workflow instances based on the states and business keys. By default, *Active*, *Completed*, *Aborted*, *Error*, and *Suspended* are applied on the list. You can use the *Status* drop-down to see the available states. To filter the workflow instances based on the states, select or unselect the states in the *Status* drop-down and click *Apply filter* button.
-
-To filter the workflow instances based on business keys, enter a business key in the *Filter by business key* field and click *Apply filter* button.
-
-.Example filters applied on the workflow instances
-image::tooling/quarkus-dev-ui-extension/kogito-swf-tools-filters-in-workflow-instances.png[]
-
-To fetch the newly added workflow instances, click on the refresh icon next to the *Apply Filter* button. Also, to clear the applied filters and reset to the default state, click *Reset to default* button.
-
-In the workflow instances table, the *Id* column contains a clickable link, which enables you to navigate to the *Workflow Details* page. The *Workflow Details* page contains various panels, providing detailed information about a workflow instance.
-
-.Example Workflow Details page
-image::tooling/quarkus-dev-ui-extension/kogito-swf-tools-workflow-details-page.png[]
-
-The Workflow Details page consists of the following panels:
-
-* Serverless Workflow Diagram panel
-* Timeline panel
-* Details panel
-* Variables panel
-
-Serverless Workflow Diagram panel::
-+
---
-The Serverless Workflow Diagram panel enables you to explore the workflow diagram and execution path of the workflow instance. The workflow diagram and execution path are displayed by consuming the source which is exposed through the `kogito-addons-quarkus-source-files`.
-
-To add the source files add-on configuration, add the following dependency to `pom.xml` file of your project:
-
-.source-files add-on dependency in `pom.xml` file
-[source,xml]
-----
-
- org.kie.kogito
- kogito-addons-quarkus-source-files
-
-----
-
-There are two ways to display the diagram.
-
-1. Stunner Diagram
-2. Mermaid Diagram
-
-By default, the Stunner diagram is displayed. To toggle between the two diagrams you can use the `kogito.swf.stunner.enabled`(defaulted to true) environment variable in your application properties.
-
-[NOTE]
-====
-For yaml-based workflow files, the mermaid diagram is displayed instead of the stunner diagram.
-====
-
-There is a slider available in the diagram panel, which when dragged to the right displays the source code in read-only mode.
-
-.Stunner based Diagram panel
-image::tooling/quarkus-dev-ui-extension/kogito-swf-tools-workflow-stunner-diagram-panel.png[]
-
-.Mermaid based Diagram panel
-image::tooling/quarkus-dev-ui-extension/kogito-swf-tools-workflow-mermaid-diagram-panel.png[]
---
-
-Timeline panel::
-+
---
-The Timeline panel displays the list of nodes that are related to a workflow instance. Each node in the Timeline panel consists of an icon, indicating the state of the node, such as *Active*, *Completed*, or *Error*.
-
-.Timeline panel
-image::tooling/quarkus-dev-ui-extension/kogito-swf-tools-workflow-timeline-panel.png[]
---
-
-Details panel::
-+
---
-The Details panel displays the basic information related to a workflow instance, including:
-
-* *Name*: Name of the workflow instance.
-* *Business key*: Business key related to the workflow instance.
-* *State*: Current state of the workflow instance.
-* *Id*: Unique ID of the workflow instance.
-* *Start*: Time reference indicating when the workflow instance is started.
-* *Last Updated*: Time reference indicating when the workflow instance is last updated.
-* *End*: Time reference indicating when the workflow instance is completed.
-
-.Details panel
-image::tooling/quarkus-dev-ui-extension/kogito-swf-tools-workflow-details-panel.png[]
---
-
-Variables panel::
-+
---
-The Variables panel displays the data of a workflow in the form of JSON.
-
-.Variables panel
-image::tooling/quarkus-dev-ui-extension/kogito-swf-tools-workflow-variables-panel.png[]
---
-
-include::../../../pages/_common-content/report-issue.adoc[]
diff --git a/modules/serverless-logic/pages/tooling/serverless-logic-web-tools/serverless-logic-web-tools-deploy-projects.adoc b/modules/serverless-logic/pages/tooling/serverless-logic-web-tools/serverless-logic-web-tools-deploy-projects.adoc
index 56c8df2b..22e62b47 100644
--- a/modules/serverless-logic/pages/tooling/serverless-logic-web-tools/serverless-logic-web-tools-deploy-projects.adoc
+++ b/modules/serverless-logic/pages/tooling/serverless-logic-web-tools/serverless-logic-web-tools-deploy-projects.adoc
@@ -1,55 +1,73 @@
-= Deploying your projects
+= Deploying your {product_name} projects using {serverless_logic_web_tools_name}
:compat-mode!:
// Metadata:
:description: {serverless_logic_web_tools_name} deploying your projects
:keywords: kogito, workflow, serverless, editor, web, tools, settings, openshift, deploy, project
-{product_name} projects can be deployed to an OpenShift instance via the OpenShift integration, allowing authors to test their implementations in a live environment. Note that these deployments are not meant for production, only development purposes.
+You can deploy your {product_name} projects to an OpenShift instance using the OpenShift integration. The OpenShift integration allows you to test your implementations in a live environment.
+
+[NOTE]
+====
+The deployments described in this document are for development purposes, but not for production.
+====
[[proc-deploy-first-serverless-project-serverless-logic-web-tools]]
-== Deploying your first simple serverless project
+== Deploying your first {product_name} project
-Deploy your first {product_name} project to an OpenShift instance and see it running live.
+You can deploy your first {product_name} project to an OpenShift instance and run the project in a live environment.
.Prerequisites
-* OpenShift integration has to be correctly configured (see how to xref:serverless-logic:tooling/serverless-logic-web-tools/serverless-logic-web-tools-openshift-integration.adoc[here]).
+* OpenShift integration is configured correctly.
++
+For more information, see xref:tooling/serverless-logic-web-tools/serverless-logic-web-tools-openshift-integration.adoc[Integrating your {product_name} project with OpenShift using {serverless_logic_web_tools_name}].
.Procedure
-. Start by creating a project from one of the samples. For this tutorial, the **Greetings** sample is recommended, as it has no external dependencies and is easy to test and comprehend. Here's a brief explanation of what it does:
+. Create a project using an example application in link:{kogito_sw_examples_url}[GitHub].
+
-____
-This example shows a single Operation State with one action that calls the "greeting" function. The workflow data input is assumed to be the name of the person to greet. The result of the action is assumed to be the greeting for the provided person's name, which is added to the state's data and becomes the workflow data output.
-____
-. On the editor page, click on the **Try on OpenShift** button, then, on the context menu, click on **Deploy "greetings" ({product_name})**;
-. A modal should appear, showing deployment options:
- * **Deploy as a project**: This will be available if your current workspace has a full project structure (with a single `pom.xml` file);
- ** If the option is **disabled**: A pre-built image container that contains a Quarkus project will be used and all serverless workflow files will be placed inside `src/main/resources` folder of the project. In this case, it is important to make sure the serverless workflow files do not have any other dependencies because they are not included in the deployment.
- ** If the option is **enabled**: All files in the workspace will be deployed as-is, meaning no project template will be used. So it is up to you to make sure the project is _deployable_.
- * **Advanced options**:
- ** **Upload OpenAPI spec to Service Registry**: After a {product_name} is deployed, an OpenAPI spec is available in the deployed service. Checking this option will make the {serverless_logic_web_tools_name} poll for this spec and upload it to your configured Service Registry (_To use this option, you need to configure your Service Account and Service Registry on the Settings page. You can check how to configure it xref:serverless-logic:tooling/serverless-logic-web-tools/serverless-logic-web-tools-redhat-application-services-integration.adoc[here]_);
- ** **Attach KafkaSource to the deployment**: If your {product_name} has an Apache Kafka event dependency then this option should be checked (_To use this option, you need to configure your Service Account and Streams for Apache Kafka on the Settings page. You can check how to configure it xref:serverless-logic:tooling/serverless-logic-web-tools/serverless-logic-web-tools-redhat-application-services-integration.adoc[here]_).
-. Leave everything unchecked and click on **Confirm**.
-. A toast should show the following message: **Your deployment has been successfully started and will be available shortly. Please do not close this browser tab until the operation is completed.** and clicking on the **OpenShift deployments** icon should show your deployment status. The process should take a few minutes since a build needs to be completed before the services are up.
+--
+You can use the link:https://github.com/kiegroup/kogito-examples/tree/main/serverless-workflow-examples/serverless-workflow-greeting-quarkus[`serverless-workflow-greeting-quarkus`] example application, which does not contain external dependencies and is easy to test and comprehend.
+
+The `serverless-workflow-greeting-quarkus` example application contains a single operation state with one action, which sends a request to the `greeting` function. In the example application, the workflow data is assumed to be the name of the person to greet, and the result is the greeting for that person. The result is added to the state data, which later becomes the workflow data output.
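+
+The following sketch illustrates the general shape of such a workflow definition: an operation state with a single action that calls the `greeting` function. The operation reference, state name, and argument shown here are illustrative only; refer to the example repository for the actual definition.
+
+.Illustrative structure of a greeting workflow definition
+[source,json]
+----
+{
+  "id": "greetings",
+  "version": "1.0",
+  "name": "Greeting workflow",
+  "start": "GreetState",
+  "functions": [
+    {
+      "name": "greeting",
+      "operation": "specs/greetings.yaml#greeting"
+    }
+  ],
+  "states": [
+    {
+      "name": "GreetState",
+      "type": "operation",
+      "actions": [
+        {
+          "functionRef": {
+            "refName": "greeting",
+            "arguments": {
+              "name": "${ .name }"
+            }
+          }
+        }
+      ],
+      "end": true
+    }
+  ]
+}
+----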
+--
+
+. On the editor page, click the *Try on OpenShift* button.
+. On the context menu, click *Deploy "greetings" ({product_name})*.
+
--
-.OpenShift deployment status (success)
-image:tooling/serverless-logic-web-tools/serverless-logic-web-tools-openshift-deployments.png[]
+A modal appears, displaying the following deployment options:
+
+* *Deploy as a project*: This option is available if your current workspace contains the entire project structure including a single `pom.xml` file.
+
+** If the *Deploy as a project* option is disabled, a pre-built image container that contains a Quarkus project is used, and the workflow files are placed in the `src/main/resources` folder of the project. In this case, ensure that the workflow files do not have other dependencies, because such dependencies are not included in the deployment.
+** If the *Deploy as a project* option is enabled, all the workflow files are deployed as-is, meaning that no project template is used. Therefore, ensure that the project is deployable.
+
+* *Upload OpenAPI spec to Service Registry*: After a workflow is deployed, an OpenAPI specification is available in the deployed service. When you check this option, the {serverless_logic_web_tools_name} fetches the OpenAPI specification and, when ready, uploads it to your configured service registry. To use this option, you must configure your service account and service registry on the *Settings* page. For more information, see xref:tooling/serverless-logic-web-tools/serverless-logic-web-tools-redhat-application-services-integration.adoc[Integrating with Red Hat OpenShift Application and Data Services].
+--
+
+. Uncheck all options and click *Confirm*.
++
--
+A pop-up appears, displaying the message *Your deployment has been successfully started and will be available shortly. Please do not close this browser tab until the operation is completed.* Clicking the *OpenShift deployments* icon shows your deployment status. The deployment process takes a few minutes, because a build must be completed before the services become active.
+.OpenShift deployment status
+image:tooling/serverless-logic-web-tools/serverless-logic-web-tools-openshift-deployments.png[]
+--
-[[proc-check-deploy-status-serverless-logic-web-tools]]
-== Checking your deployment status
+[[proc-verify-deploy-status-serverless-logic-web-tools]]
+== Verifying the deployment status of your first project
-After deployment is successful you can check out multiple information about your service.
+After the deployment of your {product_name} project is successful, you can verify various information about the deployed service.
.Prerequisites
-* OpenShift integration has to be correctly configured (see how to xref:serverless-logic:tooling/serverless-logic-web-tools/serverless-logic-web-tools-openshift-integration.adoc[here]);
-* Have a successfully deployed project.
-* The deployed project must have been deployed with the *Deploy as a project* option *unchecked*, as the Serverless deployment page is only available via the pre-built image container. If the option was checked the tool will open whatever `index.html` file your project provided.
+* OpenShift integration is configured correctly.
++
+For more information, see xref:tooling/serverless-logic-web-tools/serverless-logic-web-tools-openshift-integration.adoc[Integrating your {product_name} project with OpenShift using {serverless_logic_web_tools_name}].
+* Your {product_name} project is deployed successfully.
+* Your project is deployed with the *Deploy as a project* option unchecked, because the deployment page is available only for the pre-built image container. If the *Deploy as a project* option is checked, the tool opens the `index.html` file that your project provides, if any.
.Procedure
-. Click on the **OpenShift deployments** icon to show a list of deployments;
-. If your deployment is successful (listed with a emoji:white_check_mark[1x]) you can click on it to access the **Serverless Deployment** page and check on the **Swagger UI** interface, **Open API** specification and **Metrics** page via DashBuilder.
+. Click on the *OpenShift deployments* icon to view a list of deployments.
+. If your deployment is successful (listed with emoji:white_check_mark[1x]), you can click it to access the deployment page and check the *Swagger UI*, *OpenAPI* specification, and *Metrics* page using DashBuilder.
+
--
.Serverless deployment page
diff --git a/modules/serverless-logic/pages/tooling/serverless-logic-web-tools/serverless-logic-web-tools-enable-kogito-swf-visualization.adoc b/modules/serverless-logic/pages/tooling/serverless-logic-web-tools/serverless-logic-web-tools-enable-kogito-swf-visualization.adoc
index d54903a8..532133ae 100644
--- a/modules/serverless-logic/pages/tooling/serverless-logic-web-tools/serverless-logic-web-tools-enable-kogito-swf-visualization.adoc
+++ b/modules/serverless-logic/pages/tooling/serverless-logic-web-tools/serverless-logic-web-tools-enable-kogito-swf-visualization.adoc
@@ -1,15 +1,15 @@
= Enable the Kogito Serverless Workflow Visualization feature preview for {product_name}
:compat-mode!:
// Metadata:
-:description: Enable the Kogito Serverless Workflow Visualization diagram preview
+:description: Enable the {product_name} Workflow Visualization diagram preview
:keywords: kogito, workflow, serverless, editor, logic, web, tools, stunner, visualization
-The Kogito Serverless Workflow Visualization is a new way to preview your {product_name} diagrams alongside the editor. In this tutorial, you'll learn how to enable it and try it out.
+The {product_name} Workflow Visualization is a new way to preview your {product_name} diagrams alongside the editor. In this tutorial, you'll learn how to enable it and try it out.
.Procedure
. On the top right corner, click on the **Cog wheel** (⚙️);
. Scroll through the tabs and click on **Feature Preview**;
-. Check the option **Kogito Serverless Workflow Visualization** and close the modal.
+. Check the option **{product_name} Workflow Visualization** and close the modal.
+
--
@@ -20,7 +20,7 @@ image::tooling/serverless-logic-web-tools/serverless-logic-web-tools-feature-pre
That's it, now you can try out the new visualization for {product_name}.
--
-.Kogito Serverless Workflow Visualization
+.{product_name} Workflow Visualization
image::tooling/serverless-logic-web-tools/serverless-logic-web-tools-workflow-editor.png[]
--
diff --git a/modules/serverless-logic/pages/tooling/serverless-logic-web-tools/serverless-logic-web-tools-github-integration.adoc b/modules/serverless-logic/pages/tooling/serverless-logic-web-tools/serverless-logic-web-tools-github-integration.adoc
index 8fac4813..86cb95f4 100644
--- a/modules/serverless-logic/pages/tooling/serverless-logic-web-tools/serverless-logic-web-tools-github-integration.adoc
+++ b/modules/serverless-logic/pages/tooling/serverless-logic-web-tools/serverless-logic-web-tools-github-integration.adoc
@@ -1,68 +1,82 @@
-= GitHub integration
+= Integrating your {product_name} project in GitHub using {serverless_logic_web_tools_name}
:compat-mode!:
// Metadata:
:description: {serverless_logic_web_tools_name} github integration
:keywords: kogito, workflow, serverless, editor, web, tools, settings, github, integration
-The {serverless_logic_web_tools_name} implements a web version of a git client, allowing users to clone, create, commit, push and pull repositories, making it possible to sync workspaces remotely.
+The {serverless_logic_web_tools_name} implements a web version of a Git client, enabling you to clone, create, commit, push, and pull repositories, and to keep your workspaces synchronized remotely.
-This document describes how to configure the integration and sync your projects.
+This document describes how you can configure the integration and synchronize your projects.
[[proc-setting-github-token-serverless-logic-web-tools]]
-== Setting up your GitHub token
+== Setting your GitHub token in {serverless_logic_web_tools_name}
-Generate a token from your GitHub account settings page and add it to the {serverless_logic_web_tools_name} settings tab.
+You can generate a token from your GitHub account and add the token to the {serverless_logic_web_tools_name}.
.Prerequisites
-* GitHub account.
+* You have an account in GitHub.
.Procedure
-. In the {serverless_logic_web_tools_name}, click on the **Cog wheel** (⚙️) on the top right corner and go to the **GitHub** tab;
-. From there, you'll see a link to the GitHub token settings page and the **Token** field;
-. Click the link:{github_tokens_url}[link] and create your GitHub token, making sure you select at least the **repo** option. Optionally select **gist** as well, as it will give you the ability to import and update gists;
-. Copy the token generated and paste it into the GitHub settings tab on the {serverless_logic_web_tools_name};
-. The contents of the tab should be updated, showing that you're signed into GitHub and have all the permissions required.
-
+. Go to link:{serverless_logic_web_tools_url}[{serverless_logic_web_tools_name}] web application, and click the *Cogwheel* (⚙️) on the top-right corner of the screen.
+. Go to the *GitHub* tab.
+. In the *GitHub* tab, click the *Add access token* button. A window appears.
+. Click the *Create a new token* option.
++
+--
+Ensure that you select the *repo* option.
+--
+. Optionally, select *gist*, which enables you to import and update gists.
+. Copy the generated token and paste it into the *Token* field in {serverless_logic_web_tools_name} GitHub *Settings*.
++
+The contents of the tab are updated, showing that you are signed in to GitHub with all the required permissions.
[[proc-sync-workspace-github-serverless-logic-web-tools]]
-== Syncing your workspaces with GitHub
+== Synchronizing your workspaces with GitHub
-After configuring your GitHub token you can now sync workspaces with remote repositories.
+After your GitHub token is set, you can synchronize your workspaces with remote repositories.
.Prerequisites
-* You have configured your GitHub token on the {serverless_logic_web_tools_name}.
+* Your GitHub token is configured in the {serverless_logic_web_tools_name}.
++
+For more information, see <<proc-setting-github-token-serverless-logic-web-tools>>.
.Procedure
-. Create or open a workspace;
-. Add or edit files;
-. Click **Share -> Github: Create Repository**;
-. Name your repository and select if it's going to be **Public** or **Private**;
-. Selecting the **Use Quarkus Accelerator** option will create a repository with a base Quarkus project, moving the workspace files to `src/main/resources`.
-
+. In the {serverless_logic_web_tools_name} web application, create or open a workspace.
+. Add or edit the existing files in the workspace.
+. Click *Share -> Github: Create Repository*.
+. Name your repository and set the repository as *Public* or *Private*.
+. (Optional) Select the *Use Quarkus Accelerator* option to create a repository with a base Quarkus project and move the workspace files to the `src/main/resources` folder.
+
--
-.Creating a repository for your workspace
+.Create a repository for your workspace
image::tooling/serverless-logic-web-tools/serverless-logic-web-tools-github-repo.png[]
--
-. To update the remote repository with your local changes click on **Sync -> Push**;
-. To get new updates from the remote repository click on **Sync -> Pull**;
-
-Currently, the tool cannot resolve merge conflicts, so make sure to always pull new changes before working on your files.
+. Click *Sync -> Push* to update the remote repository with your local changes.
+. To get new updates from the remote repository, click *Sync -> Pull*.
++
+[NOTE]
+====
+Currently, {serverless_logic_web_tools_name} cannot resolve merge conflicts. Therefore, ensure that you always pull changes before working on your files.
+====
[[proc-import-workspace-github-serverless-logic-web-tools]]
== Importing a workspace from GitHub
-Let's say you need to work from another computer or use a workspace created by someone else, in these cases importing a workspace from GitHub is useful. In this tutorial, you'll learn how to import these repositories as workspaces on {serverless_logic_web_tools_name}.
+You can import a workspace from GitHub in {serverless_logic_web_tools_name} when you need to work from another computer or need to use someone else's workspace.
.Prerequisites
-* You have configured your GitHub token on the {serverless_logic_web_tools_name};
-* Have a repository with Serverless Logic files.
+* Your GitHub token is configured in the {serverless_logic_web_tools_name}.
++
+For more information, see <<proc-setting-github-token-serverless-logic-web-tools>>.
+* You have a repository containing workflow files.
.Procedure
-. On GitHub, find the repository with your project and copy its URL;
-. Back on the {serverless_logic_web_tools_name}, paste the URL to the **Import -> From URL** field and click on **Clone**;
-. The page should load your imported project, defaulting to a Serverless Logic file, if present;
-. If your GitHub user has access to push to this repository you'll be able to sync changes by clicking on **Sync -> Push**;
+. Go to GitHub, find the repository with your project, and copy the repository URL.
+. In {serverless_logic_web_tools_name} web application, paste the repository URL in the *Import -> From URL* field and click *Clone*.
++
+The page loads your imported project, defaulting to a workflow file, if present.
+
+. If applicable, you can push to the imported repository by clicking *Sync -> Push*.
include::../../../pages/_common-content/report-issue.adoc[]
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/tooling/serverless-logic-web-tools/serverless-logic-web-tools-openshift-integration.adoc b/modules/serverless-logic/pages/tooling/serverless-logic-web-tools/serverless-logic-web-tools-openshift-integration.adoc
index 62492bf0..6414a653 100644
--- a/modules/serverless-logic/pages/tooling/serverless-logic-web-tools/serverless-logic-web-tools-openshift-integration.adoc
+++ b/modules/serverless-logic/pages/tooling/serverless-logic-web-tools/serverless-logic-web-tools-openshift-integration.adoc
@@ -1,69 +1,79 @@
-= OpenShift integration
+= Integrating your {product_name} project with OpenShift using {serverless_logic_web_tools_name}
:compat-mode!:
// Metadata:
:description: {serverless_logic_web_tools_name} openshift integration
:keywords: kogito, workflow, serverless, editor, web, tools, settings, openshift, integration
-Integrate with Red Hat® OpenShift®, an enterprise-ready Kubernetes container platform, allowing your projects to be deployed and tested online.
+You can integrate your {product_name} project with Red Hat OpenShift. OpenShift is an enterprise-ready Kubernetes container platform, enabling your {product_name} projects to be deployed and tested online.
[[proc-setting-kie-sandbox-extended-services-serverless-logic-web-tools]]
-== Setting up KIE Sandbox Extended Services
+== Setting up Extended Services
-The KIE Sandbox Extended Services tool is required to proxy requests to an OpenShift instance, thus making it possible to deploy and monitor projects.
-
-.Prerequisites
-* None.
+The Extended Services tool is required to proxy requests to an OpenShift instance. Therefore, setting up the Extended Services enables you to deploy and monitor your {product_name} projects.
.Procedure
-. In the {serverless_logic_web_tools_name}, click on the **Cog wheel** (⚙️) on the top right corner and go to the **KIE Sandbox Extended Services** tab;
-. If you're not connected you should see the following message: "**You are not connected to KIE Sandbox Extended Services. Click to setup**" as well as a **Host** and **Port** fields filled with `http://localhost` and `21345` respectively;
-. Click the link in the message and a new window will appear, with your current operating system already selected and a link to download the latest version of the KIE Sandbox Extended Services;
-. Download the file and execute it; (You may need to grant permissions to run the file, depending on your OS and settings!)
-. The contents of the tab should be updated, showing that everything is set and you are connected to the KIE Sandbox Extended Services.
+. In the {serverless_logic_web_tools_name} web application, click the *Cogwheel* (⚙️) on the top-right corner and go to the *Extended Services* tab.
+. If you are not connected to Extended Services, you see the *You are not connected to Extended Services. Click to setup* message.
+. Click the link in the message.
++
+A window appears, showing your current operating system already selected and providing a link to download the latest version of Extended Services.
-. Download the KIE Sandbox Extended Services and execute it.
+. Download the Extended Services and execute it.
+
--
You might need to grant permissions to run the file depending on your operating system and settings.
[NOTE]
====
-If you are using macOS, you might encounter the `"KIE Sandbox Extended Services" is damaged and can't be opened. You should move it to Bin` while running the application "KIE Sandbox Extended Services". This is due to the overeager https://support.apple.com/en-us/HT202491[approach towards security] by macOS and you can fix it by moving the "KIE Sandbox Extended Services" application out of quarantine. You can execute the `xattr -d com.apple.quarantine /Applications/KIE\ Sandbox\ Extended\ Services.app` command to achieve this.
+If you are using macOS, you might encounter the `"Extended Services" is damaged and can't be opened. You should move it to Bin` error message while running the "Extended Services" application. This is due to the overeager https://support.apple.com/en-us/HT202491[approach towards security] by macOS and you can fix it by moving the "Extended Services" application out of quarantine. You can execute the `xattr -d com.apple.quarantine /Applications/KIE\ Sandbox\ Extended\ Services.app` command to achieve this.
====
-After executing the KIE Sandbox Extended Services the content in the *KIE Sandbox Extended Services* is updated and displays that you are connected to the KIE Sandbox Extended Services.
+After executing the Extended Services application, the content in the *Extended Services* tab is updated and displays that you are connected to the Extended Services.
--
[[proc-connecting-openshift-instance-serverless-logic-web-tools]]
-== Connecting to your OpenShift instance
+== Connecting to an OpenShift instance using {serverless_logic_web_tools_name}
+
+After setting up the Extended Services, you can connect to your OpenShift instance to deploy your {product_name} projects with {serverless_logic_web_tools_name}.
.Prerequisites
-* Have the KIE Sandbox Extended Services tool installed and running;
-* Have an OpenShift instance up (you can create a free developer sandbox here link:{openshift_developer_sandbox_url}[OpenShift Developer Sandbox]).
+* The Extended Services tool is installed and running.
+* An OpenShift instance is active.
++
+You can create a free developer sandbox. For more information, see link:{openshift_developer_sandbox_url}[OpenShift Developer Sandbox].
.Procedure
-. Start by logging into your OpenShift instance console interface;
-. You'll need your OpenShift project name (also known as namespace), the API server and an access token:
- * For the OpenShift project name look at the **Topology** tab, and in the top left corner you should see your project name;
+. Log in to your OpenShift instance console interface.
+. In the OpenShift instance console interface, you need your OpenShift project name (also known as namespace), the API server, and an access token.
+
--
-.Getting the OpenShift project name and username menu
+* For the OpenShift project name, go to the *Topology* tab. Your project name appears in the top-left corner.
++
+.OpenShift project name in OpenShift instance console
image::tooling/serverless-logic-web-tools/serverless-logic-web-tools-openshift-project.png[]
---
- * The API server and access token can be obtained by clicking on your username in the top right corner and then on **Copy login command**. A new page will open with your new API Token as well as an `oc cli` login command, from that command copy the **--server=** value;
+
+* To obtain the API server and access token, click your username in the top-right corner, and then click *Copy login command*.
+
---
-.Getting the OpenShift access token and API server
+A new page opens, containing your new API token along with an `oc cli` login command. From the `oc cli` login command, copy the value of `--server=`.
++
+.OpenShift access token and API server
image::tooling/serverless-logic-web-tools/serverless-logic-web-tools-openshift-info.png[]
--
-. Having all that information, go back to the {serverless_logic_web_tools_name}, click on the **Cog wheel** (⚙️) on the top right corner and go to the **OpenShift** tab;
-. Fill the **Namespace (project)** field with the OpenShift instance project name;
-. Fill the **Host** field with the value copied from the **--server** flag;
-. Fill the **Token** field with the API Token;
-. Now you can click on **Connect** and if everything is correct the tab should update to show **You're connected to OpenShift**.
-Now you should be ready to start deploying your projects with the {serverless_logic_web_tools_name}, check how to do it xref:serverless-logic:tooling/serverless-logic-web-tools/serverless-logic-web-tools-deploy-projects.adoc[here].
+. Go to the {serverless_logic_web_tools_name} web application, click the *Cogwheel* (⚙️) on the top-right corner and go to the *OpenShift* tab.
+. Click the *Add connection* button. A window appears.
+. Enter your OpenShift project name in the *Namespace (project)* field.
+. Enter the copied value of the `--server` flag in the *Host* field.
+. Enter the API token in the *Token* field.
+. Click *Connect*.
++
+If the entered values are correct, the tab updates and displays the *You're connected to OpenShift* message.
+
+After connecting to OpenShift, you are ready to deploy your {product_name} projects using {serverless_logic_web_tools_name}. For more information about deploying your projects, see xref:tooling/serverless-logic-web-tools/serverless-logic-web-tools-deploy-projects.adoc[Deploying your {product_name} projects using {serverless_logic_web_tools_name}].
-Note: If your OpenShift instance uses self-signed certificates you may need to enable `InsecureSkipVerify` on KIE Sandbox Extended Services.
+[NOTE]
+====
+If your OpenShift instance uses self-signed certificates, then you must enable the `InsecureSkipVerify` on Extended Services.
+====
include::../../../pages/_common-content/report-issue.adoc[]
diff --git a/modules/serverless-logic/pages/tooling/serverless-logic-web-tools/serverless-logic-web-tools-overview.adoc b/modules/serverless-logic/pages/tooling/serverless-logic-web-tools/serverless-logic-web-tools-overview.adoc
index f8a5e451..e80b1cf5 100644
--- a/modules/serverless-logic/pages/tooling/serverless-logic-web-tools/serverless-logic-web-tools-overview.adoc
+++ b/modules/serverless-logic/pages/tooling/serverless-logic-web-tools/serverless-logic-web-tools-overview.adoc
@@ -4,36 +4,44 @@
:description: Kogito {serverless_logic_web_tools_name}
:keywords: kogito, workflow, serverless, editor, logic, web, tools
-The link:{serverless_logic_web_tools_url}[{serverless_logic_web_tools_name}] is a web application that allows authors to create and sync {product_name}, Decision and Dashbuilder files, all in a single interface, while also providing the integrations needed to deploy and test {product_name} models in development mode.
+The link:{serverless_logic_web_tools_url}[{serverless_logic_web_tools_name}] is a web application that enables you to create and synchronize your {product_name} files, decision files, and Dashbuilder files in a single interface. Also, the {serverless_logic_web_tools_name} application provides the integrations that are needed to deploy and test the {product_name} models in development mode.
-.Home page
+.Home page of {serverless_logic_web_tools_name}
image::tooling/serverless-logic-web-tools/serverless-logic-web-tools-overview.png[]
-[[ref-serverless-logic-web-tools-features]]
-== What's inside?
+The {serverless_logic_web_tools_name} provides the following three editors for your projects:
-The {serverless_logic_web_tools_name} provides 3 different editors for your projects:
+* {product_name} editor for `.sw.json` or `.sw.yaml|yml` files
+* Serverless Decision editor for `.yard.yaml|yml` files
+* Dashbuilder editor for `dash.yaml|yml` files
-* {product_name} editor, for `.sw.json` or `.sw.yaml|yml` files;
-* Serverless Decision editor, for `.yard.json` or `.yard.yaml|yml` files;
-* Dashbuilder editor, for `dash.yaml|yml` files.
+[[proc-create-workflow-model-web-tools]]
+== Creating a workflow model in {serverless_logic_web_tools_name}
-*Feature Preview*: For an improved experience when previewing your {product_name}, check out xref:serverless-logic:tooling/serverless-logic-web-tools/serverless-logic-web-tools-enable-kogito-swf-visualization.adoc[how to enable the new Kogito Serverless Workflow Visualization diagram].
+You can start by creating a new model from scratch or using one of the samples provided.
-== Getting started
-
-Not much needs to be done to start creating your projects in the web tool. You can start by creating a new model from scratch or using one of the samples provided.
-
-On the home page, choose the type of model you want to create, then click the button corresponding to the file type you wish (JSON or YAML).
-
-.Create
+.Procedure
+. Go to the link:{serverless_logic_web_tools_url}[{serverless_logic_web_tools_name}] web application.
+. On the Home page, select the type of model you want to create, and click the button corresponding to the file type that you want, such as JSON or YAML.
++
+--
+.Create section in {serverless_logic_web_tools_name}
image::tooling/serverless-logic-web-tools/serverless-logic-web-tools-create.png[]
-A new workspace will be created, containing a single file of the chosen type. The file is created with the name "Untitled", but clicking its name allows you to rename it to your liking.
+A new workspace is created, containing a single file of the selected type named _Untitled_. Click the name to rename the file.
+--
+
+. Edit your workflow file. The preview on the right side of the editor updates as you edit.
++
+--
+The workflow files in {serverless_logic_web_tools_name} are saved automatically after each change. The files are persisted in the browser, but you can synchronize the files using the GitHub integration. For more information about GitHub integration, see xref:tooling/serverless-logic-web-tools/serverless-logic-web-tools-github-integration.adoc[Integrating your {product_name} project in GitHub using {serverless_logic_web_tools_name}].
+--
-Editing workflow files will update the preview on the right-hand side of the editor and all files are saved automatically on every change.
+== Additional resources
-The files are persisted in the browser, but syncing can be done with the xref:serverless-logic:tooling/serverless-logic-web-tools/serverless-logic-web-tools-github-integration.adoc[Github integration].
+* xref:tooling/serverless-logic-web-tools/serverless-logic-web-tools-openshift-integration.adoc[Integrating your {product_name} project with OpenShift using {serverless_logic_web_tools_name}]
+* xref:tooling/serverless-logic-web-tools/serverless-logic-web-tools-redhat-application-services-integration.adoc[Integrating with Red Hat OpenShift Application and Data Services]
+* xref:tooling/serverless-logic-web-tools/serverless-logic-web-tools-deploy-projects.adoc[Deploying your {product_name} projects using {serverless_logic_web_tools_name}]
include::../../../pages/_common-content/report-issue.adoc[]
diff --git a/modules/serverless-logic/pages/tooling/serverless-logic-web-tools/serverless-logic-web-tools-redhat-application-services-integration.adoc b/modules/serverless-logic/pages/tooling/serverless-logic-web-tools/serverless-logic-web-tools-redhat-application-services-integration.adoc
index 6c25f737..f1e13dfd 100644
--- a/modules/serverless-logic/pages/tooling/serverless-logic-web-tools/serverless-logic-web-tools-redhat-application-services-integration.adoc
+++ b/modules/serverless-logic/pages/tooling/serverless-logic-web-tools/serverless-logic-web-tools-redhat-application-services-integration.adoc
@@ -1,100 +1,100 @@
-= Red Hat OpenShift Application and Data Services integration
+= Integrating with Red Hat OpenShift Application and Data Services
:compat-mode!:
// Metadata:
:description: {serverless_logic_web_tools_name} Red Hat OpenShift Application and Data Services integration
:keywords: kogito, workflow, serverless, editor, web, tools, settings, red hat, application, data, services, integration
-Some {serverless_logic_web_tools_name} features require integration with Red Hat OpenShift Application and Data Services. Uploading OpenAPI specifications to a Service Registry and deploying {product_name} that required an Apache Kafka stream are some of the examples.
+Some of the features in {serverless_logic_web_tools_name} require integration with Red Hat OpenShift Application and Data Services. For example, uploading OpenAPI specifications to a service registry and fetching service functions from it require this integration.
-On this page, you'll configure all the settings needed for complete integration.
+This document describes how you can configure the required settings to complete the integration with Red Hat OpenShift Application and Data Services.
-[[proc-setting-service-account-serverless-logic-web-tools]]
-== Setting up a Service Account
+[[proc-create-service-account-serverless-logic-web-tools]]
+== Creating a service account in Red Hat OpenShift Application and Data Services
-Create or use a Service Account from your Red Hat OpenShift Application and Data Services console and add it to the {serverless_logic_web_tools_name} settings tab.
+You can create or use a service account from your Red Hat OpenShift Application and Data Services console and add the service account to the {serverless_logic_web_tools_name}.
.Prerequisites
-* Access to Red Hat OpenShift Application and Data Services console.
+* You have access to the Red Hat OpenShift Application and Data Services console.
.Procedure
-. Create a Service Account on Red Hat Openshift Application and Data Services console (if you already have one, skip this step):
- * Go to link:{openshift_application_data_services_service_account_url}[Service Accounts | Red Hat OpenShift Application and Data Services];
- * Click on **Create service account**;
- * In the window that opens up type a Service account name;
- * Click on **Create**.
- * A new modal will show up with your **Client ID** and **Client Secret**, copy those to somewhere safe and save it.
- * Check the **I have copied the client ID and secret** checkbox and click on **Close**.
-. If you skipped the previous step, find your saved **Client ID** and **Client Secret** as they will be necessary for the next steps;
-. In the {serverless_logic_web_tools_name}, click on the **Cog wheel** (⚙️) on the top right corner and go to the **Service Account** tab;
-. Paste your **Client ID** and **Client Secret** in the respective fields;
-. Click on **Apply**.
-. The tab contents should be updated, showing **Your Service Account information is set**.
-
-
-[[proc-setting-service-registry-serverless-logic-web-tools]]
-== Setting up a Service Registry
-
-Create or use a Service Registry instance from your Red Hat OpenShift Application and Data Services console and add it to the {serverless_logic_web_tools_name} settings tab.
-
-.Prerequisites
-* Access to Red Hat OpenShift Application and Data Services console;
-* A Service Account.
-
-.Procedure
-. Create a Service Registry instance on Red Hat Openshift Application and Data Services console (if you already have one, skip this step):
- * Go to link:{openshift_application_data_services_service_registry_url}[Service Registry | Red Hat OpenShift Application and Data Services];
- * Click on **Create Service Registry instance**;
- * In the window that opens up type a Service Registry instance name;
- * Click on **Create**;
- * The list of instances will be updated with your new instance;
- * Find it in the list and click on its name;
- * Go to the **Settings** tab and click on **Grant access**;
- * From the dropdown, select your Service Account desired (the same you configured on the {serverless_logic_web_tools_name});
- * Select a role for that Service Account (has to be Manager or Administrator, to have read and write access);
- * Click on **Save**;
- * On the top right-hand corner there should be a triple dotted menu, click on it and then on **Connection**;
- * A drawer should open with all the connection and authentication information you'll need;
- * Copy the **Core Registry API** value.
-. If you skipped the previous step, find your Service Registry instance **Core Registry API** as it will be necessary for the next steps;
-. In the {serverless_logic_web_tools_name}, click on the **Cog wheel** (⚙️) on the top right corner and go to the **Service Registry** tab;
-. Input a name for your registry, preferably the same one you used when creating the Service Registry instance;
-. Paste your **Core Registry API** in the respective field;
-. Click on **Apply**.
-. The tab contents should be updated, showing **Your Service Registry information is set**.
-
-[[proc-setting-apache-kafka-serverless-logic-web-tools]]
-== Setting up Streams for Apache Kafka
-
-Create or use a Kafka instance from your Red Hat OpenShift Application and Data Services console and add it to the {serverless_logic_web_tools_name} settings tab.
+. To create a service account in Red Hat OpenShift Application and Data Services, perform the following steps:
++
+--
+[NOTE]
+====
+You can skip this step if you already have a service account.
+====
+.. Go to link:{openshift_application_data_services_service_account_url}[Service Accounts | Red Hat OpenShift Application and Data Services].
+.. Click *Create service account*.
+.. In the *Create a service account* window, enter a service account name in the *Short description* field.
+.. Click *Create*.
++
+A modal displaying your *Client ID* and *Client Secret* appears.
+.. Copy and save the Client ID and Client Secret.
+.. Check the *I have copied the client ID and secret* checkbox and click *Close*.
+--
+
+. If you already have a service account, find your *Client ID* and *Client Secret*.
+. In the {serverless_logic_web_tools_name}, click the *Cogwheel* (⚙️) on the top-right corner and go to the *Service Account* tab.
+. Click the *Add service account* button. A window appears.
+. Enter your *Client ID* and *Client Secret* in the respective fields.
+. Click *Apply*.
++
+The content in the *Service Account* tab updates and displays the *Your Service Account information is set* message.
+
+
+[[proc-create-service-registry-serverless-logic-web-tools]]
+== Creating a Service Registry in Red Hat OpenShift Application and Data Services
+
+You can create or use a Service Registry instance from your Red Hat OpenShift Application and Data Services console and add the Service Registry to {serverless_logic_web_tools_name}.
.Prerequisites
-* Access to Red Hat OpenShift Application and Data Services console;
+* You have access to the Red Hat OpenShift Application and Data Services console.
+* You have created a service account.
++
+For information about creating a service account, see <<proc-create-service-account-serverless-logic-web-tools>>.
.Procedure
-. Create a Kafka instance on Red Hat Openshift Application and Data Services console (if you already have one, skip this step):
- * Go to link:{openshift_application_data_services_apache_kafka_url}[Streams for Apache Kafka | Red Hat OpenShift Application and Data Services];
- * Click on **Create Kafka instance**;
- * In the window that opens up type a Kafka instance name;
- * Fill the other fields to your liking, or leave them with the default values;
- * Click on **Create instance**;
- * Reload the page for the list of instances to be updated with your new instance;
- * Wait for the status to be updated to **Ready**;
- * Find it in the list and click on its name;
- * Go to the **Topics** tab and create a new topic, you'll need its name later;
- * Go to the **Access** tab;
- * Click on **Manage Access** and select **All Accounts** or your Service Account;
- * Add the following permissions:
- ** _Consumer group is " * " | Allow All | All Accounts_;
- ** _Topic is " * " | Allow All | All Accounts_;
- * On the top right-hand corner there should be a triple dotted menu, click on it and then on **Connection**;
- * Copy the **Bootstrap server** value.
-. If you skipped the previous step, find your Kafka instance **Bootstrap server** value as it will be necessary for the next steps;
-. In the {serverless_logic_web_tools_name}, click on the **Cog wheel** (⚙️) on the top right corner and go to the **Streams for Apache Kafka** tab;
-. Paste the **Bootstrap server** value you copied before to the **Bootstrap Server** field;
-. Type the name of the topic you created on the **Topic** field;
-. Click on **Apply**.
-. The tab contents should be updated, showing **Your Streams for Apache Kafka information is set**.
-
-Note: using these broad settings is meant to make it easy to configure but you can set up specific Service Accounts, Topics, and Consumer Groups as well.
+. To create a Service Registry instance in the Red Hat OpenShift Application and Data Services console, perform the following steps:
++
+--
+[NOTE]
+====
+You can skip this step if you already have a Service Registry instance.
+====
+.. Go to link:{openshift_application_data_services_service_registry_url}[Service Registry | Red Hat OpenShift Application and Data Services].
+.. Click the *Create Service Registry instance* button.
+.. In the *Create a Service Registry instance* window, enter a Service Registry instance name and click *Create*.
++
+The list of Service Registry instances updates with your instance.
+.. Find the Service Registry instance you created in the list and click the instance.
+.. Go to the *Settings* tab and click *Grant access*.
+.. In the drop-down, select the service account you created in the previous procedure.
+.. Select a role for your service account.
++
+[IMPORTANT]
+====
+You must select the Manager or Administrator role to have read and write access.
+====
+
+.. Click *Save*.
+.. Click the three-dot menu in the top-right corner of the screen.
+.. Click *Connection*.
++
+A drawer opens containing the required connection and authentication information.
+
+.. Copy the value of *Core Registry API*.
+--
+
+. If you already have a Service Registry, find the value of *Core Registry API* of your Service Registry.
+. In the {serverless_logic_web_tools_name} web application, click the *Cogwheel* (⚙️) on the top-right corner and go to the *Service Registry* tab.
+. Click the *Add service registry* button. A window appears.
+. Enter a name for your registry.
++
+You can enter the same name that you used while creating the Service Registry instance.
+
+. Enter the value of *Core Registry API* and click *Apply*.
++
+The content in the *Service Registry* tab updates and displays the *Your Service Registry information is set* message.
include::../../../pages/_common-content/report-issue.adoc[]
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/tooling/serverless-workflow-editor/swf-editor-chrome-extension.adoc b/modules/serverless-logic/pages/tooling/serverless-workflow-editor/swf-editor-chrome-extension.adoc
index 5422ff50..dbc75e7e 100644
--- a/modules/serverless-logic/pages/tooling/serverless-workflow-editor/swf-editor-chrome-extension.adoc
+++ b/modules/serverless-logic/pages/tooling/serverless-workflow-editor/swf-editor-chrome-extension.adoc
@@ -1,17 +1,17 @@
-= Chrome GitHub extension for Serverless Workflow editor
+= Chrome extension for Serverless Workflow editor on GitHub
:compat-mode!:
// Metadata:
-:description: Chrome GitHub extension for Serverless Workflow editor
+:description: Chrome extension for Serverless Workflow editor on GitHub
:keywords: kogito, workflow, serverless, editor, chrome extension, extension
// links:
:kogito_swf_editor_chrome_webstore_url: https://chrome.google.com/webstore/detail/serverless-workflow-edito/ijamhkegfogkfnmfnfkjdiadomlphfej
The Google Chrome extension for the Serverless Workflow editor enables you to view and edit link:{spec_website_url}[CNCF Serverless Workflow specification] files in GitHub.
-This document describes how to install and use the Chrome GitHub extension for Serverless Workflow editor.
+This document describes how to install and use the Chrome extension for Serverless Workflow editor on GitHub.
[[proc-install-chrome-extension-sw-editor]]
-== Installing the Chrome GitHub extension for Serverless Workflow editor
+== Installing the Chrome extension for Serverless Workflow editor on GitHub
You can install the Chrome extension for the Serverless Workflow editor to view and edit the workflow specification files in GitHub.
@@ -39,14 +39,14 @@ The Chrome extension for Serverless Workflow editor is successfully installed.
--
[[proc-using-chrome-extension-sw-editor]]
-== Using the Chrome GitHub extension for Serverless Workflow editor
+== Using the Chrome extension for Serverless Workflow editor on GitHub
After installing the Chrome extension for the Serverless Workflow editor, you can use the editor to edit and view the workflow files in GitHub.
.Prerequisites
* You have installed the Serverless Workflow editor extension in Chrome.
+
-For more information, see <>.
+For more information, see <<proc-install-chrome-extension-sw-editor>>.
.Procedure
. Create a workflow file (`.sw.json` or `.sw.yaml`).
diff --git a/modules/serverless-logic/pages/tooling/serverless-workflow-editor/swf-editor-overview.adoc b/modules/serverless-logic/pages/tooling/serverless-workflow-editor/swf-editor-overview.adoc
index 4fdd1d11..c07a9ed9 100644
--- a/modules/serverless-logic/pages/tooling/serverless-workflow-editor/swf-editor-overview.adoc
+++ b/modules/serverless-logic/pages/tooling/serverless-workflow-editor/swf-editor-overview.adoc
@@ -1,7 +1,7 @@
= Serverless Workflow editor
:compat-mode!:
// Metadata:
-:description: Kogito Serverless Workflow editor
+:description: {product_name} editor
:keywords: kogito, workflow, serverless, editor
You can use the Serverless Workflow editor to edit the link:{spec_website_url}[CNCF Serverless Workflow specification] files in the code editor and view the workflow diagram in the diagram visualizer. When you make changes to the specification files in the editor, you see the workflow diagram is updated with the new changes.
@@ -26,15 +26,15 @@ The editor performs validation using JSON schema to the workflow file structure
The following extensions are provided that you can use to design the workflow files:
-* xref:serverless-logic:tooling/serverless-workflow-editor/swf-editor-vscode-extension.adoc[VS Code extension]
-* xref:serverless-logic:tooling/serverless-workflow-editor/swf-editor-chrome-extension.adoc[Chrome GitHub extension]
+* xref:tooling/serverless-workflow-editor/swf-editor-vscode-extension.adoc[VS Code extension]
+* xref:tooling/serverless-workflow-editor/swf-editor-chrome-extension.adoc[Chrome GitHub extension]
-You can also try it at the xref:serverless-logic:tooling/serverless-logic-web-tools/serverless-logic-web-tools-overview.adoc[{serverless_logic_web_tools_name}].
+You can also try it at the xref:tooling/serverless-logic-web-tools/serverless-logic-web-tools-overview.adoc[{serverless_logic_web_tools_name}].
== Additional resources
-* xref:serverless-logic:getting-started/cncf-serverless-workflow-specification-support.adoc[CNCF Serverless Workflow specification]
+* xref:core/cncf-serverless-workflow-specification-support.adoc[CNCF Serverless Workflow specification]
include::../../../pages/_common-content/report-issue.adoc[]
diff --git a/modules/serverless-logic/pages/tooling/serverless-workflow-editor/swf-editor-vscode-extension.adoc b/modules/serverless-logic/pages/tooling/serverless-workflow-editor/swf-editor-vscode-extension.adoc
index 3d197e8c..8d446e0c 100644
--- a/modules/serverless-logic/pages/tooling/serverless-workflow-editor/swf-editor-vscode-extension.adoc
+++ b/modules/serverless-logic/pages/tooling/serverless-workflow-editor/swf-editor-vscode-extension.adoc
@@ -4,7 +4,7 @@
:description: VS Code Extension for Serverless Workflow editor
:keywords: kogito, workflow, serverless, editor, VS Code, extension
// links:
-:kogito_swf_editor_vscode_marketplace_url: https://marketplace.visualstudio.com/items?itemName=redhat.vscode-extension-serverless-workflow-editor
+:kogito_swf_editor_vscode_marketplace_url: https://marketplace.visualstudio.com/items?itemName=kie-group.swf-vscode-extension
:red_hat_sso_login_url: https://sso.redhat.com/auth/realms/redhat-external/login-actions/registration?client_id=cloud-services&tab_id=Rd8aj14U3Xk
:red_hat_auth_vscode_marketplace_url: https://marketplace.visualstudio.com/items?itemName=redhat.vscode-redhat-account
:red_hat_hybrid_cloud_console_url: https://console.redhat.com/
@@ -27,19 +27,19 @@ image::tooling/serverless-workflow-editor/swf-editor-vscode-extension-page.png[]
.Procedure
. You can install the VS Code extension for Serverless Workflow editor using one of the following methods:
-* *Go to Visual Studio Marketplace*: Go to link:{kogito_swf_editor_vscode_marketplace_url}[Serverless Workflow Editor] extension page in Visual Studio Marketplace and click *Install*.
+* *Go to Visual Studio Marketplace*: Go to link:{kogito_swf_editor_vscode_marketplace_url}[{serverless_workflow_vscode_extension_name}] extension page in Visual Studio Marketplace and click *Install*.
+
--
-.Serverless Workflow Editor in Visual Studio Marketplace
-image::tooling/serverless-workflow-editor/swf-editor-vscode-market-place.png[]
+.{serverless_workflow_vscode_extension_name} in Visual Studio Marketplace
+image::tooling/serverless-workflow-editor/swf-editor-vscode-marketplace.png[]
--
* *Download VSIX file*:
+
--
-.. Download the `vscode_extension_serverless_workflow_editor_VERSION.vsix` file from the link:{kie_tools_releases_page_url}[kie-tools] releases page in GitHub.
+.. Download the latest {product_name} VS Code extension VSIX file from the link:{kie_tools_releases_page_url}[kie-tools] releases page in GitHub.
-.. Click *Install from VSIX* option from the extensions menu and install the `vscode_extension_serverless_workflow_editor_VERSION.vsix` file in VS Code.
+.. Click *Install from VSIX* option from the extensions menu and install the `serverless_workflow_vscode_extension_VERSION.vsix` file in VS Code.
.Install VS Code extension using VSIX
image::tooling/serverless-workflow-editor/swf-editor-vscode-vsix-package.png[]
diff --git a/modules/serverless-logic/pages/use-cases/newsletter-subscription-example.adoc b/modules/serverless-logic/pages/use-cases/newsletter-subscription-example.adoc
deleted file mode 100644
index f06f1583..00000000
--- a/modules/serverless-logic/pages/use-cases/newsletter-subscription-example.adoc
+++ /dev/null
@@ -1 +0,0 @@
-//= Newsletter subscription example
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/use-cases/orchestration-based-saga-pattern.adoc b/modules/serverless-logic/pages/use-cases/orchestration-based-saga-pattern.adoc
index 3fe84d60..1a51d6ab 100644
--- a/modules/serverless-logic/pages/use-cases/orchestration-based-saga-pattern.adoc
+++ b/modules/serverless-logic/pages/use-cases/orchestration-based-saga-pattern.adoc
@@ -8,16 +8,16 @@
[[con-saga-overview]]
== Overview of Saga pattern
-The Saga design pattern manages data consistency across participants that are available in distributed transaction scenarios. For more information about Saga pattern, see the initial link:https://www.cs.cornell.edu/andru/cs711/2002fa/reading/sagas.pdf[publication].
+The Saga design pattern manages data consistency across participants that are available in distributed transaction scenarios. For more information about Saga pattern, see the initial link:https://www.cs.cornell.edu/andru/cs711/2002fa/reading/sagas.pdf[publication].
-In a microservice architecture, you can define a participant as microservice, which is responsible to perform actions related to the business domain.
+In a microservice architecture, you can define a participant as a microservice, which is responsible for performing actions related to the business domain.
The Saga pattern manages the transactions using a sequence of steps. If a failure occurs while executing a step, then a sequence of compensating actions is executed to undo the changes that are made during the execution. As an alternative, you can leave the system in a known termination state to be consistent.
[[ref-sw-example-saga-pattern]]
== Example of Saga pattern a workflow
-To understand the implementation of Saga pattern in a workflow, you can use the link:{kogito_sw_examples_url}/serverless-workflow-saga-quarkus[`serverless-workflow-saga-quarkus`] example application in GitHub repository.
+To understand the implementation of the Saga pattern in a workflow, you can use the link:{kogito_sw_examples_url}/serverless-workflow-saga-quarkus[`serverless-workflow-saga-quarkus`] example application in the GitHub repository.
The `serverless-workflow-saga-quarkus` example application is based on the order fulfillment process and describes how to define the Saga pattern using {product_name}. In the order fulfillment example, a user buys an item from an e-commerce application. The user adds the delivery information and payment details, and waits for the item to be delivered. The following figure shows the sequence of steps that are executed to complete an order:
@@ -46,14 +46,14 @@ However, in the failure workflow an error occurred during the shipping process.
In the `serverless-workflow-saga-quarkus` example application, a link:{spec_doc_url}[workflow] is used that implements the Saga pattern, in which all the steps and compensation actions are defined. Also, the workflow plays the role of Saga Executor Coordinator (SEC), which orchestrates the calls to the participants in the Saga pattern.
-The workflow definition used to define the Saga pattern is available in the link:{kogito_sw_examples_url}/serverless-workflow-saga-quarkus/src/main/resources/order-saga-error-handling.sw.json[`order-saga-error-handling.sw.json`] file.
+The workflow definition used to define the Saga pattern is available in the link:{kogito_sw_examples_url}/serverless-workflow-saga-quarkus/src/main/resources/order-saga-error-handling.sw.json[`order-saga-error-handling.sw.json`] file.
.Example of order fulfillment Saga workflow
image::use-cases/orchestration-based-saga-pattern/order-fulfillment-saga-workflow.png[]
In the previous example figure of workflow, the calls are orchestrated to the participants (for example, order service, payment service), each participant can throw possible errors, and compensations for each step are defined, that are executed once an error appears during the workflow execution.
-To define the interactions among participants in the Saga pattern using Serverless Workflow specification, you can use link:{spec_doc_url}#workflow-states[workflow states] with link:{spec_doc_url}#Transitions[transitions].
+To define the interactions among participants in the Saga pattern using Serverless Workflow specification, you can use link:{spec_doc_url}#workflow-states[workflow states] with link:{spec_doc_url}#Transitions[transitions].
In Serverless Workflow specification, each workflow state represents a step to be completed in the Saga pattern. Also, an action associated with the workflow state represents how a participant is invoked to execute a given step.
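+
+For illustration only, a Saga step can be modeled as an operation state that invokes a participant, declares the error it can raise, and points to the state that compensates it. The following sketch reuses the `processPayment` state and `process payment failed` error names discussed in this section, while the action, function, compensation, and transition names are illustrative; the exact definitions are available in the `order-saga-error-handling.sw.json` file.
+
+.Illustrative Saga step modeled as a workflow state
+[source,json]
+----
+{
+  "name": "processPayment",
+  "type": "operation",
+  "actions": [
+    {
+      "name": "processPaymentAction",
+      "functionRef": {
+        "refName": "processPayment",
+        "arguments": {
+          "orderId": "${ .orderId }"
+        }
+      }
+    }
+  ],
+  "compensatedBy": "CancelPayment",
+  "onErrors": [
+    {
+      "errorRef": "process payment failed",
+      "transition": "ServiceError"
+    }
+  ],
+  "transition": "ProcessShipping"
+}
+----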
@@ -108,7 +108,7 @@ In {product_name} each workflow state must define a compensation action using `c
Errors::
+
--
-In {product_name} errors are identified by a name and can be associated with a workflow state. For example, a `process payment failed` error is associated with the `processPayment` state.
+In {product_name} errors are identified by a name and can be associated with a workflow state. For example, a `process payment failed` error is associated with the `processPayment` state.
Following is an example of error declaration in the workflow definition:
@@ -134,9 +134,9 @@ Following is an example of error declaration in the workflow definition:
}
----
-Once an error occurs during the workflow execution, the associated compensation action is triggered.
+Once an error occurs during the workflow execution, the associated compensation action is triggered.
-An error definition uses the fully qualified class name (FQCN) for Java exceptions that are thrown by functions. In the previous example of error definition, `org.kie.kogito.ServiceException` is thrown by the service calls that are defined as <<#custom-function, Java methods>> in the link:{kogito_sw_examples_url}/serverless-workflow-saga-quarkus/src/main/java/org/kie/kogito/PaymentService.java[`PaymentService.java`] file.
+An error definition uses the fully qualified class name (FQCN) for Java exceptions that are thrown by functions. In the previous example of error definition, `org.kie.kogito.ServiceException` is thrown by the service calls that are defined as <<#custom-function, Java methods>> in the link:{kogito_sw_examples_url}/serverless-workflow-saga-quarkus/src/main/java/org/kie/kogito/PaymentService.java[`PaymentService.java`] file.
[#custom-function]
.Example custom function using a Java class and method
@@ -149,10 +149,10 @@ An error definition uses the fully qualified class name (FQCN) for Java exceptio
}
----
-The function that are throwing errors can be any type of functions, such as REST, OpenAPI, or gRPC. For information about error handling, see xref:serverless-logic:core/understanding-workflow-error-handling.adoc[Error handling in {product_name}].
+The functions that throw errors can be of any type, such as REST, OpenAPI, or gRPC. For more information about error handling, see xref:core/understanding-workflow-error-handling.adoc[Error handling in {product_name}].
--
-The workflow engine controls the execution of the flow and keeps the track of the steps that need to be compensated. Also, the engine ensures that compensated states are executed in reverse order of each completed step.
+The workflow engine controls the execution of the flow and keeps track of the steps that need to be compensated. Also, the engine ensures that the compensation states are executed in the reverse order of the completed steps.
The engine is a stateful, allowing Saga to contain wait states, such as callbacks. After each wait state, the workflow is persisted and can continue once it receives a request or event.
@@ -203,7 +203,7 @@ curl -L -X POST "http://localhost:8080/order_saga_error_workflow" -H 'Content-Ty
}
----
-The response contains the workflow data with nested attributes, which represent the responses from the execution of each step including success or failure.
+The response contains the workflow data with nested attributes, which represent the responses from the execution of each step including success or failure.
In the previous example, the `orderResponse` attribute indicates if the order can be confirmed by the client by initiating the Saga workflow. Therefore, if the value of the `orderResponse` attribute is `success`, then the order can be confirmed, otherwise the order can be canceled.
@@ -282,6 +282,6 @@ When executing the application, you can also verify the log with information rel
== Additional resources
-* xref:serverless-logic:core/understanding-workflow-error-handling.adoc[Error handling in {product_name}]
+* xref:core/understanding-workflow-error-handling.adoc[Error handling in {product_name}]
-include::../../pages/_common-content/report-issue.adoc[]
+include::../../pages/_common-content/report-issue.adoc[]
\ No newline at end of file
diff --git a/modules/serverless-logic/pages/use-cases/timeout-showcase-example.adoc b/modules/serverless-logic/pages/use-cases/timeout-showcase-example.adoc
index 067b89ca..0cf0ae0a 100644
--- a/modules/serverless-logic/pages/use-cases/timeout-showcase-example.adoc
+++ b/modules/serverless-logic/pages/use-cases/timeout-showcase-example.adoc
@@ -1,22 +1,57 @@
-[#timeout-example]
-= Timeout example in {product_name}
+[#timeouts-showcase]
+= Timeouts Showcase in {product_name}
:compat-mode!:
// Metadata:
-:description: Timeout example use case example in Serverless Workflow
+:description: Timeouts showcase in Serverless Workflow
:keywords: kogito, workflow, serverless, timer, timeout
-In the link:{kogito_sw_examples_url}/serverless-workflow-timeouts-showcase[serverless-workflow-timeouts-showcase] you can see an end-to-end example that contains a serverless workflow application with timeouts configured alongside Job Service running on Knative.
+The timeouts showcase is designed to show how to configure and execute workflows that use timeouts in different deployment scenarios.
+While all the scenarios contain the same set of workflows, they are provided as independent example projects to facilitate the execution and understanding of each case.
-There are two workflows that showcase the timeouts usage in the `Callback` and `Switch` states.
+The following workflows are provided:
-== Callback workflow
+* <<workflow_timeouts_workflow>>
+* <<callback_state_timeouts_workflow>>
+* <<switch_state_timeouts_workflow>>
+* <<event_state_timeouts_workflow>>
-It is a simple workflow, where once the execution reaches the callback state it waits for the event `callbackEvent` to arrive and continue the execution.
+[#workflow_timeouts_workflow]
+== `workflow_timeouts` workflow
-.Callback timeout workflow
-image::core/callback-state-timeouts.svg[]
+It is a simple workflow that, rather than configuring a timeout for a particular state, configures a timeout for the whole workflow execution.
+This is done by using the `workflowExecTimeout` property, which defines the maximum workflow execution time. If this time is exceeded and the workflow has not finished, it is automatically canceled.
-.Callback event
+See the xref:core/timeouts-support.adoc#workflow-timeout[workflow timeout definition] for more information.
+
+.workflow_timeouts workflow
+image::use-cases/timeouts-showcase/workflow-timeouts-decorated.svg[]
+
+.Workflow execution timeout definition
+[source,json]
+----
+{
+ "id": "workflow_timeouts",
+ "version": "1.0",
+ "name": "Workflow Timeouts",
+ "description": "Simple workflow to show the workflowExecTimeout working",
+ "start": "PrintStartMessage",
+ "timeouts": {
+ "workflowExecTimeout": "PT1H"
+ }
+...
+}
+----
+
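+Once the application is running, you can create an instance of this workflow through its REST endpoint; the instance is automatically canceled if it is still active after the configured `PT1H` period. The following is only a sketch that assumes the application is reachable at `localhost:8080`, which is typical for a local run; in the cluster scenarios, use the URL of the deployed service instead:
+
+.Creating a workflow_timeouts instance (sketch)
+[source, bash]
+----
+# Assumption: the application is running locally and listening on port 8080.
+curl -X 'POST' 'http://localhost:8080/workflow_timeouts' \
+  -H 'accept: */*' \
+  -H 'Content-Type: application/json' \
+  -d '{}'
+----
+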
+[#callback_state_timeouts_workflow]
+== `callback_state_timeouts` workflow
+
+It is a simple workflow in which, when the execution reaches the `CallbackState` state, an action is executed and the workflow waits for the event `callbackEvent` to arrive in order to continue the execution.
+However, a timeout is configured to set the maximum waiting time for that event.
+
+.callback_state_timeouts workflow
+image::use-cases/timeouts-showcase/callback-state-timeouts-decorated.svg[]
+
+.Callback event definition
[source, json]
----
{
@@ -26,218 +61,254 @@ image::core/callback-state-timeouts.svg[]
}
----
-The timeout is configured with a maximum time 30 seconds to be waited by the workflow to receive the callbackEvent, in case it does not arrive in time, the execution moves, and the eventData variable remains null.
-See the xref:core/timeouts-support.adoc#callback-state[callback state definition].
+.CallbackState definition
+[source,json]
+----
+{
+ "name": "CallbackState",
+ "type": "callback",
+ "action": {
+ "name": "callbackAction",
+ "functionRef": {
+ "refName": "callbackFunction",
+ "arguments": {
+ "input": "${\"callback-state-timeouts: \" + $WORKFLOW.instanceId + \" has executed the callbackFunction.\"}"
+ }
+ }
+ },
+ "eventRef": "callbackEvent",
+ "transition": "CheckEventArrival",
+ "onErrors": [
+ {
+ "errorRef": "callbackError",
+ "transition": "FinalizeWithError"
+ }
+ ],
+ "timeouts": {
+ "eventTimeout": "PT30S"
+ }
+}
+----
+
+The timeout is configured with a duration of 30 seconds. If no event arrives during this time, the flow execution moves to the next state and the workflow's data remains unchanged.
+On the other hand, if the event arrives, the event payload is merged into the workflow's data, and thus the `eventData` property of the workflow's data contains the information carried by the event payload.
+Using this simple configuration strategy, the workflow can collect the event information and use it, for example, to determine the path to take in the next state.
-== Switch workflow
+See the xref:core/timeouts-support.adoc#callback-state-timeout[callback state definition] for more information.
-The switch example is similar to the callback but once the execution reaches the state, it waits for one of the two configured events, `visaDeniedEvent` or `visaApprovedEvent`, to arrive, see the <>.
+For more information about how the incoming event information can be merged into the workflow's data, see link:{spec_doc_url}#Event-data-filters[Event data filters].
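+
+As an illustration only, when the application consumes events over HTTP (for example, through a Knative broker or an HTTP eventing add-on), the callback can be completed by sending a CloudEvent in binary mode that matches the `callbackEvent` definition and is correlated to the running instance. The endpoint, the event `type` value, and the correlation header in the following sketch are assumptions that you must adapt to the event definition and the deployment scenario you are using:
+
+.Sending the callback event as a CloudEvent (hypothetical sketch)
+[source, bash]
+----
+# Hypothetical values: replace <event_type> with the `type` declared for callbackEvent,
+# <instance_id> with the id of the running workflow instance, and the URL with the broker
+# or application endpoint used by your deployment scenario.
+curl -X POST 'http://localhost:8080/' \
+  -H 'Content-Type: application/json' \
+  -H 'ce-specversion: 1.0' \
+  -H 'ce-id: 1' \
+  -H 'ce-source: /manual/test' \
+  -H 'ce-type: <event_type>' \
+  -H 'ce-kogitoprocrefid: <instance_id>' \
+  -d '{"message": "this payload is merged into the workflow data"}'
+----
+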
-If any of the configured events arrives before the timeout is overdue, the workflow execution moves to the next state defined in the corresponding `transition`.
+[#switch_state_timeouts_workflow]
+== `switch_state_timeouts` workflow
-If none of the events arrive before the timeout is overdue, the workflow then moves to the state defined in `defaultCondition` transition.
+This workflow is similar to the `callback_state_timeouts` workflow, but when the execution reaches the state `ChooseOneEvent`, it waits for one of the two configured events, `visaDeniedEvent` or `visaApprovedEvent`, to arrive.
-.Switch timeout workflow
-image::core/switch-state-timeouts.svg[]
+If any of the configured events arrives before the timeout expires, the workflow execution moves to the next state defined in the corresponding `transition`.
-== Event workflow
+If none of the events arrives before the timeout expires, the workflow execution moves to the state defined in the `defaultCondition` transition.
-The event example is similar to the switch one but once the execution reaches the state, it waits for one of the configured events, `event1` or `event2`, to arrive, see the <>.
+See the xref:core/timeouts-support.adoc#switch-state-timeout[switch state definition] for more information.
-If none of the configured events arrive before the timeout is overdue, the workflow execution moves to the next state defined in the corresponding `transition` property, skipping the events that were not received in time together with actions configured for them.
+.switch_state_timeouts workflow
+image::use-cases/timeouts-showcase/switch-state-timeouts-decorated.svg[]
-If one of the events arrives before the timeout is overdue, the workflow then moves to the state defined in `transition`, executing the corresponding event that has arrived in the state definition together with actions defined for it.
+.ChooseOneEvent definition
+[source,json]
+----
+ {
+ "name": "ChooseOnEvent",
+ "type": "switch",
+ "eventConditions": [
+ {
+ "eventRef": "visaApprovedEvent",
+ "transition": "ApprovedVisa"
+ },
+ {
+ "eventRef": "visaDeniedEvent",
+ "transition": "DeniedVisa"
+ }
+ ],
+ "defaultCondition": {
+ "transition": "HandleNoVisaDecision"
+ },
+ "timeouts": {
+ "eventTimeout": "PT30S"
+ }
+ }
+----
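+
+You can verify the timeout behavior of this workflow by listing the active instances through its REST endpoint. The following sketch assumes the service URL of your deployment (for example, `localhost:8080` in a local run); an instance that received no event is completed through the `defaultCondition` path once the 30 seconds elapse, so it no longer appears in the list:
+
+.Checking the active switch_state_timeouts instances (sketch)
+[source, bash]
+----
+# Right after creating an instance, it is listed as active:
+curl -X 'GET' 'http://localhost:8080/switch_state_timeouts'
+# [{"id":"...","workflowdata":{}}]
+
+# After the 30 seconds timeout, the instance is completed and the list is empty:
+curl -X 'GET' 'http://localhost:8080/switch_state_timeouts'
+# []
+----
+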
-.Event timeout workflow
-image::core/event-state-timeouts.svg[]
+[#event_state_timeouts_workflow]
+== `event_state_timeouts` workflow
-== Running the example
+This workflow is similar to the `switch_state_timeouts`, but when the execution reaches the state `WaitForEvent`, it waits for one of the configured events, `event1` or `event2`, to arrive.
+Each event has a number of configured actions to execute, but unlike the switch state, only one possible `transition` exists.
-To run the example you must have access to a kubernetes cluster running with Knative configured.
+If none of the configured events arrives before the timeout expires, the workflow execution moves to the next state defined in the `transition` property, skipping the events that were not received in time together with the actions configured for them.
-For simplicity, the example uses minikube, you can follow the steps described in the example's link:{kogito_sw_examples_url}/serverless-workflow-timeouts-showcase[readme].
+If one of the events arrives before the timeout expires, the workflow executes the corresponding actions and finally moves to the state defined in `transition`.
[NOTE]
====
-All the descriptor files used to deploy the example infrastructure are present in the example.
+The semantics of this state might vary depending on the `exclusive` flag. In this example, the default value of `true` is applied, and thus the workflow transitions as soon as the first event arrives.
====
-The database and Job Service deployment files are located under `/kubernetes` folder.
-
-The descriptors related to the workflow application are generated after the build under `/target/kubernetes`.
-
-The following diagram shows the example's architecture when it is deployed in the Kubernetes + Knative infrastructure.
+See the xref:core/timeouts-support.adoc#event-state-timeout[event state definition] for more information.
-.Knative Workflow with Job Service architecture
-image::core/jobs-service-knative-architecture.png[]
-
-== Deploying the database
-The workflow application and Job Service uses PostgreSQL as the persistence backend to store information about the workflow instances and jobs, respectively.
-In the example you can deploy a single database instance to be used on both, in a production environment is recommended to have independent database instances.
+.event_state_timeouts workflow
+image::use-cases/timeouts-showcase/event-state-timeouts-decorated.svg[]
-To run PostgreSQL you need to apply the following on the cluster:
-.Deploying the database
-[source, shell]
+.WaitForEvent definition
+[source,json]
----
-kubectl apply -f kubernetes/timeouts-showcase-database.yml
+ {
+ "name": "WaitForEvent",
+ "type": "event",
+ "onEvents": [
+ {
+ "eventRefs": [
+ "event1"
+ ],
+ "eventDataFilter": {
+ "data": "${ \"The event1 was received.\" }",
+ "toStateData": "${ .exitMessage }"
+ },
+ "actions": [
+ {
+ "name": "printAfterEvent1",
+ "functionRef": {
+ "refName": "systemOut",
+ "arguments": {
+ "message": "${\"event-state-timeouts: \" + $WORKFLOW.instanceId + \" executing actions for event1.\"}"
+ }
+ }
+ }
+ ]
+ },
+ {
+ "eventRefs": [
+ "event2"
+ ],
+ "eventDataFilter": {
+ "data": "${ \"The event2 was received.\" }",
+ "toStateData": "${ .exitMessage }"
+ },
+ "actions": [
+ {
+ "name": "printAfterEvent2",
+ "functionRef": {
+ "refName": "systemOut",
+ "arguments": {
+ "message": "${\"event-state-timeouts: \" + $WORKFLOW.instanceId + \" executing actions for event2.\"}"
+ }
+ }
+ }
+ ]
+ }
+ ],
+ "timeouts": {
+ "eventTimeout": "PT30S"
+ },
+ "transition": "PrintExitMessage"
+ }
----
-.After executing the command, you will see an output like this:
-[source, shell]
-----
-secret/timeouts-showcase-database created
-deployment.apps/timeouts-showcase-database created
-service/timeouts-showcase-database created
-----
+== Executing the workflows
-[#job-service-deploy]
-== Deploying Job Service
-.Deploying Job Service
-[source, shell]
-----
-kubectl apply -f kubernetes/jobs-service-postgresql.yml
-----
+To execute the workflows, you can use any of the available deployment scenarios:
-.After executing the command, you will see an output like this:
-[source, shell]
-----
-service/jobs-service-postgresql created
-deployment.apps/jobs-service-postgresql created
-trigger.eventing.knative.dev/jobs-service-postgresql-create-job-trigger created
-trigger.eventing.knative.dev/jobs-service-postgresql-cancel-job-trigger created
-sinkbinding.sources.knative.dev/jobs-service-postgresql-sb created
-----
+* <<execute-operator-dev-profile>>
-== Deploying the timeout showcase workflow
+* <<execute-quarkus-project-embedded-services>>
-You need to build the workflow with the `knative` maven profile, then the descriptor files are generated under the `target/kubernetes` folder, and the image is pushed in the container registry.
+* <<execute-quarkus-project-standalone-services>>
-.Building the timeout workflow showcase for Knative
-[source, shell]
-----
-mvn clean install -Pknative
-----
+[#execute-operator-dev-profile]
+=== {product_name} Operator Dev Profile
-.Deploying the timeout workflow showcase in Knative
-[source, shell]
-----
-kubectl apply -f target/kubernetes/knative.yml
-kubectl apply -f target/kubernetes/kogito.yml
-----
+When you work with the xref:cloud/operator/developing-workflows.adoc[{operator_name} Dev Profile], the operator will automatically provision an execution environment that contains an embedded {job_service_xref}[job service] instance, as well as an instance of the {data_index_xref}[data index service].
+Thus, there is no need for additional configuration when you use timeouts.
+
+To execute the workflows, complete the following steps:
+
+In a command terminal, clone the `kogito-examples` repository, navigate to the cloned directory, and follow https://github.com/kiegroup/kogito-examples/tree/main/serverless-workflow-examples/serverless-workflow-timeouts-showcase-operator-devprofile/README.md[these steps]:
-.After executing the commands you will see an output like this:
-[source, shell]
+[source, bash]
----
-service.serving.knative.dev/timeouts-showcase created
+git clone https://github.com/kiegroup/kogito-examples.git
-trigger.eventing.knative.dev/visa-denied-event-type-trigger-timeouts-showcase created
-trigger.eventing.knative.dev/visa-approved-event-type-trigger-timeouts-showcase created
-trigger.eventing.knative.dev/callback-event-type-trigger-timeouts-showcase created
-sinkbinding.sources.knative.dev/sb-timeouts-showcase created
+cd kogito-examples/serverless-workflow-examples/serverless-workflow-timeouts-showcase-operator-devprofile
----
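+
+After you deploy the workflows by following the README steps, you can check that they were created and that the corresponding pods are running. The resource kind in the following sketch is an assumption that depends on the operator version you installed, so adapt it accordingly:
+
+.Checking the deployed workflows (sketch)
+[source, bash]
+----
+# Assumption: recent operator versions register each workflow as a SonataFlow custom resource.
+kubectl get sonataflow -n <your_namespace>
+# The workflow pods are expected to reach the Running state once the Dev Profile environment is provisioned.
+kubectl get pods -n <your_namespace>
+----
+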
-== Creating a workflow instance
+[#execute-quarkus-project-embedded-services]
+=== Quarkus Workflow Project with embedded services
-To create a workflow you can interact with the workflow using the provided REST APIs, in the example provide a test Web UI to make it easy to test.
+Similar to the <<#execute-operator-dev-profile, {operator_name} Dev Profile>>, this scenario shows how to configure the embedded {job_service_xref}[job service] and {data_index_xref}[data index service] when you work with a Quarkus Workflow project. It is also intended for development purposes.
-First, you need to get the service URL on the cluster.
+In a command terminal, clone the `kogito-examples` repository, navigate to the cloned directory, and follow link:{kogito_sw_examples_url}/serverless-workflow-timeouts-showcase-embedded/README.md[these steps]:
-.Getting the workflow service URL on the cluster
-[source, shell]
-----
-kn service list | grep timeouts-showcase
+[source, bash]
----
+git clone https://github.com/kiegroup/kogito-examples.git
-.Service URL in the response, similar to this.
-[source, shell]
-----
-NAME URL LATEST AGE CONDITIONS READY REASON
-timeouts-showcase http://timeouts-showcase.default.10.105.86.217.sslip.io timeouts-showcase-00001 3m50s 3 OK / 3 True
+cd kogito-examples/serverless-workflow-examples/serverless-workflow-timeouts-showcase-embedded
----
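+
+Once inside the project directory, a project of this kind can typically be started locally in Quarkus dev mode; this is only a generic sketch, and the example's README remains the reference for the exact commands and prerequisites:
+
+.Running the embedded showcase in dev mode (sketch)
+[source, bash]
+----
+# Generic Quarkus dev mode run; check the example's README for scenario-specific options.
+mvn clean quarkus:dev
+----
+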
-== Using the showcase UI
-The example Web UI is handy to interact with the workflow, you just need to open in the browser the URL you got from the previous step.
+[#execute-quarkus-project-standalone-services]
+=== Quarkus Workflow Project with standalone services
-.Timeout workflow showcase UI
-image::core/timeout-switch-wokflow-ui.png[]
+This is the most complex scenario and the closest to a production deployment. In this case, the workflows, the {job_service_xref}[job service], the {data_index_xref}[data index service], and the database are deployed as standalone services in the Kubernetes or Knative cluster.
+Additionally, the communication from the workflows to the {job_service_xref}[job service], and from the {job_service_xref}[job service] to the {data_index_xref}[data index service], is resolved via the Knative Eventing system.
-You can create new workflow instances and interact with them to complete, or simply wait for the timeout to be triggered to check it's working.
-More details on the link:{kogito_sw_examples_url}/serverless-workflow-timeouts-showcase#timeouts-showcase-ui[readme].
+[NOTE]
+====
+By using the Knative Eventing system, the underlying low-level communication details are transparent to the integration.
+====
-== Using REST APIs
-You can test the workflows using the REST APIs, in fact they are the same used by the Web UI in both workflows.
+==== Architecture
-* Callback
+The following diagram shows the architecture for this use case:
-.Creating a callback workflow with timeout
-[source, shell]
-----
-curl -X 'POST' \
-'http://timeouts-showcase.default.10.105.86.217.sslip.io/callback_state_timeouts' \
--H 'accept: */*' \
--H 'Content-Type: application/json' \
--d '{}'
-----
+. Every time a workflow needs to schedule a timer for a given timeout, a cloud event is sent to the {job_service_xref}#integration-with-the-workflows[job service] for that purpose.
+. When a timer is overdue, a REST call is executed to notify the workflow, which then must proceed according to the semantics of the given state.
+. Workflow and job status changes are propagated to the {data_index_xref}[data index service] via cloud events.
-* Switch
+.Knative Workflow with Job Service architecture
+image::use-cases/timeouts-showcase/timeouts-showcase-extended-architecture.png[]
-.Creating a Switch workflow with timeout
-[source, shell]
-----
-curl -X 'POST' \
-'http://timeouts-showcase.default.10.105.86.217.sslip.io/callback_state_timeouts' \
--H 'accept: */*' \
--H 'Content-Type: application/json' \
--d '{}'
-----
+* *timeouts-showcase-extended:* The Quarkus Workflow project that contains the workflows. It must be built with Maven and deployed into the Kubernetes cluster.
-* Event
+* *jobs-service-postgresql:* The job service that is deployed into the Kubernetes cluster.
-.Creating an event workflow with timeout
-[source, shell]
-----
-curl -X 'POST' \
-'http://timeouts-showcase.default.10.105.86.217.sslip.io/event_state_timeouts' \
--H 'accept: */*' \
--H 'Content-Type: application/json' \
--d '{}'
-----
+* *data-index-service-postgresql:* The data index service that is deployed into the Kubernetes cluster.
-* Checking whether the workflow instance was created
+* *timeouts-showcase-database:* The PostgreSQL instance that is deployed into the Kubernetes cluster.
-.Getting the created workflow instance
-[source, shell]
-----
-curl -X 'GET' 'http://timeouts-showcase.default.10.105.86.217.sslip.io/switch_state_timeouts'
-----
+[NOTE]
+====
+For simplification purposes, a single database instance is used by both services to store the information about the workflow instances and the timers. However, in a production environment it is recommended to use independent database instances.
+====
-The command will produce an output like this, which indicates that the process is waiting for an event to arrive.
+==== Running the example
-.Response with the created instance
-[source, shell]
-----
-[{"id":"2e8e1930-9bae-4d60-b364-6fbd61128f51","workflowdata":{}}]
-----
+To execute the workflows, complete the following steps:
-* Checking the timeout was executed after 30 seconds:
+In a command terminal, clone the `kogito-examples` repository, navigate to the cloned directory, and follow link:{kogito_sw_examples_url}/serverless-workflow-timeouts-showcase-extended/README.md[these steps]:
-.Getting the created workflow instance after 30 seconds
-[source, shell]
-----
-curl -X 'GET' 'http://timeouts-showcase.default.10.105.86.217.sslip.io/switch_state_timeouts'
-[]
+[source, bash]
----
+git clone https://github.com/kiegroup/kogito-examples.git
-As you can see there are no active workflow instances, indicating the timeout was executed and the created instance was completed.
+cd kogito-examples/serverless-workflow-examples/serverless-workflow-timeouts-showcase-extended
+----
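+
+As a rough sketch of the build-and-apply flow used by this kind of project, you first build the application and container image with the deployment Maven profile and then apply the generated descriptors to the cluster. The profile name and descriptor files below reflect the flow used by previous versions of this showcase and are assumptions here; the example's README defines the exact commands for the extended project:
+
+.Building and deploying the extended showcase (sketch)
+[source, bash]
+----
+# Assumption: the project provides a Maven profile that generates the Knative descriptors
+# under target/kubernetes; check the README for the exact profile and file names.
+mvn clean install -Pknative
+kubectl apply -f target/kubernetes/knative.yml
+kubectl apply -f target/kubernetes/kogito.yml
+----
+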
== Additional resources
-* xref:core/timeouts-support.adoc[Timeout support in {product_name}]
+* xref:core/timeouts-support.adoc[Timeouts support in {product_name}]
include::../../pages/_common-content/report-issue.adoc[]
\ No newline at end of file