diff --git a/.gitignore b/.gitignore
index f0970df67..b61417303 100644
--- a/.gitignore
+++ b/.gitignore
@@ -55,3 +55,7 @@ scripts/*.csv
 # docker
 docker-compose.prod.yml
 .volumes
+
+# helm chart dependencies
+**/charts/*.tgz
+
diff --git a/.prettierignore b/.prettierignore
index 6b540313c..28f02a097 100644
--- a/.prettierignore
+++ b/.prettierignore
@@ -3,3 +3,6 @@ dist
 coverage
 tests
 .volumes
+
+# Ignore helm templates and yamls https://github.com/prettier/prettier/issues/6517
+charts/
diff --git a/charts/hyperdx/.helmignore b/charts/hyperdx/.helmignore
new file mode 100644
index 000000000..0e8a0eb36
--- /dev/null
+++ b/charts/hyperdx/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/hyperdx/Chart.lock b/charts/hyperdx/Chart.lock
new file mode 100644
index 000000000..75001a598
--- /dev/null
+++ b/charts/hyperdx/Chart.lock
@@ -0,0 +1,18 @@
+dependencies:
+- name: common
+  repository: oci://registry-1.docker.io/bitnamicharts
+  version: 2.14.1
+- name: redis
+  repository: oci://registry-1.docker.io/bitnamicharts
+  version: 18.12.1
+- name: clickhouse
+  repository: oci://registry-1.docker.io/bitnamicharts
+  version: 4.5.4
+- name: mongodb
+  repository: oci://registry-1.docker.io/bitnamicharts
+  version: 14.8.2
+- name: kong
+  repository: oci://registry-1.docker.io/bitnamicharts
+  version: 10.4.2
+digest: sha256:d077b3408144c1ba7a0d2127a024aee5579b46d3f75d2fa721e00ec7fa1dc176
+generated: "2024-02-12T01:36:17.450446-08:00"
diff --git a/charts/hyperdx/Chart.yaml b/charts/hyperdx/Chart.yaml
new file mode 100644
index 000000000..b71501d3d
--- /dev/null
+++ b/charts/hyperdx/Chart.yaml
@@ -0,0 +1,46 @@
+apiVersion: v2
+name: hyperdx
+description: A Helm chart for hyperdx
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: '1.6.0'
+
+home: https://hyperdx.io
+
+dependencies:
+  - name: common
+    repository: oci://registry-1.docker.io/bitnamicharts
+    tags:
+      - bitnami-common
+    version: 2.x.x
+  - name: redis
+    repository: oci://registry-1.docker.io/bitnamicharts
+    tags:
+      - redis
+    version: 18.x.x
+    condition: redis.enabled
+  - name: clickhouse
+    repository: oci://registry-1.docker.io/bitnamicharts
+    tags:
+      - clickhouse
+    version: 4.x.x
+    condition: clickhouse.enabled
+  - name: mongodb
+    repository: oci://registry-1.docker.io/bitnamicharts
+    tags:
+      - mongodb
+    version: 14.x.x
+    condition: mongodb.enabled
+  - condition: kong.enabled
+    name: kong
+    repository: oci://registry-1.docker.io/bitnamicharts
+    version: 10.x.x
diff --git a/charts/hyperdx/README.md b/charts/hyperdx/README.md
new file mode 100644
index 000000000..652208295
--- /dev/null
+++ b/charts/hyperdx/README.md
@@ -0,0 +1,1029 @@
+
+# HyperDX
+
+[HyperDX](https://hyperdx.io) helps engineers figure out why production is
+broken faster by centralizing and correlating logs, metrics, traces, exceptions
+and session replays in one place. An open source and developer-friendly
+alternative to Datadog and New Relic.
+
+- 🕵️ Correlate end to end, go from browser session replay to logs and traces in
+  just a few clicks
+- 🔥 Blazing fast performance powered by ClickHouse
+- 🔍 Intuitive full-text search and property search syntax (ex. `level:err`)
+- 🤖 Automatically cluster event patterns from billions of events
+- 📈 Dashboard high cardinality events without a complex query language
+- 🔔 Set up alerts in just a few clicks
+- `{` Automatic JSON/structured log parsing
+- 🔭 OpenTelemetry native
+
+# Prerequisites
+
+- Kubernetes 1.23+
+- Helm 3.8.0+
+- Persistent volume provisioner support in the underlying infrastructure
+
+# Deployment
+
+This chart is still in development and requires a few involved steps. If you wish to deploy HyperDX on a Kubernetes cluster for use outside of a local dev setup, you will need to build a custom Docker image for the dashboard.
+
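+Independently of the dashboard image, the backing services (Redis, ClickHouse, MongoDB and the Kong API gateway) are pulled in as conditional Bitnami subcharts (see `Chart.yaml`), each gated by its own `*.enabled` flag. A rough sketch of switching one off at install time (not a complete recipe: pointing HyperDX at an externally managed service also requires setting the corresponding connection values, which are not shown here):
+
+```console
+# Vendor the subcharts pinned in Chart.lock into charts/hyperdx/charts/
+helm dependency build
+
+# Example: deploy without the bundled MongoDB and bring your own instance instead
+helm upgrade --install hyperdx . --namespace hyperdx --create-namespace \
+  --set mongodb.enabled=false
+```
+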
+    FYI
+
+    - For the installation to be useful outside of a local playground, you need to build your own dashboard image: the dashboard reads its configuration from build-time environment variables, but in the pod those variables are only available at runtime on the server side. (TODO: fix this in the dashboard so the same image can be used in [any environment](https://github.com/vercel/next.js/discussions/17641))
+    - The current latest release is `ghcr.io/hyperdxio/hyperdx:1.6.0-app` and is built with
+      ```
+      PORT=8080
+      SERVER_URL=localhost:8000
+      OTEL_EXPORTER_OTLP_ENDPOINT=localhost:4317
+      ```
+      so this image only works in a local dev environment, and only if the api, collector, and dashboard are port-forwarded on exactly those ports (see the sketch below). If you want that setup, running `helm dependency build && helm install hyperdx . -n hyperdx --set kong.enabled=false` and following the console instructions afterwards is sufficient.
+    - An API gateway is required to host the api, collector, and dashboard on the same domain; otherwise you run into CORS issues, because the dashboard makes requests from the browser directly to the api and collector without going through the Next.js server side.
+
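+A minimal sketch of that local playground setup, assuming the release is named `hyperdx` and installed into the `hyperdx` namespace (the actual service names are printed in the post-install console instructions; `hyperdx-app`, `hyperdx-api` and `hyperdx-otel-collector` below are placeholders):
+
+```console
+# Dashboard -> localhost:8080, API -> localhost:8000, OTLP gRPC -> localhost:4317,
+# matching the build-time values baked into the stock image above
+kubectl port-forward -n hyperdx svc/hyperdx-app 8080:80 &
+kubectl port-forward -n hyperdx svc/hyperdx-api 8000:80 &
+kubectl port-forward -n hyperdx svc/hyperdx-otel-collector 4317:4317 &
+```
+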
+From the root directory of the project run
+
+```bash
+DOMAIN=https://hyperdx.mydomain.com
+IMAGE_REGISTRY=docker.io
+IMAGE_REPO=myuniquereponame/hyperdx
+IMAGE_TAG=1.6.1-app
+docker build \
+  --build-arg CODE_VERSION=1.6.1 \
+  --build-arg OTEL_EXPORTER_OTLP_ENDPOINT=$DOMAIN/collector \
+  --build-arg OTEL_SERVICE_NAME="hdx-oss-app" \
+  --build-arg PORT=8080 \
+  --build-arg SERVER_URL=$DOMAIN/api/v1/ \
+  . -f ./packages/app/Dockerfile -t $IMAGE_REGISTRY/$IMAGE_REPO:$IMAGE_TAG \
+  --target prod --platform linux/amd64 --push
+```
+```console
+helm dependency build
+helm upgrade --install hyperdx . --namespace hyperdx --create-namespace \
+  --set publicUrl=$DOMAIN \
+  --set app.image.registry=$IMAGE_REGISTRY \
+  --set app.image.repository=$IMAGE_REPO \
+  --set app.image.tag=$IMAGE_TAG
+```
+
+## Parameters
+
+### Global parameters
+
+| Name                      | Description                                      | Value |
+| ------------------------- | ------------------------------------------------ | ----- |
+| `global.imageRegistry`    | Global Docker image registry                     | `""`  |
+| `global.imagePullSecrets` | Global Docker registry secret names as an array  | `[]`  |
+| `global.storageClass`     | Global StorageClass for Persistent Volume(s)     | `""`  |
+
+### Common parameters
+
+| Name                     | Description                                                                               | Value           |
+| ------------------------ | ----------------------------------------------------------------------------------------- | --------------- |
+| `kubeVersion`            | Override Kubernetes version                                                               | `""`            |
+| `nameOverride`           | String to partially override common.names.name                                            | `""`            |
+| `fullnameOverride`       | String to fully override common.names.fullname                                            | `""`            |
+| `namespaceOverride`      | String to fully override common.names.namespace                                           | `""`            |
+| `commonLabels`           | Labels to add to all deployed objects                                                     | `{}`            |
+| `commonAnnotations`      | Annotations to add to all deployed objects                                                | `{}`            |
+| `clusterDomain`          | Kubernetes cluster domain name                                                            | `cluster.local` |
+| `extraDeploy`            | Array of extra objects to deploy with the release                                         | `[]`            |
+| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden)   | `false`         |
+| `diagnosticMode.command` | Command to override all containers in the deployment                                      | `["sleep"]`     |
+| `diagnosticMode.args`    | Args to override all containers in the deployment                                         | `["infinity"]`  |
+
+### Hyperdx Common parameters
+
+| Name        | Description                                                                                        | Value  |
+| ----------- | --------------------------------------------------------------------------------------------------- | ------ |
+| `apiKey`    | the Hyperdx api key. TODO: If not specified, use the ingestion api key for self-instrumentation.   | `""`   |
+| `logLevel`  | the logging level across all Hyperdx pipeline components.
Allowed values: `error`, `warn`, `info`, `http`, `verbose`, `debug`, `silly` | `info` | +| `publicUrl` | Type A DNS record that points to the the LoadBalancer service of the API gateway (Kong) | `""` | + +### Hyperdx dashboard app Parameters + +| Name | Description | Value | +| ------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `app.enabled` | Enable hyperdx dashboard app | `true` | +| `app.replicaCount` | Number of Hyperdx app replicas to deploy | `1` | +| `app.defaultConfig` | Hyperdx app default configuration | `""` | +| `app.extraConfig` | Hyperdx app extra configuration | `{}` | +| `app.existingConfigmap` | The name of an existing ConfigMap with the default configuration | `""` | +| `app.extraConfigExistingConfigmap` | The name of an existing ConfigMap with extra configuration | `""` | +| `app.image.registry` | app image registry | `ghcr.io` | +| `app.image.repository` | app image repository | `hyperdxio/hyperdx` | +| `app.image.digest` | app image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) | `""` | +| `app.image.pullPolicy` | app image pull policy | `IfNotPresent` | +| `app.image.pullSecrets` | app image pull secrets | `[]` | +| `app.containerPorts.http` | Hyperdx dashboard app HTTP container port | `8080` | +| `app.livenessProbe.enabled` | Enable livenessProbe on Hyperdx app containers | `true` | +| `app.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `app.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `app.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `app.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `app.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `app.readinessProbe.enabled` | Enable readinessProbe on Hyperdx app containers | `true` | +| `app.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `app.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `app.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `app.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `app.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `app.startupProbe.enabled` | Enable startupProbe on Hyperdx app containers | `false` | +| `app.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `app.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `app.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `app.startupProbe.failureThreshold` | Failure threshold for startupProbe | `6` | +| `app.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `app.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `app.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `app.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `app.resources.limits` | The resources limits for the Hyperdx app containers | `{}` | +| `app.resources.requests` | The requested resources for the Hyperdx app containers | `{}` | +| `app.podSecurityContext.enabled` 
| Enabled Hyperdx app pods' Security Context | `true` | +| `app.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `app.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `app.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `app.podSecurityContext.fsGroup` | Set Hyperdx app pod's Security Context fsGroup | `1001` | +| `app.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `app.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` | +| `app.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `app.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `app.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `app.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` | +| `app.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `app.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `app.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `app.command` | Override default container command (useful when using custom images) | `[]` | +| `app.args` | Override default container args (useful when using custom images) | `[]` | +| `app.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `app.hostAliases` | Hyperdx app pods host aliases | `[]` | +| `app.podLabels` | Extra labels for Hyperdx app pods | `{}` | +| `app.podAnnotations` | Annotations for Hyperdx app pods | `{}` | +| `app.podAffinityPreset` | Pod affinity preset. Ignored if `app.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `app.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `app.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `app.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `app.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `app.nodeAffinityPreset.key` | Node label key to match. Ignored if `app.affinity` is set | `""` | +| `app.nodeAffinityPreset.values` | Node label values to match. Ignored if `app.affinity` is set | `[]` | +| `app.affinity` | Affinity for Hyperdx app pods assignment | `{}` | +| `app.nodeSelector` | Node labels for Hyperdx app pods assignment | `{}` | +| `app.tolerations` | Tolerations for Hyperdx app pods assignment | `[]` | +| `app.updateStrategy.type` | Hyperdx app statefulset strategy type | `RollingUpdate` | +| `app.priorityClassName` | Hyperdx app pods' priorityClassName | `""` | +| `app.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | +| `app.schedulerName` | Name of the k8s scheduler (other than default) for Hyperdx app pods | `""` | +| `app.terminationGracePeriodSeconds` | Seconds Redmine pod needs to terminate gracefully | `""` | +| `app.lifecycleHooks` | for the Hyperdx app container(s) to automate configuration before or after startup | `{}` | +| `app.extraEnvVars` | Array with extra environment variables to add to Hyperdx app nodes | `[]` | +| `app.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Hyperdx app nodes | `""` | +| `app.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Hyperdx app nodes | `""` | +| `app.extraVolumes` | Optionally specify extra list of additional volumes for the Hyperdx app pod(s) | `[]` | +| `app.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Hyperdx app container(s) | `[]` | +| `app.sidecars` | Add additional sidecar containers to the Hyperdx app pod(s) | `[]` | +| `app.initContainers` | Add additional init containers to the Hyperdx app pod(s) | `[]` | + +### Hyperdx app Traffic Exposure Parameters + +| Name | Description | Value | +| -------------------------------------- | -------------------------------------------------------------------------------------- | ----------- | +| `app.service.type` | Hyperdx app service type | `ClusterIP` | +| `app.service.ports.http` | Hyperdx app service HTTP port | `80` | +| `app.service.nodePorts.http` | Node port for HTTP | `""` | +| `app.service.clusterIP` | Hyperdx app service Cluster IP | `""` | +| `app.service.loadBalancerIP` | Hyperdx app service Load Balancer IP | `""` | +| `app.service.loadBalancerSourceRanges` | Hyperdx app service Load Balancer sources | `[]` | +| `app.service.externalTrafficPolicy` | Hyperdx app service external traffic policy | `Cluster` | +| `app.service.annotations` | Additional custom annotations for Hyperdx app service | `{}` | +| `app.service.extraPorts` | Extra ports to expose in Hyperdx app service (normally used with the `sidecars` value) | `[]` | +| `app.service.sessionAffinity` | Control where app requests go, to the same pod or round-robin | `None` | +| `app.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | + +### Hyperdx api Parameters + +| Name | Description | Value | +| ------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `api.enabled` | Enable Hyperdx api | `true` | +| `api.replicaCount` | Number of Hyperdx api replicas to deploy | `1` | +| `api.defaultConfig` | Default configuration for the Hyperdx api service | `""` | +| `api.extraConfig` | Extra configuration for the Hyperdx api service | `{}` | +| `api.existingConfigmap` | The name of an existing ConfigMap with the default configuration | `""` | +| `api.extraConfigExistingConfigmap` | The name of an existing ConfigMap with extra configuration | `""` | +| `api.image.registry` | api image registry | `ghcr.io` | +| `api.image.repository` | api image repository | `hyperdxio/hyperdx` | +| `api.image.digest` | api image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) | `""` | +| `api.image.pullPolicy` | api image pull policy | `IfNotPresent` | +| `api.image.pullSecrets` | api image pull secrets | `[]` | +| `api.containerPorts.http` | Hyperdx api HTTP container port | `8000` | +| `api.livenessProbe.enabled` | Enable livenessProbe on Hyperdx api containers | `true` | +| `api.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `api.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `api.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `api.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `api.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `api.readinessProbe.enabled` | Enable readinessProbe on Hyperdx api containers | `true` | +| `api.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `api.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `api.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `api.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `api.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `api.startupProbe.enabled` | Enable startupProbe on Hyperdx api containers | `false` | +| `api.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `api.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `api.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `api.startupProbe.failureThreshold` | Failure threshold for startupProbe | `6` | +| `api.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `api.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `api.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `api.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `api.resources.limits` | The resources limits for the Hyperdx api containers | `{}` | +| `api.resources.requests` | The requested resources for the Hyperdx api containers | `{}` | +| `api.podSecurityContext.enabled` | Enabled Hyperdx api pods' Security Context | `true` | +| `api.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `api.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `api.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `api.podSecurityContext.fsGroup` | Set Hyperdx api pod's Security Context fsGroup | `1001` | +| `api.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `api.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` | +| `api.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `api.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `api.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `api.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` | +| `api.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| 
`api.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `api.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `api.command` | Override default container command (useful when using custom images) | `[]` | +| `api.args` | Override default container args (useful when using custom images) | `[]` | +| `api.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `api.hostAliases` | Hyperdx api pods host aliases | `[]` | +| `api.podLabels` | Extra labels for Hyperdx api pods | `{}` | +| `api.podAnnotations` | Annotations for Hyperdx api pods | `{}` | +| `api.podAffinityPreset` | Pod affinity preset. Ignored if `api.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `api.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `api.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `api.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `api.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `api.nodeAffinityPreset.key` | Node label key to match. Ignored if `api.affinity` is set | `""` | +| `api.nodeAffinityPreset.values` | Node label values to match. Ignored if `api.affinity` is set | `[]` | +| `api.affinity` | Affinity for Hyperdx api pods assignment | `{}` | +| `api.nodeSelector` | Node labels for Hyperdx api pods assignment | `{}` | +| `api.tolerations` | Tolerations for Hyperdx api pods assignment | `[]` | +| `api.updateStrategy.type` | Hyperdx api statefulset strategy type | `RollingUpdate` | +| `api.priorityClassName` | Hyperdx api pods' priorityClassName | `""` | +| `api.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | +| `api.schedulerName` | Name of the k8s scheduler (other than default) for Hyperdx api pods | `""` | +| `api.terminationGracePeriodSeconds` | Seconds Redmine pod needs to terminate gracefully | `""` | +| `api.lifecycleHooks` | for the Hyperdx api container(s) to automate configuration before or after startup | `{}` | +| `api.extraEnvVars` | Array with extra environment variables to add to Hyperdx api nodes | `[]` | +| `api.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Hyperdx api nodes | `""` | +| `api.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Hyperdx api nodes | `""` | +| `api.extraVolumes` | Optionally specify extra list of additional volumes for the Hyperdx api pod(s) | `[]` | +| `api.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Hyperdx api container(s) | `[]` | +| `api.sidecars` | Add additional sidecar containers to the Hyperdx api pod(s) | `[]` | +| `api.initContainers` | Add additional init containers to the Hyperdx api pod(s) | `[]` | + +### Hyperdx Rest Traffic Exposure Parameters + +| Name | Description | Value | +| -------------------------------------- | -------------------------------------------------------------------------------------- | ----------- | +| `api.service.type` | Hyperdx api service type | `ClusterIP` | +| `api.service.ports.http` | Hyperdx api service HTTP port | `80` | +| `api.service.nodePorts.http` | Node port for HTTP | `""` | +| `api.service.clusterIP` | Hyperdx api service Cluster IP | `""` | +| `api.service.loadBalancerIP` | Hyperdx api service Load Balancer IP | `""` | +| `api.service.loadBalancerSourceRanges` | Hyperdx api service Load Balancer sources | `[]` | +| `api.service.externalTrafficPolicy` | Hyperdx api service external traffic policy | `Cluster` | +| `api.service.annotations` | Additional custom annotations for Hyperdx api service | `{}` | +| `api.service.extraPorts` | Extra ports to expose in Hyperdx api service (normally used with the `sidecars` value) | `[]` | +| `api.service.sessionAffinity` | Control where api requests go, to the same pod or round-robin | `None` | +| `api.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | + +### Hyperdx Ingestor Parameters + +| Name | Description | Value | +| ------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `ingestor.enabled` | Enable Hyperdx ingestor | `true` | +| `ingestor.replicaCount` | Number of Hyperdx ingestor replicas to deploy | `1` | +| `ingestor.defaultConfig` | Default configuration for the Hyperdx ingestor service | `""` | +| `ingestor.extraConfig` | Extra configuration for the Hyperdx ingestor service | `{}` | +| `ingestor.existingConfigmap` | The name of an existing ConfigMap with the default configuration | `""` | +| `ingestor.extraConfigExistingConfigmap` | The name of an existing ConfigMap with extra configuration | `""` | +| `ingestor.image.registry` | ingestor image registry | `ghcr.io` | +| `ingestor.image.repository` | ingestor image repository | `hyperdxio/hyperdx` | +| `ingestor.image.digest` | ingestor image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) | `""` | +| `ingestor.image.pullPolicy` | ingestor image pull policy | `IfNotPresent` | +| `ingestor.image.pullSecrets` | ingestor image pull secrets | `[]` | +| `ingestor.containerPorts.http` | Hyperdx ingestor HTTP container port | `8002` | +| `ingestor.containerPorts.health` | Hyperdx ingestor HTTP health container port | `8686` | +| `ingestor.livenessProbe.enabled` | Enable livenessProbe on Hyperdx ingestor containers | `true` | +| `ingestor.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `ingestor.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `ingestor.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `ingestor.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `ingestor.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `ingestor.readinessProbe.enabled` | Enable readinessProbe on Hyperdx ingestor containers | `true` | +| `ingestor.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `ingestor.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `ingestor.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `ingestor.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `ingestor.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `ingestor.startupProbe.enabled` | Enable startupProbe on Hyperdx ingestor containers | `false` | +| `ingestor.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `ingestor.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `ingestor.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `ingestor.startupProbe.failureThreshold` | Failure threshold for startupProbe | `6` | +| `ingestor.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `ingestor.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `ingestor.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `ingestor.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `ingestor.resources.limits` | The resources limits for the Hyperdx ingestor containers | `{}` | +| `ingestor.resources.requests` | The requested resources for the Hyperdx ingestor containers | `{}` | +| `ingestor.podSecurityContext.enabled` | Enabled Hyperdx ingestor pods' Security Context | `true` | +| `ingestor.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `ingestor.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `ingestor.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `ingestor.podSecurityContext.fsGroup` | Set Hyperdx ingestor pod's Security Context fsGroup | `1001` | +| `ingestor.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `ingestor.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` | +| `ingestor.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `ingestor.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| 
`ingestor.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `ingestor.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` | +| `ingestor.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `ingestor.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `ingestor.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `ingestor.command` | Override default container command (useful when using custom images) | `[]` | +| `ingestor.args` | Override default container args (useful when using custom images) | `[]` | +| `ingestor.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `ingestor.hostAliases` | Hyperdx ingestor pods host aliases | `[]` | +| `ingestor.podLabels` | Extra labels for Hyperdx ingestor pods | `{}` | +| `ingestor.podAnnotations` | Annotations for Hyperdx ingestor pods | `{}` | +| `ingestor.podAffinityPreset` | Pod affinity preset. Ignored if `ingestor.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `ingestor.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `ingestor.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `ingestor.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `ingestor.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `ingestor.nodeAffinityPreset.key` | Node label key to match. Ignored if `ingestor.affinity` is set | `""` | +| `ingestor.nodeAffinityPreset.values` | Node label values to match. Ignored if `ingestor.affinity` is set | `[]` | +| `ingestor.affinity` | Affinity for Hyperdx ingestor pods assignment | `{}` | +| `ingestor.nodeSelector` | Node labels for Hyperdx ingestor pods assignment | `{}` | +| `ingestor.tolerations` | Tolerations for Hyperdx ingestor pods assignment | `[]` | +| `ingestor.updateStrategy.type` | Hyperdx ingestor statefulset strategy type | `RollingUpdate` | +| `ingestor.priorityClassName` | Hyperdx ingestor pods' priorityClassName | `""` | +| `ingestor.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | +| `ingestor.schedulerName` | Name of the k8s scheduler (other than default) for Hyperdx ingestor pods | `""` | +| `ingestor.terminationGracePeriodSeconds` | Seconds Redmine pod needs to terminate gracefully | `""` | +| `ingestor.lifecycleHooks` | for the Hyperdx ingestor container(s) to automate configuration before or after startup | `{}` | +| `ingestor.extraEnvVars` | Array with extra environment variables to add to Hyperdx ingestor nodes | `[]` | +| `ingestor.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Hyperdx ingestor nodes | `""` | +| `ingestor.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Hyperdx ingestor nodes | `""` | +| `ingestor.extraVolumes` | Optionally specify extra list of additional volumes for the Hyperdx ingestor pod(s) | `[]` | +| `ingestor.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Hyperdx ingestor container(s) | `[]` | +| `ingestor.sidecars` | Add additional sidecar containers to the Hyperdx ingestor pod(s) | `[]` | +| `ingestor.initContainers` | Add additional init containers to the Hyperdx ingestor pod(s) | `[]` | + +### Hyperdx Rest Traffic Exposure Parameters + +| Name | Description | Value | +| ------------------------------------------- | ------------------------------------------------------------------------------------------- | ----------- | +| `ingestor.service.type` | Hyperdx ingestor service type | `ClusterIP` | +| `ingestor.service.ports.http` | Hyperdx ingestor service HTTP port | `80` | +| `ingestor.service.nodePorts.http` | Node port for HTTP | `""` | +| `ingestor.service.clusterIP` | Hyperdx ingestor service Cluster IP | `""` | +| `ingestor.service.loadBalancerIP` | Hyperdx ingestor service Load Balancer IP | `""` | +| `ingestor.service.loadBalancerSourceRanges` | Hyperdx ingestor service Load Balancer sources | `[]` | +| `ingestor.service.externalTrafficPolicy` | Hyperdx ingestor service external traffic policy | `Cluster` | +| `ingestor.service.annotations` | Additional custom annotations for Hyperdx ingestor service | `{}` | +| `ingestor.service.extraPorts` | Extra ports to expose in Hyperdx ingestor service (normally used with the `sidecars` value) | `[]` | +| `ingestor.service.sessionAffinity` | Control where ingestor requests go, to the same pod or round-robin | `None` | +| `ingestor.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | + +### Hyperdx aggregator Parameters + +| Name | Description | Value | +| -------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `aggregator.enabled` | Enable Hyperdx aggregator | `true` | +| `aggregator.replicaCount` | Number of Hyperdx aggregator replicas to deploy | `1` | +| `aggregator.defaultConfig` | Default configuration for the Hyperdx aggregator service | `""` | +| `aggregator.extraConfig` | Extra configuration for the Hyperdx aggregator service | `{}` | +| `aggregator.existingConfigmap` | The name of an existing ConfigMap with the default configuration | `""` | +| `aggregator.extraConfigExistingConfigmap` | The name of an existing ConfigMap with extra configuration | `""` | +| `aggregator.image.registry` | aggregator image registry | `ghcr.io` | +| `aggregator.image.repository` | aggregator image repository | `hyperdxio/hyperdx` | +| 
`aggregator.image.digest` | aggregator image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) | `""` | +| `aggregator.image.pullPolicy` | aggregator image pull policy | `IfNotPresent` | +| `aggregator.image.pullSecrets` | aggregator image pull secrets | `[]` | +| `aggregator.containerPorts.http` | Hyperdx aggregator HTTP container port | `8001` | +| `aggregator.livenessProbe.enabled` | Enable livenessProbe on Hyperdx aggregator containers | `true` | +| `aggregator.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `aggregator.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `aggregator.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `aggregator.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `aggregator.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `aggregator.readinessProbe.enabled` | Enable readinessProbe on Hyperdx aggregator containers | `true` | +| `aggregator.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `aggregator.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `aggregator.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `aggregator.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `aggregator.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `aggregator.startupProbe.enabled` | Enable startupProbe on Hyperdx aggregator containers | `false` | +| `aggregator.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `aggregator.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `aggregator.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `aggregator.startupProbe.failureThreshold` | Failure threshold for startupProbe | `6` | +| `aggregator.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `aggregator.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `aggregator.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `aggregator.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `aggregator.resources.limits` | The resources limits for the Hyperdx aggregator containers | `{}` | +| `aggregator.resources.requests` | The requested resources for the Hyperdx aggregator containers | `{}` | +| `aggregator.podSecurityContext.enabled` | Enabled Hyperdx aggregator pods' Security Context | `true` | +| `aggregator.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `aggregator.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `aggregator.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `aggregator.podSecurityContext.fsGroup` | Set Hyperdx aggregator pod's Security Context fsGroup | `1001` | +| `aggregator.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `aggregator.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` | +| `aggregator.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `aggregator.containerSecurityContext.runAsNonRoot` | Set container's Security 
Context runAsNonRoot | `true` | +| `aggregator.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `aggregator.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` | +| `aggregator.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `aggregator.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `aggregator.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `aggregator.command` | Override default container command (useful when using custom images) | `[]` | +| `aggregator.args` | Override default container args (useful when using custom images) | `[]` | +| `aggregator.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `aggregator.hostAliases` | Hyperdx aggregator pods host aliases | `[]` | +| `aggregator.podLabels` | Extra labels for Hyperdx aggregator pods | `{}` | +| `aggregator.podAnnotations` | Annotations for Hyperdx aggregator pods | `{}` | +| `aggregator.podAffinityPreset` | Pod affinity preset. Ignored if `aggregator.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `aggregator.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `aggregator.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `aggregator.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `aggregator.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `aggregator.nodeAffinityPreset.key` | Node label key to match. Ignored if `aggregator.affinity` is set | `""` | +| `aggregator.nodeAffinityPreset.values` | Node label values to match. Ignored if `aggregator.affinity` is set | `[]` | +| `aggregator.affinity` | Affinity for Hyperdx aggregator pods assignment | `{}` | +| `aggregator.nodeSelector` | Node labels for Hyperdx aggregator pods assignment | `{}` | +| `aggregator.tolerations` | Tolerations for Hyperdx aggregator pods assignment | `[]` | +| `aggregator.updateStrategy.type` | Hyperdx aggregator statefulset strategy type | `RollingUpdate` | +| `aggregator.priorityClassName` | Hyperdx aggregator pods' priorityClassName | `""` | +| `aggregator.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | +| `aggregator.schedulerName` | Name of the k8s scheduler (other than default) for Hyperdx aggregator pods | `""` | +| `aggregator.terminationGracePeriodSeconds` | Seconds Redmine pod needs to terminate gracefully | `""` | +| `aggregator.lifecycleHooks` | for the Hyperdx aggregator container(s) to automate configuration before or after startup | `{}` | +| `aggregator.extraEnvVars` | Array with extra environment variables to add to Hyperdx aggregator nodes | `[]` | +| `aggregator.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Hyperdx aggregator nodes | `""` | +| `aggregator.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Hyperdx aggregator nodes | `""` | +| `aggregator.extraVolumes` | Optionally specify extra list of additional volumes for the Hyperdx aggregator pod(s) | `[]` | +| `aggregator.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Hyperdx aggregator container(s) | `[]` | +| `aggregator.sidecars` | Add additional sidecar containers to the Hyperdx aggregator pod(s) | `[]` | +| `aggregator.initContainers` | Add additional init containers to the Hyperdx aggregator pod(s) | `[]` | + +### Hyperdx Rest Traffic Exposure Parameters + +| Name | Description | Value | +| --------------------------------------------- | --------------------------------------------------------------------------------------------- | ----------- | +| `aggregator.service.type` | Hyperdx aggregator service type | `ClusterIP` | +| `aggregator.service.ports.http` | Hyperdx aggregator service HTTP port | `80` | +| `aggregator.service.nodePorts.http` | Node port for HTTP | `""` | +| `aggregator.service.clusterIP` | Hyperdx aggregator service Cluster IP | `""` | +| `aggregator.service.loadBalancerIP` | Hyperdx aggregator service Load Balancer IP | `""` | +| `aggregator.service.loadBalancerSourceRanges` | Hyperdx aggregator service Load Balancer sources | `[]` | +| `aggregator.service.externalTrafficPolicy` | Hyperdx aggregator service external traffic policy | `Cluster` | +| `aggregator.service.annotations` | Additional custom annotations for Hyperdx aggregator service | `{}` | +| `aggregator.service.extraPorts` | Extra ports to expose in Hyperdx aggregator service (normally used with the `sidecars` value) | `[]` | +| `aggregator.service.sessionAffinity` | Control where aggregator requests go, to the same pod or round-robin | `None` | +| `aggregator.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | + +### Hyperdx goParser Parameters + +| Name | Description | Value | +| ------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `goParser.enabled` | Enable Hyperdx goParser | `true` | +| `goParser.replicaCount` | Number of Hyperdx goParser replicas to deploy | `1` | +| `goParser.defaultConfig` | Default configuration for the Hyperdx goParser service | `""` | +| `goParser.extraConfig` | Extra configuration for the Hyperdx goParser service | `{}` | +| `goParser.existingConfigmap` | The name of an existing ConfigMap with the default configuration | `""` | +| `goParser.extraConfigExistingConfigmap` | The name of an existing ConfigMap with extra configuration | `""` | +| `goParser.image.registry` | go parser image registry | `ghcr.io` | +| `goParser.image.repository` | go parser image 
repository | `hyperdxio/hyperdx` | +| `goParser.image.digest` | go parser image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) | `""` | +| `goParser.image.pullPolicy` | go parser image pull policy | `IfNotPresent` | +| `goParser.image.pullSecrets` | go parser image pull secrets | `[]` | +| `goParser.containerPorts.http` | Hyperdx goParser HTTP container port | `7777` | +| `goParser.livenessProbe.enabled` | Enable livenessProbe on Hyperdx goParser containers | `true` | +| `goParser.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `goParser.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `goParser.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `goParser.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `goParser.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `goParser.readinessProbe.enabled` | Enable readinessProbe on Hyperdx goParser containers | `true` | +| `goParser.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `goParser.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `goParser.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `goParser.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `goParser.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `goParser.startupProbe.enabled` | Enable startupProbe on Hyperdx goParser containers | `false` | +| `goParser.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `goParser.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `goParser.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `goParser.startupProbe.failureThreshold` | Failure threshold for startupProbe | `6` | +| `goParser.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `goParser.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `goParser.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `goParser.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `goParser.resources.limits` | The resources limits for the Hyperdx goParser containers | `{}` | +| `goParser.resources.requests` | The requested resources for the Hyperdx goParser containers | `{}` | +| `goParser.podSecurityContext.enabled` | Enabled Hyperdx goParser pods' Security Context | `true` | +| `goParser.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `goParser.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `goParser.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `goParser.podSecurityContext.fsGroup` | Set Hyperdx goParser pod's Security Context fsGroup | `1001` | +| `goParser.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `goParser.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` | +| `goParser.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `goParser.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| 
`goParser.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `goParser.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` | +| `goParser.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `goParser.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `goParser.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `goParser.command` | Override default container command (useful when using custom images) | `[]` | +| `goParser.args` | Override default container args (useful when using custom images) | `[]` | +| `goParser.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `goParser.hostAliases` | Hyperdx goParser pods host aliases | `[]` | +| `goParser.podLabels` | Extra labels for Hyperdx goParser pods | `{}` | +| `goParser.podAnnotations` | Annotations for Hyperdx goParser pods | `{}` | +| `goParser.podAffinityPreset` | Pod affinity preset. Ignored if `goParser.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `goParser.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `goParser.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `goParser.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `goParser.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `goParser.nodeAffinityPreset.key` | Node label key to match. Ignored if `goParser.affinity` is set | `""` | +| `goParser.nodeAffinityPreset.values` | Node label values to match. Ignored if `goParser.affinity` is set | `[]` | +| `goParser.affinity` | Affinity for Hyperdx goParser pods assignment | `{}` | +| `goParser.nodeSelector` | Node labels for Hyperdx goParser pods assignment | `{}` | +| `goParser.tolerations` | Tolerations for Hyperdx goParser pods assignment | `[]` | +| `goParser.updateStrategy.type` | Hyperdx goParser statefulset strategy type | `RollingUpdate` | +| `goParser.priorityClassName` | Hyperdx goParser pods' priorityClassName | `""` | +| `goParser.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | +| `goParser.schedulerName` | Name of the k8s scheduler (other than default) for Hyperdx goParser pods | `""` | +| `goParser.terminationGracePeriodSeconds` | Seconds Redmine pod needs to terminate gracefully | `""` | +| `goParser.lifecycleHooks` | for the Hyperdx goParser container(s) to automate configuration before or after startup | `{}` | +| `goParser.extraEnvVars` | Array with extra environment variables to add to Hyperdx goParser nodes | `[]` | +| `goParser.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Hyperdx goParser nodes | `""` | +| `goParser.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Hyperdx goParser nodes | `""` | +| `goParser.extraVolumes` | Optionally specify extra list of additional volumes for the Hyperdx goParser pod(s) | `[]` | +| `goParser.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Hyperdx goParser container(s) | `[]` | +| `goParser.sidecars` | Add additional sidecar containers to the Hyperdx goParser pod(s) | `[]` | +| `goParser.initContainers` | Add additional init containers to the Hyperdx goParser pod(s) | `[]` | + +### Hyperdx Rest Traffic Exposure Parameters + +| Name | Description | Value | +| ------------------------------------------- | ------------------------------------------------------------------------------------------- | ----------- | +| `goParser.service.type` | Hyperdx goParser service type | `ClusterIP` | +| `goParser.service.ports.http` | Hyperdx goParser service HTTP port | `80` | +| `goParser.service.nodePorts.http` | Node port for HTTP | `""` | +| `goParser.service.clusterIP` | Hyperdx goParser service Cluster IP | `""` | +| `goParser.service.loadBalancerIP` | Hyperdx goParser service Load Balancer IP | `""` | +| `goParser.service.loadBalancerSourceRanges` | Hyperdx goParser service Load Balancer sources | `[]` | +| `goParser.service.externalTrafficPolicy` | Hyperdx goParser service external traffic policy | `Cluster` | +| `goParser.service.annotations` | Additional custom annotations for Hyperdx goParser service | `{}` | +| `goParser.service.extraPorts` | Extra ports to expose in Hyperdx goParser service (normally used with the `sidecars` value) | `[]` | +| `goParser.service.sessionAffinity` | Control where goParser requests go, to the same pod or round-robin | `None` | +| `goParser.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | + +### Hyperdx miner Parameters + +| Name | Description | Value | +| --------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------- | +| `miner.enabled` | Enable Hyperdx miner | `true` | +| `miner.replicaCount` | Number of Hyperdx miner replicas to deploy | `1` | +| `miner.defaultConfig` | Default configuration for the Hyperdx miner service | `""` | +| `miner.extraConfig` | Extra configuration for the Hyperdx miner service | `{}` | +| `miner.existingConfigmap` | The name of an existing ConfigMap with the default configuration | `""` | +| `miner.extraConfigExistingConfigmap` | The name of an existing ConfigMap with extra configuration | `""` | +| `miner.image.registry` | miner image registry | `ghcr.io` | +| `miner.image.repository` | miner image repository | `hyperdxio/hyperdx` | +| `miner.image.digest` | miner image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) | `""` | +| `miner.image.pullPolicy` | miner image pull policy | `IfNotPresent` | +| `miner.image.pullSecrets` | miner image pull secrets | `[]` | +| `miner.containerPorts.http` | Hyperdx miner HTTP container port | `5123` | +| `miner.livenessProbe.enabled` | Enable livenessProbe on Hyperdx miner containers | `true` | +| `miner.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `miner.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `miner.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `miner.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `miner.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `miner.readinessProbe.enabled` | Enable readinessProbe on Hyperdx miner containers | `true` | +| `miner.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `miner.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `miner.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `miner.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `miner.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `miner.startupProbe.enabled` | Enable startupProbe on Hyperdx miner containers | `false` | +| `miner.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `miner.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `miner.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `miner.startupProbe.failureThreshold` | Failure threshold for startupProbe | `6` | +| `miner.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `miner.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `miner.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `miner.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `miner.resources.limits` | The resources limits for the Hyperdx miner containers | `{}` | +| `miner.resources.requests` | The requested resources for the Hyperdx miner containers | `{}` | +| `miner.podSecurityContext.enabled` | Enabled Hyperdx miner pods' Security Context | `true` | +| `miner.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `miner.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `miner.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `miner.podSecurityContext.fsGroup` | Set Hyperdx miner pod's Security Context fsGroup | `1001` | +| `miner.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `miner.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` | +| `miner.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `miner.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `miner.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `miner.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` | +| 
`miner.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `miner.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `miner.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `miner.command` | Override default container command (useful when using custom images) | `[]` | +| `miner.args` | Override default container args (useful when using custom images) | `[]` | +| `miner.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `miner.hostAliases` | Hyperdx miner pods host aliases | `[]` | +| `miner.podLabels` | Extra labels for Hyperdx miner pods | `{}` | +| `miner.podAnnotations` | Annotations for Hyperdx miner pods | `{}` | +| `miner.podAffinityPreset` | Pod affinity preset. Ignored if `miner.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `miner.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `miner.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `miner.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `miner.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `miner.nodeAffinityPreset.key` | Node label key to match. Ignored if `miner.affinity` is set | `""` | +| `miner.nodeAffinityPreset.values` | Node label values to match. Ignored if `miner.affinity` is set | `[]` | +| `miner.affinity` | Affinity for Hyperdx miner pods assignment | `{}` | +| `miner.nodeSelector` | Node labels for Hyperdx miner pods assignment | `{}` | +| `miner.tolerations` | Tolerations for Hyperdx miner pods assignment | `[]` | +| `miner.updateStrategy.type` | Hyperdx miner statefulset strategy type | `RollingUpdate` | +| `miner.priorityClassName` | Hyperdx miner pods' priorityClassName | `""` | +| `miner.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | +| `miner.schedulerName` | Name of the k8s scheduler (other than default) for Hyperdx miner pods | `""` | +| `miner.terminationGracePeriodSeconds` | Seconds Redmine pod needs to terminate gracefully | `""` | +| `miner.lifecycleHooks` | for the Hyperdx miner container(s) to automate configuration before or after startup | `{}` | +| `miner.extraEnvVars` | Array with extra environment variables to add to Hyperdx miner nodes | `[]` | +| `miner.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Hyperdx miner nodes | `""` | +| `miner.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Hyperdx miner nodes | `""` | +| `miner.extraVolumes` | Optionally specify extra list of additional volumes for the Hyperdx miner pod(s) | `[]` | +| `miner.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Hyperdx miner container(s) | `[]` | +| `miner.sidecars` | Add additional sidecar containers to the Hyperdx miner pod(s) | `[]` | +| `miner.initContainers` | Add additional init containers to the Hyperdx miner pod(s) | `[]` | + +### Hyperdx Rest Traffic Exposure Parameters + +| Name | Description | Value | +| ---------------------------------------- | ---------------------------------------------------------------------------------------- | ----------- | +| `miner.service.type` | Hyperdx miner service type | `ClusterIP` | +| `miner.service.ports.http` | Hyperdx miner service HTTP port | `80` | +| `miner.service.nodePorts.http` | Node port for HTTP | `""` | +| `miner.service.clusterIP` | Hyperdx miner service Cluster IP | `""` | +| `miner.service.loadBalancerIP` | Hyperdx miner service Load Balancer IP | `""` | +| `miner.service.loadBalancerSourceRanges` | Hyperdx miner service Load Balancer sources | `[]` | +| `miner.service.externalTrafficPolicy` | Hyperdx miner service external traffic policy | `Cluster` | +| `miner.service.annotations` | Additional custom annotations for Hyperdx miner service | `{}` | +| `miner.service.extraPorts` | Extra ports to expose in Hyperdx miner service (normally used with the `sidecars` value) | `[]` | +| `miner.service.sessionAffinity` | Control where miner requests go, to the same pod or round-robin | `None` | +| `miner.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | + +### Hyperdx open-telemetry collector Parameters + +| Name | Description | Value | +| ----------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `otelCollector.enabled` | Enable Hyperdx open-telemetry collector | `true` | +| `otelCollector.replicaCount` | Number of Hyperdx otelCollector replicas to deploy | `1` | +| `otelCollector.defaultConfig` | Default configuration for the Hyperdx otelCollector service | `""` | +| `otelCollector.extraConfig` | Extra configuration for the Hyperdx otelCollector service | `{}` | +| `otelCollector.existingConfigmap` | The name of an existing ConfigMap with the default configuration | `""` | +| `otelCollector.extraConfigExistingConfigmap` | The name of an existing ConfigMap with extra configuration | `""` | +| `otelCollector.image.registry` | otelCollector image registry | `ghcr.io` | +| `otelCollector.image.repository` | otelCollector image repository | `hyperdxio/hyperdx` | +| `otelCollector.image.digest` | otelCollector image digest in the way 
sha256:aa.... Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) | `""` | +| `otelCollector.image.pullPolicy` | otelCollector image pull policy | `IfNotPresent` | +| `otelCollector.image.pullSecrets` | otelCollector image pull secrets | `[]` | +| `otelCollector.containerPorts.http` | Hyperdx otelCollector HTTP container port | `4318` | +| `otelCollector.containerPorts.health` | Hyperdx otelCollector health container port | `13133` | +| `otelCollector.containerPorts.grpc` | Hyperdx otelCollector grpc container port | `4317` | +| `otelCollector.containerPorts.pprof` | Hyperdx otelCollector pprof container port | `1888` | +| `otelCollector.containerPorts.fluentd` | Hyperdx otelCollector fluentd container port | `24225` | +| `otelCollector.containerPorts.zpages` | Hyperdx otelCollector zpages container port | `55679` | +| `otelCollector.containerPorts.metrics` | Hyperdx otelCollector metrics container port | `8888` | +| `otelCollector.containerPorts.zipkin` | Hyperdx otelCollector zipkin container port | `9411` | +| `otelCollector.livenessProbe.enabled` | Enable livenessProbe on Hyperdx otelCollector containers | `true` | +| `otelCollector.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `otelCollector.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `otelCollector.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `otelCollector.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `otelCollector.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `otelCollector.readinessProbe.enabled` | Enable readinessProbe on Hyperdx otelCollector containers | `true` | +| `otelCollector.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `otelCollector.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `otelCollector.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `otelCollector.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `otelCollector.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `otelCollector.startupProbe.enabled` | Enable startupProbe on Hyperdx otelCollector containers | `false` | +| `otelCollector.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `otelCollector.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `otelCollector.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `otelCollector.startupProbe.failureThreshold` | Failure threshold for startupProbe | `6` | +| `otelCollector.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `otelCollector.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `otelCollector.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `otelCollector.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `otelCollector.resources.limits` | The resources limits for the Hyperdx otelCollector containers | `{}` | +| `otelCollector.resources.requests` | The requested resources for the Hyperdx otelCollector containers | `{}` | +| `otelCollector.podSecurityContext.enabled` | Enabled Hyperdx otelCollector pods' Security Context | `true` | +| `otelCollector.podSecurityContext.fsGroupChangePolicy` | Set 
filesystem group change policy | `Always` | +| `otelCollector.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `otelCollector.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `otelCollector.podSecurityContext.fsGroup` | Set Hyperdx otelCollector pod's Security Context fsGroup | `1001` | +| `otelCollector.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `otelCollector.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` | +| `otelCollector.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `otelCollector.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `otelCollector.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `otelCollector.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` | +| `otelCollector.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `otelCollector.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `otelCollector.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `otelCollector.command` | Override default container command (useful when using custom images) | `[]` | +| `otelCollector.args` | Override default container args (useful when using custom images) | `[]` | +| `otelCollector.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `otelCollector.hostAliases` | Hyperdx otelCollector pods host aliases | `[]` | +| `otelCollector.podLabels` | Extra labels for Hyperdx otelCollector pods | `{}` | +| `otelCollector.podAnnotations` | Annotations for Hyperdx otelCollector pods | `{}` | +| `otelCollector.podAffinityPreset` | Pod affinity preset. Ignored if `otelCollector.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `otelCollector.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `otelCollector.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `otelCollector.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `otelCollector.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `otelCollector.nodeAffinityPreset.key` | Node label key to match. Ignored if `otelCollector.affinity` is set | `""` | +| `otelCollector.nodeAffinityPreset.values` | Node label values to match. Ignored if `otelCollector.affinity` is set | `[]` | +| `otelCollector.affinity` | Affinity for Hyperdx otelCollector pods assignment | `{}` | +| `otelCollector.nodeSelector` | Node labels for Hyperdx otelCollector pods assignment | `{}` | +| `otelCollector.tolerations` | Tolerations for Hyperdx otelCollector pods assignment | `[]` | +| `otelCollector.updateStrategy.type` | Hyperdx otelCollector statefulset strategy type | `RollingUpdate` | +| `otelCollector.priorityClassName` | Hyperdx otelCollector pods' priorityClassName | `""` | +| `otelCollector.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | +| `otelCollector.schedulerName` | Name of the k8s scheduler (other than default) for Hyperdx otelCollector pods | `""` | +| `otelCollector.terminationGracePeriodSeconds` | Seconds Redmine pod needs to terminate gracefully | `""` | +| `otelCollector.lifecycleHooks` | for the Hyperdx otelCollector container(s) to automate configuration before or after startup | `{}` | +| `otelCollector.extraEnvVars` | Array with extra environment variables to add to Hyperdx otelCollector nodes | `[]` | +| `otelCollector.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Hyperdx otelCollector nodes | `""` | +| `otelCollector.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Hyperdx otelCollector nodes | `""` | +| `otelCollector.extraVolumes` | Optionally specify extra list of additional volumes for the Hyperdx otelCollector pod(s) | `[]` | +| `otelCollector.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Hyperdx otelCollector container(s) | `[]` | +| `otelCollector.sidecars` | Add additional sidecar containers to the Hyperdx otelCollector pod(s) | `[]` | +| `otelCollector.initContainers` | Add additional init containers to the Hyperdx otelCollector pod(s) | `[]` | + +### Hyperdx Rest Traffic Exposure Parameters + +| Name | Description | Value | +| ------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ----------- | +| `otelCollector.service.type` | Hyperdx otelCollector service type | `ClusterIP` | +| `otelCollector.service.ports.http` | Hyperdx otelCollector service HTTP port | `80` | +| `otelCollector.service.nodePorts.http` | Node port for HTTP | `""` | +| `otelCollector.service.clusterIP` | Hyperdx otelCollector service Cluster IP | `""` | +| `otelCollector.service.loadBalancerIP` | Hyperdx otelCollector service Load Balancer IP | `""` | +| `otelCollector.service.loadBalancerSourceRanges` | Hyperdx otelCollector service Load Balancer sources | `[]` | +| `otelCollector.service.externalTrafficPolicy` | Hyperdx otelCollector service external traffic policy | `Cluster` | +| `otelCollector.service.annotations` | Additional custom annotations for Hyperdx otelCollector service | `{}` | +| `otelCollector.service.extraPorts` | Extra ports to expose in Hyperdx otelCollector service (normally used with the `sidecars` value) | `[]` | +| `otelCollector.service.sessionAffinity` | Control where otelCollector requests go, to the same pod or round-robin | `None` | +| `otelCollector.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | + +### Hyperdx task check alerts Parameters + +| Name | Description | Value | +| --------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `taskCheckAlerts.enabled` | Enable the task-check-alerts Cronjob which checks for alert criteria and fires off any alerts as needed | `false` | +| `taskCheckAlerts.defaultConfig` | Hyperdx taskCheckAlerts default configuration | `""` | +| `taskCheckAlerts.image.registry` | taskCheckAlerts image registry | `ghcr.io` | +| `taskCheckAlerts.image.repository` | taskCheckAlerts image repository | `hyperdxio/hyperdx` | +| `taskCheckAlerts.image.digest` | taskCheckAlerts image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) | `""` | +| `taskCheckAlerts.image.pullPolicy` | taskCheckAlerts image pull policy | `IfNotPresent` | +| `taskCheckAlerts.image.pullSecrets` | taskCheckAlerts image pull secrets | `[]` | +| `taskCheckAlerts.cronjob.schedule` | Kubernetes CronJob schedule | `* * * * *` | +| `taskCheckAlerts.cronjob.concurrencyPolicy` | Set the cronjob parameter concurrencyPolicy | `Forbid` | +| `taskCheckAlerts.cronjob.failedJobsHistoryLimit` | Set the cronjob parameter failedJobsHistoryLimit | `1` | +| `taskCheckAlerts.cronjob.successfulJobsHistoryLimit` | Set the cronjob parameter successfulJobsHistoryLimit | `3` | +| `taskCheckAlerts.cronjob.ttlSecondsAfterFinished` | Set the cronjob parameter ttlSecondsAfterFinished | `""` | +| `taskCheckAlerts.cronjob.restartPolicy` | Set the cronjob parameter restartPolicy | `OnFailure` | +| `taskCheckAlerts.cronjob.affinity` | Affinity for CronJob pod assignment | `{}` | +| `taskCheckAlerts.cronjob.command` | Override default container command (useful when using custom images) | `[]` | +| `taskCheckAlerts.cronjob.args` | Override default container args (useful when using custom images) | `[]` | +| `taskCheckAlerts.cronjob.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `taskCheckAlerts.cronjob.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` | +| `taskCheckAlerts.cronjob.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `taskCheckAlerts.cronjob.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `taskCheckAlerts.cronjob.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `taskCheckAlerts.cronjob.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` | +| `taskCheckAlerts.cronjob.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `taskCheckAlerts.cronjob.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `taskCheckAlerts.cronjob.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `taskCheckAlerts.cronjob.podAnnotations` | Additional pod annotations | `{}` | +| `taskCheckAlerts.cronjob.podLabels` | Additional pod labels | `{}` | + +### Kong chart configurations + +| Name | Description | Value | +| ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `kong.enabled` | Enable Kong | `true` | +| `kong.image.registry` | kong image registry | `docker.io` | +| `kong.image.repository` | kong image repository | `bitnami/kong` | +| `kong.image.digest` | kong image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `kong.image.pullPolicy` | kong image pull policy | `IfNotPresent` | +| `kong.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `kong.image.debug` | Enable image debug mode | `false` | +| `kong.replicaCount` | Number of Kong replicas | `1` | +| `kong.initContainers` | Add additional init containers to the Kong pods | `- name: render-kong-declarative-conf + image: '{{ include "kong.image" . }}' + command: + - /bin/bash + args: + - -ec + - | + #!/bin/bash + # https://github.com/bitnami/containers/blob/main/bitnami/kong/3/debian-11/prebuildfs/opt/bitnami/scripts/liblog.sh + . /opt/bitnami/scripts/liblog.sh + info "Rendering hyperdx declarative config template for Kong" + + # https://github.com/bitnami/render-template + render-template /bitnami/kong/declarative-template/kong.yml.tpl > "/bitnami/kong/declarative-conf/kong.yml" + volumeMounts: + - name: declarative-conf-template + mountPath: /bitnami/kong/declarative-template/ + - name: rendered-declarative-conf + mountPath: /bitnami/kong/declarative-conf/ + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 6 }} + {{- end }} +` | +| `kong.ingressController.enabled` | Enable Kong Ingress Controller | `false` | +| `kong.extraVolumes` | Adittional volumes to be added to the Kong deployment pods (evaluated as template). Requires setting `kong.kong.extraVolumeMounts` | `[]` | +| `kong.kong.extraVolumeMounts` | Additional volumeMounts to be added to the Kong Container (evaluated as template). Normally used with `kong.extraVolumes`. | `[]` | +| `kong.kong.extraEnvVars` | Additional env variables to configure Kong. | `[]` | +| `kong.ingress.enabled` | Enable Ingress rule | `false` | +| `kong.service.loadBalancerIP` | Kubernetes service LoadBalancer IP | `""` | +| `kong.service.type` | Kong Kubernetes service type | `LoadBalancer` | +| `kong.service.ports.proxyHttp` | Kong proxy service HTTP port | `80` | +| `kong.database` | Select which database backend Kong will use. Can be 'postgresql', 'cassandra' or 'off'. 
| `off` | +| `kong.postgresql.enabled` | Switch to enable or disable the PostgreSQL helm chart inside the Kong subchart | `false` | +| `mongodb.enabled` | Switch to enable or disable the mongodb helm chart | `true` | +| `mongodb.image.registry` | mongodb image registry | `docker.io` | +| `mongodb.image.repository` | mongodb image repository | `bitnami/mongodb` | +| `mongodb.image.digest` | mongodb image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `mongodb.image.pullPolicy` | mongodb image pull policy | `IfNotPresent` | +| `mongodb.image.pullSecrets` | Specify image pull secrets | `[]` | +| `mongodb.image.debug` | Specify if debug values should be set | `false` | +| `mongodb.auth.enabled` | Enable authentication | `false` | +| `mongodb.auth.rootUser` | root user | `root` | +| `mongodb.auth.rootPassword` | root password | `root` | +| `mongodb.architecture` | MongoDB architecture (`standalone` or `replicaset`) | `standalone` | +| `mongodb.service.ports.mongodb` | mongodb service port | `27017` | +| `redis.enabled` | Switch to enable or disable the Redis helm chart | `true` | +| `redis.image.registry` | Redis image registry | `docker.io` | +| `redis.image.repository` | Redis image repository | `bitnami/redis` | +| `redis.image.digest` | Redis image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `redis.image.pullPolicy` | Redis image pull policy | `IfNotPresent` | +| `redis.image.pullSecrets` | Specify image pull secrets | `[]` | +| `redis.image.debug` | Specify if debug values should be set | `false` | +| `redis.architecture` | Redis architecture. Allowed values: `standalone` or `replication` | `standalone` | +| `redis.service.ports.redis` | Redis service port | `5432` | +| `redis.auth.enabled` | Enable password authentication | `false` | +| `clickhouse.enabled` | Switch to enable or disable the Clickhouse helm chart | `true` | +| `clickhouse.image.registry` | ClickHouse image registry | `docker.io` | +| `clickhouse.image.repository` | ClickHouse image repository | `bitnami/clickhouse` | +| `clickhouse.image.digest` | ClickHouse image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `clickhouse.image.pullPolicy` | ClickHouse image pull policy | `IfNotPresent` | +| `clickhouse.image.pullSecrets` | ClickHouse image pull secrets | `[]` | +| `clickhouse.image.debug` | Enable ClickHouse image debug mode | `false` | +| `clickhouse.auth.username` | ClickHouse Admin username | `default` | +| `clickhouse.auth.password` | ClickHouse Admin password | `default` | +| `clickhouse.auth.existingSecret` | Name of a secret containing the Admin password | `""` | +| `clickhouse.auth.existingSecretKey` | Name of the key inside the existing secret | `""` | +| `clickhouse.shards` | Number of ClickHouse shards to deploy | `1` | +| `clickhouse.replicaCount` | Number of ClickHouse replicas per shard to deploy | `1` | + +### ClickHouse keeper configuration parameters + +| Name | Description | Value | +| --------------------------- | -------------------------------------------------- | ------- | +| `clickhouse.keeper.enabled` | Deploy ClickHouse keeper. Support is experimental. 
| `false` | + +### Zookeeper subchart parameters + +| Name | Description | Value | +| --------------------------------------- | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `clickhouse.zookeeper.enabled` | Deploy Zookeeper subchart | `false` | +| `clickhouse.service.type` | ClickHouse service type | `ClusterIP` | +| `clickhouse.service.ports.http` | ClickHouse service HTTP port | `8123` | +| `clickhouse.service.ports.https` | ClickHouse service HTTPS port | `443` | +| `clickhouse.service.ports.tcp` | ClickHouse service TCP port | `9000` | +| `clickhouse.service.ports.tcpSecure` | ClickHouse service TCP (secure) port | `9440` | +| `clickhouse.service.ports.keeper` | ClickHouse keeper TCP container port | `2181` | +| `clickhouse.service.ports.keeperSecure` | ClickHouse keeper TCP (secure) container port | `3181` | +| `clickhouse.service.ports.keeperInter` | ClickHouse keeper interserver TCP container port | `9444` | +| `clickhouse.service.ports.mysql` | ClickHouse service MySQL port | `9004` | +| `clickhouse.service.ports.postgresql` | ClickHouse service PostgreSQL port | `9005` | +| `clickhouse.service.ports.interserver` | ClickHouse service Interserver port | `9009` | +| `clickhouse.service.ports.metrics` | ClickHouse service metrics port | `8001` | +| `clickhouse.extraOverrides` | Extra configuration overrides (evaluated as a template) apart from the default | ` + + system + query_log
+
+ + 4096 + 64 + 100 + 8589934592 + 5368709120 + + default + default + UTC + false + + + + + engine MergeTree + partition by toYYYYMM(finish_date) + order by (finish_date, finish_time_us, trace_id) + + system + opentelemetry_span_log
+ 7500 +
+ + + /clickhouse/task_queue/ddl + + + /var/lib/clickhouse/format_schemas/ +
+` | +| `clickhouse.usersExtraOverrides` | Users extra configuration overrides (evaluated as a template) apart from the default | ` + + + 10000000000 + 0 + in_order + 1 + + + + + + + default + + ::/0 + + default + + + api + default + + ::/0 + + default + + + aggregator + default + + ::/0 + + default + + + worker + default + + ::/0 + + default + + + + + + + 3600 + 0 + 0 + 0 + 0 + 0 + + + + +` | diff --git a/charts/hyperdx/templates/NOTES.txt b/charts/hyperdx/templates/NOTES.txt new file mode 100644 index 000000000..2dc5abf4e --- /dev/null +++ b/charts/hyperdx/templates/NOTES.txt @@ -0,0 +1,88 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +** Please be patient while the chart is being deployed ** + +The following elements have been deployed + +{{- if .Values.app.enabled }} + - app +{{- end }} +{{- if .Values.api.enabled }} + - api +{{- end }} +{{- if .Values.ingestor.enabled }} + - ingestor +{{- end }} +{{- if .Values.aggregator.enabled }} + - aggregator +{{- end }} +{{- if .Values.goParser.enabled }} + - go-parser +{{- end }} +{{- if .Values.miner.enabled }} + - miner +{{- end }} +{{- if .Values.otelCollector.enabled }} + - opentelemetry-collector +{{- end }} +{{- if .Values.taskCheckAlerts.enabled }} + - task-check-alerts +{{- end }} +{{- if .Values.kong.enabled }} + - kong +{{- end }} +{{- if .Values.mongodb.enabled }} + - mongodb +{{- end }} +{{- if .Values.redis.enabled }} + - redis +{{- end }} +{{- if .Values.clickhouse.enabled }} + - clickhouse +{{- end }} + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ .Release.Namespace }} -ti -- bash + +{{- else }} + +Hyperdx API can be accessed through the following DNS name from within your cluster: + + {{ include "hyperdx.api.url" . }} + +{{- if .Values.publicUrl }} +To access the Hyperdx API from outside the cluster + + {{ include "hyperdx.api.publicUrl" . }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "hyperdx.kong.fullname" . }}' + + Point your DNS record "{{ .Values.publicUrl }}" to the external IP of the load balancer once it's ready. This is required for the dashboard to work (see README for more details) + Wait for DNS to propagate and you can now access the dashboard at "{{ .Values.publicUrl }}" + +{{- else }} + TODO: provide portforward instructions and how to mimic an identical docker compose setup on a kind cluster + portforward the appropriate svcs specified in the README +{{- end }} +{{- end }} + +To get a quick preview of HyperDX and demo logs, enable self-instrumentation by + - accessing your api key at {{ include "hyperdx.app.publicUrl" . }}/team + - set the api key secret and restart the stack + + kubectl patch secret {{ include "hyperdx.apiKey.secretName" . 
}} -p '{"data":{"api-key":""}}' + kubectl delete pods --all -n {{ .Release.Namespace }} diff --git a/charts/hyperdx/templates/_helpers.tpl b/charts/hyperdx/templates/_helpers.tpl new file mode 100644 index 000000000..ae9925d0d --- /dev/null +++ b/charts/hyperdx/templates/_helpers.tpl @@ -0,0 +1,520 @@ +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "hyperdx.imagePullSecrets" -}} +{{- include "common.images.pullSecrets" (dict "images" (list .Values.app.image .Values.api.image .Values.ingestor.image .Values.aggregator.image .Values.goParser.image .Values.miner.image .Values.taskCheckAlerts.image .Values.otelCollector.image) "global" .Values.global) -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "hyperdx.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the k8s secret name containing the API key +*/}} +{{- define "hyperdx.apiKey.secretName" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) "api-key" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + + +{{/* ============================== */}} +{{/* App (dashboard) */}} +{{/* ============================== */}} +{{/* +Return the proper Hyperdx app fullname +*/}} +{{- define "hyperdx.app.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) "app" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper Hyperdx app image name +*/}} +{{- define "hyperdx.app.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.app.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the in cluster url for Hyperdx app +*/}} +{{- define "hyperdx.app.url" -}} +{{- printf "http://%s.%s.svc.%s:%s" (include "hyperdx.app.fullname" .) (include "common.names.namespace" .) .Values.clusterDomain (.Values.app.service.ports.http | toString) -}} +{{- end -}} + +{{/* +Return the Hyperdx app public url +*/}} +{{- define "hyperdx.app.publicUrl" -}} +{{- if .Values.publicUrl -}} +{{- printf .Values.publicUrl -}} +{{- else if (and (eq .Values.kong.service.type "LoadBalancer") .Values.kong.service.loadBalancerIP) -}} +{{- printf "http://%s:%d" .Values.kong.service.loadBalancerIP (int .Values.kong.service.ports.proxyHttp) -}} +{{- else -}} +{{- printf "http://localhost:%d" (int .Values.kong.service.ports.proxyHttp) -}} +{{- end -}} +{{- end -}} + +{{/* +Default configuration ConfigMap name (app) +*/}} +{{- define "hyperdx.app.defaultConfigmapName" -}} +{{- if .Values.app.existingConfigmap -}} + {{- print .Values.app.existingConfigmap -}} +{{- else -}} + {{- printf "%s-default" (include "hyperdx.app.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Extra configuration ConfigMap name (app) +*/}} +{{- define "hyperdx.app.extraConfigmapName" -}} +{{- if .Values.app.extraConfigExistingConfigmap -}} + {{- print .Values.app.extraConfigExistingConfigmap -}} +{{- else -}} + {{- printf "%s-extra" (include "hyperdx.app.fullname" .) -}} +{{- end -}} +{{- end -}} + + +{{/* ============================== */}} +{{/* Api */}} +{{/* ============================== */}} +{{/* +Return the proper Hyperdx api fullname +*/}} +{{- define "hyperdx.api.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) 
"api" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper Hyperdx api image name +*/}} +{{- define "hyperdx.api.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.api.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the in cluster url for Hyperdx api +*/}} +{{- define "hyperdx.api.url" -}} +{{- printf "http://%s.%s.svc.%s:%s" (include "hyperdx.api.fullname" .) (include "common.names.namespace" .) .Values.clusterDomain (.Values.api.service.ports.http | toString) -}} +{{- end -}} + +{{/* +Return the Hyperdx api public url +*/}} +{{- define "hyperdx.api.publicUrl" -}} +{{- if .Values.publicUrl -}} +{{- printf "%s/api/v1/" .Values.publicUrl -}} +{{- else if (and (eq .Values.kong.service.type "LoadBalancer") .Values.kong.service.loadBalancerIP) -}} +{{- printf "http://%s:%d/api/v1/" .Values.kong.service.loadBalancerIP (int .Values.kong.service.ports.proxyHttp) -}} +{{- else -}} +{{- printf "http://localhost:%d" (int .Values.api.containerPorts.http) -}} +{{- end -}} +{{- end -}} + +{{/* +Default configuration ConfigMap name (api) +*/}} +{{- define "hyperdx.api.defaultConfigmapName" -}} +{{- if .Values.api.existingConfigmap -}} + {{- print .Values.api.existingConfigmap -}} +{{- else -}} + {{- printf "%s-default" (include "hyperdx.api.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Extra configuration ConfigMap name (api) +*/}} +{{- define "hyperdx.api.extraConfigmapName" -}} +{{- if .Values.api.extraConfigExistingConfigmap -}} + {{- print .Values.api.extraConfigExistingConfigmap -}} +{{- else -}} + {{- printf "%s-extra" (include "hyperdx.api.fullname" .) -}} +{{- end -}} +{{- end -}} + + +{{/* ============================== */}} +{{/* Ingestor */}} +{{/* ============================== */}} +{{/* +Return the proper Hyperdx ingestor fullname +*/}} +{{- define "hyperdx.ingestor.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) "ingestor" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper Hyperdx ingestor image name +*/}} +{{- define "hyperdx.ingestor.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.ingestor.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the url for Hyperdx ingestor +*/}} +{{- define "hyperdx.ingestor.url" -}} +{{- printf "http://%s.%s.svc.%s:%s" (include "hyperdx.ingestor.fullname" .) (include "common.names.namespace" .) .Values.clusterDomain (.Values.ingestor.service.ports.http | toString) -}} +{{- end -}} + +{{/* +Default configuration ConfigMap name (ingestor) +*/}} +{{- define "hyperdx.ingestor.defaultConfigmapName" -}} +{{- if .Values.ingestor.existingConfigmap -}} + {{- print .Values.ingestor.existingConfigmap -}} +{{- else -}} + {{- printf "%s-default" (include "hyperdx.ingestor.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Extra configuration ConfigMap name (ingestor) +*/}} +{{- define "hyperdx.ingestor.extraConfigmapName" -}} +{{- if .Values.ingestor.extraConfigExistingConfigmap -}} + {{- print .Values.ingestor.extraConfigExistingConfigmap -}} +{{- else -}} + {{- printf "%s-extra" (include "hyperdx.ingestor.fullname" .) -}} +{{- end -}} +{{- end -}} + + +{{/* ============================== */}} +{{/* Aggregator */}} +{{/* ============================== */}} +{{/* +Return the proper Hyperdx aggregator fullname +*/}} +{{- define "hyperdx.aggregator.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) 
"aggregator" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper Hyperdx aggregator image name +*/}} +{{- define "hyperdx.aggregator.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.aggregator.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the url for Hyperdx aggregator +*/}} +{{- define "hyperdx.aggregator.url" -}} +{{- printf "http://%s.%s.svc.%s:%s" (include "hyperdx.aggregator.fullname" .) (include "common.names.namespace" .) .Values.clusterDomain (.Values.aggregator.service.ports.http | toString) -}} +{{- end -}} + +{{/* +Default configuration ConfigMap name (aggregator) +*/}} +{{- define "hyperdx.aggregator.defaultConfigmapName" -}} +{{- if .Values.aggregator.existingConfigmap -}} + {{- print .Values.aggregator.existingConfigmap -}} +{{- else -}} + {{- printf "%s-default" (include "hyperdx.aggregator.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Extra configuration ConfigMap name (aggregator) +*/}} +{{- define "hyperdx.aggregator.extraConfigmapName" -}} +{{- if .Values.aggregator.extraConfigExistingConfigmap -}} + {{- print .Values.aggregator.extraConfigExistingConfigmap -}} +{{- else -}} + {{- printf "%s-extra" (include "hyperdx.aggregator.fullname" .) -}} +{{- end -}} +{{- end -}} + + +{{/* ============================== */}} +{{/* Go parser */}} +{{/* ============================== */}} +{{/* +Return the proper Hyperdx go parser fullname +*/}} +{{- define "hyperdx.goParser.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) "go-parser" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper Hyperdx go parser image name +*/}} +{{- define "hyperdx.goParser.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.goParser.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the url for Hyperdx go parser +*/}} +{{- define "hyperdx.goParser.url" -}} +{{- printf "http://%s.%s.svc.%s:%s" (include "hyperdx.goParser.fullname" .) (include "common.names.namespace" .) .Values.clusterDomain (.Values.goParser.service.ports.http | toString) -}} +{{- end -}} + +{{/* +Default configuration ConfigMap name (go parser) +*/}} +{{- define "hyperdx.goParser.defaultConfigmapName" -}} +{{- if .Values.goParser.existingConfigmap -}} + {{- print .Values.goParser.existingConfigmap -}} +{{- else -}} + {{- printf "%s-default" (include "hyperdx.goParser.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Extra configuration ConfigMap name (go parser) +*/}} +{{- define "hyperdx.goParser.extraConfigmapName" -}} +{{- if .Values.goParser.extraConfigExistingConfigmap -}} + {{- print .Values.goParser.extraConfigExistingConfigmap -}} +{{- else -}} + {{- printf "%s-extra" (include "hyperdx.goParser.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* ============================== */}} +{{/* Miner */}} +{{/* ============================== */}} +{{/* +Return the proper Hyperdx miner fullname +*/}} +{{- define "hyperdx.miner.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) "miner" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper Hyperdx miner image name +*/}} +{{- define "hyperdx.miner.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.miner.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the url for Hyperdx miner +*/}} +{{- define "hyperdx.miner.url" -}} +{{- printf "http://%s.%s.svc.%s:%s" (include "hyperdx.miner.fullname" .) (include "common.names.namespace" .) 
.Values.clusterDomain (.Values.miner.service.ports.http | toString) -}} +{{- end -}} + +{{/* +Default configuration ConfigMap name (miner) +*/}} +{{- define "hyperdx.miner.defaultConfigmapName" -}} +{{- if .Values.miner.existingConfigmap -}} + {{- print .Values.miner.existingConfigmap -}} +{{- else -}} + {{- printf "%s-default" (include "hyperdx.miner.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Extra configuration ConfigMap name (miner) +*/}} +{{- define "hyperdx.miner.extraConfigmapName" -}} +{{- if .Values.miner.extraConfigExistingConfigmap -}} + {{- print .Values.miner.extraConfigExistingConfigmap -}} +{{- else -}} + {{- printf "%s-extra" (include "hyperdx.miner.fullname" .) -}} +{{- end -}} +{{- end -}} + + +{{/* ============================== */}} +{{/* Otel collector */}} +{{/* ============================== */}} +{{/* +Return the proper Hyperdx otel collector fullname +*/}} +{{- define "hyperdx.otelCollector.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) "otel-collector" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper Hyperdx otel collector image name +*/}} +{{- define "hyperdx.otelCollector.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.otelCollector.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the url for Hyperdx otel collector +*/}} +{{- define "hyperdx.otelCollector.url" -}} +{{- printf "http://%s.%s.svc.%s:%s" (include "hyperdx.otelCollector.fullname" .) (include "common.names.namespace" .) .Values.clusterDomain (.Values.otelCollector.service.ports.http | toString) -}} +{{- end -}} + +{{/* +Default configuration ConfigMap name (otel collector) +*/}} +{{- define "hyperdx.otelCollector.defaultConfigmapName" -}} +{{- if .Values.otelCollector.existingConfigmap -}} + {{- print .Values.otelCollector.existingConfigmap -}} +{{- else -}} + {{- printf "%s-default" (include "hyperdx.otelCollector.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Extra configuration ConfigMap name (otel collector) +*/}} +{{- define "hyperdx.otelCollector.extraConfigmapName" -}} +{{- if .Values.otelCollector.extraConfigExistingConfigmap -}} + {{- print .Values.otelCollector.extraConfigExistingConfigmap -}} +{{- else -}} + {{- printf "%s-extra" (include "hyperdx.otelCollector.fullname" .) -}} +{{- end -}} +{{- end -}} + + +{{/* ============================== */}} +{{/* Task check alerts */}} +{{/* ============================== */}} +{{/* +Return the proper Hyperdx task check alerts fullname +*/}} +{{- define "hyperdx.taskCheckAlerts.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) "task-check-alerts" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper Hyperdx task check alerts image name +*/}} +{{- define "hyperdx.taskCheckAlerts.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.taskCheckAlerts.image "global" .Values.global) }} +{{- end -}} + +{{/* +Default configuration ConfigMap name (task check alerts) +*/}} +{{- define "hyperdx.taskCheckAlerts.defaultConfigmapName" -}} +{{- if .Values.taskCheckAlerts.existingConfigmap -}} + {{- print .Values.taskCheckAlerts.existingConfigmap -}} +{{- else -}} + {{- printf "%s-default" (include "hyperdx.taskCheckAlerts.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* ========================================== */}} +{{/* Dependency charts */}} +{{/* ========================================== */}} + +{{/* ============================== */}} +{{/* Kong */}} +{{/* ============================== */}} +{{/* +Return the proper Hyperdx Kong fullname +*/}} +{{- define "hyperdx.kong.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) "kong" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the url for Hyperdx Kong +*/}} +{{- define "hyperdx.kong.url" -}} +{{- printf "kong://%s.%s.svc.%s:%s" (include "hyperdx.kong.fullname" .) (include "common.names.namespace" .) .Values.clusterDomain (.Values.kong.service.ports.kong | toString) -}} +{{- end -}} + + +{{/* ============================== */}} +{{/* MongoDB */}} +{{/* ============================== */}} +{{/* +Return the proper Hyperdx Mongodb fullname +*/}} +{{- define "hyperdx.mongodb.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) "mongodb" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the url for Hyperdx Mongodb +*/}} +{{- define "hyperdx.mongodb.url" -}} +{{- printf "mongodb://%s.%s.svc.%s:%s" (include "hyperdx.mongodb.fullname" .) (include "common.names.namespace" .) .Values.clusterDomain (.Values.mongodb.service.ports.mongodb | toString) -}} +{{- end -}} + +{{/* +Return the Mongodb Hostname +*/}} +{{- define "hyperdx.mongodb.host" -}} +{{- print "TODO:(anjiann)" -}} +{{- end -}} + + +{{/* +Return the Mongodb port +*/}} +{{- define "hyperdx.mongodb.port" -}} +{{- print "TODO:(anjiann)" -}} +{{- end -}} + +{{/* +Return the Mongodb database name +*/}} +{{- define "hyperdx.mongodb.name" -}} +{{- print "hyperdx" -}} +{{- end -}} + + +{{/* +Return the Mongodb connection uri +*/}} +{{- define "hyperdx.mongodb.uri" -}} +{{- printf "mongodb://%s.%s.svc.%s:%s/hyperdx" (include "hyperdx.mongodb.fullname" .) (include "common.names.namespace" .) .Values.clusterDomain (.Values.mongodb.service.ports.mongodb | toString) -}} +{{- end -}} + + +{{/* ============================== */}} +{{/* Clickhouse */}} +{{/* ============================== */}} +{{/* +Return the proper Hyperdx clickhouse fullname +*/}} +{{- define "hyperdx.clickhouse.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) "clickhouse" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the url for Hyperdx clickhouse +*/}} +{{- define "hyperdx.clickhouse.url" -}} +{{- printf "http://%s.%s.svc.%s:%s" (include "hyperdx.clickhouse.fullname" .) (include "common.names.namespace" .) .Values.clusterDomain (.Values.clickhouse.service.ports.http | toString) -}} +{{- end -}} + +{{/* +Return the Clickhouse Hostname +*/}} +{{- define "hyperdx.clickhouse.host" -}} +{{- printf "http://%s.%s.svc.%s:%s" (include "hyperdx.clickhouse.fullname" .) (include "common.names.namespace" .) .Values.clusterDomain (.Values.clickhouse.service.ports.http | toString) -}} +{{- end -}} + + +{{/* ============================== */}} +{{/* Redis standalone */}} +{{/* ============================== */}} +{{/* +Return the proper Hyperdx redis fullname +*/}} +{{- define "hyperdx.redis.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) "redis-master" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the url for Hyperdx redis. +*/}} +{{- define "hyperdx.redis.url" -}} +{{- printf "redis://%s.%s.svc.%s:%s" (include "hyperdx.redis.fullname" .) (include "common.names.namespace" .) 
.Values.clusterDomain (.Values.redis.master.service.ports.redis | toString) -}} +{{- end -}} + diff --git a/charts/hyperdx/templates/aggregator/default-configmap.yaml b/charts/hyperdx/templates/aggregator/default-configmap.yaml new file mode 100644 index 000000000..95fe7b2ee --- /dev/null +++ b/charts/hyperdx/templates/aggregator/default-configmap.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.aggregator.enabled (not .Values.aggregator.existingConfigmap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-default" (include "hyperdx.aggregator.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: aggregator + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: {{- include "common.tplvalues.render" (dict "value" .Values.aggregator.defaultConfig "context" $) | nindent 2 }} +{{- end }} diff --git a/charts/hyperdx/templates/aggregator/deployment.yaml b/charts/hyperdx/templates/aggregator/deployment.yaml new file mode 100644 index 000000000..b08edb8aa --- /dev/null +++ b/charts/hyperdx/templates/aggregator/deployment.yaml @@ -0,0 +1,152 @@ +{{- if .Values.aggregator.enabled }} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "hyperdx.aggregator.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: aggregator + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.aggregator.replicaCount }} + {{- if .Values.aggregator.updateStrategy }} + strategy: {{- toYaml .Values.aggregator.updateStrategy | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.aggregator.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: aggregator + template: + metadata: + annotations: + {{- if .Values.aggregator.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.aggregator.podAnnotations "context" $) | nindent 8 }} + {{- end }} + checksum/default-configmap: {{ include (print $.Template.BasePath "/aggregator/default-configmap.yaml") . | sha256sum }} + {{- if .Values.aggregator.extraConfig }} + checksum/extra-configmap: {{ include (print $.Template.BasePath "/aggregator/extra-configmap.yaml") . | sha256sum }} + {{- end }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: aggregator + spec: + serviceAccountName: {{ template "hyperdx.serviceAccountName" . }} + {{- include "hyperdx.imagePullSecrets" . 
| nindent 6 }} + automountServiceAccountToken: {{ .Values.aggregator.automountServiceAccountToken }} + {{- if .Values.aggregator.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.aggregator.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.aggregator.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.aggregator.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.aggregator.podAffinityPreset "component" "aggregator" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.aggregator.podAntiAffinityPreset "component" "aggregator" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.aggregator.nodeAffinityPreset.type "key" .Values.aggregator.nodeAffinityPreset.key "values" .Values.aggregator.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.aggregator.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.aggregator.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.aggregator.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.aggregator.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.aggregator.priorityClassName }} + priorityClassName: {{ .Values.aggregator.priorityClassName | quote }} + {{- end }} + {{- if .Values.aggregator.schedulerName }} + schedulerName: {{ .Values.aggregator.schedulerName | quote }} + {{- end }} + {{- if .Values.aggregator.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.aggregator.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.aggregator.podSecurityContext.enabled }} + securityContext: {{- omit .Values.aggregator.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.aggregator.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.aggregator.terminationGracePeriodSeconds }} + {{- end }} + containers: + - name: hdx-oss-aggregator + image: {{ template "hyperdx.aggregator.image" . }} + imagePullPolicy: {{ .Values.aggregator.image.pullPolicy }} + {{- if .Values.aggregator.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.aggregator.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.aggregator.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.aggregator.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.aggregator.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.aggregator.args "context" $) | nindent 12 }} + {{- end }} + env: + {{- if .Values.aggregator.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.aggregator.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + - configMapRef: + name: {{ include "hyperdx.aggregator.defaultConfigmapName" . 
}} + {{- if .Values.aggregator.extraConfigExistingConfigmap }} + - configMapRef: + name: {{ include "hyperdx.aggregator.extraConfigmapName" . }} + {{- end }} + {{- if .Values.aggregator.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.aggregator.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.aggregator.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.aggregator.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- if .Values.aggregator.resources }} + resources: {{- toYaml .Values.aggregator.resources | nindent 12 }} + {{- end }} + ports: + - name: http + containerPort: {{ .Values.aggregator.containerPorts.http }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.aggregator.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.aggregator.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.aggregator.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.aggregator.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /health + port: http + {{- end }} + {{- if .Values.aggregator.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.aggregator.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.aggregator.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.aggregator.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /health + port: http + {{- end }} + {{- if .Values.aggregator.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.aggregator.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.aggregator.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.aggregator.startupProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /health + port: http + {{- end }} + {{- end }} + {{- if .Values.aggregator.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.aggregator.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.aggregator.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.aggregator.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.aggregator.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.aggregator.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.aggregator.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.aggregator.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/hyperdx/templates/aggregator/extra-configmap.yaml b/charts/hyperdx/templates/aggregator/extra-configmap.yaml new file mode 100644 index 000000000..265501f79 --- /dev/null +++ b/charts/hyperdx/templates/aggregator/extra-configmap.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.aggregator.enabled .Values.aggregator.extraConfig (not .Values.aggregator.extraConfigExistingConfigmap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-extra" (include "hyperdx.aggregator.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: aggregator + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: {{- include "common.tplvalues.render" (dict "value" .Values.aggregator.extraConfig "context" $) | nindent 2 }} +{{- end }} diff --git a/charts/hyperdx/templates/aggregator/service.yaml b/charts/hyperdx/templates/aggregator/service.yaml new file mode 100644 index 000000000..b73d52809 --- /dev/null +++ b/charts/hyperdx/templates/aggregator/service.yaml @@ -0,0 +1,51 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "hyperdx.aggregator.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $versionLabel := dict "app.kubernetes.io/version" ( include "common.images.version" ( dict "imageRoot" .Values.aggregator.image "chart" .Chart ) ) }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.commonLabels $versionLabel ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: aggregator + {{- if or .Values.aggregator.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.aggregator.service.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.aggregator.service.type }} + {{- if and .Values.aggregator.service.clusterIP (eq .Values.aggregator.service.type "ClusterIP") }} + clusterIP: {{ .Values.aggregator.service.clusterIP }} + {{- end }} + {{- if .Values.aggregator.service.sessionAffinity }} + sessionAffinity: {{ .Values.aggregator.service.sessionAffinity }} + {{- end }} + {{- if .Values.aggregator.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.aggregator.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if or (eq .Values.aggregator.service.type "LoadBalancer") (eq .Values.aggregator.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.aggregator.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.aggregator.service.type "LoadBalancer") (not (empty .Values.aggregator.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.aggregator.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.aggregator.service.type "LoadBalancer") (not (empty .Values.aggregator.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.aggregator.service.loadBalancerIP }} + {{- end }} + ports: + - name: http + port: {{ .Values.aggregator.service.ports.http }} + protocol: TCP + {{- if and (or (eq .Values.aggregator.service.type "NodePort") (eq .Values.aggregator.service.type "LoadBalancer")) (not (empty .Values.aggregator.service.nodePorts.http)) }} + nodePort: {{ .Values.aggregator.service.nodePorts.http }} + targetPort: http + {{- else if eq .Values.aggregator.service.type "ClusterIP" }} + nodePort: null + targetPort: {{ .Values.aggregator.containerPorts.http }} + {{- end }} + {{- if .Values.aggregator.service.extraPorts }} + {{- 
include "common.tplvalues.render" (dict "value" .Values.aggregator.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.aggregator.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: aggregator diff --git a/charts/hyperdx/templates/api-key-secret.yaml b/charts/hyperdx/templates/api-key-secret.yaml new file mode 100644 index 000000000..2a4626a74 --- /dev/null +++ b/charts/hyperdx/templates/api-key-secret.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "hyperdx.apiKey.secretName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + api-key: {{ include "common.secrets.passwords.manage" (dict "secret" (include "hyperdx.apiKey.secretName" .) "key" "api-key" "providedValues" (list "apiKey") "context" $) }} diff --git a/charts/hyperdx/templates/api/default-configmap.yaml b/charts/hyperdx/templates/api/default-configmap.yaml new file mode 100644 index 000000000..02572359b --- /dev/null +++ b/charts/hyperdx/templates/api/default-configmap.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.api.enabled (not .Values.api.existingConfigmap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-default" (include "hyperdx.api.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: api + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: {{- include "common.tplvalues.render" (dict "value" .Values.api.defaultConfig "context" $) | nindent 2 }} +{{- end }} diff --git a/charts/hyperdx/templates/api/deployment.yaml b/charts/hyperdx/templates/api/deployment.yaml new file mode 100644 index 000000000..e01659d42 --- /dev/null +++ b/charts/hyperdx/templates/api/deployment.yaml @@ -0,0 +1,155 @@ + +{{- if .Values.api.enabled }} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "hyperdx.api.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $versionLabel := dict "app.kubernetes.io/version" ( include "common.images.version" ( dict "imageRoot" .Values.api.image "chart" .Chart ) ) }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.commonLabels $versionLabel ) "context" .
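
The `-default` ConfigMap defined just above (and the optional `-extra` one) is consumed by the API Deployment below through `envFrom`, so keys under `api.defaultConfig` and `api.extraConfig` surface as environment variables in the API container. A minimal values sketch follows; the variable name `LOG_LEVEL` and the Secret name `hyperdx-api-extra` are illustrative assumptions, not defaults shipped with the chart:

```yaml
# Illustrative values override for the API component (not chart defaults).
api:
  enabled: true
  replicaCount: 2
  extraConfig:
    LOG_LEVEL: debug                      # assumed variable name; rendered into the "-extra" ConfigMap and injected via envFrom
  extraEnvVarsSecret: hyperdx-api-extra   # assumed Secret name; consumed as an additional secretRef
  resources:
    requests:
      cpu: 250m
      memory: 512Mi
```
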
) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: api + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.api.replicaCount }} + {{- if .Values.api.updateStrategy }} + strategy: {{- toYaml .Values.api.updateStrategy | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.api.podLabels .Values.commonLabels $versionLabel ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: api + template: + metadata: + annotations: + {{- if .Values.api.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.api.podAnnotations "context" $) | nindent 8 }} + {{- end }} + checksum/default-configmap: {{ include (print $.Template.BasePath "/api/default-configmap.yaml") . | sha256sum }} + {{- if .Values.api.extraConfig }} + checksum/extra-configmap: {{ include (print $.Template.BasePath "/api/extra-configmap.yaml") . | sha256sum }} + {{- end }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: api + spec: + serviceAccountName: {{ template "hyperdx.serviceAccountName" . }} + {{- include "hyperdx.imagePullSecrets" . | nindent 6 }} + automountServiceAccountToken: {{ .Values.api.automountServiceAccountToken }} + {{- if .Values.api.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.api.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.api.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.api.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.api.podAffinityPreset "component" "api" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.api.podAntiAffinityPreset "component" "api" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.api.nodeAffinityPreset.type "key" .Values.api.nodeAffinityPreset.key "values" .Values.api.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.api.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.api.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.api.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.api.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.api.priorityClassName }} + priorityClassName: {{ .Values.api.priorityClassName | quote }} + {{- end }} + {{- if .Values.api.schedulerName }} + schedulerName: {{ .Values.api.schedulerName | quote }} + {{- end }} + {{- if .Values.api.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.api.topologySpreadConstraints "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.api.podSecurityContext.enabled }} + securityContext: {{- omit .Values.api.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.api.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.api.terminationGracePeriodSeconds }} + {{- end }} + containers: + - name: hdx-oss-api + image: {{ template "hyperdx.api.image" . }} + imagePullPolicy: {{ .Values.api.image.pullPolicy }} + {{- if .Values.api.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.api.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.api.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.api.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.api.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.api.args "context" $) | nindent 12 }} + {{- end }} + env: + {{- if .Values.api.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.api.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + - configMapRef: + name: {{ include "hyperdx.api.defaultConfigmapName" . }} + {{- if .Values.api.extraConfigExistingConfigmap }} + - configMapRef: + name: {{ include "hyperdx.api.extraConfigmapName" . }} + {{- end }} + {{- if .Values.api.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.api.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.api.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.api.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- if .Values.api.resources }} + resources: {{- toYaml .Values.api.resources | nindent 12 }} + {{- end }} + ports: + - name: http + containerPort: {{ .Values.api.containerPorts.http }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.api.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.api.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.api.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.api.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /health + port: http + {{- end }} + {{- if .Values.api.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.api.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.api.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.api.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /health + port: http + {{- end }} + {{- if .Values.api.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.api.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.api.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.api.startupProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /health + port: http + {{- end }} + {{- end }} + {{- if 
.Values.api.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.api.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.api.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.api.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.api.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.api.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.api.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.api.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/hyperdx/templates/api/extra-configmap.yaml b/charts/hyperdx/templates/api/extra-configmap.yaml new file mode 100644 index 000000000..8becee9af --- /dev/null +++ b/charts/hyperdx/templates/api/extra-configmap.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.api.enabled .Values.api.extraConfig (not .Values.api.extraConfigExistingConfigmap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-extra" (include "hyperdx.api.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: api + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: {{- include "common.tplvalues.render" (dict "value" .Values.api.extraConfig "context" $) | nindent 2 }} +{{- end }} diff --git a/charts/hyperdx/templates/api/service.yaml b/charts/hyperdx/templates/api/service.yaml new file mode 100644 index 000000000..de4426245 --- /dev/null +++ b/charts/hyperdx/templates/api/service.yaml @@ -0,0 +1,51 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "hyperdx.api.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $versionLabel := dict "app.kubernetes.io/version" ( include "common.images.version" ( dict "imageRoot" .Values.api.image "chart" .Chart ) ) }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.commonLabels $versionLabel ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: api + {{- if or .Values.api.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.api.service.annotations .Values.commonAnnotations ) "context" .
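
The Service spec that this template continues with below publishes a single `http` port and switches behaviour on `api.service.type`. A hedged sketch of a NodePort override, using only keys referenced in `templates/api/service.yaml`; the port numbers are placeholders, not chart defaults:

```yaml
# Hedged example: publish the API Service on a NodePort (check values.yaml for real defaults).
api:
  service:
    type: NodePort
    ports:
      http: 8000       # assumed service port
    nodePorts:
      http: 30800      # must fall inside the cluster's configured NodePort range
```
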
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.api.service.type }} + {{- if and .Values.api.service.clusterIP (eq .Values.api.service.type "ClusterIP") }} + clusterIP: {{ .Values.api.service.clusterIP }} + {{- end }} + {{- if .Values.api.service.sessionAffinity }} + sessionAffinity: {{ .Values.api.service.sessionAffinity }} + {{- end }} + {{- if .Values.api.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.api.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if or (eq .Values.api.service.type "LoadBalancer") (eq .Values.api.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.api.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.api.service.type "LoadBalancer") (not (empty .Values.api.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.api.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.api.service.type "LoadBalancer") (not (empty .Values.api.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.api.service.loadBalancerIP }} + {{- end }} + ports: + - name: http + port: {{ .Values.api.service.ports.http }} + protocol: TCP + {{- if and (or (eq .Values.api.service.type "NodePort") (eq .Values.api.service.type "LoadBalancer")) (not (empty .Values.api.service.nodePorts.http)) }} + nodePort: {{ .Values.api.service.nodePorts.http }} + targetPort: http + {{- else if eq .Values.api.service.type "ClusterIP" }} + nodePort: null + targetPort: {{ .Values.api.containerPorts.http }} + {{- end }} + {{- if .Values.api.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.api.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.api.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: api diff --git a/charts/hyperdx/templates/app/default-configmap.yaml b/charts/hyperdx/templates/app/default-configmap.yaml new file mode 100644 index 000000000..d85dbe20a --- /dev/null +++ b/charts/hyperdx/templates/app/default-configmap.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.app.enabled (not .Values.app.existingConfigmap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-default" (include "hyperdx.app.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: app + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: {{- include "common.tplvalues.render" (dict "value" .Values.app.defaultConfig "context" $) | nindent 2 }} +{{- end }} diff --git a/charts/hyperdx/templates/app/deployment.yaml b/charts/hyperdx/templates/app/deployment.yaml new file mode 100644 index 000000000..73c0eea31 --- /dev/null +++ b/charts/hyperdx/templates/app/deployment.yaml @@ -0,0 +1,159 @@ +{{- if .Values.app.enabled }} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . 
}} +kind: Deployment +metadata: + name: {{ template "hyperdx.app.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: app + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.app.replicaCount }} + {{- if .Values.app.updateStrategy }} + strategy: {{- toYaml .Values.app.updateStrategy | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.app.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: app + template: + metadata: + annotations: + {{- if .Values.app.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.app.podAnnotations "context" $) | nindent 8 }} + {{- end }} + checksum/default-configmap: {{ include (print $.Template.BasePath "/app/default-configmap.yaml") . | sha256sum }} + {{- if .Values.app.extraConfig }} + checksum/extra-configmap: {{ include (print $.Template.BasePath "/app/extra-configmap.yaml") . | sha256sum }} + {{- end }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: app + spec: + serviceAccountName: {{ template "hyperdx.serviceAccountName" . }} + {{- include "hyperdx.imagePullSecrets" . | nindent 6 }} + automountServiceAccountToken: {{ .Values.app.automountServiceAccountToken }} + {{- if .Values.app.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.app.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.app.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.app.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.app.podAffinityPreset "component" "app" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.app.podAntiAffinityPreset "component" "app" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.app.nodeAffinityPreset.type "key" .Values.app.nodeAffinityPreset.key "values" .Values.app.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.app.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.app.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.app.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.app.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.app.priorityClassName }} + priorityClassName: {{ .Values.app.priorityClassName | quote }} + {{- end }} + {{- if .Values.app.schedulerName }} + schedulerName: {{ .Values.app.schedulerName | quote }} + {{- end }} + {{- if .Values.app.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.app.topologySpreadConstraints "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.app.podSecurityContext.enabled }} + securityContext: {{- omit .Values.app.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.app.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.app.terminationGracePeriodSeconds }} + {{- end }} + {{/* initContainers: */}} + {{/* {{- if not .Values.diagnosticMode.enabled }} */}} + {{/* {{- include "hyperdx.waitForDBInitContainer" . | nindent 8 }} */}} + {{/* {{- end }} */}} + {{/* {{- if .Values.app.initContainers }} */}} + {{/* {{- include "common.tplvalues.render" (dict "value" .Values.app.initContainers "context" $) | nindent 8 }} */}} + {{/* {{- end }} */}} + containers: + - name: hdx-oss-app + image: {{ template "hyperdx.app.image" . }} + imagePullPolicy: {{ .Values.app.image.pullPolicy }} + {{- if .Values.app.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.app.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.app.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.app.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.app.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.app.args "context" $) | nindent 12 }} + {{- end }} + env: + {{- if .Values.app.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.app.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + - configMapRef: + name: {{ include "hyperdx.app.defaultConfigmapName" . }} + {{- if .Values.app.extraConfigExistingConfigmap }} + - configMapRef: + name: {{ include "hyperdx.app.extraConfigmapName" . 
}} + {{- end }} + {{- if .Values.app.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.app.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.app.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.app.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- if .Values.app.resources }} + resources: {{- toYaml .Values.app.resources | nindent 12 }} + {{- end }} + ports: + - name: http + containerPort: {{ .Values.app.containerPorts.http }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.app.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.app.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.app.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.app.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: http + {{- end }} + {{- if .Values.app.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.app.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.app.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.app.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: http + {{- end }} + {{- if .Values.app.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.app.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.app.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.app.startupProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: http + {{- end }} + {{- end }} + {{- if .Values.app.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.app.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.app.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.app.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.app.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.app.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.app.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.app.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/hyperdx/templates/app/extra-configmap.yaml b/charts/hyperdx/templates/app/extra-configmap.yaml new file mode 100644 index 000000000..70e5e018b --- /dev/null +++ b/charts/hyperdx/templates/app/extra-configmap.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.app.enabled .Values.app.extraConfig (not .Values.app.extraConfigExistingConfigmap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-extra" (include "hyperdx.app.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: app + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: {{- include "common.tplvalues.render" (dict "value" .Values.app.extraConfig "context" $) | nindent 2 }} +{{- end }} diff --git a/charts/hyperdx/templates/app/service.yaml b/charts/hyperdx/templates/app/service.yaml new file mode 100644 index 000000000..7ba09a596 --- /dev/null +++ b/charts/hyperdx/templates/app/service.yaml @@ -0,0 +1,48 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "hyperdx.app.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: app + {{- if or .Values.app.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.app.service.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.app.service.type }} + {{- if and .Values.app.service.clusterIP (eq .Values.app.service.type "ClusterIP") }} + clusterIP: {{ .Values.app.service.clusterIP }} + {{- end }} + {{- if .Values.app.service.sessionAffinity }} + sessionAffinity: {{ .Values.app.service.sessionAffinity }} + {{- end }} + {{- if .Values.app.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.app.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if or (eq .Values.app.service.type "LoadBalancer") (eq .Values.app.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.app.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.app.service.type "LoadBalancer") (not (empty .Values.app.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.app.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.app.service.type "LoadBalancer") (not (empty .Values.app.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.app.service.loadBalancerIP }} + {{- end }} + ports: + - name: http + port: {{ .Values.app.service.ports.http }} + protocol: TCP + targetPort: http + {{- if and (or (eq .Values.app.service.type "NodePort") (eq .Values.app.service.type "LoadBalancer")) (not (empty .Values.app.service.nodePorts.http)) }} + nodePort: {{ .Values.app.service.nodePorts.http }} + {{- else if eq .Values.app.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.app.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.app.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.app.podLabels .Values.commonLabels ) "context" . 
) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: app diff --git a/charts/hyperdx/templates/go-parser/default-configmap.yaml b/charts/hyperdx/templates/go-parser/default-configmap.yaml new file mode 100644 index 000000000..738141c43 --- /dev/null +++ b/charts/hyperdx/templates/go-parser/default-configmap.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.goParser.enabled (not .Values.goParser.existingConfigmap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-default" (include "hyperdx.goParser.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: go-parser + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: {{- include "common.tplvalues.render" (dict "value" .Values.goParser.defaultConfig "context" $) | nindent 2 }} +{{- end }} diff --git a/charts/hyperdx/templates/go-parser/deployment.yaml b/charts/hyperdx/templates/go-parser/deployment.yaml new file mode 100644 index 000000000..7e3ab6504 --- /dev/null +++ b/charts/hyperdx/templates/go-parser/deployment.yaml @@ -0,0 +1,152 @@ +{{- if .Values.goParser.enabled }} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "hyperdx.goParser.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: go-parser + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.goParser.replicaCount }} + {{- if .Values.goParser.updateStrategy }} + strategy: {{- toYaml .Values.goParser.updateStrategy | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.goParser.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: go-parser + template: + metadata: + annotations: + {{- if .Values.goParser.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.goParser.podAnnotations "context" $) | nindent 8 }} + {{- end }} + checksum/default-configmap: {{ include (print $.Template.BasePath "/go-parser/default-configmap.yaml") . | sha256sum }} + {{- if .Values.goParser.extraConfig }} + checksum/extra-configmap: {{ include (print $.Template.BasePath "/go-parser/extra-configmap.yaml") . | sha256sum }} + {{- end }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: go-parser + spec: + serviceAccountName: {{ template "hyperdx.serviceAccountName" . }} + {{- include "hyperdx.imagePullSecrets" . 
| nindent 6 }} + automountServiceAccountToken: {{ .Values.goParser.automountServiceAccountToken }} + {{- if .Values.goParser.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.goParser.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.goParser.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.goParser.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.goParser.podAffinityPreset "component" "go-parser" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.goParser.podAntiAffinityPreset "component" "go-parser" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.goParser.nodeAffinityPreset.type "key" .Values.goParser.nodeAffinityPreset.key "values" .Values.goParser.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.goParser.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.goParser.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.goParser.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.goParser.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.goParser.priorityClassName }} + priorityClassName: {{ .Values.goParser.priorityClassName | quote }} + {{- end }} + {{- if .Values.goParser.schedulerName }} + schedulerName: {{ .Values.goParser.schedulerName | quote }} + {{- end }} + {{- if .Values.goParser.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.goParser.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.goParser.podSecurityContext.enabled }} + securityContext: {{- omit .Values.goParser.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.goParser.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.goParser.terminationGracePeriodSeconds }} + {{- end }} + containers: + - name: hdx-oss-go-parser + image: {{ template "hyperdx.goParser.image" . }} + imagePullPolicy: {{ .Values.goParser.image.pullPolicy }} + {{- if .Values.goParser.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.goParser.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.goParser.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.goParser.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.goParser.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.goParser.args "context" $) | nindent 12 }} + {{- end }} + env: + {{- if .Values.goParser.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.goParser.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + - configMapRef: + name: {{ include "hyperdx.goParser.defaultConfigmapName" . 
}} + {{- if .Values.goParser.extraConfigExistingConfigmap }} + - configMapRef: + name: {{ include "hyperdx.goParser.extraConfigmapName" . }} + {{- end }} + {{- if .Values.goParser.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.goParser.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.goParser.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.goParser.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- if .Values.goParser.resources }} + resources: {{- toYaml .Values.goParser.resources | nindent 12 }} + {{- end }} + ports: + - name: http + containerPort: {{ .Values.goParser.containerPorts.http }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.goParser.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.goParser.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.goParser.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.goParser.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /health + port: http + {{- end }} + {{- if .Values.goParser.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.goParser.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.goParser.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.goParser.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /health + port: http + {{- end }} + {{- if .Values.goParser.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.goParser.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.goParser.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.goParser.startupProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /health + port: http + {{- end }} + {{- end }} + {{- if .Values.goParser.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.goParser.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.goParser.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.goParser.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.goParser.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.goParser.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.goParser.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.goParser.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/hyperdx/templates/go-parser/extra-configmap.yaml b/charts/hyperdx/templates/go-parser/extra-configmap.yaml new file mode 100644 index 000000000..c8d93f922 --- /dev/null +++ b/charts/hyperdx/templates/go-parser/extra-configmap.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.goParser.enabled .Values.goParser.extraConfig (not .Values.goParser.extraConfigExistingConfigmap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-extra" (include "hyperdx.goParser.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: go-parser + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: {{- include "common.tplvalues.render" (dict "value" .Values.goParser.extraConfig "context" $) | nindent 2 }} +{{- end }} diff --git a/charts/hyperdx/templates/go-parser/service.yaml b/charts/hyperdx/templates/go-parser/service.yaml new file mode 100644 index 000000000..a574b903b --- /dev/null +++ b/charts/hyperdx/templates/go-parser/service.yaml @@ -0,0 +1,51 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "hyperdx.goParser.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $versionLabel := dict "app.kubernetes.io/version" ( include "common.images.version" ( dict "imageRoot" .Values.goParser.image "chart" .Chart ) ) }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.commonLabels $versionLabel ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: go-parser + {{- if or .Values.goParser.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.goParser.service.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.goParser.service.type }} + {{- if and .Values.goParser.service.clusterIP (eq .Values.goParser.service.type "ClusterIP") }} + clusterIP: {{ .Values.goParser.service.clusterIP }} + {{- end }} + {{- if .Values.goParser.service.sessionAffinity }} + sessionAffinity: {{ .Values.goParser.service.sessionAffinity }} + {{- end }} + {{- if .Values.goParser.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.goParser.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if or (eq .Values.goParser.service.type "LoadBalancer") (eq .Values.goParser.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.goParser.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.goParser.service.type "LoadBalancer") (not (empty .Values.goParser.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.goParser.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.goParser.service.type "LoadBalancer") (not (empty .Values.goParser.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.goParser.service.loadBalancerIP }} + {{- end }} + ports: + - name: http + port: {{ .Values.goParser.service.ports.http }} + protocol: TCP + {{- if and (or (eq .Values.goParser.service.type "NodePort") (eq .Values.goParser.service.type "LoadBalancer")) (not (empty .Values.goParser.service.nodePorts.http)) }} + nodePort: {{ .Values.goParser.service.nodePorts.http }} + targetPort: http + {{- else if eq .Values.goParser.service.type "ClusterIP" }} + nodePort: null + targetPort: {{ .Values.goParser.containerPorts.http }} + {{- end }} + {{- if .Values.goParser.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" 
.Values.goParser.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.goParser.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: go-parser diff --git a/charts/hyperdx/templates/ingestor/default-configmap.yaml b/charts/hyperdx/templates/ingestor/default-configmap.yaml new file mode 100644 index 000000000..d16e63ede --- /dev/null +++ b/charts/hyperdx/templates/ingestor/default-configmap.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.ingestor.enabled (not .Values.ingestor.existingConfigmap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-default" (include "hyperdx.ingestor.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: ingestor + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: {{- include "common.tplvalues.render" (dict "value" .Values.ingestor.defaultConfig "context" $) | nindent 2 }} +{{- end }} diff --git a/charts/hyperdx/templates/ingestor/deployment.yaml b/charts/hyperdx/templates/ingestor/deployment.yaml new file mode 100644 index 000000000..4cb412939 --- /dev/null +++ b/charts/hyperdx/templates/ingestor/deployment.yaml @@ -0,0 +1,155 @@ +{{- if .Values.ingestor.enabled }} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "hyperdx.ingestor.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: ingestor + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.ingestor.replicaCount }} + {{- if .Values.ingestor.updateStrategy }} + strategy: {{- toYaml .Values.ingestor.updateStrategy | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.ingestor.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: ingestor + template: + metadata: + annotations: + {{- if .Values.ingestor.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingestor.podAnnotations "context" $) | nindent 8 }} + {{- end }} + checksum/default-configmap: {{ include (print $.Template.BasePath "/ingestor/default-configmap.yaml") . | sha256sum }} + {{- if .Values.ingestor.extraConfig }} + checksum/extra-configmap: {{ include (print $.Template.BasePath "/ingestor/extra-configmap.yaml") . | sha256sum }} + {{- end }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: ingestor + spec: + serviceAccountName: {{ template "hyperdx.serviceAccountName" . 
}} + {{- include "hyperdx.imagePullSecrets" . | nindent 6 }} + automountServiceAccountToken: {{ .Values.ingestor.automountServiceAccountToken }} + {{- if .Values.ingestor.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.ingestor.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.ingestor.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.ingestor.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.ingestor.podAffinityPreset "component" "ingestor" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.ingestor.podAntiAffinityPreset "component" "ingestor" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.ingestor.nodeAffinityPreset.type "key" .Values.ingestor.nodeAffinityPreset.key "values" .Values.ingestor.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.ingestor.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.ingestor.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.ingestor.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.ingestor.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.ingestor.priorityClassName }} + priorityClassName: {{ .Values.ingestor.priorityClassName | quote }} + {{- end }} + {{- if .Values.ingestor.schedulerName }} + schedulerName: {{ .Values.ingestor.schedulerName | quote }} + {{- end }} + {{- if .Values.ingestor.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.ingestor.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.ingestor.podSecurityContext.enabled }} + securityContext: {{- omit .Values.ingestor.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.ingestor.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.ingestor.terminationGracePeriodSeconds }} + {{- end }} + containers: + - name: hdx-oss-ingestor + image: {{ template "hyperdx.ingestor.image" . }} + imagePullPolicy: {{ .Values.ingestor.image.pullPolicy }} + {{- if .Values.ingestor.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.ingestor.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.ingestor.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.ingestor.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.ingestor.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.ingestor.args "context" $) | nindent 12 }} + {{- end }} + env: + {{- if .Values.ingestor.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingestor.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + - configMapRef: + name: {{ include "hyperdx.ingestor.defaultConfigmapName" . 
}} + {{- if .Values.ingestor.extraConfigExistingConfigmap }} + - configMapRef: + name: {{ include "hyperdx.ingestor.extraConfigmapName" . }} + {{- end }} + {{- if .Values.ingestor.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.ingestor.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.ingestor.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.ingestor.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- if .Values.ingestor.resources }} + resources: {{- toYaml .Values.ingestor.resources | nindent 12 }} + {{- end }} + ports: + - name: http + containerPort: {{ .Values.ingestor.containerPorts.http }} + - name: http-health + containerPort: {{ .Values.ingestor.containerPorts.health }} + protocol: TCP + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.ingestor.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.ingestor.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.ingestor.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.ingestor.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /health + port: http-health + {{- end }} + {{- if .Values.ingestor.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.ingestor.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.ingestor.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.ingestor.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /health + port: http-health + {{- end }} + {{- if .Values.ingestor.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.ingestor.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.ingestor.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.ingestor.startupProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /health + port: http-health + {{- end }} + {{- end }} + {{- if .Values.ingestor.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.ingestor.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.ingestor.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingestor.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.ingestor.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.ingestor.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.ingestor.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingestor.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/hyperdx/templates/ingestor/extra-configmap.yaml b/charts/hyperdx/templates/ingestor/extra-configmap.yaml new file mode 100644 index 000000000..c2005a844 --- /dev/null +++ b/charts/hyperdx/templates/ingestor/extra-configmap.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.ingestor.enabled .Values.ingestor.extraConfig (not .Values.ingestor.extraConfigExistingConfigmap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-extra" (include "hyperdx.ingestor.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: ingestor + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: {{- include "common.tplvalues.render" (dict "value" .Values.ingestor.extraConfig "context" $) | nindent 2 }} +{{- end }} diff --git a/charts/hyperdx/templates/ingestor/service.yaml b/charts/hyperdx/templates/ingestor/service.yaml new file mode 100644 index 000000000..a267430e7 --- /dev/null +++ b/charts/hyperdx/templates/ingestor/service.yaml @@ -0,0 +1,51 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "hyperdx.ingestor.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $versionLabel := dict "app.kubernetes.io/version" ( include "common.images.version" ( dict "imageRoot" .Values.ingestor.image "chart" .Chart ) ) }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.commonLabels $versionLabel ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: ingestor + {{- if or .Values.ingestor.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.ingestor.service.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.ingestor.service.type }} + {{- if and .Values.ingestor.service.clusterIP (eq .Values.ingestor.service.type "ClusterIP") }} + clusterIP: {{ .Values.ingestor.service.clusterIP }} + {{- end }} + {{- if .Values.ingestor.service.sessionAffinity }} + sessionAffinity: {{ .Values.ingestor.service.sessionAffinity }} + {{- end }} + {{- if .Values.ingestor.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.ingestor.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if or (eq .Values.ingestor.service.type "LoadBalancer") (eq .Values.ingestor.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.ingestor.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.ingestor.service.type "LoadBalancer") (not (empty .Values.ingestor.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.ingestor.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.ingestor.service.type "LoadBalancer") (not (empty .Values.ingestor.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.ingestor.service.loadBalancerIP }} + {{- end }} + ports: + - name: http + port: {{ .Values.ingestor.service.ports.http }} + protocol: TCP + {{- if and (or (eq .Values.ingestor.service.type "NodePort") (eq .Values.ingestor.service.type "LoadBalancer")) (not (empty .Values.ingestor.service.nodePorts.http)) }} + nodePort: {{ .Values.ingestor.service.nodePorts.http }} + targetPort: http + {{- else if eq .Values.ingestor.service.type "ClusterIP" }} + nodePort: null + targetPort: {{ .Values.ingestor.containerPorts.http }} + {{- end }} + {{- if .Values.ingestor.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value"
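
Note that the ingestor Deployment above opens a separate `http-health` container port, while this Service only publishes `http`; `ingestor.service.extraPorts` is rendered straight into the `ports:` list, so a standard ServicePort entry can surface the health endpoint if needed. A sketch, where the service port number 9000 is an arbitrary assumption:

```yaml
# Hedged example: also expose the ingestor health port through the Service.
ingestor:
  service:
    extraPorts:
      - name: http-health
        port: 9000                 # assumed service port, pick one free in your environment
        targetPort: http-health    # matches the named containerPort in the Deployment
        protocol: TCP
```
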
.Values.ingestor.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.ingestor.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: ingestor diff --git a/charts/hyperdx/templates/kong/declarative-conf-configmap.yaml b/charts/hyperdx/templates/kong/declarative-conf-configmap.yaml new file mode 100644 index 000000000..af29663c7 --- /dev/null +++ b/charts/hyperdx/templates/kong/declarative-conf-configmap.yaml @@ -0,0 +1,49 @@ +{{- if and .Values.kong.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: "hyperdx-kong-declarative-conf" + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: kong + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + kong.yml.tpl: | + _format_version: "1.1" + + services: + - name: api-v1 + _comment: "api: /api/v1/* -> {{ include "hyperdx.api.url" . }}" + url: {{ include "hyperdx.api.url" . }} + routes: + - name: api-all + strip_path: true + paths: + - /api/v1/ + plugins: + - name: cors + + - name: app + _comment: "dashboard-app: /* -> {{ include "hyperdx.app.url" . }}" + url: {{ include "hyperdx.app.url" . }} + routes: + - name: app-all + strip_path: false + paths: + - / + plugins: + - name: cors + + - name: otel-collector + _comment: "otel-collector: /collector/ -> {{ include "hyperdx.otelCollector.url" . }}" + url: {{ include "hyperdx.otelCollector.url" . }} + routes: + - name: collector-all + strip_path: true + paths: + - /collector/ + plugins: + - name: cors +{{- end }} diff --git a/charts/hyperdx/templates/miner/default-configmap.yaml b/charts/hyperdx/templates/miner/default-configmap.yaml new file mode 100644 index 000000000..259169321 --- /dev/null +++ b/charts/hyperdx/templates/miner/default-configmap.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.miner.enabled (not .Values.miner.existingConfigmap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-default" (include "hyperdx.miner.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: miner + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: {{- include "common.tplvalues.render" (dict "value" .Values.miner.defaultConfig "context" $) | nindent 2 }} +{{- end }} diff --git a/charts/hyperdx/templates/miner/deployment.yaml b/charts/hyperdx/templates/miner/deployment.yaml new file mode 100644 index 000000000..62c2961ec --- /dev/null +++ b/charts/hyperdx/templates/miner/deployment.yaml @@ -0,0 +1,152 @@ +{{- if .Values.miner.enabled }} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "hyperdx.miner.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: miner + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.miner.replicaCount }} + {{- if .Values.miner.updateStrategy }} + strategy: {{- toYaml .Values.miner.updateStrategy | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.miner.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: miner + template: + metadata: + annotations: + {{- if .Values.miner.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.miner.podAnnotations "context" $) | nindent 8 }} + {{- end }} + checksum/default-configmap: {{ include (print $.Template.BasePath "/miner/default-configmap.yaml") . | sha256sum }} + {{- if .Values.miner.extraConfig }} + checksum/extra-configmap: {{ include (print $.Template.BasePath "/miner/extra-configmap.yaml") . | sha256sum }} + {{- end }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: miner + spec: + serviceAccountName: {{ template "hyperdx.serviceAccountName" . }} + {{- include "hyperdx.imagePullSecrets" . | nindent 6 }} + automountServiceAccountToken: {{ .Values.miner.automountServiceAccountToken }} + {{- if .Values.miner.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.miner.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.miner.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.miner.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.miner.podAffinityPreset "component" "miner" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.miner.podAntiAffinityPreset "component" "miner" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.miner.nodeAffinityPreset.type "key" .Values.miner.nodeAffinityPreset.key "values" .Values.miner.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.miner.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.miner.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.miner.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.miner.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.miner.priorityClassName }} + priorityClassName: {{ .Values.miner.priorityClassName | quote }} + {{- end }} + {{- if .Values.miner.schedulerName }} + schedulerName: {{ .Values.miner.schedulerName | quote }} + {{- end }} + {{- if .Values.miner.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.miner.topologySpreadConstraints "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.miner.podSecurityContext.enabled }} + securityContext: {{- omit .Values.miner.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.miner.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.miner.terminationGracePeriodSeconds }} + {{- end }} + containers: + - name: hdx-oss-miner + image: {{ template "hyperdx.miner.image" . }} + imagePullPolicy: {{ .Values.miner.image.pullPolicy }} + {{- if .Values.miner.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.miner.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.miner.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.miner.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.miner.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.miner.args "context" $) | nindent 12 }} + {{- end }} + env: + {{- if .Values.miner.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.miner.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + - configMapRef: + name: {{ include "hyperdx.miner.defaultConfigmapName" . }} + {{- if .Values.miner.extraConfigExistingConfigmap }} + - configMapRef: + name: {{ include "hyperdx.miner.extraConfigmapName" . }} + {{- end }} + {{- if .Values.miner.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.miner.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.miner.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.miner.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- if .Values.miner.resources }} + resources: {{- toYaml .Values.miner.resources | nindent 12 }} + {{- end }} + ports: + - name: http + containerPort: {{ .Values.miner.containerPorts.http }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.miner.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.miner.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.miner.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.miner.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /health + port: http + {{- end }} + {{- if .Values.miner.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.miner.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.miner.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.miner.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /health + port: http + {{- end }} + {{- if .Values.miner.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.miner.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.miner.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.miner.startupProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: 
/health + port: http + {{- end }} + {{- end }} + {{- if .Values.miner.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.miner.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.miner.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.miner.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.miner.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.miner.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.miner.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.miner.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/hyperdx/templates/miner/extra-configmap.yaml b/charts/hyperdx/templates/miner/extra-configmap.yaml new file mode 100644 index 000000000..07c4b45ed --- /dev/null +++ b/charts/hyperdx/templates/miner/extra-configmap.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.miner.enabled .Values.miner.extraConfig (not .Values.miner.extraConfigExistingConfigmap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-extra" (include "hyperdx.miner.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: miner + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: {{- include "common.tplvalues.render" (dict "value" .Values.miner.extraConfig "context" $) | nindent 2 }} +{{- end }} diff --git a/charts/hyperdx/templates/miner/service.yaml b/charts/hyperdx/templates/miner/service.yaml new file mode 100644 index 000000000..b5f7c0a61 --- /dev/null +++ b/charts/hyperdx/templates/miner/service.yaml @@ -0,0 +1,51 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "hyperdx.miner.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $versionLabel := dict "app.kubernetes.io/version" ( include "common.images.version" ( dict "imageRoot" .Values.miner.image "chart" .Chart ) ) }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.commonLabels $versionLabel ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: miner + {{- if or .Values.miner.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.miner.service.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.miner.service.type }} + {{- if and .Values.miner.service.clusterIP (eq .Values.miner.service.type "ClusterIP") }} + clusterIP: {{ .Values.miner.service.clusterIP }} + {{- end }} + {{- if .Values.miner.service.sessionAffinity }} + sessionAffinity: {{ .Values.miner.service.sessionAffinity }} + {{- end }} + {{- if .Values.miner.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.miner.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if or (eq .Values.miner.service.type "LoadBalancer") (eq .Values.miner.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.miner.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.miner.service.type "LoadBalancer") (not (empty .Values.miner.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.miner.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.miner.service.type "LoadBalancer") (not (empty .Values.miner.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.miner.service.loadBalancerIP }} + {{- end }} + ports: + - name: http + port: {{ .Values.miner.service.ports.http }} + protocol: TCP + {{- if and (or (eq .Values.miner.service.type "NodePort") (eq .Values.miner.service.type "LoadBalancer")) (not (empty .Values.miner.service.nodePorts.http)) }} + nodePort: {{ .Values.miner.service.nodePorts.http }} + targetPort: http + {{- else if eq .Values.miner.service.type "ClusterIP" }} + nodePort: null + targetPort: {{ .Values.miner.containerPorts.http }} + {{- end }} + {{- if .Values.miner.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.miner.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.miner.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: miner diff --git a/charts/hyperdx/templates/otel-collector/default-configmap.yaml b/charts/hyperdx/templates/otel-collector/default-configmap.yaml new file mode 100644 index 000000000..2d2df600d --- /dev/null +++ b/charts/hyperdx/templates/otel-collector/default-configmap.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.otelCollector.enabled (not .Values.otelCollector.existingConfigmap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-default" (include "hyperdx.otelCollector.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: otel-collector + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: {{- include "common.tplvalues.render" (dict "value" .Values.otelCollector.defaultConfig "context" $) | nindent 2 }} +{{- end }} diff --git a/charts/hyperdx/templates/otel-collector/deployment.yaml b/charts/hyperdx/templates/otel-collector/deployment.yaml new file mode 100644 index 000000000..f61c11f04 --- /dev/null +++ b/charts/hyperdx/templates/otel-collector/deployment.yaml @@ -0,0 +1,154 @@ +{{- if .Values.otelCollector.enabled }} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "hyperdx.otelCollector.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: otel-collector + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.otelCollector.replicaCount }} + {{- if .Values.otelCollector.updateStrategy }} + strategy: {{- toYaml .Values.otelCollector.updateStrategy | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.otelCollector.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: otel-collector + template: + metadata: + annotations: + {{- if .Values.otelCollector.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.otelCollector.podAnnotations "context" $) | nindent 8 }} + {{- end }} + checksum/default-configmap: {{ include (print $.Template.BasePath "/otel-collector/default-configmap.yaml") . | sha256sum }} + {{- if .Values.otelCollector.extraConfig }} + checksum/extra-configmap: {{ include (print $.Template.BasePath "/otel-collector/extra-configmap.yaml") . | sha256sum }} + {{- end }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: otel-collector + spec: + serviceAccountName: {{ template "hyperdx.serviceAccountName" . }} + {{- include "hyperdx.imagePullSecrets" . 
| nindent 6 }} + automountServiceAccountToken: {{ .Values.otelCollector.automountServiceAccountToken }} + {{- if .Values.otelCollector.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.otelCollector.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.otelCollector.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.otelCollector.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.otelCollector.podAffinityPreset "component" "otelCollector" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.otelCollector.podAntiAffinityPreset "component" "otelCollector" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.otelCollector.nodeAffinityPreset.type "key" .Values.otelCollector.nodeAffinityPreset.key "values" .Values.otelCollector.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.otelCollector.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.otelCollector.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.otelCollector.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.otelCollector.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.otelCollector.priorityClassName }} + priorityClassName: {{ .Values.otelCollector.priorityClassName | quote }} + {{- end }} + {{- if .Values.otelCollector.schedulerName }} + schedulerName: {{ .Values.otelCollector.schedulerName | quote }} + {{- end }} + {{- if .Values.otelCollector.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.otelCollector.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.otelCollector.podSecurityContext.enabled }} + securityContext: {{- omit .Values.otelCollector.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.otelCollector.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.otelCollector.terminationGracePeriodSeconds }} + {{- end }} + containers: + - name: hdx-oss-otel-collector + image: {{ template "hyperdx.otelCollector.image" . 
}} + imagePullPolicy: {{ .Values.otelCollector.image.pullPolicy }} + {{- if .Values.otelCollector.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.otelCollector.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.otelCollector.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.otelCollector.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.otelCollector.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.otelCollector.args "context" $) | nindent 12 }} + {{- end }} + env: + {{- if .Values.otelCollector.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.otelCollector.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + - configMapRef: + name: {{ include "hyperdx.otelCollector.defaultConfigmapName" . }} + {{- if .Values.otelCollector.extraConfigExistingConfigmap }} + - configMapRef: + name: {{ include "hyperdx.otelCollector.extraConfigmapName" . }} + {{- end }} + {{- if .Values.otelCollector.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.otelCollector.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.otelCollector.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.otelCollector.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- if .Values.otelCollector.resources }} + resources: {{- toYaml .Values.otelCollector.resources | nindent 12 }} + {{- end }} + ports: + {{- range $name, $port := .Values.otelCollector.containerPorts }} + - name: {{ $name }} + containerPort: {{ $port }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.otelCollector.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.otelCollector.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.otelCollector.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.otelCollector.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: health + {{- end }} + {{- if .Values.otelCollector.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.otelCollector.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.otelCollector.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.otelCollector.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: health + {{- end }} + {{- if .Values.otelCollector.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.otelCollector.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.otelCollector.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.otelCollector.startupProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: health + {{- end }} + {{- end }} + {{- if .Values.otelCollector.lifecycleHooks }} + lifecycle: {{- include 
"common.tplvalues.render" (dict "value" .Values.otelCollector.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.otelCollector.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.otelCollector.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.otelCollector.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.otelCollector.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.otelCollector.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.otelCollector.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/hyperdx/templates/otel-collector/extra-configmap.yaml b/charts/hyperdx/templates/otel-collector/extra-configmap.yaml new file mode 100644 index 000000000..72d6f1fd7 --- /dev/null +++ b/charts/hyperdx/templates/otel-collector/extra-configmap.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.otelCollector.enabled .Values.otelCollector.extraConfig (not .Values.otelCollector.extraConfigExistingConfigmap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-extra" (include "hyperdx.otelCollector.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: otel-collector + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: {{- include "common.tplvalues.render" (dict "value" .Values.otelCollector.extraConfig "context" $) | nindent 2 }} +{{- end }} diff --git a/charts/hyperdx/templates/otel-collector/service.yaml b/charts/hyperdx/templates/otel-collector/service.yaml new file mode 100644 index 000000000..e4f491375 --- /dev/null +++ b/charts/hyperdx/templates/otel-collector/service.yaml @@ -0,0 +1,51 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "hyperdx.otelCollector.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $versionLabel := dict "app.kubernetes.io/version" ( include "common.images.version" ( dict "imageRoot" .Values.otelCollector.image "chart" .Chart ) ) }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.commonLabels $versionLabel ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: otel-collector + {{- if or .Values.otelCollector.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.otelCollector.service.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.otelCollector.service.type }} + {{- if and .Values.otelCollector.service.clusterIP (eq .Values.otelCollector.service.type "ClusterIP") }} + clusterIP: {{ .Values.otelCollector.service.clusterIP }} + {{- end }} + {{- if .Values.otelCollector.service.sessionAffinity }} + sessionAffinity: {{ .Values.otelCollector.service.sessionAffinity }} + {{- end }} + {{- if .Values.otelCollector.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.otelCollector.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if or (eq .Values.otelCollector.service.type "LoadBalancer") (eq .Values.otelCollector.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.otelCollector.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.otelCollector.service.type "LoadBalancer") (not (empty .Values.otelCollector.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.otelCollector.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.otelCollector.service.type "LoadBalancer") (not (empty .Values.otelCollector.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.otelCollector.service.loadBalancerIP }} + {{- end }} + ports: + - name: http + port: {{ .Values.otelCollector.service.ports.http }} + protocol: TCP + {{- if and (or (eq .Values.otelCollector.service.type "NodePort") (eq .Values.otelCollector.service.type "LoadBalancer")) (not (empty .Values.otelCollector.service.nodePorts.http)) }} + nodePort: {{ .Values.otelCollector.service.nodePorts.http }} + targetPort: http + {{- else if eq .Values.otelCollector.service.type "ClusterIP" }} + nodePort: null + targetPort: {{ .Values.otelCollector.containerPorts.http }} + {{- end }} + {{- if .Values.otelCollector.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.otelCollector.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.otelCollector.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: otel-collector diff --git a/charts/hyperdx/templates/service-account.yaml b/charts/hyperdx/templates/service-account.yaml new file mode 100644 index 000000000..172c68d4c --- /dev/null +++ b/charts/hyperdx/templates/service-account.yaml @@ -0,0 +1,14 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "hyperdx.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + {{- if or .Values.serviceAccount.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.serviceAccount.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/charts/hyperdx/templates/task-check-alerts/cronjob.yaml b/charts/hyperdx/templates/task-check-alerts/cronjob.yaml new file mode 100644 index 000000000..6107b1ac2 --- /dev/null +++ b/charts/hyperdx/templates/task-check-alerts/cronjob.yaml @@ -0,0 +1,67 @@ +{{- if .Values.taskCheckAlerts.enabled }} +apiVersion: {{ include "common.capabilities.cronjob.apiVersion" . }} +kind: CronJob +metadata: + name: {{ template "hyperdx.taskCheckAlerts.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + schedule: {{ .Values.taskCheckAlerts.cronjob.schedule | quote }} + concurrencyPolicy: {{ .Values.taskCheckAlerts.cronjob.concurrencyPolicy }} + failedJobsHistoryLimit: {{ .Values.taskCheckAlerts.cronjob.failedJobsHistoryLimit }} + successfulJobsHistoryLimit: {{ .Values.taskCheckAlerts.cronjob.successfulJobsHistoryLimit }} + jobTemplate: + spec: + {{- if .Values.taskCheckAlerts.cronjob.ttlSecondsAfterFinished }} + ttlSecondsAfterFinished: {{ .Values.taskCheckAlerts.cronjob.ttlSecondsAfterFinished }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 12 }} + app.kubernetes.io/component: task-check-alerts + {{- if .Values.taskCheckAlerts.cronjob.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.taskCheckAlerts.cronjob.podLabels "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.taskCheckAlerts.cronjob.podAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.taskCheckAlerts.cronjob.podAnnotations "context" $ ) | nindent 12 }} + {{- end }} + spec: + {{- include "hyperdx.imagePullSecrets" . | nindent 10 }} + {{- if .Values.taskCheckAlerts.cronjob.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.taskCheckAlerts.cronjob.affinity "context" $) | nindent 12 }} + {{- end }} + restartPolicy: {{ .Values.taskCheckAlerts.cronjob.restartPolicy }} + containers: + - name: {{ template "hyperdx.taskCheckAlerts.fullname" . }} + image: {{ template "hyperdx.taskCheckAlerts.image" . 
}} + imagePullPolicy: {{ .Values.taskCheckAlerts.cronjob.image.pullPolicy }} + {{- if .Values.taskCheckAlerts.cronjob.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.taskCheckAlerts.cronjob.containerSecurityContext "enabled" | toYaml | nindent 16 }} + {{- end }} + {{- if .Values.taskCheckAlerts.cronjob.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.taskCheckAlerts.cronjob.command "context" $) | nindent 16 }} + {{- else }} + command: + - /bin/bash + - -c + {{- end }} + {{- if .Values.taskCheckAlerts.cronjob.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.taskCheckAlerts.cronjob.args "context" $) | nindent 16 }} + {{- else }} + args: + - node ./build/tasks/index.js check-alerts + {{- end }} + env: + {{/* Currently, when NODE_ENV === development the cronjob scheduling is builtin and the app never exits. Always override this here to prevent accidental continuous spawning of non-exiting jobs. */}} + - name: NODE_ENV + value: "production" + envFrom: + - configMapRef: + name: {{ include "hyperdx.taskCheckAlerts.defaultConfigmapName" . }} +{{- end }} diff --git a/charts/hyperdx/templates/task-check-alerts/default-configmap.yaml b/charts/hyperdx/templates/task-check-alerts/default-configmap.yaml new file mode 100644 index 000000000..ec6df10a2 --- /dev/null +++ b/charts/hyperdx/templates/task-check-alerts/default-configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.taskCheckAlerts.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-default" (include "hyperdx.taskCheckAlerts.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: hyperdx + app.kubernetes.io/component: task-check-alerts + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: {{- include "common.tplvalues.render" (dict "value" .Values.taskCheckAlerts.cronjob.defaultConfig "context" $) | nindent 2 }} +{{- end }} diff --git a/charts/hyperdx/values.yaml b/charts/hyperdx/values.yaml new file mode 100644 index 000000000..af069e47c --- /dev/null +++ b/charts/hyperdx/values.yaml @@ -0,0 +1,3116 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +global: + imageRegistry: '' + ## E.g. 
+ ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: '' + +## @section Common parameters +## + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: '' +## @param nameOverride String to partially override common.names.name +## +nameOverride: '' +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: '' +## @param namespaceOverride String to fully override common.names.namespace +## +namespaceOverride: '' +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param clusterDomain Kubernetes cluster domain name +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] + +## Enable diagnostic mode in all Hyperdx deployments +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @section Hyperdx Common parameters +## + +## @param apiKey the Hyperdx api key. TODO: If not specified, use the ingestion api key for self-instrumentation. +## +apiKey: '' +## @param logLevel the logging level across all Hyperdx pipeline components. Allowed values: `error`, `warn`, `info`, `http`, `verbose`, `debug`, `silly` +## +logLevel: info +## @param publicUrl Type A DNS record that points to the the LoadBalancer service of the API gateway (Kong) +## +publicUrl: '' + +## @section Hyperdx dashboard app Parameters +## +## +app: + ## @param app.enabled Enable hyperdx dashboard app + ## + enabled: true + ## @param app.replicaCount Number of Hyperdx app replicas to deploy + ## + replicaCount: 1 + ## @param app.defaultConfig [string] Hyperdx app default configuration + ## + # TODO: dashboard app client side is not yet able to read envs injected at runtime. See README for details + defaultConfig: | + HYPERDX_API_KEY: {{ .Values.apiKey }} + PORT: "8080" + NEXT_PUBLIC_OTEL_EXPORTER_OTLP_ENDPOINT: {{ .Values.publicUrl }}/collector + NEXT_PUBLIC_OTEL_SERVICE_NAME: "hdx-oss-app" + NEXT_PUBLIC_SERVER_URL: {{ include "hyperdx.api.publicUrl" . }} + + ## @param app.extraConfig Hyperdx app extra configuration + ## + extraConfig: {} + + ## @param app.existingConfigmap The name of an existing ConfigMap with the default configuration + ## + existingConfigmap: '' + ## @param app.extraConfigExistingConfigmap The name of an existing ConfigMap with extra configuration + ## + extraConfigExistingConfigmap: '' + + ## Hyperdx dashboard app image + ## ref: https://github.com/hyperdxio/hyperdx/pkgs/container/hyperdx + ## @param app.image.registry [default: ghcr.io] app image registry + ## @param app.image.repository [default: hyperdxio/hyperdx] app image repository + ## @skip app.image.tag app image tag (immutable tags are recommended) + ## @param app.image.digest app image digest in the way sha256:aa.... 
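+  ## A custom-built dashboard image can be used by overriding the values below; the
+  ## registry, repository and tag in this sketch are illustrative placeholders, not
+  ## published images:
+  ## e.g:
+  ## image:
+  ##   registry: registry.example.com
+  ##   repository: my-org/hyperdx-app
+  ##   tag: 1.6.0-app-custom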
Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) + ## @param app.image.pullPolicy app image pull policy + ## @param app.image.pullSecrets app image pull secrets + ## + image: + registry: ghcr.io + repository: hyperdxio/hyperdx + tag: 1.6.0-app + digest: '' + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## @param app.containerPorts.http Hyperdx dashboard app HTTP container port + ## + containerPorts: + http: 8080 + ## Configure extra options for Hyperdx app containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param app.livenessProbe.enabled Enable livenessProbe on Hyperdx app containers + ## @param app.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param app.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param app.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param app.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param app.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param app.readinessProbe.enabled Enable readinessProbe on Hyperdx app containers + ## @param app.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param app.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param app.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param app.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param app.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param app.startupProbe.enabled Enable startupProbe on Hyperdx app containers + ## @param app.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param app.startupProbe.periodSeconds Period seconds for startupProbe + ## @param app.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param app.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param app.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param app.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param app.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param app.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## Hyperdx app resource requests and limits + ## ref: 
http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param app.resources.limits The resources limits for the Hyperdx app containers + ## @param app.resources.requests The requested resources for the Hyperdx app containers + ## + resources: + limits: {} + requests: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param app.podSecurityContext.enabled Enabled Hyperdx app pods' Security Context + ## @param app.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param app.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param app.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param app.podSecurityContext.fsGroup Set Hyperdx app pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param app.containerSecurityContext.enabled Enabled containers' Security Context + ## @param app.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param app.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param app.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param app.containerSecurityContext.privileged Set container's Security Context privileged + ## @param app.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param app.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param app.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param app.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: null + runAsUser: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: false + allowPrivilegeEscalation: false + capabilities: + drop: ['ALL'] + seccompProfile: + type: 'RuntimeDefault' + + ## @param app.command Override default container command (useful when using custom images) + ## + command: [] + ## @param app.args Override default container args (useful when using custom images) + ## + args: [] + ## @param app.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param app.hostAliases Hyperdx app pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param app.podLabels Extra labels for Hyperdx app pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param app.podAnnotations Annotations for Hyperdx app pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param app.podAffinityPreset Pod affinity preset. Ignored if `app.affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: '' + ## @param app.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `app.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node app.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param app.nodeAffinityPreset.type Node affinity preset type. Ignored if `app.affinity` is set. Allowed values: `soft` or `hard` + ## + type: '' + ## @param app.nodeAffinityPreset.key Node label key to match. Ignored if `app.affinity` is set + ## + key: '' + ## @param app.nodeAffinityPreset.values Node label values to match. Ignored if `app.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param app.affinity Affinity for Hyperdx app pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `app.podAffinityPreset`, `app.podAntiAffinityPreset`, and `app.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param app.nodeSelector Node labels for Hyperdx app pods assignment + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param app.tolerations Tolerations for Hyperdx app pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param app.updateStrategy.type Hyperdx app statefulset strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + + ## @param app.priorityClassName Hyperdx app pods' priorityClassName + ## + priorityClassName: '' + ## @param app.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
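+  ## An illustrative constraint (the topologyKey and component label are examples,
+  ## not chart defaults):
+  ## topologySpreadConstraints:
+  ##   - maxSkew: 1
+  ##     topologyKey: topology.kubernetes.io/zone
+  ##     whenUnsatisfiable: ScheduleAnyway
+  ##     labelSelector:
+  ##       matchLabels:
+  ##         app.kubernetes.io/component: app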
Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param app.schedulerName Name of the k8s scheduler (other than default) for Hyperdx app pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: '' + ## @param app.terminationGracePeriodSeconds Seconds Redmine pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: '' + ## @param app.lifecycleHooks for the Hyperdx app container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param app.extraEnvVars Array with extra environment variables to add to Hyperdx app nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param app.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Hyperdx app nodes + ## + extraEnvVarsCM: '' + ## @param app.extraEnvVarsSecret Name of existing Secret containing extra env vars for Hyperdx app nodes + ## + extraEnvVarsSecret: '' + ## @param app.extraVolumes Optionally specify extra list of additional volumes for the Hyperdx app pod(s) + ## + extraVolumes: [] + ## @param app.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Hyperdx app container(s) + ## + extraVolumeMounts: [] + ## @param app.sidecars Add additional sidecar containers to the Hyperdx app pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param app.initContainers Add additional init containers to the Hyperdx app pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + + ## @section Hyperdx app Traffic Exposure Parameters + ## + service: + ## @param app.service.type Hyperdx app service type + ## + type: ClusterIP + ## @param app.service.ports.http Hyperdx app service HTTP port + ## + ports: + http: 80 + ## Node ports to expose + ## @param app.service.nodePorts.http Node port for HTTP + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + http: '' + ## @param app.service.clusterIP Hyperdx app service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: '' + ## @param app.service.loadBalancerIP Hyperdx app service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: '' + ## @param app.service.loadBalancerSourceRanges Hyperdx app service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param app.service.externalTrafficPolicy Hyperdx app service external traffic policy + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-app-source-ip + ## + externalTrafficPolicy: Cluster + ## @param app.service.annotations Additional custom annotations for Hyperdx app service + ## + annotations: {} + ## @param 
app.service.extraPorts Extra ports to expose in Hyperdx app service (normally used with the `sidecars` value) + ## + extraPorts: [] + ## @param app.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/ + ## + sessionAffinity: None + ## @param app.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + +## @section Hyperdx api Parameters +## +## +api: + ## @param api.enabled Enable Hyperdx api + ## + enabled: true + ## @param api.replicaCount Number of Hyperdx api replicas to deploy + ## + replicaCount: 1 + + ## @param api.defaultConfig [string] Default configuration for the Hyperdx api service + ## + defaultConfig: | + AGGREGATOR_API_URL: {{ include "hyperdx.aggregator.url" . }} + APP_TYPE: 'api' + CLICKHOUSE_HOST: {{ include "hyperdx.clickhouse.host" . | quote }} + CLICKHOUSE_LOG_LEVEL: {{ .Values.logLevel }} + CLICKHOUSE_USER: api + CLICKHOUSE_PASSWORD: api + EXPRESS_SESSION_SECRET: 'hyperdx is cool 👋' + FRONTEND_URL: {{ include "hyperdx.app.publicUrl" . }} + HDX_NODE_ADVANCED_NETWORK_CAPTURE: "1" + HDX_NODE_BETA_MODE: "1" + HDX_NODE_CONSOLE_CAPTURE: "1" + HYPERDX_API_KEY: {{ .Values.apiKey }} + HYPERDX_LOG_LEVEL: {{ .Values.logLevel }} + INGESTOR_API_URL: {{ include "hyperdx.ingestor.url" . }} + MINER_API_URL: {{ include "hyperdx.miner.url" . }} + MONGO_URI: {{ include "hyperdx.mongodb.uri" . }} + OTEL_EXPORTER_OTLP_ENDPOINT: {{ include "hyperdx.otelCollector.url" . }} + OTEL_SERVICE_NAME: "hdx-oss-api" + PORT: {{ .Values.api.containerPorts.http | quote }} + REDIS_URL: {{ include "hyperdx.redis.url" . }} + SERVER_URL: {{ include "hyperdx.api.url" . }} + USAGE_STATS_ENABLED: "false" + + ## @param api.extraConfig Extra configuration for the Hyperdx api service + ## + extraConfig: {} + + ## @param api.existingConfigmap The name of an existing ConfigMap with the default configuration + ## + existingConfigmap: '' + ## @param api.extraConfigExistingConfigmap The name of an existing ConfigMap with extra configuration + ## + extraConfigExistingConfigmap: '' + + ## Hyperdx api image + ## ref: https://github.com/hyperdxio/hyperdx/pkgs/container/hyperdx + ## @param api.image.registry [default: ghcr.io] api image registry + ## @param api.image.repository [default: hyperdxio/hyperdx] api image repository + ## @skip api.image.tag api image tag (immutable tags are recommended) + ## @param api.image.digest api image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) + ## @param api.image.pullPolicy api image pull policy + ## @param api.image.pullSecrets api image pull secrets + ## + image: + registry: ghcr.io + repository: hyperdxio/hyperdx + tag: 1.6.0-api + digest: '' + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
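+  ## The referenced Secret can be created ahead of time, for example (the server,
+  ## username and token here are placeholders):
+  ##   kubectl create secret docker-registry myRegistryKeySecretName \
+  ##     --docker-server=ghcr.io --docker-username=<username> --docker-password=<token>
+  ## and then listed under pullSecrets as shown below.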
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## @param api.containerPorts.http Hyperdx api HTTP container port + ## + containerPorts: + http: 8000 + ## Configure extra options for Hyperdx api containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param api.livenessProbe.enabled Enable livenessProbe on Hyperdx api containers + ## @param api.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param api.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param api.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param api.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param api.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param api.readinessProbe.enabled Enable readinessProbe on Hyperdx api containers + ## @param api.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param api.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param api.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param api.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param api.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param api.startupProbe.enabled Enable startupProbe on Hyperdx api containers + ## @param api.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param api.startupProbe.periodSeconds Period seconds for startupProbe + ## @param api.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param api.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param api.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param api.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param api.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param api.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## Hyperdx api resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param api.resources.limits The resources limits for the Hyperdx api containers + ## @param api.resources.requests The requested resources for the Hyperdx api containers + ## + resources: + limits: {} + requests: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param api.podSecurityContext.enabled Enabled Hyperdx api pods' Security Context + ## @param api.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param api.podSecurityContext.sysctls Set kernel settings using the 
sysctl interface + ## @param api.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param api.podSecurityContext.fsGroup Set Hyperdx api pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param api.containerSecurityContext.enabled Enabled containers' Security Context + ## @param api.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param api.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param api.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param api.containerSecurityContext.privileged Set container's Security Context privileged + ## @param api.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param api.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param api.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param api.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: null + runAsUser: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: false + allowPrivilegeEscalation: false + capabilities: + drop: ['ALL'] + seccompProfile: + type: 'RuntimeDefault' + + ## @param api.command Override default container command (useful when using custom images) + ## + command: [] + ## @param api.args Override default container args (useful when using custom images) + ## + args: [] + ## @param api.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param api.hostAliases Hyperdx api pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param api.podLabels Extra labels for Hyperdx api pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param api.podAnnotations Annotations for Hyperdx api pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param api.podAffinityPreset Pod affinity preset. Ignored if `api.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: '' + ## @param api.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `api.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node api.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param api.nodeAffinityPreset.type Node affinity preset type. Ignored if `api.affinity` is set. Allowed values: `soft` or `hard` + ## + type: '' + ## @param api.nodeAffinityPreset.key Node label key to match. 
Ignored if `api.affinity` is set + ## + key: '' + ## @param api.nodeAffinityPreset.values Node label values to match. Ignored if `api.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param api.affinity Affinity for Hyperdx api pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `api.podAffinityPreset`, `api.podAntiAffinityPreset`, and `api.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param api.nodeSelector Node labels for Hyperdx api pods assignment + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param api.tolerations Tolerations for Hyperdx api pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param api.updateStrategy.type Hyperdx api statefulset strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + + ## @param api.priorityClassName Hyperdx api pods' priorityClassName + ## + priorityClassName: '' + ## @param api.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param api.schedulerName Name of the k8s scheduler (other than default) for Hyperdx api pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: '' + ## @param api.terminationGracePeriodSeconds Seconds Redmine pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: '' + ## @param api.lifecycleHooks for the Hyperdx api container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param api.extraEnvVars Array with extra environment variables to add to Hyperdx api nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param api.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Hyperdx api nodes + ## + extraEnvVarsCM: '' + ## @param api.extraEnvVarsSecret Name of existing Secret containing extra env vars for Hyperdx api nodes + ## + extraEnvVarsSecret: '' + ## @param api.extraVolumes Optionally specify extra list of additional volumes for the Hyperdx api pod(s) + ## + extraVolumes: [] + ## @param api.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Hyperdx api container(s) + ## + extraVolumeMounts: [] + ## @param api.sidecars Add additional sidecar containers to the Hyperdx api pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param api.initContainers Add additional init containers to the Hyperdx api pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + + ## 
@section Hyperdx api Traffic Exposure Parameters
+ ##
+ service:
+ ## @param api.service.type Hyperdx api service type
+ ##
+ type: ClusterIP
+ ## @param api.service.ports.http Hyperdx api service HTTP port
+ ##
+ ports:
+ http: 80
+ ## Node ports to expose
+ ## @param api.service.nodePorts.http Node port for HTTP
+ ## NOTE: choose port between <30000-32767>
+ ##
+ nodePorts:
+ http: ''
+ ## @param api.service.clusterIP Hyperdx api service Cluster IP
+ ## e.g.:
+ ## clusterIP: None
+ ##
+ clusterIP: ''
+ ## @param api.service.loadBalancerIP Hyperdx api service Load Balancer IP
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
+ ##
+ loadBalancerIP: ''
+ ## @param api.service.loadBalancerSourceRanges Hyperdx api service Load Balancer sources
+ ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+ ## e.g:
+ ## loadBalancerSourceRanges:
+ ## - 10.10.10.0/24
+ ##
+ loadBalancerSourceRanges: []
+ ## @param api.service.externalTrafficPolicy Hyperdx api service external traffic policy
+ ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+ ##
+ externalTrafficPolicy: Cluster
+ ## @param api.service.annotations Additional custom annotations for Hyperdx api service
+ ##
+ annotations: {}
+ ## @param api.service.extraPorts Extra ports to expose in Hyperdx api service (normally used with the `sidecars` value)
+ ##
+ extraPorts: []
+ ## @param api.service.sessionAffinity Control where api requests go, to the same pod or round-robin
+ ## Values: ClientIP or None
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/
+ ##
+ sessionAffinity: None
+ ## @param api.service.sessionAffinityConfig Additional settings for the sessionAffinity
+ ## sessionAffinityConfig:
+ ## clientIP:
+ ## timeoutSeconds: 300
+ ##
+ sessionAffinityConfig: {}
+
+## @section Hyperdx ingestor Parameters
+##
+##
+ingestor:
+ ## @param ingestor.enabled Enable Hyperdx ingestor
+ ##
+ enabled: true
+ ## @param ingestor.replicaCount Number of Hyperdx ingestor replicas to deploy
+ ##
+ replicaCount: 1
+
+ ## @param ingestor.defaultConfig [string] Default configuration for the Hyperdx ingestor service
+ ##
+ defaultConfig: |
+ AGGREGATOR_API_URL: {{ include "hyperdx.aggregator.url" . }}
+ ENABLE_GO_PARSER: {{ .Values.goParser.enabled | quote }}
+ GO_PARSER_API_URL: {{ include "hyperdx.goParser.url" . }}
+ RUST_BACKTRACE: "full"
+ VECTOR_LOG: {{ .Values.logLevel }}
+ VECTOR_OPENSSL_LEGACY_PROVIDER: "false"
+
+ ## @param ingestor.extraConfig Extra configuration for the Hyperdx ingestor service
+ ##
+ extraConfig: {}
+
+ ## @param ingestor.existingConfigmap The name of an existing ConfigMap with the default configuration
+ ##
+ existingConfigmap: ''
+ ## @param ingestor.extraConfigExistingConfigmap The name of an existing ConfigMap with extra configuration
+ ##
+ extraConfigExistingConfigmap: ''
+
+ ## Hyperdx ingestor image
+ ## ref: https://github.com/hyperdxio/hyperdx/pkgs/container/hyperdx
+ ## @param ingestor.image.registry [default: ghcr.io] ingestor image registry
+ ## @param ingestor.image.repository [default: hyperdxio/hyperdx] ingestor image repository
+ ## @skip ingestor.image.tag ingestor image tag (immutable tags are recommended)
+ ## @param ingestor.image.digest ingestor image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) + ## @param ingestor.image.pullPolicy ingestor image pull policy + ## @param ingestor.image.pullSecrets ingestor image pull secrets + ## + image: + registry: ghcr.io + repository: hyperdxio/hyperdx + tag: 1.6.0-ingestor + digest: '' + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## @param ingestor.containerPorts.http Hyperdx ingestor HTTP container port + ## @param ingestor.containerPorts.health Hyperdx ingestor HTTP health container port + ## + containerPorts: + http: 8002 + health: 8686 + ## Configure extra options for Hyperdx ingestor containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param ingestor.livenessProbe.enabled Enable livenessProbe on Hyperdx ingestor containers + ## @param ingestor.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param ingestor.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param ingestor.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param ingestor.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param ingestor.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param ingestor.readinessProbe.enabled Enable readinessProbe on Hyperdx ingestor containers + ## @param ingestor.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param ingestor.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param ingestor.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param ingestor.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param ingestor.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param ingestor.startupProbe.enabled Enable startupProbe on Hyperdx ingestor containers + ## @param ingestor.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param ingestor.startupProbe.periodSeconds Period seconds for startupProbe + ## @param ingestor.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param ingestor.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param ingestor.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param ingestor.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param ingestor.customReadinessProbe Custom readinessProbe that overrides the default one + ## + 
customReadinessProbe: {} + ## @param ingestor.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## Hyperdx ingestor resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param ingestor.resources.limits The resources limits for the Hyperdx ingestor containers + ## @param ingestor.resources.requests The requested resources for the Hyperdx ingestor containers + ## + resources: + limits: {} + requests: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param ingestor.podSecurityContext.enabled Enabled Hyperdx ingestor pods' Security Context + ## @param ingestor.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param ingestor.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param ingestor.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param ingestor.podSecurityContext.fsGroup Set Hyperdx ingestor pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param ingestor.containerSecurityContext.enabled Enabled containers' Security Context + ## @param ingestor.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param ingestor.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param ingestor.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param ingestor.containerSecurityContext.privileged Set container's Security Context privileged + ## @param ingestor.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param ingestor.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param ingestor.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param ingestor.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: null + runAsUser: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: false + allowPrivilegeEscalation: false + capabilities: + drop: ['ALL'] + seccompProfile: + type: 'RuntimeDefault' + + ## @param ingestor.command Override default container command (useful when using custom images) + ## + command: [] + ## @param ingestor.args Override default container args (useful when using custom images) + ## + args: [] + ## @param ingestor.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param ingestor.hostAliases Hyperdx ingestor pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param ingestor.podLabels Extra labels for Hyperdx ingestor pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param ingestor.podAnnotations Annotations for Hyperdx ingestor pods + ## ref: 
https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param ingestor.podAffinityPreset Pod affinity preset. Ignored if `ingestor.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: '' + ## @param ingestor.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `ingestor.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node ingestor.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param ingestor.nodeAffinityPreset.type Node affinity preset type. Ignored if `ingestor.affinity` is set. Allowed values: `soft` or `hard` + ## + type: '' + ## @param ingestor.nodeAffinityPreset.key Node label key to match. Ignored if `ingestor.affinity` is set + ## + key: '' + ## @param ingestor.nodeAffinityPreset.values Node label values to match. Ignored if `ingestor.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param ingestor.affinity Affinity for Hyperdx ingestor pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `ingestor.podAffinityPreset`, `ingestor.podAntiAffinityPreset`, and `ingestor.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param ingestor.nodeSelector Node labels for Hyperdx ingestor pods assignment + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param ingestor.tolerations Tolerations for Hyperdx ingestor pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param ingestor.updateStrategy.type Hyperdx ingestor statefulset strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + + ## @param ingestor.priorityClassName Hyperdx ingestor pods' priorityClassName + ## + priorityClassName: '' + ## @param ingestor.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
+ ##
+ topologySpreadConstraints: []
+ ## @param ingestor.schedulerName Name of the k8s scheduler (other than default) for Hyperdx ingestor pods
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+ ##
+ schedulerName: ''
+ ## @param ingestor.terminationGracePeriodSeconds Seconds Hyperdx ingestor pod needs to terminate gracefully
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
+ ##
+ terminationGracePeriodSeconds: ''
+ ## @param ingestor.lifecycleHooks for the Hyperdx ingestor container(s) to automate configuration before or after startup
+ ##
+ lifecycleHooks: {}
+ ## @param ingestor.extraEnvVars Array with extra environment variables to add to Hyperdx ingestor nodes
+ ## e.g:
+ ## extraEnvVars:
+ ## - name: FOO
+ ## value: "bar"
+ ##
+ extraEnvVars: []
+ ## @param ingestor.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Hyperdx ingestor nodes
+ ##
+ extraEnvVarsCM: ''
+ ## @param ingestor.extraEnvVarsSecret Name of existing Secret containing extra env vars for Hyperdx ingestor nodes
+ ##
+ extraEnvVarsSecret: ''
+ ## @param ingestor.extraVolumes Optionally specify extra list of additional volumes for the Hyperdx ingestor pod(s)
+ ##
+ extraVolumes: []
+ ## @param ingestor.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Hyperdx ingestor container(s)
+ ##
+ extraVolumeMounts: []
+ ## @param ingestor.sidecars Add additional sidecar containers to the Hyperdx ingestor pod(s)
+ ## e.g:
+ ## sidecars:
+ ## - name: your-image-name
+ ## image: your-image
+ ## imagePullPolicy: Always
+ ## ports:
+ ## - name: portname
+ ## containerPort: 1234
+ ##
+ sidecars: []
+ ## @param ingestor.initContainers Add additional init containers to the Hyperdx ingestor pod(s)
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+ ## e.g:
+ ## initContainers:
+ ## - name: your-image-name
+ ## image: your-image
+ ## imagePullPolicy: Always
+ ## command: ['sh', '-c', 'echo "hello world"']
+ ##
+ initContainers: []
+
+ ## @section Hyperdx ingestor Traffic Exposure Parameters
+ ##
+ service:
+ ## @param ingestor.service.type Hyperdx ingestor service type
+ ##
+ type: ClusterIP
+ ## @param ingestor.service.ports.http Hyperdx ingestor service HTTP port
+ ##
+ ports:
+ http: 80
+ ## Node ports to expose
+ ## @param ingestor.service.nodePorts.http Node port for HTTP
+ ## NOTE: choose port between <30000-32767>
+ ##
+ nodePorts:
+ http: ''
+ ## @param ingestor.service.clusterIP Hyperdx ingestor service Cluster IP
+ ## e.g.:
+ ## clusterIP: None
+ ##
+ clusterIP: ''
+ ## @param ingestor.service.loadBalancerIP Hyperdx ingestor service Load Balancer IP
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
+ ##
+ loadBalancerIP: ''
+ ## @param ingestor.service.loadBalancerSourceRanges Hyperdx ingestor service Load Balancer sources
+ ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+ ## e.g:
+ ## loadBalancerSourceRanges:
+ ## - 10.10.10.0/24
+ ##
+ loadBalancerSourceRanges: []
+ ## @param ingestor.service.externalTrafficPolicy Hyperdx ingestor service external traffic policy
+ ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+ 
## + externalTrafficPolicy: Cluster + ## @param ingestor.service.annotations Additional custom annotations for Hyperdx ingestor service + ## + annotations: {} + ## @param ingestor.service.extraPorts Extra ports to expose in Hyperdx ingestor service (normally used with the `sidecars` value) + ## + extraPorts: [] + ## @param ingestor.service.sessionAffinity Control where ingestor requests go, to the same pod or round-robin + ## Values: RestIP or None + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/ + ## + sessionAffinity: None + ## @param ingestor.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## restIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + +## @section Hyperdx aggregator Parameters +## +## +aggregator: + ## @param aggregator.enabled Enable Hyperdx aggregator + ## + enabled: true + ## @param aggregator.replicaCount Number of Hyperdx aggregator replicas to deploy + ## + replicaCount: 1 + + ## @param aggregator.defaultConfig [string] Default configuration for the Hyperdx aggregator service + ## + defaultConfig: | + APP_TYPE: "aggregator" + CLICKHOUSE_HOST: {{ include "hyperdx.clickhouse.host" . }} + CLICKHOUSE_PASSWORD: aggregator + CLICKHOUSE_USER: aggregator + FRONTEND_URL: {{ include "hyperdx.app.publicUrl" . }} + HYPERDX_LOG_LEVEL: {{ .Values.logLevel }} + MONGO_URI: {{ include "hyperdx.mongodb.uri" . }} + NODE_ENV: development + PORT: {{ .Values.aggregator.containerPorts.http | quote }} + REDIS_URL: {{ include "hyperdx.redis.url" . }} + SERVER_URL: {{ include "hyperdx.api.url" . }} + + ## @param aggregator.extraConfig Extra configuration for the Hyperdx aggregator service + ## + extraConfig: {} + + ## @param aggregator.existingConfigmap The name of an existing ConfigMap with the default configuration + ## + existingConfigmap: '' + ## @param aggregator.extraConfigExistingConfigmap The name of an existing ConfigMap with extra configuration + ## + extraConfigExistingConfigmap: '' + + ## Hyperdx aggregator image + ## ref: https://github.com/hyperdxio/hyperdx/pkgs/container/hyperdx + ## @param aggregator.image.registry [default: ghcr.io] aggregator image registry + ## @param aggregator.image.repository [default: hyperdxio/hyperdx] aggregator image repository + ## @skip aggregator.image.tag aggregator image tag (immutable tags are recommended) + ## @param aggregator.image.digest aggregator image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) + ## @param aggregator.image.pullPolicy aggregator image pull policy + ## @param aggregator.image.pullSecrets aggregator image pull secrets + ## + image: + registry: ghcr.io + repository: hyperdxio/hyperdx + tag: 1.6.0-api + digest: '' + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
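+ ## For illustration only, an existing pull secret can also be referenced at install
+ ## time (release name and chart path below are placeholders):
+ ##   helm install my-hyperdx charts/hyperdx \
+ ##     --set 'aggregator.image.pullSecrets[0]=myRegistryKeySecretName'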
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## @param aggregator.containerPorts.http Hyperdx aggregator HTTP container port + ## + containerPorts: + http: 8001 + ## Configure extra options for Hyperdx aggregator containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param aggregator.livenessProbe.enabled Enable livenessProbe on Hyperdx aggregator containers + ## @param aggregator.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param aggregator.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param aggregator.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param aggregator.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param aggregator.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param aggregator.readinessProbe.enabled Enable readinessProbe on Hyperdx aggregator containers + ## @param aggregator.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param aggregator.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param aggregator.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param aggregator.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param aggregator.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param aggregator.startupProbe.enabled Enable startupProbe on Hyperdx aggregator containers + ## @param aggregator.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param aggregator.startupProbe.periodSeconds Period seconds for startupProbe + ## @param aggregator.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param aggregator.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param aggregator.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param aggregator.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param aggregator.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param aggregator.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## Hyperdx aggregator resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param aggregator.resources.limits The resources limits for the Hyperdx aggregator containers + ## @param aggregator.resources.requests The requested resources for the Hyperdx aggregator containers + ## + resources: + limits: {} + requests: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param 
aggregator.podSecurityContext.enabled Enabled Hyperdx aggregator pods' Security Context + ## @param aggregator.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param aggregator.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param aggregator.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param aggregator.podSecurityContext.fsGroup Set Hyperdx aggregator pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param aggregator.containerSecurityContext.enabled Enabled containers' Security Context + ## @param aggregator.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param aggregator.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param aggregator.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param aggregator.containerSecurityContext.privileged Set container's Security Context privileged + ## @param aggregator.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param aggregator.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param aggregator.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param aggregator.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: null + runAsUser: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: false + allowPrivilegeEscalation: false + capabilities: + drop: ['ALL'] + seccompProfile: + type: 'RuntimeDefault' + + ## @param aggregator.command Override default container command (useful when using custom images) + ## + command: [] + ## @param aggregator.args Override default container args (useful when using custom images) + ## + args: [] + ## @param aggregator.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param aggregator.hostAliases Hyperdx aggregator pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param aggregator.podLabels Extra labels for Hyperdx aggregator pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param aggregator.podAnnotations Annotations for Hyperdx aggregator pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param aggregator.podAffinityPreset Pod affinity preset. Ignored if `aggregator.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: '' + ## @param aggregator.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `aggregator.affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node aggregator.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param aggregator.nodeAffinityPreset.type Node affinity preset type. Ignored if `aggregator.affinity` is set. Allowed values: `soft` or `hard` + ## + type: '' + ## @param aggregator.nodeAffinityPreset.key Node label key to match. Ignored if `aggregator.affinity` is set + ## + key: '' + ## @param aggregator.nodeAffinityPreset.values Node label values to match. Ignored if `aggregator.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param aggregator.affinity Affinity for Hyperdx aggregator pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `aggregator.podAffinityPreset`, `aggregator.podAntiAffinityPreset`, and `aggregator.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param aggregator.nodeSelector Node labels for Hyperdx aggregator pods assignment + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param aggregator.tolerations Tolerations for Hyperdx aggregator pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param aggregator.updateStrategy.type Hyperdx aggregator statefulset strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + + ## @param aggregator.priorityClassName Hyperdx aggregator pods' priorityClassName + ## + priorityClassName: '' + ## @param aggregator.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
+ ##
+ topologySpreadConstraints: []
+ ## @param aggregator.schedulerName Name of the k8s scheduler (other than default) for Hyperdx aggregator pods
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+ ##
+ schedulerName: ''
+ ## @param aggregator.terminationGracePeriodSeconds Seconds Hyperdx aggregator pod needs to terminate gracefully
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
+ ##
+ terminationGracePeriodSeconds: ''
+ ## @param aggregator.lifecycleHooks for the Hyperdx aggregator container(s) to automate configuration before or after startup
+ ##
+ lifecycleHooks: {}
+ ## @param aggregator.extraEnvVars Array with extra environment variables to add to Hyperdx aggregator nodes
+ ## e.g:
+ ## extraEnvVars:
+ ## - name: FOO
+ ## value: "bar"
+ ##
+ extraEnvVars: []
+ ## @param aggregator.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Hyperdx aggregator nodes
+ ##
+ extraEnvVarsCM: ''
+ ## @param aggregator.extraEnvVarsSecret Name of existing Secret containing extra env vars for Hyperdx aggregator nodes
+ ##
+ extraEnvVarsSecret: ''
+ ## @param aggregator.extraVolumes Optionally specify extra list of additional volumes for the Hyperdx aggregator pod(s)
+ ##
+ extraVolumes: []
+ ## @param aggregator.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Hyperdx aggregator container(s)
+ ##
+ extraVolumeMounts: []
+ ## @param aggregator.sidecars Add additional sidecar containers to the Hyperdx aggregator pod(s)
+ ## e.g:
+ ## sidecars:
+ ## - name: your-image-name
+ ## image: your-image
+ ## imagePullPolicy: Always
+ ## ports:
+ ## - name: portname
+ ## containerPort: 1234
+ ##
+ sidecars: []
+ ## @param aggregator.initContainers Add additional init containers to the Hyperdx aggregator pod(s)
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+ ## e.g:
+ ## initContainers:
+ ## - name: your-image-name
+ ## image: your-image
+ ## imagePullPolicy: Always
+ ## command: ['sh', '-c', 'echo "hello world"']
+ ##
+ initContainers: []
+
+ ## @section Hyperdx aggregator Traffic Exposure Parameters
+ ##
+ service:
+ ## @param aggregator.service.type Hyperdx aggregator service type
+ ##
+ type: ClusterIP
+ ## @param aggregator.service.ports.http Hyperdx aggregator service HTTP port
+ ##
+ ports:
+ http: 80
+ ## Node ports to expose
+ ## @param aggregator.service.nodePorts.http Node port for HTTP
+ ## NOTE: choose port between <30000-32767>
+ ##
+ nodePorts:
+ http: ''
+ ## @param aggregator.service.clusterIP Hyperdx aggregator service Cluster IP
+ ## e.g.:
+ ## clusterIP: None
+ ##
+ clusterIP: ''
+ ## @param aggregator.service.loadBalancerIP Hyperdx aggregator service Load Balancer IP
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
+ ##
+ loadBalancerIP: ''
+ ## @param aggregator.service.loadBalancerSourceRanges Hyperdx aggregator service Load Balancer sources
+ ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+ ## e.g:
+ ## loadBalancerSourceRanges:
+ ## - 10.10.10.0/24
+ ##
+ loadBalancerSourceRanges: []
+ ## @param aggregator.service.externalTrafficPolicy Hyperdx aggregator service external traffic policy
+ ## ref 
http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-aggregator-source-ip + ## + externalTrafficPolicy: Cluster + ## @param aggregator.service.annotations Additional custom annotations for Hyperdx aggregator service + ## + annotations: {} + ## @param aggregator.service.extraPorts Extra ports to expose in Hyperdx aggregator service (normally used with the `sidecars` value) + ## + extraPorts: [] + ## @param aggregator.service.sessionAffinity Control where aggregator requests go, to the same pod or round-robin + ## Values: RestIP or None + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/ + ## + sessionAffinity: None + ## @param aggregator.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## restIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + +## @section Hyperdx goParser Parameters +## +## +goParser: + ## @param goParser.enabled Enable Hyperdx goParser + ## + enabled: true + ## @param goParser.replicaCount Number of Hyperdx goParser replicas to deploy + ## + replicaCount: 1 + + ## @param goParser.defaultConfig [string] Default configuration for the Hyperdx goParser service + ## + defaultConfig: | + AGGREGATOR_API_URL: {{ include "hyperdx.aggregator.url" . }} + HYPERDX_API_KEY: {{ .Values.apiKey }} + HYPERDX_LOG_LEVEL: {{ .Values.logLevel }} + OTEL_EXPORTER_OTLP_ENDPOINT: {{ include "hyperdx.otelCollector.url" . }} + OTEL_LOG_LEVEL: {{ .Values.logLevel }} + OTEL_SERVICE_NAME: hdx-oss-go-parser + PORT: {{ .Values.goParser.containerPorts.http | quote }} + + ## @param goParser.extraConfig Extra configuration for the Hyperdx goParser service + ## + extraConfig: {} + + ## @param goParser.existingConfigmap The name of an existing ConfigMap with the default configuration + ## + existingConfigmap: '' + ## @param goParser.extraConfigExistingConfigmap The name of an existing ConfigMap with extra configuration + ## + extraConfigExistingConfigmap: '' + + ## Hyperdx go parser image + ## ref: https://github.com/hyperdxio/hyperdx/pkgs/container/hyperdx + ## @param goParser.image.registry [default: ghcr.io] go parser image registry + ## @param goParser.image.repository [default: hyperdxio/hyperdx] go parser image repository + ## @skip goParser.image.tag go parser image tag (immutable tags are recommended) + ## @param goParser.image.digest go parser image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) + ## @param goParser.image.pullPolicy go parser image pull policy + ## @param goParser.image.pullSecrets go parser image pull secrets + ## + image: + registry: ghcr.io + repository: hyperdxio/hyperdx + tag: 1.6.0-go-parser + digest: '' + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
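+ ## For illustration only, the go-parser image can also be pinned by digest instead of
+ ## by tag (a non-empty digest takes precedence over the tag), e.g.:
+ ##   digest: 'sha256:<go-parser-image-digest>'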
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## @param goParser.containerPorts.http Hyperdx goParser HTTP container port + ## + containerPorts: + http: 7777 + ## Configure extra options for Hyperdx goParser containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param goParser.livenessProbe.enabled Enable livenessProbe on Hyperdx goParser containers + ## @param goParser.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param goParser.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param goParser.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param goParser.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param goParser.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param goParser.readinessProbe.enabled Enable readinessProbe on Hyperdx goParser containers + ## @param goParser.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param goParser.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param goParser.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param goParser.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param goParser.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param goParser.startupProbe.enabled Enable startupProbe on Hyperdx goParser containers + ## @param goParser.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param goParser.startupProbe.periodSeconds Period seconds for startupProbe + ## @param goParser.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param goParser.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param goParser.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param goParser.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param goParser.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param goParser.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## Hyperdx goParser resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param goParser.resources.limits The resources limits for the Hyperdx goParser containers + ## @param goParser.resources.requests The requested resources for the Hyperdx goParser containers + ## + resources: + limits: {} + requests: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param goParser.podSecurityContext.enabled Enabled Hyperdx goParser pods' Security 
Context + ## @param goParser.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param goParser.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param goParser.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param goParser.podSecurityContext.fsGroup Set Hyperdx goParser pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param goParser.containerSecurityContext.enabled Enabled containers' Security Context + ## @param goParser.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param goParser.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param goParser.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param goParser.containerSecurityContext.privileged Set container's Security Context privileged + ## @param goParser.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param goParser.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param goParser.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param goParser.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: null + runAsUser: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: false + allowPrivilegeEscalation: false + capabilities: + drop: ['ALL'] + seccompProfile: + type: 'RuntimeDefault' + + ## @param goParser.command Override default container command (useful when using custom images) + ## + command: [] + ## @param goParser.args Override default container args (useful when using custom images) + ## + args: [] + ## @param goParser.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param goParser.hostAliases Hyperdx goParser pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param goParser.podLabels Extra labels for Hyperdx goParser pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param goParser.podAnnotations Annotations for Hyperdx goParser pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param goParser.podAffinityPreset Pod affinity preset. Ignored if `goParser.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: '' + ## @param goParser.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `goParser.affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node goParser.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param goParser.nodeAffinityPreset.type Node affinity preset type. Ignored if `goParser.affinity` is set. Allowed values: `soft` or `hard` + ## + type: '' + ## @param goParser.nodeAffinityPreset.key Node label key to match. Ignored if `goParser.affinity` is set + ## + key: '' + ## @param goParser.nodeAffinityPreset.values Node label values to match. Ignored if `goParser.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param goParser.affinity Affinity for Hyperdx goParser pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `goParser.podAffinityPreset`, `goParser.podAntiAffinityPreset`, and `goParser.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param goParser.nodeSelector Node labels for Hyperdx goParser pods assignment + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param goParser.tolerations Tolerations for Hyperdx goParser pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param goParser.updateStrategy.type Hyperdx goParser statefulset strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + + ## @param goParser.priorityClassName Hyperdx goParser pods' priorityClassName + ## + priorityClassName: '' + ## @param goParser.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
+ ##
+ topologySpreadConstraints: []
+ ## @param goParser.schedulerName Name of the k8s scheduler (other than default) for Hyperdx goParser pods
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+ ##
+ schedulerName: ''
+ ## @param goParser.terminationGracePeriodSeconds Seconds Hyperdx goParser pod needs to terminate gracefully
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
+ ##
+ terminationGracePeriodSeconds: ''
+ ## @param goParser.lifecycleHooks for the Hyperdx goParser container(s) to automate configuration before or after startup
+ ##
+ lifecycleHooks: {}
+ ## @param goParser.extraEnvVars Array with extra environment variables to add to Hyperdx goParser nodes
+ ## e.g:
+ ## extraEnvVars:
+ ## - name: FOO
+ ## value: "bar"
+ ##
+ extraEnvVars: []
+ ## @param goParser.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Hyperdx goParser nodes
+ ##
+ extraEnvVarsCM: ''
+ ## @param goParser.extraEnvVarsSecret Name of existing Secret containing extra env vars for Hyperdx goParser nodes
+ ##
+ extraEnvVarsSecret: ''
+ ## @param goParser.extraVolumes Optionally specify extra list of additional volumes for the Hyperdx goParser pod(s)
+ ##
+ extraVolumes: []
+ ## @param goParser.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Hyperdx goParser container(s)
+ ##
+ extraVolumeMounts: []
+ ## @param goParser.sidecars Add additional sidecar containers to the Hyperdx goParser pod(s)
+ ## e.g:
+ ## sidecars:
+ ## - name: your-image-name
+ ## image: your-image
+ ## imagePullPolicy: Always
+ ## ports:
+ ## - name: portname
+ ## containerPort: 1234
+ ##
+ sidecars: []
+ ## @param goParser.initContainers Add additional init containers to the Hyperdx goParser pod(s)
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+ ## e.g:
+ ## initContainers:
+ ## - name: your-image-name
+ ## image: your-image
+ ## imagePullPolicy: Always
+ ## command: ['sh', '-c', 'echo "hello world"']
+ ##
+ initContainers: []
+
+ ## @section Hyperdx goParser Traffic Exposure Parameters
+ ##
+ service:
+ ## @param goParser.service.type Hyperdx goParser service type
+ ##
+ type: ClusterIP
+ ## @param goParser.service.ports.http Hyperdx goParser service HTTP port
+ ##
+ ports:
+ http: 80
+ ## Node ports to expose
+ ## @param goParser.service.nodePorts.http Node port for HTTP
+ ## NOTE: choose port between <30000-32767>
+ ##
+ nodePorts:
+ http: ''
+ ## @param goParser.service.clusterIP Hyperdx goParser service Cluster IP
+ ## e.g.:
+ ## clusterIP: None
+ ##
+ clusterIP: ''
+ ## @param goParser.service.loadBalancerIP Hyperdx goParser service Load Balancer IP
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
+ ##
+ loadBalancerIP: ''
+ ## @param goParser.service.loadBalancerSourceRanges Hyperdx goParser service Load Balancer sources
+ ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+ ## e.g:
+ ## loadBalancerSourceRanges:
+ ## - 10.10.10.0/24
+ ##
+ loadBalancerSourceRanges: []
+ ## @param goParser.service.externalTrafficPolicy Hyperdx goParser service external traffic policy
+ ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+ 
## + externalTrafficPolicy: Cluster + ## @param goParser.service.annotations Additional custom annotations for Hyperdx goParser service + ## + annotations: {} + ## @param goParser.service.extraPorts Extra ports to expose in Hyperdx goParser service (normally used with the `sidecars` value) + ## + extraPorts: [] + ## @param goParser.service.sessionAffinity Control where goParser requests go, to the same pod or round-robin + ## Values: RestIP or None + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/ + ## + sessionAffinity: None + ## @param goParser.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## restIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + +## @section Hyperdx miner Parameters +## +## +miner: + ## @param miner.enabled Enable Hyperdx miner + ## + enabled: true + ## @param miner.replicaCount Number of Hyperdx miner replicas to deploy + ## + replicaCount: 1 + + ## @param miner.defaultConfig [string] Default configuration for the Hyperdx miner service + ## + defaultConfig: | + HYPERDX_API_KEY: {{ .Values.apiKey }} + HYPERDX_ENABLE_ADVANCED_NETWORK_CAPTURE: "1" + HYPERDX_LOG_LEVEL: {{ .Values.logLevel }} + OTEL_EXPORTER_OTLP_ENDPOINT: {{ include "hyperdx.otelCollector.url" . }} + OTEL_LOG_LEVEL: {{ .Values.logLevel }} + OTEL_SERVICE_NAME: hdx-oss-miner + + ## @param miner.extraConfig Extra configuration for the Hyperdx miner service + ## + extraConfig: {} + + ## @param miner.existingConfigmap The name of an existing ConfigMap with the default configuration + ## + existingConfigmap: '' + ## @param miner.extraConfigExistingConfigmap The name of an existing ConfigMap with extra configuration + ## + extraConfigExistingConfigmap: '' + + ## Hyperdx miner image + ## ref: https://github.com/hyperdxio/hyperdx/pkgs/container/hyperdx + ## @param miner.image.registry [default: ghcr.io] miner image registry + ## @param miner.image.repository [default: hyperdxio/hyperdx] miner image repository + ## @skip miner.image.tag miner image tag (immutable tags are recommended) + ## @param miner.image.digest miner image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) + ## @param miner.image.pullPolicy miner image pull policy + ## @param miner.image.pullSecrets miner image pull secrets + ## + image: + registry: ghcr.io + repository: hyperdxio/hyperdx + tag: 1.6.0-miner + digest: '' + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
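+ ## For illustration only, a fully overridden miner image block could look like this
+ ## (values mirror the defaults above, with an explicit pull policy and pull secret):
+ ##   image:
+ ##     registry: ghcr.io
+ ##     repository: hyperdxio/hyperdx
+ ##     tag: 1.6.0-miner
+ ##     pullPolicy: Always
+ ##     pullSecrets:
+ ##       - myRegistryKeySecretName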
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## @param miner.containerPorts.http Hyperdx miner HTTP container port + ## + containerPorts: + http: 5123 + ## Configure extra options for Hyperdx miner containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param miner.livenessProbe.enabled Enable livenessProbe on Hyperdx miner containers + ## @param miner.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param miner.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param miner.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param miner.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param miner.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param miner.readinessProbe.enabled Enable readinessProbe on Hyperdx miner containers + ## @param miner.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param miner.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param miner.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param miner.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param miner.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param miner.startupProbe.enabled Enable startupProbe on Hyperdx miner containers + ## @param miner.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param miner.startupProbe.periodSeconds Period seconds for startupProbe + ## @param miner.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param miner.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param miner.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param miner.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param miner.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param miner.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## Hyperdx miner resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param miner.resources.limits The resources limits for the Hyperdx miner containers + ## @param miner.resources.requests The requested resources for the Hyperdx miner containers + ## + resources: + limits: {} + requests: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param miner.podSecurityContext.enabled Enabled Hyperdx miner pods' Security Context + ## @param miner.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## 
@param miner.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param miner.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param miner.podSecurityContext.fsGroup Set Hyperdx miner pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param miner.containerSecurityContext.enabled Enabled containers' Security Context + ## @param miner.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param miner.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param miner.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param miner.containerSecurityContext.privileged Set container's Security Context privileged + ## @param miner.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param miner.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param miner.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param miner.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: null + runAsUser: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: false + allowPrivilegeEscalation: false + capabilities: + drop: ['ALL'] + seccompProfile: + type: 'RuntimeDefault' + + ## @param miner.command Override default container command (useful when using custom images) + ## + command: [] + ## @param miner.args Override default container args (useful when using custom images) + ## + args: [] + ## @param miner.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param miner.hostAliases Hyperdx miner pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param miner.podLabels Extra labels for Hyperdx miner pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param miner.podAnnotations Annotations for Hyperdx miner pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param miner.podAffinityPreset Pod affinity preset. Ignored if `miner.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: '' + ## @param miner.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `miner.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node miner.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param miner.nodeAffinityPreset.type Node affinity preset type. Ignored if `miner.affinity` is set. 
Allowed values: `soft` or `hard` + ## + type: '' + ## @param miner.nodeAffinityPreset.key Node label key to match. Ignored if `miner.affinity` is set + ## + key: '' + ## @param miner.nodeAffinityPreset.values Node label values to match. Ignored if `miner.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param miner.affinity Affinity for Hyperdx miner pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `miner.podAffinityPreset`, `miner.podAntiAffinityPreset`, and `miner.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param miner.nodeSelector Node labels for Hyperdx miner pods assignment + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param miner.tolerations Tolerations for Hyperdx miner pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param miner.updateStrategy.type Hyperdx miner statefulset strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + + ## @param miner.priorityClassName Hyperdx miner pods' priorityClassName + ## + priorityClassName: '' + ## @param miner.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param miner.schedulerName Name of the k8s scheduler (other than default) for Hyperdx miner pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: '' + ## @param miner.terminationGracePeriodSeconds Seconds Redmine pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: '' + ## @param miner.lifecycleHooks for the Hyperdx miner container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param miner.extraEnvVars Array with extra environment variables to add to Hyperdx miner nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param miner.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Hyperdx miner nodes + ## + extraEnvVarsCM: '' + ## @param miner.extraEnvVarsSecret Name of existing Secret containing extra env vars for Hyperdx miner nodes + ## + extraEnvVarsSecret: '' + ## @param miner.extraVolumes Optionally specify extra list of additional volumes for the Hyperdx miner pod(s) + ## + extraVolumes: [] + ## @param miner.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Hyperdx miner container(s) + ## + extraVolumeMounts: [] + ## @param miner.sidecars Add additional sidecar containers to the Hyperdx miner pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param miner.initContainers Add additional init containers to the Hyperdx miner pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + 
## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + + ## @section Hyperdx Rest Traffic Exposure Parameters + ## + service: + ## @param miner.service.type Hyperdx miner service type + ## + type: ClusterIP + ## @param miner.service.ports.http Hyperdx miner service HTTP port + ## + ports: + http: 80 + ## Node ports to expose + ## @param miner.service.nodePorts.http Node port for HTTP + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + http: '' + ## @param miner.service.clusterIP Hyperdx miner service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: '' + ## @param miner.service.loadBalancerIP Hyperdx miner service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: '' + ## @param miner.service.loadBalancerSourceRanges Hyperdx miner service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param miner.service.externalTrafficPolicy Hyperdx miner service external traffic policy + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-miner-source-ip + ## + externalTrafficPolicy: Cluster + ## @param miner.service.annotations Additional custom annotations for Hyperdx miner service + ## + annotations: {} + ## @param miner.service.extraPorts Extra ports to expose in Hyperdx miner service (normally used with the `sidecars` value) + ## + extraPorts: [] + ## @param miner.service.sessionAffinity Control where miner requests go, to the same pod or round-robin + ## Values: RestIP or None + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/ + ## + sessionAffinity: None + ## @param miner.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## restIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + +## @section Hyperdx open-telemetry collector Parameters +## +## +otelCollector: + ## @param otelCollector.enabled Enable Hyperdx open-telemetry collector + ## + enabled: true + ## @param otelCollector.replicaCount Number of Hyperdx otelCollector replicas to deploy + ## + replicaCount: 1 + + ## @param otelCollector.defaultConfig [string] Default configuration for the Hyperdx otelCollector service + ## + defaultConfig: | + HYPERDX_LOG_LEVEL: {{ .Values.logLevel }} + INGESTOR_API_URL: {{ include "hyperdx.ingestor.url" . 
}} + ## @param otelCollector.extraConfig Extra configuration for the Hyperdx otelCollector service + ## + extraConfig: {} + + ## @param otelCollector.existingConfigmap The name of an existing ConfigMap with the default configuration + ## + existingConfigmap: '' + ## @param otelCollector.extraConfigExistingConfigmap The name of an existing ConfigMap with extra configuration + ## + extraConfigExistingConfigmap: '' + + ## Hyperdx otelCollector image + ## ref: https://github.com/hyperdxio/hyperdx/pkgs/container/hyperdx + ## @param otelCollector.image.registry [default: ghcr.io] otelCollector image registry + ## @param otelCollector.image.repository [default: hyperdxio/hyperdx] otelCollector image repository + ## @skip otelCollector.image.tag otelCollector image tag (immutable tags are recommended) + ## @param otelCollector.image.digest otelCollector image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) + ## @param otelCollector.image.pullPolicy otelCollector image pull policy + ## @param otelCollector.image.pullSecrets otelCollector image pull secrets + ## + image: + registry: ghcr.io + repository: hyperdxio/hyperdx + tag: 1.6.0-otel-collector + digest: '' + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## @param otelCollector.containerPorts.http Hyperdx otelCollector HTTP container port + ## @param otelCollector.containerPorts.health Hyperdx otelCollector health container port + ## @param otelCollector.containerPorts.grpc Hyperdx otelCollector grpc container port + ## @param otelCollector.containerPorts.pprof Hyperdx otelCollector pprof container port + ## @param otelCollector.containerPorts.fluentd Hyperdx otelCollector fluentd container port + ## @param otelCollector.containerPorts.zpages Hyperdx otelCollector zpages container port + ## @param otelCollector.containerPorts.metrics Hyperdx otelCollector metrics container port + ## @param otelCollector.containerPorts.zipkin Hyperdx otelCollector zipkin container port + ## + containerPorts: + http: 4318 + health: 13133 + grpc: 4317 + pprof: 1888 + fluentd: 24225 + zpages: 55679 + metrics: 8888 + zipkin: 9411 + ## Configure extra options for Hyperdx otelCollector containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param otelCollector.livenessProbe.enabled Enable livenessProbe on Hyperdx otelCollector containers + ## @param otelCollector.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param otelCollector.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param otelCollector.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param otelCollector.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param otelCollector.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + 
successThreshold: 1 + ## @param otelCollector.readinessProbe.enabled Enable readinessProbe on Hyperdx otelCollector containers + ## @param otelCollector.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param otelCollector.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param otelCollector.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param otelCollector.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param otelCollector.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param otelCollector.startupProbe.enabled Enable startupProbe on Hyperdx otelCollector containers + ## @param otelCollector.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param otelCollector.startupProbe.periodSeconds Period seconds for startupProbe + ## @param otelCollector.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param otelCollector.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param otelCollector.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param otelCollector.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param otelCollector.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param otelCollector.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## Hyperdx otelCollector resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param otelCollector.resources.limits The resources limits for the Hyperdx otelCollector containers + ## @param otelCollector.resources.requests The requested resources for the Hyperdx otelCollector containers + ## + resources: + limits: {} + requests: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param otelCollector.podSecurityContext.enabled Enabled Hyperdx otelCollector pods' Security Context + ## @param otelCollector.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param otelCollector.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param otelCollector.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param otelCollector.podSecurityContext.fsGroup Set Hyperdx otelCollector pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param otelCollector.containerSecurityContext.enabled Enabled containers' Security Context + ## @param otelCollector.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param otelCollector.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param 
otelCollector.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param otelCollector.containerSecurityContext.privileged Set container's Security Context privileged + ## @param otelCollector.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param otelCollector.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param otelCollector.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param otelCollector.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: null + runAsUser: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: false + allowPrivilegeEscalation: false + capabilities: + drop: ['ALL'] + seccompProfile: + type: 'RuntimeDefault' + + ## @param otelCollector.command Override default container command (useful when using custom images) + ## + command: [] + ## @param otelCollector.args Override default container args (useful when using custom images) + ## + args: [] + ## @param otelCollector.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param otelCollector.hostAliases Hyperdx otelCollector pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param otelCollector.podLabels Extra labels for Hyperdx otelCollector pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param otelCollector.podAnnotations Annotations for Hyperdx otelCollector pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param otelCollector.podAffinityPreset Pod affinity preset. Ignored if `otelCollector.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: '' + ## @param otelCollector.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `otelCollector.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node otelCollector.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param otelCollector.nodeAffinityPreset.type Node affinity preset type. Ignored if `otelCollector.affinity` is set. Allowed values: `soft` or `hard` + ## + type: '' + ## @param otelCollector.nodeAffinityPreset.key Node label key to match. Ignored if `otelCollector.affinity` is set + ## + key: '' + ## @param otelCollector.nodeAffinityPreset.values Node label values to match. Ignored if `otelCollector.affinity` is set + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param otelCollector.affinity Affinity for Hyperdx otelCollector pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `otelCollector.podAffinityPreset`, `otelCollector.podAntiAffinityPreset`, and `otelCollector.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param otelCollector.nodeSelector Node labels for Hyperdx otelCollector pods assignment + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param otelCollector.tolerations Tolerations for Hyperdx otelCollector pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param otelCollector.updateStrategy.type Hyperdx otelCollector statefulset strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + + ## @param otelCollector.priorityClassName Hyperdx otelCollector pods' priorityClassName + ## + priorityClassName: '' + ## @param otelCollector.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param otelCollector.schedulerName Name of the k8s scheduler (other than default) for Hyperdx otelCollector pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: '' + ## @param otelCollector.terminationGracePeriodSeconds Seconds Redmine pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: '' + ## @param otelCollector.lifecycleHooks for the Hyperdx otelCollector container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param otelCollector.extraEnvVars Array with extra environment variables to add to Hyperdx otelCollector nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param otelCollector.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Hyperdx otelCollector nodes + ## + extraEnvVarsCM: '' + ## @param otelCollector.extraEnvVarsSecret Name of existing Secret containing extra env vars for Hyperdx otelCollector nodes + ## + extraEnvVarsSecret: '' + ## @param otelCollector.extraVolumes Optionally specify extra list of additional volumes for the Hyperdx otelCollector pod(s) + ## + extraVolumes: [] + ## @param otelCollector.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Hyperdx otelCollector container(s) + ## + extraVolumeMounts: [] + ## @param otelCollector.sidecars Add additional sidecar containers to the Hyperdx otelCollector pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param otelCollector.initContainers Add additional init containers to the Hyperdx otelCollector pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: 
+ ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + + ## @section Hyperdx Rest Traffic Exposure Parameters + ## + service: + ## @param otelCollector.service.type Hyperdx otelCollector service type + ## + type: ClusterIP + ## @param otelCollector.service.ports.http Hyperdx otelCollector service HTTP port + ## + ports: + http: 80 + ## Node ports to expose + ## @param otelCollector.service.nodePorts.http Node port for HTTP + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + http: '' + ## @param otelCollector.service.clusterIP Hyperdx otelCollector service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: '' + ## @param otelCollector.service.loadBalancerIP Hyperdx otelCollector service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: '' + ## @param otelCollector.service.loadBalancerSourceRanges Hyperdx otelCollector service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param otelCollector.service.externalTrafficPolicy Hyperdx otelCollector service external traffic policy + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-otelCollector-source-ip + ## + externalTrafficPolicy: Cluster + ## @param otelCollector.service.annotations Additional custom annotations for Hyperdx otelCollector service + ## + annotations: {} + ## @param otelCollector.service.extraPorts Extra ports to expose in Hyperdx otelCollector service (normally used with the `sidecars` value) + ## + extraPorts: [] + ## @param otelCollector.service.sessionAffinity Control where otelCollector requests go, to the same pod or round-robin + ## Values: RestIP or None + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/ + ## + sessionAffinity: None + ## @param otelCollector.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## restIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + +## @section Hyperdx task check alerts Parameters +## +## +taskCheckAlerts: + ## @param taskCheckAlerts.enabled Enable the task-check-alerts Cronjob which checks for alert criteria and fires off any alerts as needed + ## + enabled: false + + ## @param taskCheckAlerts.defaultConfig [string] Hyperdx taskCheckAlerts default configuration + ## + defaultConfig: | + APP_TYPE: 'scheduled-task' + CLICKHOUSE_HOST: {{ include "hyperdx.clickhouse.host" . | quote }} + CLICKHOUSE_LOG_LEVEL: {{ .Values.logLevel }} + CLICKHOUSE_PASSWORD: worker + CLICKHOUSE_USER: worker + FRONTEND_URL: {{ include "hyperdx.app.publicUrl" . }} + HYPERDX_API_KEY: {{ .Values.apiKey }} + HYPERDX_LOG_LEVEL: {{ .Values.logLevel }} + HDX_NODE_ADVANCED_NETWORK_CAPTURE: "1" + HDX_NODE_BETA_MODE: "0" + HDX_NODE_CONSOLE_CAPTURE: "1" + INGESTOR_API_URL: {{ include "hyperdx.ingestor.url" . }} + MINER_API_URL: {{ include "hyperdx.miner.url" . }} + MONGO_URI: {{ include "hyperdx.mongodb.uri" . }} + OTEL_EXPORTER_OTLP_ENDPOINT: {{ include "hyperdx.otelCollector.url" . }} + OTEL_SERVICE_NAME: 'hdx-oss-task-check-alerts' + REDIS_URL: {{ include "hyperdx.redis.url" . 
}} + + ## Hyperdx task check alerts image + ## ref: https://github.com/hyperdxio/hyperdx/pkgs/container/hyperdx + ## @param taskCheckAlerts.image.registry [default: ghcr.io] taskCheckAlerts image registry + ## @param taskCheckAlerts.image.repository [default: hyperdxio/hyperdx] taskCheckAlerts image repository + ## @skip taskCheckAlerts.image.tag taskCheckAlerts image tag (immutable tags are recommended) + ## @param taskCheckAlerts.image.digest taskCheckAlerts image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) + ## @param taskCheckAlerts.image.pullPolicy taskCheckAlerts image pull policy + ## @param taskCheckAlerts.image.pullSecrets taskCheckAlerts image pull secrets + ## + image: + registry: ghcr.io + repository: hyperdxio/hyperdx + tag: 1.6.0-api + digest: '' + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## Cronjob configurations for Hyperdx task check alerts + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/ + cronjob: + ## @param taskCheckAlerts.cronjob.schedule Kubernetes CronJob schedule + ## + schedule: '* * * * *' + ## @param taskCheckAlerts.cronjob.concurrencyPolicy Set the cronjob parameter concurrencyPolicy + ## + concurrencyPolicy: Forbid + ## @param taskCheckAlerts.cronjob.failedJobsHistoryLimit Set the cronjob parameter failedJobsHistoryLimit + ## + failedJobsHistoryLimit: 1 + ## @param taskCheckAlerts.cronjob.successfulJobsHistoryLimit Set the cronjob parameter successfulJobsHistoryLimit + ## + successfulJobsHistoryLimit: 3 + ## @param taskCheckAlerts.cronjob.ttlSecondsAfterFinished Set the cronjob parameter ttlSecondsAfterFinished + ## + ttlSecondsAfterFinished: '' + ## @param taskCheckAlerts.cronjob.restartPolicy Set the cronjob parameter restartPolicy + ## + restartPolicy: OnFailure + ## @param taskCheckAlerts.cronjob.affinity Affinity for CronJob pod assignment + ## + affinity: {} + ## @param taskCheckAlerts.cronjob.command Override default container command (useful when using custom images) + ## + command: [] + ## @param taskCheckAlerts.cronjob.args Override default container args (useful when using custom images) + ## + args: [] + ## @param + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param taskCheckAlerts.cronjob.containerSecurityContext.enabled Enabled containers' Security Context + ## @param taskCheckAlerts.cronjob.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param taskCheckAlerts.cronjob.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param taskCheckAlerts.cronjob.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param taskCheckAlerts.cronjob.containerSecurityContext.privileged Set container's Security Context privileged + ## @param taskCheckAlerts.cronjob.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param 
taskCheckAlerts.cronjob.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param taskCheckAlerts.cronjob.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param taskCheckAlerts.cronjob.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: null + runAsUser: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: false + allowPrivilegeEscalation: false + capabilities: + drop: ['ALL'] + seccompProfile: + type: 'RuntimeDefault' + ## @param taskCheckAlerts.cronjob.podAnnotations Additional pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param taskCheckAlerts.cronjob.podLabels Additional pod labels + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + +## ========================================== +## Dependency charts +## ========================================== + +## @section Kong chart configurations +## ref: https://github.com/bitnami/charts/blob/main/bitnami/kong/values.yaml +## +kong: + ## @param kong.enabled Enable Kong + ## + enabled: true + + ## Bitnami kong image version + ## ref: https://hub.docker.com/r/bitnami/kong/tags/ + ## @param kong.image.registry [default: docker.io] kong image registry + ## @param kong.image.repository [default: bitnami/kong] kong image repository + ## @skip kong.image.tag kong image tag (immutable tags are recommended) + ## @param kong.image.digest kong image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param kong.image.pullPolicy kong image pull policy + ## @param kong.image.pullSecrets Specify docker-registry secret names as an array + ## @param kong.image.debug Enable image debug mode + ## + image: + registry: docker.io + repository: bitnami/kong + tag: 3.4.2-debian-11-r1 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## E.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Enable debug mode + ## + debug: false + + ## @param kong.replicaCount Number of Kong replicas + ## + replicaCount: 1 + + ## @param kong.initContainers Add additional init containers to the Kong pods + ## e.g. + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + initContainers: | + - name: render-kong-declarative-conf + image: '{{ include "kong.image" . }}' + command: + - /bin/bash + args: + - -ec + - | + #!/bin/bash + # https://github.com/bitnami/containers/blob/main/bitnami/kong/3/debian-11/prebuildfs/opt/bitnami/scripts/liblog.sh + . 
/opt/bitnami/scripts/liblog.sh + info "Rendering hyperdx declarative config template for Kong" + + # https://github.com/bitnami/render-template + render-template /bitnami/kong/declarative-template/kong.yml.tpl > "/bitnami/kong/declarative-conf/kong.yml" + volumeMounts: + - name: declarative-conf-template + mountPath: /bitnami/kong/declarative-template/ + - name: rendered-declarative-conf + mountPath: /bitnami/kong/declarative-conf/ + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 6 }} + {{- end }} + + ## @param kong.ingressController.enabled Enable Kong Ingress Controller + ## + ingressController: + enabled: false + ## @param kong.extraVolumes [array] Adittional volumes to be added to the Kong deployment pods (evaluated as template). Requires setting `kong.kong.extraVolumeMounts` + ## + extraVolumes: + - name: declarative-conf-template + configMap: + name: "hyperdx-kong-declarative-conf" + - name: rendered-declarative-conf + emptyDir: {} + ## @param kong.kong.extraVolumeMounts [array] Additional volumeMounts to be added to the Kong Container (evaluated as template). Normally used with `kong.extraVolumes`. + ## @param kong.kong.extraEnvVars [array] Additional env variables to configure Kong. + ## ref: https://docs.konghq.com/gateway/latest/production/environment-variables/ + ## + kong: + extraVolumeMounts: + - name: rendered-declarative-conf + mountPath: /bitnami/kong/declarative-conf/ + extraEnvVars: + - name: KONG_DECLARATIVE_CONFIG + value: "/bitnami/kong/declarative-conf/kong.yml" + - name: KONG_DNS_ORDER + value: LAST,A,CNAME + - name: KONG_PLUGINS + value: request-transformer,cors,key-auth,acl + - name: KONG_NGINX_HTTP_GZIP + value: "on" + - name: KONG_NGINX_HTTP_GZIP_VARY + value: "on" + # do not add text/html to gzip_types because it's added by default and will generate + # nginx: [warn] duplicate MIME type "text/html" + # http://stackoverflow.com/a/6475493/703144 + - name: KONG_NGINX_HTTP_GZIP_TYPES + value: "application/json application/x-javascript application/xml application/xml+rss text/css text/javascript text/plain text/xml" + - name: KONG_NGINX_HTTP_GZIP_PROXIED + value: "any" + + ## @param kong.ingress.enabled Enable Ingress rule + ## + ingress: + enabled: false + ## @param kong.service.loadBalancerIP Kubernetes service LoadBalancer IP + ## @param kong.service.type Kong Kubernetes service type + ## @param kong.service.ports.proxyHttp Kong proxy service HTTP port + ## + service: + loadBalancerIP: "" + type: LoadBalancer + ports: + proxyHttp: 80 + + ## @param kong.database Select which database backend Kong will use. Can be 'postgresql', 'cassandra' or 'off'. 
+ ## Default to 'off' as declarative configuration is enabled by default so we don't need the database + ## ref: https://docs.konghq.com/gateway/latest/production/deployment-topologies/db-less-and-declarative-config/ + ## + database: 'off' + ## @param kong.postgresql.enabled Switch to enable or disable the PostgreSQL helm chart inside the Kong subchart + ## db is not needed since we use declarative configuration + ## + postgresql: + enabled: false + +## Mongodb chart configurations +## ref: https://github.com/bitnami/charts/blob/main/bitnami/mongodb/values.yaml +## +mongodb: + ## @param mongodb.enabled Switch to enable or disable the mongodb helm chart + ## + enabled: true + ## Bitnami mongodb image version + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## @param mongodb.image.registry [default: docker.io] mongodb image registry + ## @param mongodb.image.repository [default: bitnami/mongodb] mongodb image repository + ## @skip mongodb.image.tag mongodb image tag (immutable tags are recommended) + ## @param mongodb.image.digest mongodb image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param mongodb.image.pullPolicy mongodb image pull policy + ## @param mongodb.image.pullSecrets Specify image pull secrets + ## @param mongodb.image.debug Specify if debug values should be set + ## + image: + registry: docker.io + repository: bitnami/mongodb + tag: 7.0.5-debian-11-r3 + digest: "" + ## Specify a imagePullPolicy + ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## + debug: false + + ## Authentication + ## @param mongodb.auth.enabled Enable authentication + ## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/ + ## + ## @param mongodb.auth.rootUser root user + ## @param mongodb.auth.rootPassword root password + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/mongodb#setting-the-root-user-and-password-on-first-run + ## + auth: + enabled: false + rootUser: root + rootPassword: "root" + + ## @param mongodb.architecture MongoDB architecture (`standalone` or `replicaset`) + ## + architecture: standalone + ## @param mongodb.service.ports.mongodb mongodb service port + ## + service: + ports: + mongodb: 27017 + +## Redis chart configuration +## ref: https://github.com/bitnami/charts/blob/main/bitnami/redis/values.yaml +## +redis: + ## @param redis.enabled Switch to enable or disable the Redis helm chart + ## + enabled: true + ## Bitnami Redis image version + ## ref: https://hub.docker.com/r/bitnami/redis/tags/ + ## @param redis.image.registry [default: docker.io] Redis image registry + ## @param redis.image.repository [default: bitnami/redis] Redis image repository + ## @skip redis.image.tag Redis image tag (immutable tags are recommended) + ## @param redis.image.digest Redis image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag + ## @param redis.image.pullPolicy Redis image pull policy + ## @param redis.image.pullSecrets Specify image pull secrets + ## @param redis.image.debug Specify if debug values should be set + ## + image: + registry: docker.io + repository: bitnami/redis + tag: 7.2.4-debian-11-r2 + digest: '' + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## + debug: false + + ## @param redis.architecture Redis architecture. Allowed values: `standalone` or `replication` + ## + architecture: standalone + ## @param redis.service.ports.redis Redis service port + ## + service: + ports: + redis: 5432 + + ## @param redis.auth.enabled Enable password authentication + ## + auth: + enabled: false + +## Clickhouse chart configuration +## ref: https://github.com/bitnami/charts/blob/main/bitnami/clickhouse/values.yaml +## +clickhouse: + ## @param clickhouse.enabled Switch to enable or disable the Clickhouse helm chart + ## + enabled: true + + ## Bitnami ClickHouse image + ## ref: https://hub.docker.com/r/bitnami/clickhouse/tags/ + ## @param clickhouse.image.registry [default: docker.io] ClickHouse image registry + ## @param clickhouse.image.repository [default: bitnami/clickhouse] ClickHouse image repository + ## @skip clickhouse.image.tag ClickHouse image tag (immutable tags are recommended) + ## @param clickhouse.image.digest ClickHouse image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param clickhouse.image.pullPolicy ClickHouse image pull policy + ## @param clickhouse.image.pullSecrets ClickHouse image pull secrets + ## @param clickhouse.image.debug Enable ClickHouse image debug mode + ## + image: + registry: docker.io + repository: bitnami/clickhouse + tag: 23.12.2-debian-11-r0 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ## e.g:
+  ## pullSecrets:
+  ##   - myRegistryKeySecretName
+  ##
+  pullSecrets: []
+  ## Enable debug mode
+  ##
+  debug: false
+
+  ## Authentication
+  ## @param clickhouse.auth.username ClickHouse Admin username
+  ## @param clickhouse.auth.password ClickHouse Admin password
+  ## @param clickhouse.auth.existingSecret Name of a secret containing the Admin password
+  ## @param clickhouse.auth.existingSecretKey Name of the key inside the existing secret
+  ##
+  auth:
+    username: default
+    password: "default"
+    existingSecret: ""
+    existingSecretKey: ""
+
+  ## @param clickhouse.shards Number of ClickHouse shards to deploy
+  ##
+  shards: 1
+
+  ## @param clickhouse.replicaCount Number of ClickHouse replicas per shard to deploy
+  ## If keeper is enabled, this is also the keeper count (keeper clusters are formed per shard).
+  ##
+  replicaCount: 1
+
+  ## @section ClickHouse keeper configuration parameters
+  ## @param clickhouse.keeper.enabled Deploy ClickHouse keeper. Support is experimental.
+  ##
+  keeper:
+    enabled: false
+  ## @section Zookeeper subchart parameters
+  ##
+  ## @param clickhouse.zookeeper.enabled Deploy Zookeeper subchart
+  ##
+  zookeeper:
+    enabled: false
+
+  ## ClickHouse service parameters
+  ##
+  service:
+    ## @param clickhouse.service.type ClickHouse service type
+    ##
+    type: ClusterIP
+    ## @param clickhouse.service.ports.http ClickHouse service HTTP port
+    ## @param clickhouse.service.ports.https ClickHouse service HTTPS port
+    ## @param clickhouse.service.ports.tcp ClickHouse service TCP port
+    ## @param clickhouse.service.ports.tcpSecure ClickHouse service TCP (secure) port
+    ## @param clickhouse.service.ports.keeper ClickHouse keeper TCP container port
+    ## @param clickhouse.service.ports.keeperSecure ClickHouse keeper TCP (secure) container port
+    ## @param clickhouse.service.ports.keeperInter ClickHouse keeper interserver TCP container port
+    ## @param clickhouse.service.ports.mysql ClickHouse service MySQL port
+    ## @param clickhouse.service.ports.postgresql ClickHouse service PostgreSQL port
+    ## @param clickhouse.service.ports.interserver ClickHouse service Interserver port
+    ## @param clickhouse.service.ports.metrics ClickHouse service metrics port
+    ##
+    ports:
+      http: 8123
+      https: 443
+      tcp: 9000
+      tcpSecure: 9440
+      keeper: 2181
+      keeperSecure: 3181
+      keeperInter: 9444
+      mysql: 9004
+      postgresql: 9005
+      interserver: 9009
+      metrics: 8001
+
+  ## @param clickhouse.extraOverrides Extra configuration overrides (evaluated as a template) apart from the default
+  ##
+  extraOverrides: |
+    <clickhouse>
+      <query_log>
+        <database>system</database>
+        <table>query_log</table>
+      </query_log>
+
+      <max_connections>4096</max_connections>
+      <keep_alive_timeout>64</keep_alive_timeout>
+      <max_concurrent_queries>100</max_concurrent_queries>
+      <uncompressed_cache_size>8589934592</uncompressed_cache_size>
+      <mark_cache_size>5368709120</mark_cache_size>
+
+      <default_profile>default</default_profile>
+      <default_database>default</default_database>
+      <timezone>UTC</timezone>
+      <mlock_executable>false</mlock_executable>
+
+      <opentelemetry_span_log>
+        <engine>
+          engine MergeTree
+          partition by toYYYYMM(finish_date)
+          order by (finish_date, finish_time_us, trace_id)
+        </engine>
+        <database>system</database>
+        <table>opentelemetry_span_log</table>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+      </opentelemetry_span_log>
+
+      <distributed_ddl>
+        <path>/clickhouse/task_queue/ddl</path>
+      </distributed_ddl>
+
+      <format_schema_path>/var/lib/clickhouse/format_schemas/</format_schema_path>
+    </clickhouse>
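+
+  ## NOTE: `clickhouse.extraOverrides` is evaluated as a template, so Helm template
+  ## expressions can be embedded directly in the XML. An illustrative sketch (not part
+  ## of the default configuration) that sets the ClickHouse display_name from the
+  ## release name:
+  ##
+  ## extraOverrides: |
+  ##   <clickhouse>
+  ##     <display_name>{{ .Release.Name }}</display_name>
+  ##   </clickhouse>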
+
+  ## @param clickhouse.usersExtraOverrides Users extra configuration overrides (evaluated as a template) apart from the default
+  ##
+  usersExtraOverrides: |
+    <clickhouse>
+      <profiles>
+        <default>
+          <max_memory_usage>10000000000</max_memory_usage>
+          <use_uncompressed_cache>0</use_uncompressed_cache>
+          <load_balancing>in_order</load_balancing>
+          <log_queries>1</log_queries>
+        </default>
+      </profiles>
+
+      <users>
+        <default>
+          <profile>default</profile>
+          <networks>
+            <ip>::/0</ip>
+          </networks>
+          <quota>default</quota>
+        </default>
+        <api>
+          <password>api</password>
+          <profile>default</profile>
+          <networks>
+            <ip>::/0</ip>
+          </networks>
+          <quota>default</quota>
+        </api>
+        <aggregator>
+          <password>aggregator</password>
+          <profile>default</profile>
+          <networks>
+            <ip>::/0</ip>
+          </networks>
+          <quota>default</quota>
+        </aggregator>
+        <worker>
+          <password>worker</password>
+          <profile>default</profile>
+          <networks>
+            <ip>::/0</ip>
+          </networks>
+          <quota>default</quota>
+        </worker>
+      </users>
+
+      <quotas>
+        <default>
+          <interval>
+            <duration>3600</duration>
+            <queries>0</queries>
+            <errors>0</errors>
+            <result_rows>0</result_rows>
+            <read_rows>0</read_rows>
+            <execution_time>0</execution_time>
+          </interval>
+        </default>
+      </quotas>
+    </clickhouse>
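+
+## Example (illustrative only): a minimal override file that enables the
+## alert-checking CronJob defined above and relaxes its schedule to every five
+## minutes. The file name `my-values.yaml` is just a placeholder.
+##
+##   # my-values.yaml
+##   taskCheckAlerts:
+##     enabled: true
+##     cronjob:
+##       schedule: '*/5 * * * *'
+##
+## Install or upgrade the chart with it:
+##   helm upgrade --install hyperdx ./charts/hyperdx -f my-values.yaml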